1 /*
2 * Copyright (c) 2017-2021 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stddef.h>
8 #include <stdbool.h>
9 #include <errno.h>
10
11 #include <zephyr/kernel.h>
12 #include <soc.h>
13 #include <zephyr/device.h>
14 #include <zephyr/drivers/entropy.h>
15 #include <zephyr/bluetooth/hci_types.h>
16
17 #include "hal/cpu.h"
18 #include "hal/ecb.h"
19 #include "hal/ccm.h"
20 #include "hal/cntr.h"
21 #include "hal/ticker.h"
22
23 #include "util/util.h"
24 #include "util/mem.h"
25 #include "util/mfifo.h"
26 #include "util/memq.h"
27 #include "util/mayfly.h"
28 #include "util/dbuf.h"
29
30 #include "ticker/ticker.h"
31
32 #include "pdu_df.h"
33 #include "lll/pdu_vendor.h"
34 #include "pdu.h"
35
36 #include "lll.h"
37 #include "lll/lll_vendor.h"
38 #include "lll/lll_adv_types.h"
39 #include "lll_adv.h"
40 #include "lll/lll_adv_pdu.h"
41 #include "lll_chan.h"
42 #include "lll_scan.h"
43 #include "lll/lll_df_types.h"
44 #include "lll_sync.h"
45 #include "lll_sync_iso.h"
46 #include "lll_iso_tx.h"
47 #include "lll_conn.h"
48 #include "lll_conn_iso.h"
49 #include "lll_df.h"
50
51 #include "ull_adv_types.h"
52 #include "ull_scan_types.h"
53 #include "ull_sync_types.h"
54 #include "ll_sw/ull_tx_queue.h"
55 #include "ull_conn_types.h"
56 #include "ull_filter.h"
57 #include "ull_df_types.h"
58 #include "ull_df_internal.h"
59
60 #if defined(CONFIG_BT_CTLR_USER_EXT)
61 #include "ull_vendor.h"
62 #endif /* CONFIG_BT_CTLR_USER_EXT */
63
64 #include "isoal.h"
65 #include "ll_feat_internal.h"
66 #include "ull_internal.h"
67 #include "ull_chan_internal.h"
68 #include "ull_iso_internal.h"
69 #include "ull_adv_internal.h"
70 #include "ull_scan_internal.h"
71 #include "ull_sync_internal.h"
72 #include "ull_sync_iso_internal.h"
73 #include "ull_central_internal.h"
74 #include "ull_iso_types.h"
75 #include "ull_conn_internal.h"
76 #include "ull_conn_iso_types.h"
77 #include "ull_central_iso_internal.h"
78 #include "ull_llcp_internal.h"
79 #include "ull_llcp.h"
80
81 #include "ull_conn_iso_internal.h"
82 #include "ull_peripheral_iso_internal.h"
83
84 #include "ll.h"
85 #include "ll_feat.h"
86 #include "ll_test.h"
87 #include "ll_settings.h"
88
89 #include "hal/debug.h"
90
91 #if defined(CONFIG_BT_BROADCASTER)
92 #define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1)
93 #if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
94 #define BT_ADV_AUX_TICKER_NODES ((TICKER_ID_ADV_AUX_LAST) - \
95 (TICKER_ID_ADV_AUX_BASE) + 1)
96 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
97 #define BT_ADV_SYNC_TICKER_NODES ((TICKER_ID_ADV_SYNC_LAST) - \
98 (TICKER_ID_ADV_SYNC_BASE) + 1)
99 #if defined(CONFIG_BT_CTLR_ADV_ISO)
100 #define BT_ADV_ISO_TICKER_NODES ((TICKER_ID_ADV_ISO_LAST) - \
101 (TICKER_ID_ADV_ISO_BASE) + 1)
102 #else /* !CONFIG_BT_CTLR_ADV_ISO */
103 #define BT_ADV_ISO_TICKER_NODES 0
104 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
105 #else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
106 #define BT_ADV_SYNC_TICKER_NODES 0
107 #define BT_ADV_ISO_TICKER_NODES 0
108 #endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
109 #else /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
110 #define BT_ADV_AUX_TICKER_NODES 0
111 #define BT_ADV_SYNC_TICKER_NODES 0
112 #define BT_ADV_ISO_TICKER_NODES 0
113 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
114 #else /* !CONFIG_BT_BROADCASTER */
115 #define BT_ADV_TICKER_NODES 0
116 #define BT_ADV_AUX_TICKER_NODES 0
117 #define BT_ADV_SYNC_TICKER_NODES 0
118 #define BT_ADV_ISO_TICKER_NODES 0
119 #endif /* !CONFIG_BT_BROADCASTER */
120
121 #if defined(CONFIG_BT_OBSERVER)
122 #define BT_SCAN_TICKER_NODES ((TICKER_ID_SCAN_LAST) - (TICKER_ID_SCAN_STOP) + 1)
123 #if defined(CONFIG_BT_CTLR_ADV_EXT)
124 #if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
125 #define BT_SCAN_AUX_TICKER_NODES 1
126 #else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
127 #define BT_SCAN_AUX_TICKER_NODES ((TICKER_ID_SCAN_AUX_LAST) - \
128 (TICKER_ID_SCAN_AUX_BASE) + 1)
129 #endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
130 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
131 #define BT_SCAN_SYNC_TICKER_NODES ((TICKER_ID_SCAN_SYNC_LAST) - \
132 (TICKER_ID_SCAN_SYNC_BASE) + 1)
133 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
134 #define BT_SCAN_SYNC_ISO_TICKER_NODES ((TICKER_ID_SCAN_SYNC_ISO_LAST) - \
135 (TICKER_ID_SCAN_SYNC_ISO_BASE) + 1 + \
136 (TICKER_ID_SCAN_SYNC_ISO_RESUME_LAST) - \
137 (TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE) + 1)
138 #else /* !CONFIG_BT_CTLR_SYNC_ISO */
139 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
140 #endif /* !CONFIG_BT_CTLR_SYNC_ISO */
141 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
142 #define BT_SCAN_SYNC_TICKER_NODES 0
143 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
144 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
145 #else /* !CONFIG_BT_CTLR_ADV_EXT */
146 #define BT_SCAN_AUX_TICKER_NODES 0
147 #define BT_SCAN_SYNC_TICKER_NODES 0
148 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
149 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
150 #else
151 #define BT_SCAN_TICKER_NODES 0
152 #define BT_SCAN_AUX_TICKER_NODES 0
153 #define BT_SCAN_SYNC_TICKER_NODES 0
154 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
155 #endif
156
157 #if defined(CONFIG_BT_CONN)
158 #define BT_CONN_TICKER_NODES ((TICKER_ID_CONN_LAST) - (TICKER_ID_CONN_BASE) + 1)
159 #else
160 #define BT_CONN_TICKER_NODES 0
161 #endif
162
163 #if defined(CONFIG_BT_CTLR_CONN_ISO)
164 #define BT_CIG_TICKER_NODES ((TICKER_ID_CONN_ISO_LAST) - \
165 (TICKER_ID_CONN_ISO_BASE) + 1 + \
166 (TICKER_ID_CONN_ISO_RESUME_LAST) - \
167 (TICKER_ID_CONN_ISO_RESUME_BASE) + 1)
168
169 #else
170 #define BT_CIG_TICKER_NODES 0
171 #endif
172
173 #if defined(CONFIG_BT_CTLR_USER_EXT)
174 #define USER_TICKER_NODES CONFIG_BT_CTLR_USER_TICKER_ID_RANGE
175 #else
176 #define USER_TICKER_NODES 0
177 #endif
178
179
180 #if defined(CONFIG_BT_CTLR_COEX_TICKER)
181 #define COEX_TICKER_NODES 1
182 /* No. of tickers reserved for coex drivers */
183 #else
184 #define COEX_TICKER_NODES 0
185 #endif
186
187
188 #if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
189 #define FLASH_TICKER_NODES 2 /* No. of tickers reserved for flash
190 * driver
191 */
192 #define TICKER_USER_ULL_HIGH_FLASH_OPS 1 /* No. of additional ticker ULL_HIGH
193 * context operations
194 */
195 #define TICKER_USER_THREAD_FLASH_OPS 1 /* No. of additional ticker thread
196 * context operations
197 */
198 #else
199 #define FLASH_TICKER_NODES 0
200 #define TICKER_USER_ULL_HIGH_FLASH_OPS 0
201 #define TICKER_USER_THREAD_FLASH_OPS 0
202 #endif
203
204 /* Define ticker nodes */
205 /* NOTE: FLASH_TICKER_NODES shall be after Link Layer's list of ticker id
206 * allocations, refer to ll_timeslice_ticker_id_get on how ticker id
207 * used by flash driver is returned.
208 */
209 #define TICKER_NODES (TICKER_ID_ULL_BASE + \
210 BT_ADV_TICKER_NODES + \
211 BT_ADV_AUX_TICKER_NODES + \
212 BT_ADV_SYNC_TICKER_NODES + \
213 BT_ADV_ISO_TICKER_NODES + \
214 BT_SCAN_TICKER_NODES + \
215 BT_SCAN_AUX_TICKER_NODES + \
216 BT_SCAN_SYNC_TICKER_NODES + \
217 BT_SCAN_SYNC_ISO_TICKER_NODES + \
218 BT_CONN_TICKER_NODES + \
219 BT_CIG_TICKER_NODES + \
220 USER_TICKER_NODES + \
221 FLASH_TICKER_NODES + \
222 COEX_TICKER_NODES)
223
224 /* Ticker implementation supports up to 255 ticker node count value */
225 BUILD_ASSERT(TICKER_NODES <= UINT8_MAX);
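/* NOTE: Ticker ids are allocated in the order of the sum above: the Link
 * Layer's own ids first (TICKER_ID_ULL_BASE onwards, covering advertising,
 * scanning, connection and ISO related ids), then the user/vendor ids, and
 * finally the FLASH_TICKER_NODES and COEX_TICKER_NODES ids at the very end.
 * This is why ll_timeslice_ticker_id_get() and ll_coex_ticker_id_get() below
 * derive their ids by subtracting from TICKER_NODES.
 */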
226
227 /* When both central and peripheral are supported, one Rx node each is
228 * needed by connectable advertising and by the initiator to generate the
229 * connection complete event, hence the count is set conditionally.
230 */
231 #if defined(CONFIG_BT_MAX_CONN)
232 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
233 #define BT_CTLR_MAX_CONNECTABLE (1U + MIN(((CONFIG_BT_MAX_CONN) - 1U), \
234 (BT_CTLR_ADV_SET)))
235 #else
236 #define BT_CTLR_MAX_CONNECTABLE MAX(1U, (BT_CTLR_ADV_SET))
237 #endif
238 #define BT_CTLR_MAX_CONN CONFIG_BT_MAX_CONN
239 #else
240 #define BT_CTLR_MAX_CONNECTABLE 0
241 #define BT_CTLR_MAX_CONN 0
242 #endif
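/* Example (hypothetical Kconfig values): with CONFIG_BT_MAX_CONN = 3,
 * BT_CTLR_ADV_SET = 1, and both central and peripheral roles enabled,
 * BT_CTLR_MAX_CONNECTABLE = 1 + MIN(3 - 1, 1) = 2, i.e. one Rx node for the
 * initiator plus one for the single connectable advertising set.
 */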
243
244 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
245 #if defined(CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX)
246 /* Note: Need node for PDU and CTE sample */
247 #if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
248 #define BT_CTLR_ADV_EXT_RX_CNT (MIN(CONFIG_BT_CTLR_SCAN_AUX_CHAIN_COUNT, \
249 CONFIG_BT_PER_ADV_SYNC_MAX) * \
250 CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
251 #else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
252 #define BT_CTLR_ADV_EXT_RX_CNT (CONFIG_BT_CTLR_SCAN_AUX_SET * \
253 CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
254 #endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
255 #else /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
256 #define BT_CTLR_ADV_EXT_RX_CNT 1
257 #endif /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
258 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
259 #define BT_CTLR_ADV_EXT_RX_CNT 0
260 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
261
262 #if !defined(TICKER_USER_LLL_VENDOR_OPS)
263 #define TICKER_USER_LLL_VENDOR_OPS 0
264 #endif /* TICKER_USER_LLL_VENDOR_OPS */
265
266 #if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
267 #define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
268 #endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
269
270 #if !defined(TICKER_USER_ULL_LOW_VENDOR_OPS)
271 #define TICKER_USER_ULL_LOW_VENDOR_OPS 0
272 #endif /* TICKER_USER_ULL_LOW_VENDOR_OPS */
273
274 #if !defined(TICKER_USER_THREAD_VENDOR_OPS)
275 #define TICKER_USER_THREAD_VENDOR_OPS 0
276 #endif /* TICKER_USER_THREAD_VENDOR_OPS */
277
278 /* Define ticker user operations */
279 #if defined(CONFIG_BT_CTLR_LOW_LAT) && \
280 (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
281 /* NOTE: When ticker job is disabled inside radio events then all advertising,
282 * scanning, and peripheral latency cancel ticker operations will be deferred,
283 * requiring increased ticker thread context operation queue count.
284 */
285 #define TICKER_USER_THREAD_OPS (BT_CTLR_ADV_SET + BT_CTLR_SCAN_SET + \
286 BT_CTLR_MAX_CONN + \
287 TICKER_USER_THREAD_VENDOR_OPS + \
288 TICKER_USER_THREAD_FLASH_OPS + \
289 1)
290 #else /* !CONFIG_BT_CTLR_LOW_LAT */
291 /* NOTE: As ticker job is not disabled inside radio events, no need for extra
292 * thread operations queue element for flash driver.
293 */
294 #define TICKER_USER_THREAD_OPS (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
295 #endif /* !CONFIG_BT_CTLR_LOW_LAT */
296
297 #define TICKER_USER_ULL_LOW_OPS (1 + TICKER_USER_ULL_LOW_VENDOR_OPS + 1)
298
299 /* NOTE: Extended Advertising needs one extra ticker operation being enqueued
300 * for scheduling the auxiliary PDU reception while there can already
301 * be three other operations being enqueued.
302 *
303 * This value also covers the case where an initiator with 1M and Coded PHY
304 * scan window is stopping the two scan tickers, stopping one scan stop
305 * ticker and starting one new ticker for establishing an ACL connection.
306 */
307 #if defined(CONFIG_BT_CTLR_ADV_EXT)
308 #define TICKER_USER_ULL_HIGH_OPS (4 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
309 TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
310 #else /* !CONFIG_BT_CTLR_ADV_EXT */
311 #define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
312 TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
313 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
314
315 #define TICKER_USER_LLL_OPS (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
316
317 #define TICKER_USER_OPS (TICKER_USER_LLL_OPS + \
318 TICKER_USER_ULL_HIGH_OPS + \
319 TICKER_USER_ULL_LOW_OPS + \
320 TICKER_USER_THREAD_OPS)
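/* Worked example (hypothetical configuration): without CONFIG_BT_CTLR_LOW_LAT,
 * with no vendor operations, with the flash radio sync ticker enabled, and with
 * CONFIG_BT_CTLR_ADV_EXT:
 *   TICKER_USER_THREAD_OPS   = 1 + 0 + 1     = 2
 *   TICKER_USER_ULL_LOW_OPS  = 1 + 0 + 1     = 2
 *   TICKER_USER_ULL_HIGH_OPS = 4 + 0 + 1 + 1 = 6
 *   TICKER_USER_LLL_OPS      = 3 + 0 + 1     = 4
 *   TICKER_USER_OPS          = 14 operation elements in ticker_user_ops[].
 */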
321
322 /* Memory for ticker nodes/instances */
323 static uint8_t MALIGN(4) ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
324
325 /* Memory for users/contexts operating on ticker module */
326 static uint8_t MALIGN(4) ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE];
327
328 /* Memory for user/context simultaneous API operations */
329 static uint8_t MALIGN(4) ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE];
330
331 /* Semaphore to wakeup thread on ticker API callback */
332 static struct k_sem sem_ticker_api_cb;
333
334 /* Semaphore to wakeup thread on Rx-ed objects */
335 static struct k_sem *sem_recv;
336
337 /* Declare prepare-event FIFO: mfifo_prep.
338 * Queue of struct lll_event
339 */
340 static MFIFO_DEFINE(prep, sizeof(struct lll_event), EVENT_PIPELINE_MAX);
341
342 /* Declare done-event RXFIFO. This is a composite pool-backed MFIFO for rx_nodes.
343 * The declaration constructs the following data structures:
344 * - mfifo_done: FIFO with pointers to struct node_rx_event_done
345 * - mem_done: Backing data pool for struct node_rx_event_done elements
346 * - mem_link_done: Pool of memq_link_t elements
347 *
348 * Queue of pointers to struct node_rx_event_done.
349 * The actual backing behind these pointers is mem_done.
350 *
351 * When there are radio events with time reservations lower than the preemption
352 * timeout of 1.5 ms, the pipeline has to account for the maximum radio events
353 * that can be enqueued during the preempt timeout duration. All these enqueued
354 * events could be aborted in case of late scheduling, needing as many done
355 * event buffers.
356 *
357 * During continuous scanning, there can be 1 active radio event, 1 scan resume
358 * and 1 new scan prepare. If a peripheral prepare is enqueued in addition and,
359 * due to late scheduling, all of these abort, 4 done buffers are needed.
360 *
361 * If there are additional peripheral prepares enqueued, spaced apart by their
362 * time reservations, these are not yet late and hence no additional done
363 * buffers are needed.
364 *
365 * If Extended Scanning is supported, then an additional auxiliary scan event's
366 * prepare could be enqueued in the pipeline during the preemption duration.
367 *
368 * If Extended Scanning with Coded PHY is supported, then an additional 1 resume
369 * prepare could be enqueued in the pipeline during the preemption duration.
370 */
371 #if !defined(VENDOR_EVENT_DONE_MAX)
372 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
373 #if defined(CONFIG_BT_CTLR_PHY_CODED)
374 #define EVENT_DONE_MAX 6
375 #else /* !CONFIG_BT_CTLR_PHY_CODED */
376 #define EVENT_DONE_MAX 5
377 #endif /* !CONFIG_BT_CTLR_PHY_CODED */
378 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
379 #define EVENT_DONE_MAX 4
380 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
381 #else
382 #define EVENT_DONE_MAX VENDOR_EVENT_DONE_MAX
383 #endif
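/* For example, the EVENT_DONE_MAX of 6 above accounts for: 1 active radio
 * event + 1 scan resume + 1 new scan prepare + 1 peripheral prepare +
 * 1 auxiliary scan prepare (Extended Scanning) + 1 Coded PHY resume prepare,
 * all of which could be aborted due to late scheduling within the preempt
 * timeout and hence each need a done event buffer.
 */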
384
385 /* Maximum time allowed for completing synchronous LLL disabling via
386 * ull_disable.
387 */
388 #define ULL_DISABLE_TIMEOUT K_MSEC(1000)
389
390 static RXFIFO_DEFINE(done, sizeof(struct node_rx_event_done),
391 EVENT_DONE_MAX, 0U);
392
393 /* Minimum number of node rx for ULL to LL/HCI thread per connection.
394 * Increasing this by a multiple of the maximum simultaneous connection count
395 * will permit simultaneous parallel PHY Update or Connection Update procedures
396 * amongst active connections.
397 * A minimum of 2 node rx per connection is reserved because:
398 * Central and peripheral always use two new nodes for handling completion
399 * notification, one for PHY Update complete and another for Data Length Update
400 * complete.
401 */
402 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY)
403 #define LL_PDU_RX_CNT (2 * (CONFIG_BT_CTLR_LLCP_CONN))
404 #elif defined(CONFIG_BT_CONN)
405 #define LL_PDU_RX_CNT (CONFIG_BT_CTLR_LLCP_CONN)
406 #else
407 #define LL_PDU_RX_CNT 0
408 #endif
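/* Example (hypothetical Kconfig value): with CONFIG_BT_CTLR_LLCP_CONN = 2 and
 * both CONFIG_BT_CTLR_DATA_LENGTH and CONFIG_BT_CTLR_PHY enabled,
 * LL_PDU_RX_CNT = 2 * 2 = 4 node rx reserved for procedure completion
 * notifications towards the LL/HCI thread.
 */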
409
410 /* No. of node rx for LLL to ULL.
411 * Reserve 3, 1 for adv data, 1 for scan response and 1 for empty PDU reception.
412 */
413 #define PDU_RX_CNT (3 + BT_CTLR_ADV_EXT_RX_CNT + CONFIG_BT_CTLR_RX_BUFFERS)
414
415 /* Part sum of LLL to ULL and ULL to LL/HCI thread node rx count.
416 * Will be used below in allocating node rx pool.
417 */
418 #define RX_CNT (PDU_RX_CNT + LL_PDU_RX_CNT)
419
420 static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
421
422 #if defined(CONFIG_BT_RX_USER_PDU_LEN)
423 #define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
424 #else
425 #define PDU_RX_USER_PDU_OCTETS_MAX 0
426 #endif
427
428 #define PDU_ADV_SIZE MAX(PDU_AC_LL_SIZE_MAX, \
429 (PDU_AC_LL_HEADER_SIZE + LL_EXT_OCTETS_RX_MAX))
430
431 #define PDU_DATA_SIZE MAX((PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX), \
432 (PDU_BIS_LL_HEADER_SIZE + LL_BIS_OCTETS_RX_MAX))
433
434 #define PDU_CTRL_SIZE (PDU_DC_LL_HEADER_SIZE + PDU_DC_CTRL_RX_SIZE_MAX)
435
436 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
437
438 #define PDU_RX_NODE_POOL_ELEMENT_SIZE MROUND(NODE_RX_HEADER_SIZE + \
439 MAX(MAX(PDU_ADV_SIZE, \
440 MAX(PDU_DATA_SIZE, \
441 PDU_CTRL_SIZE)), \
442 PDU_RX_USER_PDU_OCTETS_MAX))
443
444 #if defined(CONFIG_BT_CTLR_ADV_ISO_SET)
445 #define BT_CTLR_ADV_ISO_SET CONFIG_BT_CTLR_ADV_ISO_SET
446 #else
447 #define BT_CTLR_ADV_ISO_SET 0
448 #endif
449
450 #if defined(CONFIG_BT_PER_ADV_SYNC_MAX)
451 #define BT_CTLR_SCAN_SYNC_SET CONFIG_BT_PER_ADV_SYNC_MAX
452 #else
453 #define BT_CTLR_SCAN_SYNC_SET 0
454 #endif
455
456 #if defined(CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET)
457 #define BT_CTLR_SCAN_SYNC_ISO_SET CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET
458 #else
459 #define BT_CTLR_SCAN_SYNC_ISO_SET 0
460 #endif
461
462 #define PDU_RX_POOL_SIZE (PDU_RX_NODE_POOL_ELEMENT_SIZE * \
463 (RX_CNT + BT_CTLR_MAX_CONNECTABLE + \
464 BT_CTLR_ADV_SET + BT_CTLR_SCAN_SYNC_SET))
465
466 /* Macros for encoding number of completed packets.
467 *
468 * If the pointer is numerically below 0x100, the pointer is treated as either
469 * data or control PDU.
470 *
471 * NOTE: For any architecture which would map RAM below address 0x100, this will
472 * not work.
473 */
474 #define IS_NODE_TX_PTR(_p) ((uint32_t)(_p) & ~0xFFUL)
475 #define IS_NODE_TX_DATA(_p) ((uint32_t)(_p) == 0x01UL)
476 #define IS_NODE_TX_CTRL(_p) ((uint32_t)(_p) == 0x02UL)
477 #define NODE_TX_DATA_SET(_p) ((_p) = (void *)0x01UL)
478 #define NODE_TX_CTRL_SET(_p) ((_p) = (void *)0x02UL)
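/* Usage sketch (illustrative): once a Tx node queued in the tx_ack FIFO has
 * been consumed and its memory released, the FIFO slot is overwritten with a
 * small marker value instead of a real pointer, for example:
 *
 *   NODE_TX_DATA_SET(tx->node);   (or NODE_TX_CTRL_SET for control PDUs)
 *
 * so that tx_cmplt_get() can still count the entry as one completed data or
 * control PDU, distinguishing markers from real pointers via IS_NODE_TX_PTR().
 */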
479
480 /* Macros for encoding number of ISO SDU fragments in the enqueued TX node
481 * pointer. This is needed to ensure only a single release of the node and link
482 * in tx_cmplt_get, even when called several times. At all times, the number of
483 * fragments must be available for HCI complete-counting.
484 *
485 * If the pointer is numerically below 0x100, the pointer is treated as a one
486 * byte fragments count.
487 *
488 * NOTE: For any architecture which would map RAM below address 0x100, this will
489 * not work.
490 */
491 #define NODE_TX_FRAGMENTS_GET(_p) ((uint32_t)(_p) & 0xFFUL)
492 #define NODE_TX_FRAGMENTS_SET(_p, _cmplt) ((_p) = (void *)(uint32_t)(_cmplt))
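/* For example (illustrative), an ISO SDU that was fragmented into 3 PDUs can
 * have its Tx node slot set with NODE_TX_FRAGMENTS_SET(p, 3); after the node
 * and link have been released once, NODE_TX_FRAGMENTS_GET(p) still yields 3,
 * allowing tx_cmplt_get() to report three completed packets to the Host.
 */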
493
494 static struct {
495 void *free;
496 uint8_t pool[PDU_RX_POOL_SIZE];
497 } mem_pdu_rx;
498
499 /* NOTE: Two memq_link structures are reserved in the case of periodic sync,
500 * one each for sync established and sync lost respectively. Whereas, in
501 * comparison, a connection uses the incoming Rx-ed CONNECT_IND PDU to
502 * piggyback generation of the connection complete event, and hence only one
503 * link is reserved for generation of the disconnection event (which can
504 * happen due to supervision timeout and other reasons that don't have an
505 * incoming Rx-ed PDU).
506 */
507 #define LINK_RX_POOL_SIZE \
508 (sizeof(memq_link_t) * \
509 (RX_CNT + 2 + BT_CTLR_MAX_CONN + BT_CTLR_ADV_SET + \
510 (BT_CTLR_ADV_ISO_SET * 2) + (BT_CTLR_SCAN_SYNC_SET * 2) + \
511 (BT_CTLR_SCAN_SYNC_ISO_SET * 2) + \
512 (IQ_REPORT_CNT)))
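/* Worked example (hypothetical counts): with RX_CNT = 10, BT_CTLR_MAX_CONN = 2,
 * BT_CTLR_ADV_SET = 1, one periodic sync set, and no BIG or IQ report support,
 * LINK_RX_POOL_SIZE = sizeof(memq_link_t) * (10 + 2 + 2 + 1 + 0 + 2 + 0 + 0)
 *                   = sizeof(memq_link_t) * 17.
 */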
513 static struct {
514 uint16_t quota_pdu; /* Number of un-utilized buffers */
515
516 void *free;
517 uint8_t pool[LINK_RX_POOL_SIZE];
518 } mem_link_rx;
519
520 static MEMQ_DECLARE(ull_rx);
521 static MEMQ_DECLARE(ll_rx);
522
523 #if defined(CONFIG_BT_CTLR_ISO) || \
524 defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) || \
525 defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
526 #define ULL_TIME_WRAPPING_POINT_US (HAL_TICKER_TICKS_TO_US_64BIT(HAL_TICKER_CNTR_MASK))
527 #define ULL_TIME_SPAN_FULL_US (ULL_TIME_WRAPPING_POINT_US + 1)
528 #endif /* CONFIG_BT_CTLR_ISO ||
529 * CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER ||
530 * CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER
531 */
532
533 #if defined(CONFIG_BT_CONN)
534 static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
535
536 static void *mark_update;
537 #endif /* CONFIG_BT_CONN */
538
539 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
540 #if defined(CONFIG_BT_CONN)
541 #define BT_CTLR_TX_BUFFERS (CONFIG_BT_BUF_ACL_TX_COUNT + LLCP_TX_CTRL_BUF_COUNT)
542 #else
543 #define BT_CTLR_TX_BUFFERS 0
544 #endif /* CONFIG_BT_CONN */
545
546 static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
547 BT_CTLR_TX_BUFFERS + BT_CTLR_ISO_TX_PDU_BUFFERS);
548 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
549
550 static void *mark_disable;
551
552 static inline int init_reset(void);
553 static void perform_lll_reset(void *param);
554 static inline void *mark_set(void **m, void *param);
555 static inline void *mark_unset(void **m, void *param);
556 static inline void *mark_get(void *m);
557 static void rx_replenish_all(void);
558 #if defined(CONFIG_BT_CONN) || \
559 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
560 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
561 defined(CONFIG_BT_CTLR_ADV_ISO)
562 static void rx_release_replenish(struct node_rx_hdr *rx);
563 static void rx_link_dequeue_release_quota_inc(memq_link_t *link);
564 #endif /* CONFIG_BT_CONN ||
565 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
566 * CONFIG_BT_CTLR_ADV_PERIODIC ||
567 * CONFIG_BT_CTLR_ADV_ISO
568 */
569 static void rx_demux(void *param);
570 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
571 static void rx_demux_yield(void);
572 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
573 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
574 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last);
575 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
576 memq_link_t *link,
577 struct node_tx *node_tx);
578 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
579 static inline void rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx_hdr);
580 static inline void rx_demux_event_done(memq_link_t *link,
581 struct node_rx_event_done *done);
582 static void ll_rx_link_quota_inc(void);
583 static void ll_rx_link_quota_dec(void);
584 static void disabled_cb(void *param);
585
586 int ll_init(struct k_sem *sem_rx)
587 {
588 static bool mayfly_initialized;
589 int err;
590
591 /* Store the semaphore to be used to wakeup Thread context */
592 sem_recv = sem_rx;
593
594 /* Initialize counter */
595 /* TODO: Bind and use counter driver? */
596 cntr_init();
597
598 /* Initialize mayfly. It must be done only once, due to the mayfly design.
599 *
600 * On init, the mayfly memq head and tail are assigned a link instance
601 * that is used during the enqueue operation. A new link provided by enqueue
602 * is added as the tail and will be used in a future enqueue. On dequeue,
603 * the link that was used for storage of the job is released and stored
604 * in the job it was related to. The job may store the initial link. If mayfly
605 * is re-initialized but job objects were not re-initialized, there is a
606 * risk that an enqueued job will point to the same link that is in the memq
607 * just after re-initialization. After an enqueue operation with that link,
608 * head and tail still point to the same link object, so the memq is
609 * considered empty.
610 */
611 if (!mayfly_initialized) {
612 mayfly_init();
613 mayfly_initialized = true;
614 }
615
616
617 /* Initialize Ticker */
618 ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS;
619 ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS;
620 ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS;
621 ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_THREAD_OPS;
622
623 err = ticker_init(TICKER_INSTANCE_ID_CTLR,
624 TICKER_NODES, &ticker_nodes[0],
625 MAYFLY_CALLER_COUNT, &ticker_users[0],
626 TICKER_USER_OPS, &ticker_user_ops[0],
627 hal_ticker_instance0_caller_id_get,
628 hal_ticker_instance0_sched,
629 hal_ticker_instance0_trigger_set);
630 LL_ASSERT(!err);
631
632 /* Initialize semaphore for ticker API blocking wait */
633 k_sem_init(&sem_ticker_api_cb, 0, 1);
634
635 /* Initialize LLL */
636 err = lll_init();
637 if (err) {
638 return err;
639 }
640
641 /* Initialize ULL internals */
642 /* TODO: globals? */
643
644 /* Common to init and reset */
645 err = init_reset();
646 if (err) {
647 return err;
648 }
649
650 #if defined(CONFIG_BT_BROADCASTER)
651 err = lll_adv_init();
652 if (err) {
653 return err;
654 }
655
656 err = ull_adv_init();
657 if (err) {
658 return err;
659 }
660 #endif /* CONFIG_BT_BROADCASTER */
661
662 #if defined(CONFIG_BT_OBSERVER)
663 err = lll_scan_init();
664 if (err) {
665 return err;
666 }
667
668 err = ull_scan_init();
669 if (err) {
670 return err;
671 }
672 #endif /* CONFIG_BT_OBSERVER */
673
674 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
675 err = lll_sync_init();
676 if (err) {
677 return err;
678 }
679
680 err = ull_sync_init();
681 if (err) {
682 return err;
683 }
684
685 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
686 err = ull_sync_iso_init();
687 if (err) {
688 return err;
689 }
690 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
691 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
692
693 #if defined(CONFIG_BT_CONN)
694 err = lll_conn_init();
695 if (err) {
696 return err;
697 }
698
699 err = ull_conn_init();
700 if (err) {
701 return err;
702 }
703 #endif /* CONFIG_BT_CONN */
704
705 #if defined(CONFIG_BT_CTLR_DF)
706 err = ull_df_init();
707 if (err) {
708 return err;
709 }
710 #endif
711
712 #if defined(CONFIG_BT_CTLR_ISO)
713 err = ull_iso_init();
714 if (err) {
715 return err;
716 }
717 #endif /* CONFIG_BT_CTLR_ISO */
718
719 #if defined(CONFIG_BT_CTLR_CONN_ISO)
720 err = ull_conn_iso_init();
721 if (err) {
722 return err;
723 }
724 #endif /* CONFIG_BT_CTLR_CONN_ISO */
725
726 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
727 err = ull_peripheral_iso_init();
728 if (err) {
729 return err;
730 }
731 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
732
733 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
734 err = ull_central_iso_init();
735 if (err) {
736 return err;
737 }
738 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
739
740 #if defined(CONFIG_BT_CTLR_ADV_ISO)
741 err = ull_adv_iso_init();
742 if (err) {
743 return err;
744 }
745 #endif /* CONFIG_BT_CTLR_ADV_ISO */
746
747 #if defined(CONFIG_BT_CTLR_DF)
748 err = lll_df_init();
749 if (err) {
750 return err;
751 }
752 #endif
753
754 #if defined(CONFIG_BT_CTLR_USER_EXT)
755 err = ull_user_init();
756 if (err) {
757 return err;
758 }
759 #endif /* CONFIG_BT_CTLR_USER_EXT */
760
761 /* reset filter accept list, resolving list and initialise RPA timeout */
762 if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
763 ull_filter_reset(true);
764 }
765
766 #if defined(CONFIG_BT_CTLR_TEST)
767 err = mem_ut();
768 if (err) {
769 return err;
770 }
771
772 err = ecb_ut();
773 if (err) {
774 return err;
775 }
776
777 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
778 lll_chan_sel_2_ut();
779 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
780 #endif /* CONFIG_BT_CTLR_TEST */
781
782 return 0;
783 }
784
785 int ll_deinit(void)
786 {
787 ll_reset();
788 return lll_deinit();
789 }
790
791 void ll_reset(void)
792 {
793 int err;
794
795 /* Note: The sequence of reset control flow is as follows:
796 * - Reset ULL context, i.e. stop ULL scheduling, abort LLL events etc.
797 * - Reset LLL context, i.e. post LLL event abort, let LLL cleanup its
798 * variables, if any.
799 * - Reset ULL static variables (which otherwise were mem-zeroed in case
800 *   of power-on reset, wherein architecture startup mem-zeroes the .bss
801 *   sections).
802 * - Initialize ULL context variable, similar to on-power-up.
803 */
804
805 #if defined(CONFIG_BT_BROADCASTER)
806 #if defined(CONFIG_BT_CTLR_ADV_ISO)
807 /* Reset adv iso sets */
808 err = ull_adv_iso_reset();
809 LL_ASSERT(!err);
810 #endif /* CONFIG_BT_CTLR_ADV_ISO */
811
812 /* Reset adv state */
813 err = ull_adv_reset();
814 LL_ASSERT(!err);
815 #endif /* CONFIG_BT_BROADCASTER */
816
817 #if defined(CONFIG_BT_OBSERVER)
818 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
819 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
820 /* Reset sync iso sets */
821 err = ull_sync_iso_reset();
822 LL_ASSERT(!err);
823 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
824
825 /* Reset periodic sync sets */
826 err = ull_sync_reset();
827 LL_ASSERT(!err);
828 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
829
830 /* Reset scan state */
831 err = ull_scan_reset();
832 LL_ASSERT(!err);
833 #endif /* CONFIG_BT_OBSERVER */
834
835 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
836 err = ull_peripheral_iso_reset();
837 LL_ASSERT(!err);
838 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
839
840 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
841 err = ull_central_iso_reset();
842 LL_ASSERT(!err);
843 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
844
845 #if defined(CONFIG_BT_CTLR_CONN_ISO)
846 err = ull_conn_iso_reset();
847 LL_ASSERT(!err);
848 #endif /* CONFIG_BT_CTLR_CONN_ISO */
849
850 #if defined(CONFIG_BT_CTLR_ISO)
851 err = ull_iso_reset();
852 LL_ASSERT(!err);
853 #endif /* CONFIG_BT_CTLR_ISO */
854
855 #if defined(CONFIG_BT_CONN)
856 /* Reset conn role */
857 err = ull_conn_reset();
858 LL_ASSERT(!err);
859
860 MFIFO_INIT(tx_ack);
861 #endif /* CONFIG_BT_CONN */
862
863 /* reset filter accept list and resolving list */
864 if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
865 ull_filter_reset(false);
866 }
867
868 /* Re-initialize ULL internals */
869
870 /* Re-initialize the prep mfifo */
871 MFIFO_INIT(prep);
872
873 /* Re-initialize the free rx mfifo */
874 MFIFO_INIT(pdu_rx_free);
875
876 #if defined(CONFIG_BT_CONN)
877 /* Re-initialize the free ll rx mfifo */
878 MFIFO_INIT(ll_pdu_rx_free);
879 #endif /* CONFIG_BT_CONN */
880
881 /* Reset LLL via mayfly */
882 {
883 static memq_link_t link;
884 static struct mayfly mfy = {0, 0, &link, NULL,
885 perform_lll_reset};
886 uint32_t retval;
887
888 /* NOTE: If Zero Latency Interrupt is used, then LLL context
889 * will be the highest priority IRQ in the system, hence
890 * mayfly_enqueue will end up running the callee inline
891 * (vectoring to the callee function) within this function.
892 * Otherwise, a semaphore is used to wait for
893 * perform_lll_reset to complete.
894 */
895
896 #if !defined(CONFIG_BT_CTLR_ZLI)
897 struct k_sem sem;
898
899 k_sem_init(&sem, 0, 1);
900 mfy.param = &sem;
901 #endif /* !CONFIG_BT_CTLR_ZLI */
902
903 retval = mayfly_enqueue(TICKER_USER_ID_THREAD,
904 TICKER_USER_ID_LLL, 0, &mfy);
905 LL_ASSERT(!retval);
906
907 #if !defined(CONFIG_BT_CTLR_ZLI)
908 /* LLL reset must complete before returning - wait for
909 * reset completion in LLL mayfly thread
910 */
911 k_sem_take(&sem, K_FOREVER);
912 #endif /* !CONFIG_BT_CTLR_ZLI */
913 }
914
915 #if defined(CONFIG_BT_BROADCASTER)
916 /* Finalize after adv state LLL context reset */
917 err = ull_adv_reset_finalize();
918 LL_ASSERT(!err);
919 #endif /* CONFIG_BT_BROADCASTER */
920
921 /* Reset/End DTM Tx or Rx commands */
922 if (IS_ENABLED(CONFIG_BT_CTLR_DTM)) {
923 uint16_t num_rx;
924
925 (void)ll_test_end(&num_rx);
926 ARG_UNUSED(num_rx);
927 }
928
929 /* Common to init and reset */
930 err = init_reset();
931 LL_ASSERT(!err);
932
933 #if defined(CONFIG_BT_CTLR_DF)
934 /* Direction Finding has to be reset after ull init_reset call because
935 * it uses mem_link_rx for node_rx_iq_report. The mem_link_rx is reset
936 * in common ull init_reset.
937 */
938 err = ull_df_reset();
939 LL_ASSERT(!err);
940 #endif
941
942 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
943 ll_feat_reset();
944 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
945
946 /* clear static random address */
947 (void)ll_addr_set(1U, NULL);
948 }
949
950 /**
951 * @brief Peek the next node_rx to send up to Host
952 * @details Tightly coupled with prio_recv_thread()
953 * Execution context: Controller thread
954 *
955 * @param node_rx[out] Pointer to rx node at head of queue
956 * @param handle[out] Connection handle
957 * @return Number of completed Tx packets for the connection in *handle
958 */
959 uint8_t ll_rx_get(void **node_rx, uint16_t *handle)
960 {
961 struct node_rx_pdu *rx;
962 memq_link_t *link;
963 uint8_t cmplt = 0U;
964
965 #if defined(CONFIG_BT_CONN) || \
966 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
967 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
968 defined(CONFIG_BT_CTLR_ADV_ISO)
969 ll_rx_get_again:
970 #endif /* CONFIG_BT_CONN ||
971 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
972 * CONFIG_BT_CTLR_ADV_PERIODIC ||
973 * CONFIG_BT_CTLR_ADV_ISO
974 */
975
976 *node_rx = NULL;
977
978 link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx);
979 if (link) {
980 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
981 cmplt = tx_cmplt_get(handle, &mfifo_fifo_tx_ack.f, rx->hdr.ack_last);
982 if (!cmplt) {
983 uint8_t f, cmplt_prev, cmplt_curr;
984 uint16_t h;
985
986 cmplt_curr = 0U;
987 f = mfifo_fifo_tx_ack.f;
988 do {
989 cmplt_prev = cmplt_curr;
990 cmplt_curr = tx_cmplt_get(&h, &f,
991 mfifo_fifo_tx_ack.l);
992 } while ((cmplt_prev != 0U) ||
993 (cmplt_prev != cmplt_curr));
994 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
995
996 if (0) {
997 #if defined(CONFIG_BT_CONN) || \
998 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT))
999 /* Do not send up buffers to Host thread that are
1000 * marked for release
1001 */
1002 } else if (rx->hdr.type == NODE_RX_TYPE_RELEASE) {
1003 rx_link_dequeue_release_quota_inc(link);
1004 rx_release_replenish((struct node_rx_hdr *)rx);
1005
1006 goto ll_rx_get_again;
1007 #endif /* CONFIG_BT_CONN ||
1008 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT)
1009 */
1010
1011 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1012 } else if (rx->hdr.type == NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE) {
1013 const uint8_t report_cnt = 1U;
1014
1015 (void)memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head, NULL);
1016 ll_rx_link_release(link);
1017 ull_iq_report_link_inc_quota(report_cnt);
1018 ull_df_iq_report_mem_release(rx);
1019 ull_df_rx_iq_report_alloc(report_cnt);
1020
1021 goto ll_rx_get_again;
1022 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1023
1024 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1025 } else if (rx->hdr.type == NODE_RX_TYPE_SYNC_CHM_COMPLETE) {
1026 rx_link_dequeue_release_quota_inc(link);
1027
1028 /* Remove Channel Map Update Indication from
1029 * ACAD.
1030 */
1031 ull_adv_sync_chm_complete(rx);
1032
1033 rx_release_replenish((struct node_rx_hdr *)rx);
1034
1035 goto ll_rx_get_again;
1036 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1037
1038 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1039 } else if (rx->hdr.type == NODE_RX_TYPE_BIG_CHM_COMPLETE) {
1040 rx_link_dequeue_release_quota_inc(link);
1041
1042 /* Update Channel Map in BIGInfo present in
1043 * Periodic Advertising PDU.
1044 */
1045 ull_adv_iso_chm_complete(rx);
1046
1047 rx_release_replenish((struct node_rx_hdr *)rx);
1048
1049 goto ll_rx_get_again;
1050 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1051 }
1052
1053 *node_rx = rx;
1054
1055 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1056 }
1057 } else {
1058 cmplt = tx_cmplt_get(handle, &mfifo_fifo_tx_ack.f,
1059 mfifo_fifo_tx_ack.l);
1060 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1061 }
1062
1063 return cmplt;
1064 }
1065
1066 /**
1067 * @brief Commit the dequeue from memq_ll_rx, where ll_rx_get() did the peek
1068 * @details Execution context: Controller thread
1069 */
1070 void ll_rx_dequeue(void)
1071 {
1072 struct node_rx_pdu *rx = NULL;
1073 memq_link_t *link;
1074
1075 link = memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head,
1076 (void **)&rx);
1077 LL_ASSERT(link);
1078
1079 ll_rx_link_release(link);
1080
1081 /* handle object specific clean up */
1082 switch (rx->hdr.type) {
1083 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1084 #if defined(CONFIG_BT_OBSERVER)
1085 case NODE_RX_TYPE_EXT_1M_REPORT:
1086 case NODE_RX_TYPE_EXT_2M_REPORT:
1087 case NODE_RX_TYPE_EXT_CODED_REPORT:
1088 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1089 case NODE_RX_TYPE_SYNC_REPORT:
1090 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1091 {
1092 struct node_rx_pdu *rx_curr;
1093 struct pdu_adv *adv;
1094 uint8_t loop = PDU_RX_POOL_SIZE / PDU_RX_NODE_POOL_ELEMENT_SIZE;
1095
1096 adv = (struct pdu_adv *)rx->pdu;
1097 if (adv->type != PDU_ADV_TYPE_EXT_IND) {
1098 break;
1099 }
1100
1101 rx_curr = rx->rx_ftr.extra;
1102 while (rx_curr) {
1103 memq_link_t *link_free;
1104
1105 LL_ASSERT(loop);
1106 loop--;
1107
1108 link_free = rx_curr->hdr.link;
1109 rx_curr = rx_curr->rx_ftr.extra;
1110
1111 ll_rx_link_release(link_free);
1112 }
1113 }
1114 break;
1115
1116 case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1117 {
1118 ull_scan_term_dequeue(rx->hdr.handle);
1119 }
1120 break;
1121 #endif /* CONFIG_BT_OBSERVER */
1122
1123 #if defined(CONFIG_BT_BROADCASTER)
1124 case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1125 {
1126 struct ll_adv_set *adv;
1127 struct lll_adv_aux *lll_aux;
1128
1129 adv = ull_adv_set_get(rx->hdr.handle);
1130 LL_ASSERT(adv);
1131
1132 lll_aux = adv->lll.aux;
1133 if (lll_aux) {
1134 struct ll_adv_aux_set *aux;
1135
1136 aux = HDR_LLL2ULL(lll_aux);
1137
1138 aux->is_started = 0U;
1139 }
1140
1141 #if defined(CONFIG_BT_PERIPHERAL)
1142 struct lll_conn *lll_conn = adv->lll.conn;
1143
1144 if (!lll_conn) {
1145 adv->is_enabled = 0U;
1146
1147 break;
1148 }
1149
1150 LL_ASSERT(!lll_conn->link_tx_free);
1151
1152 memq_link_t *memq_link = memq_deinit(&lll_conn->memq_tx.head,
1153 &lll_conn->memq_tx.tail);
1154 LL_ASSERT(memq_link);
1155
1156 lll_conn->link_tx_free = memq_link;
1157
1158 struct ll_conn *conn = HDR_LLL2ULL(lll_conn);
1159
1160 ll_conn_release(conn);
1161 adv->lll.conn = NULL;
1162
1163 ll_rx_release(adv->node_rx_cc_free);
1164 adv->node_rx_cc_free = NULL;
1165
1166 ll_rx_link_release(adv->link_cc_free);
1167 adv->link_cc_free = NULL;
1168 #endif /* CONFIG_BT_PERIPHERAL */
1169
1170 adv->is_enabled = 0U;
1171 }
1172 break;
1173 #endif /* CONFIG_BT_BROADCASTER */
1174 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1175
1176 #if defined(CONFIG_BT_CONN)
1177 case NODE_RX_TYPE_CONNECTION:
1178 {
1179 struct node_rx_cc *cc = (void *)rx->pdu;
1180 struct node_rx_ftr *ftr = &(rx->rx_ftr);
1181
1182 if (0) {
1183
1184 #if defined(CONFIG_BT_PERIPHERAL)
1185 } else if ((cc->status == BT_HCI_ERR_ADV_TIMEOUT) || cc->role) {
1186 struct ll_adv_set *adv;
1187 struct lll_adv *lll;
1188
1189 /* Get reference to ULL context */
1190 lll = ftr->param;
1191 adv = HDR_LLL2ULL(lll);
1192
1193 if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1194 struct lll_conn *conn_lll;
1195 struct ll_conn *conn;
1196 memq_link_t *memq_link;
1197
1198 conn_lll = lll->conn;
1199 LL_ASSERT(conn_lll);
1200 lll->conn = NULL;
1201
1202 LL_ASSERT(!conn_lll->link_tx_free);
1203 memq_link = memq_deinit(&conn_lll->memq_tx.head,
1204 &conn_lll->memq_tx.tail);
1205 LL_ASSERT(memq_link);
1206 conn_lll->link_tx_free = memq_link;
1207
1208 conn = HDR_LLL2ULL(conn_lll);
1209 ll_conn_release(conn);
1210 } else {
1211 /* Release un-utilized node rx */
1212 if (adv->node_rx_cc_free) {
1213 void *rx_free;
1214
1215 rx_free = adv->node_rx_cc_free;
1216 adv->node_rx_cc_free = NULL;
1217
1218 ll_rx_release(rx_free);
1219 }
1220 }
1221
1222 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1223 if (lll->aux) {
1224 struct ll_adv_aux_set *aux;
1225
1226 aux = HDR_LLL2ULL(lll->aux);
1227 aux->is_started = 0U;
1228 }
1229
1230 /* If Extended Advertising Commands are used, reset
1231 * is_enabled when the advertising set terminated event is
1232 * dequeued. Otherwise, when legacy advertising commands are
1233 * used, reset is_enabled here.
1234 */
1235 if (!lll->node_rx_adv_term) {
1236 adv->is_enabled = 0U;
1237 }
1238 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1239 adv->is_enabled = 0U;
1240 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1241
1242 #else /* !CONFIG_BT_PERIPHERAL */
1243 ARG_UNUSED(cc);
1244 #endif /* !CONFIG_BT_PERIPHERAL */
1245
1246 #if defined(CONFIG_BT_CENTRAL)
1247 } else {
1248 struct ll_scan_set *scan = HDR_LLL2ULL(ftr->param);
1249
1250 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
1251 struct ll_scan_set *scan_other =
1252 ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
1253
1254 if (scan_other) {
1255 if (scan_other == scan) {
1256 scan_other = ull_scan_is_enabled_get(SCAN_HANDLE_1M);
1257 }
1258
1259 if (scan_other) {
1260 scan_other->lll.conn = NULL;
1261 scan_other->is_enabled = 0U;
1262 }
1263 }
1264 #endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */
1265
1266 scan->lll.conn = NULL;
1267 scan->is_enabled = 0U;
1268 #else /* !CONFIG_BT_CENTRAL */
1269 } else {
1270 LL_ASSERT(0);
1271 #endif /* !CONFIG_BT_CENTRAL */
1272 }
1273
1274 if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
1275 uint8_t bm;
1276
1277 /* FIXME: use the correct adv and scan set to get
1278 * enabled status bitmask
1279 */
1280 bm = (IS_ENABLED(CONFIG_BT_OBSERVER)?(ull_scan_is_enabled(0) << 1):0) |
1281 (IS_ENABLED(CONFIG_BT_BROADCASTER)?ull_adv_is_enabled(0):0);
1282
1283 if (!bm) {
1284 ull_filter_adv_scan_state_cb(0);
1285 }
1286 }
1287 }
1288 break;
1289
1290 case NODE_RX_TYPE_TERMINATE:
1291 case NODE_RX_TYPE_DC_PDU:
1292 #endif /* CONFIG_BT_CONN */
1293
1294 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1295 case NODE_RX_TYPE_BIG_COMPLETE:
1296 case NODE_RX_TYPE_BIG_TERMINATE:
1297 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1298
1299 #if defined(CONFIG_BT_OBSERVER)
1300 case NODE_RX_TYPE_REPORT:
1301
1302 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1303 /* fall through */
1304 case NODE_RX_TYPE_SYNC:
1305 case NODE_RX_TYPE_SYNC_LOST:
1306 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1307 /* fall through */
1308 case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
1309 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1310 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1311 /* fall through */
1312 case NODE_RX_TYPE_SYNC_ISO:
1313 case NODE_RX_TYPE_SYNC_ISO_LOST:
1314 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1315 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1316 #endif /* CONFIG_BT_OBSERVER */
1317
1318 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1319 case NODE_RX_TYPE_SCAN_REQ:
1320 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1321
1322 #if defined(CONFIG_BT_CONN)
1323 case NODE_RX_TYPE_CONN_UPDATE:
1324 case NODE_RX_TYPE_ENC_REFRESH:
1325
1326 #if defined(CONFIG_BT_CTLR_LE_PING)
1327 case NODE_RX_TYPE_APTO:
1328 #endif /* CONFIG_BT_CTLR_LE_PING */
1329
1330 case NODE_RX_TYPE_CHAN_SEL_ALGO:
1331
1332 #if defined(CONFIG_BT_CTLR_PHY)
1333 case NODE_RX_TYPE_PHY_UPDATE:
1334 #endif /* CONFIG_BT_CTLR_PHY */
1335
1336 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1337 case NODE_RX_TYPE_RSSI:
1338 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1339 #endif /* CONFIG_BT_CONN */
1340
1341 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1342 case NODE_RX_TYPE_PROFILE:
1343 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1344
1345 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1346 case NODE_RX_TYPE_ADV_INDICATION:
1347 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1348
1349 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1350 case NODE_RX_TYPE_SCAN_INDICATION:
1351 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1352
1353 #if defined(CONFIG_BT_HCI_MESH_EXT)
1354 case NODE_RX_TYPE_MESH_ADV_CPLT:
1355 case NODE_RX_TYPE_MESH_REPORT:
1356 #endif /* CONFIG_BT_HCI_MESH_EXT */
1357
1358 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1359 case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1360 __fallthrough;
1361 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1362
1363 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1364 case NODE_RX_TYPE_CIS_REQUEST:
1365 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1366
1367 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1368 case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
1369 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1370
1371 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1372 case NODE_RX_TYPE_CIS_ESTABLISHED:
1373 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1374
1375 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1376 case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
1377 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1378
1379 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1380 case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
1381 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1382
1383 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
1384 case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
1385 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
1386
1387 /* Ensure that at least one 'case' statement is present for this
1388 * code block.
1389 */
1390 case NODE_RX_TYPE_NONE:
1391 LL_ASSERT(rx->hdr.type != NODE_RX_TYPE_NONE);
1392 break;
1393
1394 default:
1395 LL_ASSERT(0);
1396 break;
1397 }
1398
1399 /* FIXME: clean up when porting Mesh Ext. */
1400 if (0) {
1401 #if defined(CONFIG_BT_HCI_MESH_EXT)
1402 } else if (rx->hdr.type == NODE_RX_TYPE_MESH_ADV_CPLT) {
1403 struct ll_adv_set *adv;
1404 struct ll_scan_set *scan;
1405
1406 adv = ull_adv_is_enabled_get(0);
1407 LL_ASSERT(adv);
1408 adv->is_enabled = 0U;
1409
1410 scan = ull_scan_is_enabled_get(0);
1411 LL_ASSERT(scan);
1412
1413 scan->is_enabled = 0U;
1414
1415 ll_adv_scan_state_cb(0);
1416 #endif /* CONFIG_BT_HCI_MESH_EXT */
1417 }
1418 }
1419
1420 void ll_rx_mem_release(void **node_rx)
1421 {
1422 struct node_rx_pdu *rx;
1423
1424 rx = *node_rx;
1425 while (rx) {
1426 struct node_rx_pdu *rx_free;
1427
1428 rx_free = rx;
1429 rx = rx->hdr.next;
1430
1431 switch (rx_free->hdr.type) {
1432 #if defined(CONFIG_BT_BROADCASTER)
1433 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1434 case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1435 ll_rx_release(rx_free);
1436 break;
1437
1438 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1439 case NODE_RX_TYPE_BIG_COMPLETE:
1440 /* Nothing to release */
1441 break;
1442
1443 case NODE_RX_TYPE_BIG_TERMINATE:
1444 {
1445 struct ll_adv_iso_set *adv_iso = rx_free->rx_ftr.param;
1446
1447 ull_adv_iso_stream_release(adv_iso);
1448 }
1449 break;
1450 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1451 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1452 #endif /* CONFIG_BT_BROADCASTER */
1453
1454 #if defined(CONFIG_BT_OBSERVER)
1455 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1456 case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1457 {
1458 ll_rx_release(rx_free);
1459 }
1460 break;
1461 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1462 #endif /* CONFIG_BT_OBSERVER */
1463
1464 #if defined(CONFIG_BT_CONN)
1465 case NODE_RX_TYPE_CONNECTION:
1466 {
1467 struct node_rx_cc *cc =
1468 (void *)rx_free->pdu;
1469
1470 if (0) {
1471
1472 #if defined(CONFIG_BT_PERIPHERAL)
1473 } else if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1474 ll_rx_release(rx_free);
1475
1476 break;
1477 #endif /* CONFIG_BT_PERIPHERAL */
1478
1479 #if defined(CONFIG_BT_CENTRAL)
1480 } else if (cc->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
1481 ull_central_cleanup(rx_free);
1482
1483 #if defined(CONFIG_BT_CTLR_PRIVACY)
1484 #if defined(CONFIG_BT_BROADCASTER)
1485 if (!ull_adv_is_enabled_get(0))
1486 #endif /* CONFIG_BT_BROADCASTER */
1487 {
1488 ull_filter_adv_scan_state_cb(0);
1489 }
1490 #endif /* CONFIG_BT_CTLR_PRIVACY */
1491 break;
1492 #endif /* CONFIG_BT_CENTRAL */
1493
1494 } else {
1495 LL_ASSERT(!cc->status);
1496 }
1497 }
1498
1499 __fallthrough;
1500 case NODE_RX_TYPE_DC_PDU:
1501 #endif /* CONFIG_BT_CONN */
1502
1503 #if defined(CONFIG_BT_OBSERVER)
1504 case NODE_RX_TYPE_REPORT:
1505
1506 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1507 __fallthrough;
1508 case NODE_RX_TYPE_EXT_1M_REPORT:
1509 case NODE_RX_TYPE_EXT_2M_REPORT:
1510 case NODE_RX_TYPE_EXT_CODED_REPORT:
1511 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1512 case NODE_RX_TYPE_SYNC_REPORT:
1513 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1514 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1515 #endif /* CONFIG_BT_OBSERVER */
1516
1517 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1518 case NODE_RX_TYPE_SCAN_REQ:
1519 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1520
1521 #if defined(CONFIG_BT_CONN)
1522 case NODE_RX_TYPE_CONN_UPDATE:
1523 case NODE_RX_TYPE_ENC_REFRESH:
1524
1525 #if defined(CONFIG_BT_CTLR_LE_PING)
1526 case NODE_RX_TYPE_APTO:
1527 #endif /* CONFIG_BT_CTLR_LE_PING */
1528
1529 case NODE_RX_TYPE_CHAN_SEL_ALGO:
1530
1531 #if defined(CONFIG_BT_CTLR_PHY)
1532 case NODE_RX_TYPE_PHY_UPDATE:
1533 #endif /* CONFIG_BT_CTLR_PHY */
1534
1535 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1536 case NODE_RX_TYPE_RSSI:
1537 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1538 #endif /* CONFIG_BT_CONN */
1539
1540 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1541 case NODE_RX_TYPE_PROFILE:
1542 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1543
1544 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1545 case NODE_RX_TYPE_ADV_INDICATION:
1546 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1547
1548 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1549 case NODE_RX_TYPE_SCAN_INDICATION:
1550 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1551
1552 #if defined(CONFIG_BT_HCI_MESH_EXT)
1553 case NODE_RX_TYPE_MESH_ADV_CPLT:
1554 case NODE_RX_TYPE_MESH_REPORT:
1555 #endif /* CONFIG_BT_HCI_MESH_EXT */
1556
1557 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1558 case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1559 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1560
1561 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1562 case NODE_RX_TYPE_CIS_REQUEST:
1563 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1564
1565 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1566 case NODE_RX_TYPE_CIS_ESTABLISHED:
1567 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1568
1569 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1570 case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
1571 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1572
1573 #if defined(CONFIG_BT_CTLR_ISO)
1574 case NODE_RX_TYPE_ISO_PDU:
1575 #endif
1576
1577 /* Ensure that at least one 'case' statement is present for this
1578 * code block.
1579 */
1580 case NODE_RX_TYPE_NONE:
1581 LL_ASSERT(rx_free->hdr.type != NODE_RX_TYPE_NONE);
1582 ll_rx_link_quota_inc();
1583 ll_rx_release(rx_free);
1584 break;
1585
1586 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1587 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1588 case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
1589 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1590 case NODE_RX_TYPE_SYNC:
1591 {
1592 struct node_rx_sync *se =
1593 (void *)rx_free->pdu;
1594 uint8_t status = se->status;
1595
1596 /* Below status codes use node_rx_sync_estab, hence
1597 * release the node_rx memory and release sync context
1598 * if sync establishment failed.
1599 */
1600 if ((status == BT_HCI_ERR_SUCCESS) ||
1601 (status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE) ||
1602 (status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB)) {
1603 struct ll_sync_set *sync;
1604
1605 /* pick the sync context before node_rx
1606 * release.
1607 */
1608 sync = (void *)rx_free->rx_ftr.param;
1609
1610 ll_rx_release(rx_free);
1611
1612 ull_sync_setup_reset(sync);
1613
1614 if (status != BT_HCI_ERR_SUCCESS) {
1615 memq_link_t *link_sync_lost;
1616
1617 link_sync_lost =
1618 sync->node_rx_lost.rx.hdr.link;
1619 ll_rx_link_release(link_sync_lost);
1620
1621 ull_sync_release(sync);
1622 }
1623
1624 break;
1625 } else {
1626 LL_ASSERT(status == BT_HCI_ERR_OP_CANCELLED_BY_HOST);
1627
1628 /* Fall through and release sync context */
1629 }
1630 }
1631 /* Pass through */
1632
1633 case NODE_RX_TYPE_SYNC_LOST:
1634 {
1635 struct ll_sync_set *sync =
1636 (void *)rx_free->rx_ftr.param;
1637
1638 ull_sync_release(sync);
1639 }
1640 break;
1641
1642 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1643 case NODE_RX_TYPE_SYNC_ISO:
1644 {
1645 struct node_rx_sync_iso *se =
1646 (void *)rx_free->pdu;
1647
1648 if (!se->status) {
1649 ll_rx_release(rx_free);
1650
1651 break;
1652 }
1653 }
1654 /* Pass through */
1655
1656 case NODE_RX_TYPE_SYNC_ISO_LOST:
1657 {
1658 struct ll_sync_iso_set *sync_iso =
1659 (void *)rx_free->rx_ftr.param;
1660
1661 ull_sync_iso_stream_release(sync_iso);
1662 }
1663 break;
1664 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1665 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1666
1667 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
1668 defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
1669 case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
1670 case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
1671 case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
1672 {
1673 const uint8_t report_cnt = 1U;
1674
1675 ull_iq_report_link_inc_quota(report_cnt);
1676 ull_df_iq_report_mem_release(rx_free);
1677 ull_df_rx_iq_report_alloc(report_cnt);
1678 }
1679 break;
1680 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
1681
1682 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_CONN_ISO)
1683 case NODE_RX_TYPE_TERMINATE:
1684 {
1685 if (IS_ACL_HANDLE(rx_free->hdr.handle)) {
1686 struct ll_conn *conn;
1687 memq_link_t *link;
1688
1689 conn = ll_conn_get(rx_free->hdr.handle);
1690
1691 LL_ASSERT(!conn->lll.link_tx_free);
1692 link = memq_deinit(&conn->lll.memq_tx.head,
1693 &conn->lll.memq_tx.tail);
1694 LL_ASSERT(link);
1695 conn->lll.link_tx_free = link;
1696
1697 ll_conn_release(conn);
1698 } else if (IS_CIS_HANDLE(rx_free->hdr.handle)) {
1699 ll_rx_link_quota_inc();
1700 ll_rx_release(rx_free);
1701 }
1702 }
1703 break;
1704 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_CONN_ISO */
1705
1706 case NODE_RX_TYPE_EVENT_DONE:
1707 default:
1708 LL_ASSERT(0);
1709 break;
1710 }
1711 }
1712
1713 *node_rx = rx;
1714
1715 rx_replenish_all();
1716 }
1717
1718 static void ll_rx_link_quota_update(int8_t delta)
1719 {
1720 LL_ASSERT(delta <= 0 || mem_link_rx.quota_pdu < RX_CNT);
1721 mem_link_rx.quota_pdu += delta;
1722 }
1723
1724 static void ll_rx_link_quota_inc(void)
1725 {
1726 ll_rx_link_quota_update(1);
1727 }
1728
1729 static void ll_rx_link_quota_dec(void)
1730 {
1731 ll_rx_link_quota_update(-1);
1732 }
1733
1734 void *ll_rx_link_alloc(void)
1735 {
1736 return mem_acquire(&mem_link_rx.free);
1737 }
1738
1739 void ll_rx_link_release(memq_link_t *link)
1740 {
1741 mem_release(link, &mem_link_rx.free);
1742 }
1743
1744 void *ll_rx_alloc(void)
1745 {
1746 return mem_acquire(&mem_pdu_rx.free);
1747 }
1748
1749 void ll_rx_release(void *node_rx)
1750 {
1751 mem_release(node_rx, &mem_pdu_rx.free);
1752 }
1753
1754 void ll_rx_put(memq_link_t *link, void *rx)
1755 {
1756 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1757 struct node_rx_hdr *rx_hdr = rx;
1758
1759 /* Serialize Tx ack with Rx enqueue by storing reference to
1760 * last element index in Tx ack FIFO.
1761 */
1762 rx_hdr->ack_last = mfifo_fifo_tx_ack.l;
1763 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1764
1765 /* Enqueue the Rx object */
1766 memq_enqueue(link, rx, &memq_ll_rx.tail);
1767 }
1768
1769 /**
1770 * @brief Permit another loop in the controller thread (prio_recv_thread)
1771 * @details Execution context: ULL mayfly
1772 */
1773 void ll_rx_sched(void)
1774 {
1775 /* sem_recv references the same semaphore (sem_prio_recv)
1776 * in prio_recv_thread
1777 */
1778 k_sem_give(sem_recv);
1779 }
1780
1781 void ll_rx_put_sched(memq_link_t *link, void *rx)
1782 {
1783 ll_rx_put(link, rx);
1784 ll_rx_sched();
1785 }
1786
1787 #if defined(CONFIG_BT_CONN)
1788 void *ll_pdu_rx_alloc_peek(uint8_t count)
1789 {
1790 if (count > MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) {
1791 return NULL;
1792 }
1793
1794 return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
1795 }
1796
1797 void *ll_pdu_rx_alloc(void)
1798 {
1799 return MFIFO_DEQUEUE(ll_pdu_rx_free);
1800 }
1801 #endif /* CONFIG_BT_CONN */
1802
1803 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1804 void ll_tx_ack_put(uint16_t handle, struct node_tx *node_tx)
1805 {
1806 struct lll_tx *tx;
1807 uint8_t idx;
1808
1809 idx = MFIFO_ENQUEUE_GET(tx_ack, (void **)&tx);
1810 LL_ASSERT(tx);
1811
1812 tx->handle = handle;
1813 tx->node = node_tx;
1814
1815 MFIFO_ENQUEUE(tx_ack, idx);
1816 }
1817 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1818
1819 void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
1820 uint8_t * const ticker_id)
1821 {
1822 *instance_index = TICKER_INSTANCE_ID_CTLR;
1823 *ticker_id = (TICKER_NODES - FLASH_TICKER_NODES - COEX_TICKER_NODES);
1824 }
1825
1826 void ll_coex_ticker_id_get(uint8_t * const instance_index,
1827 uint8_t * const ticker_id)
1828 {
1829 *instance_index = TICKER_INSTANCE_ID_CTLR;
1830 *ticker_id = (TICKER_NODES - COEX_TICKER_NODES);
1831 }
1832
1833 void ll_radio_state_abort(void)
1834 {
1835 static memq_link_t link;
1836 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1837 uint32_t ret;
1838
1839 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1840 &mfy);
1841 LL_ASSERT(!ret);
1842 }
1843
1844 uint32_t ll_radio_state_is_idle(void)
1845 {
1846 return lll_radio_is_idle();
1847 }
1848
1849 void ull_ticker_status_give(uint32_t status, void *param)
1850 {
1851 *((uint32_t volatile *)param) = status;
1852
1853 k_sem_give(&sem_ticker_api_cb);
1854 }
1855
1856 /**
1857 * @brief Take the ticker API semaphore (if applicable) and wait for the
1858 * operation to complete.
1859 *
1860 * Waits for ticker operation to complete by taking ticker API semaphore,
1861 * unless the operation was executed inline due to same-priority caller/
1862 * callee id.
1863 *
1864 * In case of asynchronous ticker operation (caller priority !=
1865 * callee priority), the function grabs the semaphore and waits for
1866 * ull_ticker_status_give, which assigns the ret_cb variable and releases
1867 * the semaphore.
1868 *
1869 * In case of synchronous ticker operation, the result is already known at
1870 * entry, and semaphore is only taken if ret_cb has been updated. This is done
1871 * to balance take/give counts. If *ret_cb is still TICKER_STATUS_BUSY, but
1872 * ret is not, the ticker operation has failed early, and no callback will be
1873 * invoked. In this case the semaphore shall not be taken.
1874 *
1875 * @param ret Return value from ticker API call:
1876 * TICKER_STATUS_BUSY: Ticker operation is queued
1877 * TICKER_STATUS_SUCCESS: Operation completed OK
1878 * TICKER_STATUS_FAILURE: Operation failed
1879 *
1880 * @param ret_cb Pointer to user data passed to ticker operation
1881 * callback, which holds the operation result. Value
1882 * upon entry:
1883 * TICKER_STATUS_BUSY: Ticker has not yet called CB
1884 * TICKER_STATUS_SUCCESS: Operation completed OK via CB
1885 * TICKER_STATUS_FAILURE: Operation failed via CB
1886 *
1887 * NOTE: For correct operation, *ret_cb must be initialized
1888 * to TICKER_STATUS_BUSY before initiating the ticker API call.
1889 *
1890 * @return uint32_t Returns result of completed ticker operation
1891 */
1892 uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
1893 {
1894 if ((ret == TICKER_STATUS_BUSY) || (*ret_cb != TICKER_STATUS_BUSY)) {
1895 /* Operation is either pending or completed via callback
1896 * prior to this function call. Take the semaphore and wait,
1897 * or take it to balance take/give counting.
1898 */
1899 k_sem_take(&sem_ticker_api_cb, K_FOREVER);
1900 return *ret_cb;
1901 }
1902
1903 return ret;
1904 }
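/* Example (illustrative sketch): the calling pattern assumed by
 * ull_ticker_status_give()/ull_ticker_status_take(), as also used by
 * ull_ticker_stop_with_mark() below.
 *
 *	uint32_t volatile ret_cb = TICKER_STATUS_BUSY;
 *	uint32_t ret;
 *
 *	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
 *			  ticker_handle, ull_ticker_status_give,
 *			  (void *)&ret_cb);
 *	ret = ull_ticker_status_take(ret, &ret_cb);
 *	if (ret != TICKER_STATUS_SUCCESS) {
 *		// ticker operation failed or was rejected early
 *	}
 */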
1905
1906 void *ull_disable_mark(void *param)
1907 {
1908 return mark_set(&mark_disable, param);
1909 }
1910
1911 void *ull_disable_unmark(void *param)
1912 {
1913 return mark_unset(&mark_disable, param);
1914 }
1915
1916 void *ull_disable_mark_get(void)
1917 {
1918 return mark_get(mark_disable);
1919 }
1920
1921 /**
1922 * @brief Stops a specified ticker using the ull_disable_(un)mark functions.
1923 *
1924 * @param ticker_handle The handle of the ticker.
1925 * @param param The object to mark.
1926 * @param lll_disable Optional object when calling @ref ull_disable
1927 *
1928 * @return 0 if success, else ERRNO.
1929 */
1930 int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param,
1931 void *lll_disable)
1932 {
1933 uint32_t volatile ret_cb;
1934 uint32_t ret;
1935 void *mark;
1936 int err;
1937
1938 mark = ull_disable_mark(param);
1939 if (mark != param) {
1940 return -ENOLCK;
1941 }
1942
1943 ret_cb = TICKER_STATUS_BUSY;
1944 ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1945 ticker_handle, ull_ticker_status_give,
1946 (void *)&ret_cb);
1947 ret = ull_ticker_status_take(ret, &ret_cb);
1948 if (ret) {
1949 mark = ull_disable_unmark(param);
1950 if (mark != param) {
1951 return -ENOLCK;
1952 }
1953
1954 return -EALREADY;
1955 }
1956
1957 err = ull_disable(lll_disable);
1958
1959 mark = ull_disable_unmark(param);
1960 if (mark != param) {
1961 return -ENOLCK;
1962 }
1963
1964 if (err && (err != -EALREADY)) {
1965 return err;
1966 }
1967
1968 return 0;
1969 }
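/* Example (illustrative sketch, assuming an advertising set `adv` whose
 * ticker id is derived from TICKER_ID_ADV_BASE): a role disable path then
 * reduces to a single call, with the ULL context as the mark and its LLL
 * context handed to ull_disable().
 *
 *	err = ull_ticker_stop_with_mark(TICKER_ID_ADV_BASE + handle,
 *					adv, &adv->lll);
 *	if (err) {
 *		return BT_HCI_ERR_CMD_DISALLOWED;
 *	}
 */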
1970
1971 #if defined(CONFIG_BT_CONN)
1972 void *ull_update_mark(void *param)
1973 {
1974 return mark_set(&mark_update, param);
1975 }
1976
1977 void *ull_update_unmark(void *param)
1978 {
1979 return mark_unset(&mark_update, param);
1980 }
1981
1982 void *ull_update_mark_get(void)
1983 {
1984 return mark_get(mark_update);
1985 }
1986 #endif /* CONFIG_BT_CONN */
1987
1988 int ull_disable(void *lll)
1989 {
1990 static memq_link_t link;
1991 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1992 struct ull_hdr *hdr;
1993 struct k_sem sem;
1994 uint32_t ret;
1995
1996 hdr = HDR_LLL2ULL(lll);
1997 if (!ull_ref_get(hdr)) {
1998 return -EALREADY;
1999 }
2000 cpu_dmb(); /* Ensure synchronized data access */
2001
2002 k_sem_init(&sem, 0, 1);
2003
2004 hdr->disabled_param = &sem;
2005 hdr->disabled_cb = disabled_cb;
2006
2007 cpu_dmb(); /* Ensure synchronized data access */
2008
2009 /* ULL_HIGH can run after we have called `ull_ref_get` and it can
2010 * decrement the ref count. Hence, handle this race condition by
2011 * ensuring that `disabled_cb` has been set while the ref count is still
2012 * set.
2013 * No need to call `lll_disable` and take the semaphore thereafter if
2014 * reference count is zero.
2015 * If the `sem` is given when reference count was decremented, we do not
2016 * care.
2017 */
2018 if (!ull_ref_get(hdr)) {
2019 return -EALREADY;
2020 }
2021
2022 mfy.param = lll;
2023 ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0,
2024 &mfy);
2025 LL_ASSERT(!ret);
2026
2027 return k_sem_take(&sem, ULL_DISABLE_TIMEOUT);
2028 }
2029
2030 void *ull_pdu_rx_alloc_peek(uint8_t count)
2031 {
2032 if (count > MFIFO_AVAIL_COUNT_GET(pdu_rx_free)) {
2033 return NULL;
2034 }
2035
2036 return MFIFO_DEQUEUE_PEEK(pdu_rx_free);
2037 }
2038
2039 void *ull_pdu_rx_alloc_peek_iter(uint8_t *idx)
2040 {
2041 return *(void **)MFIFO_DEQUEUE_ITER_GET(pdu_rx_free, idx);
2042 }
2043
2044 void *ull_pdu_rx_alloc(void)
2045 {
2046 return MFIFO_DEQUEUE(pdu_rx_free);
2047 }
2048
2049 void ull_rx_put(memq_link_t *link, void *rx)
2050 {
2051 #if defined(CONFIG_BT_CONN)
2052 struct node_rx_hdr *rx_hdr = rx;
2053
2054 /* Serialize Tx ack with Rx enqueue by storing reference to
2055 * last element index in Tx ack FIFO.
2056 */
2057 rx_hdr->ack_last = ull_conn_ack_last_idx_get();
2058 #endif /* CONFIG_BT_CONN */
2059
2060 /* Enqueue the Rx object */
2061 memq_enqueue(link, rx, &memq_ull_rx.tail);
2062 }
2063
2064 void ull_rx_sched(void)
2065 {
2066 static memq_link_t link;
2067 static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2068
2069 /* Kick the ULL (using the mayfly, tailchain it) */
2070 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
2071 }
2072
2073 void ull_rx_put_sched(memq_link_t *link, void *rx)
2074 {
2075 ull_rx_put(link, rx);
2076 ull_rx_sched();
2077 }
2078
2079 struct lll_event *ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb,
2080 lll_abort_cb_t abort_cb,
2081 struct lll_prepare_param *prepare_param,
2082 lll_prepare_cb_t prepare_cb,
2083 uint8_t is_resume)
2084 {
2085 struct lll_event *e;
2086 uint8_t idx;
2087
2088 idx = MFIFO_ENQUEUE_GET(prep, (void **)&e);
2089 if (!e) {
2090 return NULL;
2091 }
2092
2093 memcpy(&e->prepare_param, prepare_param, sizeof(e->prepare_param));
2094 e->prepare_cb = prepare_cb;
2095 e->is_abort_cb = is_abort_cb;
2096 e->abort_cb = abort_cb;
2097 e->is_resume = is_resume;
2098 e->is_aborted = 0U;
2099
2100 MFIFO_ENQUEUE(prep, idx);
2101
2102 return e;
2103 }
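/* Example (illustrative sketch, with hypothetical role callbacks): a resume
 * request is queued into the prepare pipeline and later drained by
 * ull_prepare_dequeue() in ULL high priority context.
 *
 *	struct lll_prepare_param p = {
 *		.param = &role_lll,	// role's LLL context (assumed name)
 *	};
 *	struct lll_event *e;
 *
 *	e = ull_prepare_enqueue(role_is_abort_cb, role_abort_cb, &p,
 *				role_resume_cb, 1U);
 *	LL_ASSERT(e);
 */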
2104
2105 void *ull_prepare_dequeue_get(void)
2106 {
2107 return MFIFO_DEQUEUE_GET(prep);
2108 }
2109
2110 void *ull_prepare_dequeue_iter(uint8_t *idx)
2111 {
2112 return MFIFO_DEQUEUE_ITER_GET(prep, idx);
2113 }
2114
2115 void ull_prepare_dequeue(uint8_t caller_id)
2116 {
2117 void *param_normal_head = NULL;
2118 void *param_normal_next = NULL;
2119 void *param_resume_head = NULL;
2120 void *param_resume_next = NULL;
2121 struct lll_event *next;
2122 uint8_t loop;
2123
2124 /* Development assertion check to ensure the below loop processing
2125 * has a limit.
2126 *
2127 * Only 2 scanners and 1 advertiser (directed adv) get enqueued back:
2128 *
2129 * Already in queue max 7 (EVENT_PIPELINE_MAX):
2130 * - 2 continuous scan prepare in queue (1M and Coded PHY)
2131 * - 2 continuous scan resume in queue (1M and Coded PHY)
2132 * - 1 directed adv prepare
2133 * - 1 directed adv resume
2134 * - 1 any other role with time reservation
2135 *
2136 * The loop removes the duplicates (scan and advertiser) with is_aborted
2137 * flag set in 7 iterations:
2138 * - 1 scan prepare (1M)
2139 * - 1 scan prepare (Coded PHY)
2140 * - 1 directed adv prepare
2141 *
2142 * and has enqueued the following in these 7 iterations:
2143 * - 1 scan resume (1M)
2144 * - 1 scan resume (Coded PHY)
2145 * - 1 directed adv resume
2146 *
2147 * Hence, it should be (EVENT_PIPELINE_MAX + 3U) iterations max.
2148 */
2149 loop = (EVENT_PIPELINE_MAX + 3U);
2150
2151 next = ull_prepare_dequeue_get();
2152 while (next) {
2153 void *param = next->prepare_param.param;
2154 uint8_t is_aborted = next->is_aborted;
2155 uint8_t is_resume = next->is_resume;
2156
2157 /* Assert if we exceed iterations processing the prepare queue
2158 */
2159 LL_ASSERT(loop);
2160 loop--;
2161
2162 /* Let LLL invoke the `prepare` interface if the radio is not in active
2163 * use. Otherwise, enqueue at the end of the prepare pipeline queue.
2164 */
2165 if (!is_aborted) {
2166 static memq_link_t link;
2167 static struct mayfly mfy = {0, 0, &link, NULL,
2168 lll_resume};
2169 uint32_t ret;
2170
2171 mfy.param = next;
2172 ret = mayfly_enqueue(caller_id, TICKER_USER_ID_LLL, 0,
2173 &mfy);
2174 LL_ASSERT(!ret);
2175 }
2176
2177 MFIFO_DEQUEUE(prep);
2178
2179 /* Check for any more prepare elements in the queue */
2180 next = ull_prepare_dequeue_get();
2181 if (!next) {
2182 break;
2183 }
2184
2185 /* A valid prepare element has had its `prepare` invoked or was
2186 * enqueued back into the prepare pipeline.
2187 */
2188 if (!is_aborted) {
2189 /* If the prepare element was not a resume event, it either
2190 * uses the radio or was enqueued back into the prepare
2191 * pipeline with a preempt timeout set.
2192 *
2193 * Remember the first encountered and the next element
2194 * in the prepare pipeline so that we do not infinitely
2195 * loop through the resume events in prepare pipeline.
2196 */
2197 if (!is_resume) {
2198 if (!param_normal_head) {
2199 param_normal_head = param;
2200 } else if (!param_normal_next) {
2201 param_normal_next = param;
2202 }
2203 } else {
2204 if (!param_resume_head) {
2205 param_resume_head = param;
2206 } else if (!param_resume_next) {
2207 param_resume_next = param;
2208 }
2209 }
2210
2211 /* Stop traversing the prepare pipeline when we get
2212 * back to the first or next event from where we
2213 * initially started processing the prepare pipeline.
2214 */
2215 if (!next->is_aborted &&
2216 ((!next->is_resume &&
2217 ((next->prepare_param.param ==
2218 param_normal_head) ||
2219 (next->prepare_param.param ==
2220 param_normal_next))) ||
2221 (next->is_resume &&
2222 !param_normal_next &&
2223 ((next->prepare_param.param ==
2224 param_resume_head) ||
2225 (next->prepare_param.param ==
2226 param_resume_next))))) {
2227 break;
2228 }
2229 }
2230 }
2231 }
2232
2233 struct event_done_extra *ull_event_done_extra_get(void)
2234 {
2235 struct node_rx_event_done *evdone;
2236
2237 evdone = MFIFO_DEQUEUE_PEEK(done);
2238 if (!evdone) {
2239 return NULL;
2240 }
2241
2242 return &evdone->extra;
2243 }
2244
2245 struct event_done_extra *ull_done_extra_type_set(uint8_t type)
2246 {
2247 struct event_done_extra *extra;
2248
2249 extra = ull_event_done_extra_get();
2250 if (!extra) {
2251 return NULL;
2252 }
2253
2254 extra->type = type;
2255
2256 return extra;
2257 }
2258
2259 void *ull_event_done(void *param)
2260 {
2261 struct node_rx_event_done *evdone;
2262 memq_link_t *link;
2263
2264 /* Obtain new node that signals "Done of an RX-event".
2265 * Obtain this by dequeuing from the global 'mfifo_done' queue.
2266 * Note that 'mfifo_done' is a queue of pointers, not of
2267 * struct node_rx_event_done
2268 */
2269 evdone = MFIFO_DEQUEUE(done);
2270 if (!evdone) {
2271 /* Not fatal if we cannot obtain a node, though
2272 * we will lose the packets in the software stack.
2273 * If this happens during a Connection Update, it could cause LSTO
2274 */
2275 return NULL;
2276 }
2277
2278 link = evdone->hdr.link;
2279 evdone->hdr.link = NULL;
2280
2281 evdone->hdr.type = NODE_RX_TYPE_EVENT_DONE;
2282 evdone->param = param;
2283
2284 ull_rx_put_sched(link, evdone);
2285
2286 return evdone;
2287 }
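/* Example (illustrative sketch): a role's event-done path typically tags the
 * done-extra type and then signals completion; `ull_ctx` below stands for the
 * role's ULL context (the struct ull_hdr that rx_demux_event_done() expects
 * in done->param) and is an assumed name.
 *
 *	struct event_done_extra *extra;
 *
 *	extra = ull_done_extra_type_set(EVENT_DONE_EXTRA_TYPE_CONN);
 *	LL_ASSERT(extra);
 *
 *	(void)ull_event_done(ull_ctx);
 */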
2288
2289 #if defined(CONFIG_BT_PERIPHERAL) || defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2290 /**
2291 * @brief Extract timing from completed event
2292 *
2293 * @param[in] done Done event containing fresh timing information
2294 * @param[out] ticks_drift_plus Positive part of drift uncertainty window
2295 * @param[out] ticks_drift_minus Negative part of drift uncertainty window
2296 */
2297 void ull_drift_ticks_get(struct node_rx_event_done *done,
2298 uint32_t *ticks_drift_plus,
2299 uint32_t *ticks_drift_minus)
2300 {
2301 uint32_t start_to_address_expected_us;
2302 uint32_t start_to_address_actual_us;
2303 uint32_t window_widening_event_us;
2304 uint32_t preamble_to_addr_us;
2305
2306 start_to_address_actual_us =
2307 done->extra.drift.start_to_address_actual_us;
2308 window_widening_event_us =
2309 done->extra.drift.window_widening_event_us;
2310 preamble_to_addr_us =
2311 done->extra.drift.preamble_to_addr_us;
2312
2313 start_to_address_expected_us = EVENT_JITTER_US +
2314 EVENT_TICKER_RES_MARGIN_US +
2315 window_widening_event_us +
2316 preamble_to_addr_us;
2317
2318 if (start_to_address_actual_us <= start_to_address_expected_us) {
2319 *ticks_drift_plus =
2320 HAL_TICKER_US_TO_TICKS(window_widening_event_us);
2321 *ticks_drift_minus =
2322 HAL_TICKER_US_TO_TICKS((start_to_address_expected_us -
2323 start_to_address_actual_us));
2324 } else {
2325 *ticks_drift_plus =
2326 HAL_TICKER_US_TO_TICKS(start_to_address_actual_us);
2327 *ticks_drift_minus =
2328 HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US +
2329 EVENT_TICKER_RES_MARGIN_US +
2330 preamble_to_addr_us);
2331 }
2332 }
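/* Worked example (hypothetical numbers): assuming EVENT_JITTER_US plus
 * EVENT_TICKER_RES_MARGIN_US amounts to 46 us, window_widening_event_us is
 * 100 us and preamble_to_addr_us is 40 us, the expected start-to-address time
 * is 46 + 100 + 40 = 186 us. If the access address was actually received
 * 150 us after the anchor point, the packet arrived 36 us early, so
 * ticks_drift_plus covers the 100 us window widening and ticks_drift_minus
 * the 36 us early arrival.
 */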
2333 #endif /* CONFIG_BT_PERIPHERAL || CONFIG_BT_CTLR_SYNC_PERIODIC */
2334
2335 static inline int init_reset(void)
2336 {
2337 memq_link_t *link;
2338
2339 /* Initialize and allocate done pool */
2340 RXFIFO_INIT_ALLOC(done);
2341
2342 /* Initialize rx pool. */
2343 mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
2344 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE),
2345 &mem_pdu_rx.free);
2346
2347 /* Initialize rx link pool. */
2348 mem_init(mem_link_rx.pool, sizeof(memq_link_t),
2349 sizeof(mem_link_rx.pool) / sizeof(memq_link_t),
2350 &mem_link_rx.free);
2351
2352 /* Acquire a link to initialize ull rx memq */
2353 link = mem_acquire(&mem_link_rx.free);
2354 LL_ASSERT(link);
2355
2356 /* Initialize ull rx memq */
2357 MEMQ_INIT(ull_rx, link);
2358
2359 /* Acquire a link to initialize ll rx memq */
2360 link = mem_acquire(&mem_link_rx.free);
2361 LL_ASSERT(link);
2362
2363 /* Initialize ll rx memq */
2364 MEMQ_INIT(ll_rx, link);
2365
2366 /* Allocate rx free buffers */
2367 mem_link_rx.quota_pdu = RX_CNT;
2368 rx_replenish_all();
2369
2370 #if (defined(CONFIG_BT_BROADCASTER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
2371 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2372 defined(CONFIG_BT_CTLR_SYNC_PERIODIC) || \
2373 defined(CONFIG_BT_CONN)
2374 /* Initialize channel map */
2375 ull_chan_reset();
2376 #endif /* (CONFIG_BT_BROADCASTER && CONFIG_BT_CTLR_ADV_EXT) ||
2377 * CONFIG_BT_CTLR_ADV_PERIODIC ||
2378 * CONFIG_BT_CTLR_SYNC_PERIODIC ||
2379 * CONFIG_BT_CONN
2380 */
2381
2382 return 0;
2383 }
2384
2385 static void perform_lll_reset(void *param)
2386 {
2387 int err;
2388
2389 /* Reset LLL */
2390 err = lll_reset();
2391 LL_ASSERT(!err);
2392
2393 #if defined(CONFIG_BT_BROADCASTER)
2394 /* Reset adv state */
2395 err = lll_adv_reset();
2396 LL_ASSERT(!err);
2397 #endif /* CONFIG_BT_BROADCASTER */
2398
2399 #if defined(CONFIG_BT_OBSERVER)
2400 /* Reset scan state */
2401 err = lll_scan_reset();
2402 LL_ASSERT(!err);
2403 #endif /* CONFIG_BT_OBSERVER */
2404
2405 #if defined(CONFIG_BT_CONN)
2406 /* Reset conn role */
2407 err = lll_conn_reset();
2408 LL_ASSERT(!err);
2409 #endif /* CONFIG_BT_CONN */
2410
2411 #if defined(CONFIG_BT_CTLR_DF)
2412 err = lll_df_reset();
2413 LL_ASSERT(!err);
2414 #endif /* CONFIG_BT_CTLR_DF */
2415
2416 #if !defined(CONFIG_BT_CTLR_ZLI)
2417 k_sem_give(param);
2418 #endif /* !CONFIG_BT_CTLR_ZLI */
2419 }
2420
2421 static inline void *mark_set(void **m, void *param)
2422 {
2423 if (!*m) {
2424 *m = param;
2425 }
2426
2427 return *m;
2428 }
2429
2430 static inline void *mark_unset(void **m, void *param)
2431 {
2432 if (*m && *m == param) {
2433 *m = NULL;
2434
2435 return param;
2436 }
2437
2438 return NULL;
2439 }
2440
2441 static inline void *mark_get(void *m)
2442 {
2443 return m;
2444 }
2445
2446 static void rx_replenish(uint8_t max)
2447 {
2448 uint8_t idx;
2449
2450 if (max > mem_link_rx.quota_pdu) {
2451 max = mem_link_rx.quota_pdu;
2452 }
2453
2454 while (max && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
2455 memq_link_t *link;
2456 struct node_rx_hdr *rx;
2457
2458 link = mem_acquire(&mem_link_rx.free);
2459 if (!link) {
2460 return;
2461 }
2462
2463 rx = mem_acquire(&mem_pdu_rx.free);
2464 if (!rx) {
2465 ll_rx_link_release(link);
2466 return;
2467 }
2468
2469 rx->link = link;
2470
2471 MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);
2472
2473 ll_rx_link_quota_dec();
2474
2475 max--;
2476 }
2477
2478 #if defined(CONFIG_BT_CONN)
2479 if (!max) {
2480 return;
2481 }
2482
2483 /* Replenish the ULL to LL/HCI free Rx PDU queue after LLL to ULL free
2484 * Rx PDU queue has been filled.
2485 */
2486 while (mem_link_rx.quota_pdu &&
2487 MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
2488 memq_link_t *link;
2489 struct node_rx_hdr *rx;
2490
2491 link = mem_acquire(&mem_link_rx.free);
2492 if (!link) {
2493 return;
2494 }
2495
2496 rx = mem_acquire(&mem_pdu_rx.free);
2497 if (!rx) {
2498 ll_rx_link_release(link);
2499 return;
2500 }
2501
2502 link->mem = NULL;
2503 rx->link = link;
2504
2505 MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);
2506
2507 ll_rx_link_quota_dec();
2508 }
2509 #endif /* CONFIG_BT_CONN */
2510 }
2511
2512 static void rx_replenish_all(void)
2513 {
2514 rx_replenish(UINT8_MAX);
2515 }
2516
2517 #if defined(CONFIG_BT_CONN) || \
2518 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
2519 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2520 defined(CONFIG_BT_CTLR_ADV_ISO)
2521
2522 static void rx_replenish_one(void)
2523 {
2524 rx_replenish(1U);
2525 }
2526
2527 static void rx_release_replenish(struct node_rx_hdr *rx)
2528 {
2529 ll_rx_release(rx);
2530 rx_replenish_one();
2531 }
2532
2533 static void rx_link_dequeue_release_quota_inc(memq_link_t *link)
2534 {
2535 (void)memq_dequeue(memq_ll_rx.tail,
2536 &memq_ll_rx.head, NULL);
2537 ll_rx_link_release(link);
2538 ll_rx_link_quota_inc();
2539 }
2540 #endif /* CONFIG_BT_CONN ||
2541 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
2542 * CONFIG_BT_CTLR_ADV_PERIODIC ||
2543 * CONFIG_BT_CTLR_ADV_ISO
2544 */
2545
2546 static void rx_demux(void *param)
2547 {
2548 memq_link_t *link;
2549
2550 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2551 do {
2552 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2553 struct node_rx_hdr *rx;
2554
2555 link = memq_peek(memq_ull_rx.head, memq_ull_rx.tail,
2556 (void **)&rx);
2557 if (link) {
2558 #if defined(CONFIG_BT_CONN)
2559 struct node_tx *node_tx;
2560 memq_link_t *link_tx;
2561 uint16_t handle; /* Handle to Ack TX */
2562 #endif /* CONFIG_BT_CONN */
2563
2564 LL_ASSERT(rx);
2565
2566 #if defined(CONFIG_BT_CONN)
2567 link_tx = ull_conn_ack_by_last_peek(rx->ack_last,
2568 &handle, &node_tx);
2569 if (link_tx) {
2570 rx_demux_conn_tx_ack(rx->ack_last, handle,
2571 link_tx, node_tx);
2572 } else
2573 #endif /* CONFIG_BT_CONN */
2574 {
2575 rx_demux_rx(link, rx);
2576 }
2577
2578 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2579 rx_demux_yield();
2580 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2581
2582 #if defined(CONFIG_BT_CONN)
2583 } else {
2584 struct node_tx *node_tx;
2585 uint8_t ack_last;
2586 uint16_t handle;
2587
2588 link = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2589 if (link) {
2590 rx_demux_conn_tx_ack(ack_last, handle,
2591 link, node_tx);
2592
2593 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2594 rx_demux_yield();
2595 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2596
2597 }
2598 #endif /* CONFIG_BT_CONN */
2599 }
2600
2601 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2602 } while (link);
2603 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2604 }
2605
2606 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2607 static void rx_demux_yield(void)
2608 {
2609 static memq_link_t link;
2610 static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2611 struct node_rx_hdr *rx;
2612 memq_link_t *link_peek;
2613
2614 link_peek = memq_peek(memq_ull_rx.head, memq_ull_rx.tail, (void **)&rx);
2615 if (!link_peek) {
2616 #if defined(CONFIG_BT_CONN)
2617 struct node_tx *node_tx;
2618 uint8_t ack_last;
2619 uint16_t handle;
2620
2621 link_peek = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2622 if (!link_peek) {
2623 return;
2624 }
2625 #else /* !CONFIG_BT_CONN */
2626 return;
2627 #endif /* !CONFIG_BT_CONN */
2628 }
2629
2630 /* Kick the ULL (using the mayfly, tailchain it) */
2631 mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_HIGH, 1,
2632 &mfy);
2633 }
2634 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2635
2636 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
2637 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
2638 {
2639 struct lll_tx *tx;
2640 uint8_t cmplt;
2641 uint8_t next;
2642
2643 next = *first;
2644 tx = mfifo_dequeue_iter_get(mfifo_fifo_tx_ack.m, mfifo_tx_ack.s,
2645 mfifo_tx_ack.n, mfifo_fifo_tx_ack.f, last,
2646 &next);
2647 if (!tx) {
2648 return 0;
2649 }
2650
2651 *handle = tx->handle;
2652 cmplt = 0U;
2653 do {
2654 if (false) {
2655 #if defined(CONFIG_BT_CTLR_ADV_ISO) || \
2656 defined(CONFIG_BT_CTLR_CONN_ISO)
2657 } else if (IS_CIS_HANDLE(tx->handle) ||
2658 IS_ADV_ISO_HANDLE(tx->handle)) {
2659 struct node_tx_iso *tx_node;
2660 uint8_t sdu_fragments;
2661
2662 /* NOTE: tx_cmplt_get() is permitted to be called
2663 * multiple times before the tx_ack queue which is
2664 * associated with Rx queue is changed by the
2665 * dequeue of Rx node.
2666 *
2667 * Tx node is released early without waiting for
2668 * any dependency on Rx queue. Released Tx node
2669 * reference is overloaded to store the Tx
2670 * fragments count.
2671 *
2672 * A hack is used here that depends on the fact
2673 * that memory addresses have a value greater than
2674 * 0xFF, to determine whether a Tx node has been
2675 * released in a prior iteration of this function.
2676 */
2677
2678 /* We must count each SDU HCI fragment */
2679 tx_node = tx->node;
2680 if (IS_NODE_TX_PTR(tx_node)) {
2681 /* We count each SDU fragment completed
2682 * by this PDU.
2683 */
2684 sdu_fragments = tx_node->sdu_fragments;
2685
2686 /* Replace node reference with fragments
2687 * count
2688 */
2689 NODE_TX_FRAGMENTS_SET(tx->node, sdu_fragments);
2690
2691 /* Release the node as it is a reference and not
2692 * a fragments count.
2693 */
2694 ll_iso_link_tx_release(tx_node->link);
2695 ll_iso_tx_mem_release(tx_node);
2696 } else {
2697 /* Get SDU fragments count from the encoded
2698 * node reference value.
2699 */
2700 sdu_fragments = NODE_TX_FRAGMENTS_GET(tx_node);
2701 }
2702
2703 /* Accumulate the tx acknowledgements */
2704 cmplt += sdu_fragments;
2705
2706 goto next_ack;
2707 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2708
2709 #if defined(CONFIG_BT_CONN)
2710 } else {
2711 struct node_tx *tx_node;
2712 struct pdu_data *p;
2713
2714 /* NOTE: tx_cmplt_get() is permitted to be called
2715 * multiple times before the tx_ack queue which is
2716 * associated with Rx queue is changed by the
2717 * dequeue of Rx node.
2718 *
2719 * Tx node is released early without waiting for
2720 * any dependency on Rx queue. Released Tx node
2721 * reference is overloaded to store whether
2722 * packet with data or control was released.
2723 *
2724 * A hack is used here that depends on the fact
2725 * that memory addresses have a value greater than
2726 * 0xFF, to determine whether a Tx node has been
2727 * released in a prior iteration of this function.
2728 */
2729 tx_node = tx->node;
2730 p = (void *)tx_node->pdu;
2731 if (!tx_node ||
2732 (IS_NODE_TX_PTR(tx_node) &&
2733 (p->ll_id == PDU_DATA_LLID_DATA_START ||
2734 p->ll_id == PDU_DATA_LLID_DATA_CONTINUE)) ||
2735 (!IS_NODE_TX_PTR(tx_node) &&
2736 IS_NODE_TX_DATA(tx_node))) {
2737 /* data packet, hence count num cmplt */
2738 NODE_TX_DATA_SET(tx->node);
2739 cmplt++;
2740 } else {
2741 /* ctrl packet or flushed, hence do not count num
2742 * cmplt
2743 */
2744 NODE_TX_CTRL_SET(tx->node);
2745 }
2746
2747 if (IS_NODE_TX_PTR(tx_node)) {
2748 ll_tx_mem_release(tx_node);
2749 }
2750 #endif /* CONFIG_BT_CONN */
2751
2752 }
2753
2754 #if defined(CONFIG_BT_CTLR_ADV_ISO) || \
2755 defined(CONFIG_BT_CTLR_CONN_ISO)
2756 next_ack:
2757 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2758
2759 *first = next;
2760 tx = mfifo_dequeue_iter_get(mfifo_fifo_tx_ack.m, mfifo_tx_ack.s,
2761 mfifo_tx_ack.n, mfifo_fifo_tx_ack.f,
2762 last, &next);
2763 } while (tx && tx->handle == *handle);
2764
2765 return cmplt;
2766 }
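/* Illustration (assumed semantics of the pointer-overload "hack" described
 * above): the released node reference is replaced by a small integer, so a
 * later iteration can tell reference and count apart purely by value. The
 * macro below is a sketch of the idea, not the actual definition.
 *
 *	#define IS_NODE_TX_PTR(node)	((uintptr_t)(node) > UINT8_MAX)
 *
 *	if (IS_NODE_TX_PTR(tx->node)) {
 *		// still a real node: release it and store the count instead
 *	} else {
 *		// already released: the stored value is the count itself
 *	}
 */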
2767
2768 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
2769 memq_link_t *link,
2770 struct node_tx *node_tx)
2771 {
2772 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2773 do {
2774 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2775 /* Dequeue node */
2776 ull_conn_ack_dequeue();
2777
2778 /* Process Tx ack */
2779 ull_conn_tx_ack(handle, link, node_tx);
2780
2781 /* Release link mem */
2782 ull_conn_link_tx_release(link);
2783
2784 /* check for more rx ack */
2785 link = ull_conn_ack_by_last_peek(ack_last, &handle, &node_tx);
2786
2787 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2788 if (!link)
2789 #else /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2790 } while (link);
2791 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2792
2793 {
2794 /* trigger thread to call ll_rx_get() */
2795 ll_rx_sched();
2796 }
2797 }
2798 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
2799
2800 /**
2801 * @brief Dispatch rx objects
2802 * @details Rx objects are only peeked, not dequeued yet.
2803 * Execution context: ULL high priority Mayfly
2804 */
2805 static inline void rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx)
2806 {
2807 /* Demux Rx objects */
2808 switch (rx->type) {
2809 case NODE_RX_TYPE_EVENT_DONE:
2810 {
2811 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2812 rx_demux_event_done(link, (struct node_rx_event_done *)rx);
2813 }
2814 break;
2815
2816 #if defined(CONFIG_BT_OBSERVER)
2817 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2818 case NODE_RX_TYPE_EXT_1M_REPORT:
2819 case NODE_RX_TYPE_EXT_CODED_REPORT:
2820 case NODE_RX_TYPE_EXT_AUX_REPORT:
2821 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2822 case NODE_RX_TYPE_SYNC_REPORT:
2823 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2824 {
2825 struct pdu_adv *adv;
2826
2827 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2828
2829 adv = (void *)((struct node_rx_pdu *)rx)->pdu;
2830 if (adv->type != PDU_ADV_TYPE_EXT_IND) {
2831 ll_rx_put_sched(link, rx);
2832 break;
2833 }
2834
2835 ull_scan_aux_setup(link, (struct node_rx_pdu *)rx);
2836 }
2837 break;
2838
2839 case NODE_RX_TYPE_EXT_AUX_RELEASE:
2840 {
2841 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2842 ull_scan_aux_release(link, (struct node_rx_pdu *)rx);
2843 }
2844 break;
2845 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2846 case NODE_RX_TYPE_SYNC:
2847 {
2848 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2849 ull_sync_established_report(link, (struct node_rx_pdu *)rx);
2850 }
2851 break;
2852 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
2853 case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
2854 {
2855 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2856 ll_rx_put_sched(link, rx);
2857 }
2858 break;
2859 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
2860 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2861 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2862 #endif /* CONFIG_BT_OBSERVER */
2863
2864 #if defined(CONFIG_BT_CTLR_CONN_ISO)
2865 case NODE_RX_TYPE_CIS_ESTABLISHED:
2866 {
2867 struct ll_conn *conn;
2868
2869 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2870
2871 conn = ll_conn_get(rx->handle);
2872 if (ull_cp_cc_awaiting_established(conn)) {
2873 ull_cp_cc_established(conn, BT_HCI_ERR_SUCCESS);
2874 }
2875
2876 rx->type = NODE_RX_TYPE_RELEASE;
2877 ll_rx_put_sched(link, rx);
2878 }
2879 break;
2880 #endif /* CONFIG_BT_CTLR_CONN_ISO */
2881
2882 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
2883 defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
2884 case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
2885 case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
2886 case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
2887 case NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE:
2888 {
2889 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2890 ll_rx_put_sched(link, rx);
2891 }
2892 break;
2893 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
2894
2895 #if defined(CONFIG_BT_CONN)
2896 case NODE_RX_TYPE_CONNECTION:
2897 {
2898 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2899 ull_conn_setup(link, (struct node_rx_pdu *)rx);
2900 }
2901 break;
2902
2903 case NODE_RX_TYPE_DC_PDU:
2904 {
2905 ull_conn_rx(link, (struct node_rx_pdu **)&rx);
2906
2907 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2908
2909 /* Only schedule node if not marked as retain by LLCP */
2910 if (rx && rx->type != NODE_RX_TYPE_RETAIN) {
2911 ll_rx_put_sched(link, rx);
2912 }
2913 }
2914 break;
2915
2916 case NODE_RX_TYPE_TERMINATE:
2917 #endif /* CONFIG_BT_CONN */
2918
2919 #if defined(CONFIG_BT_OBSERVER) || \
2920 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2921 defined(CONFIG_BT_CTLR_BROADCAST_ISO) || \
2922 defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
2923 defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
2924 defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
2925 defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
2926 defined(CONFIG_BT_CONN)
2927
2928 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
2929 case NODE_RX_TYPE_SYNC_CHM_COMPLETE:
2930 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
2931
2932 #if defined(CONFIG_BT_CTLR_ADV_ISO)
2933 case NODE_RX_TYPE_BIG_CHM_COMPLETE:
2934 case NODE_RX_TYPE_BIG_TERMINATE:
2935 #endif /* CONFIG_BT_CTLR_ADV_ISO */
2936
2937 #if defined(CONFIG_BT_OBSERVER)
2938 case NODE_RX_TYPE_REPORT:
2939 #endif /* CONFIG_BT_OBSERVER */
2940
2941 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
2942 case NODE_RX_TYPE_SCAN_REQ:
2943 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
2944
2945 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
2946 case NODE_RX_TYPE_PROFILE:
2947 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
2948
2949 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
2950 case NODE_RX_TYPE_ADV_INDICATION:
2951 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
2952
2953 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
2954 case NODE_RX_TYPE_SCAN_INDICATION:
2955 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
2956
2957 case NODE_RX_TYPE_RELEASE:
2958 {
2959 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2960 ll_rx_put_sched(link, rx);
2961 }
2962 break;
2963 #endif /* CONFIG_BT_OBSERVER ||
2964 * CONFIG_BT_CTLR_ADV_PERIODIC ||
2965 * CONFIG_BT_CTLR_BROADCAST_ISO ||
2966 * CONFIG_BT_CTLR_SCAN_REQ_NOTIFY ||
2967 * CONFIG_BT_CTLR_PROFILE_ISR ||
2968 * CONFIG_BT_CTLR_ADV_INDICATION ||
2969 * CONFIG_BT_CTLR_SCAN_INDICATION ||
2970 * CONFIG_BT_CONN
2971 */
2972
2973 default:
2974 {
2975 #if defined(CONFIG_BT_CTLR_USER_EXT)
2976 /* Try proprietary demuxing */
2977 rx_demux_rx_proprietary(link, rx, memq_ull_rx.tail,
2978 &memq_ull_rx.head);
2979 #else
2980 LL_ASSERT(0);
2981 #endif /* CONFIG_BT_CTLR_USER_EXT */
2982 }
2983 break;
2984 }
2985 }
2986
2987 static inline void rx_demux_event_done(memq_link_t *link,
2988 struct node_rx_event_done *done)
2989 {
2990 struct ull_hdr *ull_hdr;
2991 void *release;
2992
2993 /* Decrement prepare reference if ULL will not resume */
2994 ull_hdr = done->param;
2995 if (ull_hdr) {
2996 LL_ASSERT(ull_ref_get(ull_hdr));
2997 ull_ref_dec(ull_hdr);
2998 }
2999
3000 /* Process role dependent event done */
3001 switch (done->extra.type) {
3002 #if defined(CONFIG_BT_CONN)
3003 case EVENT_DONE_EXTRA_TYPE_CONN:
3004 ull_conn_done(done);
3005 break;
3006 #endif /* CONFIG_BT_CONN */
3007
3008 #if defined(CONFIG_BT_BROADCASTER)
3009 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
3010 defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
3011 case EVENT_DONE_EXTRA_TYPE_ADV:
3012 ull_adv_done(done);
3013 break;
3014
3015 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3016 case EVENT_DONE_EXTRA_TYPE_ADV_AUX:
3017 ull_adv_aux_done(done);
3018 break;
3019
3020 #if defined(CONFIG_BT_CTLR_ADV_ISO)
3021 case EVENT_DONE_EXTRA_TYPE_ADV_ISO_COMPLETE:
3022 ull_adv_iso_done_complete(done);
3023 break;
3024
3025 case EVENT_DONE_EXTRA_TYPE_ADV_ISO_TERMINATE:
3026 ull_adv_iso_done_terminate(done);
3027 break;
3028 #endif /* CONFIG_BT_CTLR_ADV_ISO */
3029 #endif /* CONFIG_BT_CTLR_ADV_EXT */
3030 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
3031 #endif /* CONFIG_BT_BROADCASTER */
3032
3033 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3034 #if defined(CONFIG_BT_OBSERVER)
3035 case EVENT_DONE_EXTRA_TYPE_SCAN:
3036 ull_scan_done(done);
3037 break;
3038
3039 case EVENT_DONE_EXTRA_TYPE_SCAN_AUX:
3040 ull_scan_aux_done(done);
3041 break;
3042
3043 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
3044 case EVENT_DONE_EXTRA_TYPE_SYNC:
3045 ull_sync_done(done);
3046 break;
3047
3048 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
3049 case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_ESTAB:
3050 ull_sync_iso_estab_done(done);
3051 break;
3052
3053 case EVENT_DONE_EXTRA_TYPE_SYNC_ISO:
3054 ull_sync_iso_done(done);
3055 break;
3056
3057 case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_TERMINATE:
3058 ull_sync_iso_done_terminate(done);
3059 break;
3060 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
3061 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
3062 #endif /* CONFIG_BT_OBSERVER */
3063 #endif /* CONFIG_BT_CTLR_ADV_EXT */
3064
3065 #if defined(CONFIG_BT_CTLR_CONN_ISO)
3066 case EVENT_DONE_EXTRA_TYPE_CIS:
3067 ull_conn_iso_done(done);
3068 break;
3069 #endif /* CONFIG_BT_CTLR_CONN_ISO */
3070
3071 #if defined(CONFIG_BT_CTLR_USER_EXT)
3072 case EVENT_DONE_EXTRA_TYPE_USER_START
3073 ... EVENT_DONE_EXTRA_TYPE_USER_END:
3074 ull_proprietary_done(done);
3075 break;
3076 #endif /* CONFIG_BT_CTLR_USER_EXT */
3077
3078 case EVENT_DONE_EXTRA_TYPE_NONE:
3079 /* ignore */
3080 break;
3081
3082 default:
3083 LL_ASSERT(0);
3084 break;
3085 }
3086
3087 /* Release done */
3088 done->extra.type = 0U;
3089 release = RXFIFO_RELEASE(done, link, done);
3090 LL_ASSERT(release == done);
3091
3092 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
3093 /* dequeue prepare pipeline */
3094 ull_prepare_dequeue(TICKER_USER_ID_ULL_HIGH);
3095
3096 /* LLL done synchronize count */
3097 lll_done_ull_inc();
3098 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
3099
3100 /* If disable initiated, signal the semaphore */
3101 if (ull_hdr && !ull_ref_get(ull_hdr) && ull_hdr->disabled_cb) {
3102 ull_hdr->disabled_cb(ull_hdr->disabled_param);
3103 }
3104 }
3105
3106 static void disabled_cb(void *param)
3107 {
3108 k_sem_give(param);
3109 }
3110
3111 /**
3112 * @brief Support function for RXFIFO_ALLOC macro
3113 * @details This function allocates up to 'max' number of MFIFO elements by
3114 * enqueuing pointers to memory elements with associated memq links.
3115 */
3116 void ull_rxfifo_alloc(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
3117 void *mem_free, void *link_free, uint8_t max)
3118 {
3119 uint8_t idx;
3120
3121 while ((max--) && mfifo_enqueue_idx_get(n, f, *l, &idx)) {
3122 memq_link_t *link;
3123 struct node_rx_hdr *rx;
3124
3125 link = mem_acquire(link_free);
3126 if (!link) {
3127 break;
3128 }
3129
3130 rx = mem_acquire(mem_free);
3131 if (!rx) {
3132 mem_release(link, link_free);
3133 break;
3134 }
3135
3136 link->mem = NULL;
3137 rx->link = link;
3138
3139 mfifo_by_idx_enqueue(m, s, idx, rx, l);
3140 }
3141 }
3142
3143 /**
3144 * @brief Support function for RXFIFO_RELEASE macro
3145 * @details This function releases a node by returning it to the FIFO.
3146 */
3147 void *ull_rxfifo_release(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
3148 memq_link_t *link, struct node_rx_hdr *rx)
3149 {
3150 uint8_t idx;
3151
3152 if (!mfifo_enqueue_idx_get(n, f, *l, &idx)) {
3153 return NULL;
3154 }
3155
3156 rx->link = link;
3157
3158 mfifo_by_idx_enqueue(m, s, idx, rx, l);
3159
3160 return rx;
3161 }
3162
3163 #if defined(CONFIG_BT_CTLR_ISO) || \
3164 defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) || \
3165 defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
3166 /**
3167 * @brief Wraps a given time within the range 0 to ULL_TIME_WRAPPING_POINT_US
3168 * @param time_now_us Current time value
3169 * @param time_diff_us Time difference (signed)
3170 * @return Wrapped time after applying the difference
3171 */
3172 uint32_t ull_get_wrapped_time_us(uint32_t time_now_us, int32_t time_diff_us)
3173 {
3174 LL_ASSERT(time_now_us <= ULL_TIME_WRAPPING_POINT_US);
3175
3176 uint32_t result = ((uint64_t)time_now_us + ULL_TIME_SPAN_FULL_US + time_diff_us) %
3177 ((uint64_t)ULL_TIME_SPAN_FULL_US);
3178
3179 return result;
3180 }
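/* Worked example (hypothetical span): if ULL_TIME_SPAN_FULL_US were
 * 0x10000000 us, then ull_get_wrapped_time_us(10, -100) evaluates to
 * (10 + 0x10000000 - 100) % 0x10000000 = 0x0FFFFFA6 us, i.e. a negative
 * difference wraps back below the wrapping point instead of underflowing.
 */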
3181 #endif /* CONFIG_BT_CTLR_ISO ||
3182 * CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER ||
3183 * CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER
3184 */
3185