1 /*
2 * Copyright (c) 2017-2021 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stddef.h>
8 #include <stdbool.h>
9 #include <errno.h>
10
11 #include <zephyr/kernel.h>
12 #include <soc.h>
13 #include <zephyr/device.h>
14 #include <zephyr/drivers/entropy.h>
15 #include <zephyr/bluetooth/hci_types.h>
16
17 #include "hal/cpu.h"
18 #include "hal/ecb.h"
19 #include "hal/ccm.h"
20 #include "hal/cntr.h"
21 #include "hal/ticker.h"
22
23 #include "util/util.h"
24 #include "util/mem.h"
25 #include "util/mfifo.h"
26 #include "util/memq.h"
27 #include "util/mayfly.h"
28 #include "util/dbuf.h"
29
30 #include "ticker/ticker.h"
31
32 #include "pdu_df.h"
33 #include "lll/pdu_vendor.h"
34 #include "pdu.h"
35
36 #include "lll.h"
37 #include "lll/lll_vendor.h"
38 #include "lll/lll_adv_types.h"
39 #include "lll_adv.h"
40 #include "lll/lll_adv_pdu.h"
41 #include "lll_chan.h"
42 #include "lll_scan.h"
43 #include "lll/lll_df_types.h"
44 #include "lll_sync.h"
45 #include "lll_sync_iso.h"
46 #include "lll_iso_tx.h"
47 #include "lll_conn.h"
48 #include "lll_conn_iso.h"
49 #include "lll_df.h"
50
51 #include "ull_adv_types.h"
52 #include "ull_scan_types.h"
53 #include "ull_sync_types.h"
54 #include "ll_sw/ull_tx_queue.h"
55 #include "ull_conn_types.h"
56 #include "ull_filter.h"
57 #include "ull_df_types.h"
58 #include "ull_df_internal.h"
59
60 #if defined(CONFIG_BT_CTLR_USER_EXT)
61 #include "ull_vendor.h"
62 #endif /* CONFIG_BT_CTLR_USER_EXT */
63
64 #include "isoal.h"
65 #include "ll_feat_internal.h"
66 #include "ull_internal.h"
67 #include "ull_chan_internal.h"
68 #include "ull_iso_internal.h"
69 #include "ull_adv_internal.h"
70 #include "ull_scan_internal.h"
71 #include "ull_sync_internal.h"
72 #include "ull_sync_iso_internal.h"
73 #include "ull_central_internal.h"
74 #include "ull_iso_types.h"
75 #include "ull_conn_internal.h"
76 #include "ull_conn_iso_types.h"
77 #include "ull_central_iso_internal.h"
78 #include "ull_llcp_internal.h"
79 #include "ull_llcp.h"
80
81 #include "ull_conn_iso_internal.h"
82 #include "ull_peripheral_iso_internal.h"
83
84 #include "ll.h"
85 #include "ll_feat.h"
86 #include "ll_test.h"
87 #include "ll_settings.h"
88
89 #include "hal/debug.h"
90
91 #if defined(CONFIG_BT_BROADCASTER)
92 #define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1)
93 #if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
94 #define BT_ADV_AUX_TICKER_NODES ((TICKER_ID_ADV_AUX_LAST) - \
95 (TICKER_ID_ADV_AUX_BASE) + 1)
96 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
97 #define BT_ADV_SYNC_TICKER_NODES ((TICKER_ID_ADV_SYNC_LAST) - \
98 (TICKER_ID_ADV_SYNC_BASE) + 1)
99 #if defined(CONFIG_BT_CTLR_ADV_ISO)
100 #define BT_ADV_ISO_TICKER_NODES ((TICKER_ID_ADV_ISO_LAST) - \
101 (TICKER_ID_ADV_ISO_BASE) + 1)
102 #else /* !CONFIG_BT_CTLR_ADV_ISO */
103 #define BT_ADV_ISO_TICKER_NODES 0
104 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
105 #else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
106 #define BT_ADV_SYNC_TICKER_NODES 0
107 #define BT_ADV_ISO_TICKER_NODES 0
108 #endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
109 #else /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
110 #define BT_ADV_AUX_TICKER_NODES 0
111 #define BT_ADV_SYNC_TICKER_NODES 0
112 #define BT_ADV_ISO_TICKER_NODES 0
113 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
114 #else /* !CONFIG_BT_BROADCASTER */
115 #define BT_ADV_TICKER_NODES 0
116 #define BT_ADV_AUX_TICKER_NODES 0
117 #define BT_ADV_SYNC_TICKER_NODES 0
118 #define BT_ADV_ISO_TICKER_NODES 0
119 #endif /* !CONFIG_BT_BROADCASTER */
120
121 #if defined(CONFIG_BT_OBSERVER)
122 #define BT_SCAN_TICKER_NODES ((TICKER_ID_SCAN_LAST) - (TICKER_ID_SCAN_STOP) + 1)
123 #if defined(CONFIG_BT_CTLR_ADV_EXT)
124 #if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
125 #define BT_SCAN_AUX_TICKER_NODES 1
126 #else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
127 #define BT_SCAN_AUX_TICKER_NODES ((TICKER_ID_SCAN_AUX_LAST) - \
128 (TICKER_ID_SCAN_AUX_BASE) + 1)
129 #endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
130 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
131 #define BT_SCAN_SYNC_TICKER_NODES ((TICKER_ID_SCAN_SYNC_LAST) - \
132 (TICKER_ID_SCAN_SYNC_BASE) + 1)
133 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
134 #define BT_SCAN_SYNC_ISO_TICKER_NODES ((TICKER_ID_SCAN_SYNC_ISO_LAST) - \
135 (TICKER_ID_SCAN_SYNC_ISO_BASE) + 1 + \
136 (TICKER_ID_SCAN_SYNC_ISO_RESUME_LAST) - \
137 (TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE) + 1)
138 #else /* !CONFIG_BT_CTLR_SYNC_ISO */
139 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
140 #endif /* !CONFIG_BT_CTLR_SYNC_ISO */
141 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
142 #define BT_SCAN_SYNC_TICKER_NODES 0
143 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
144 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
145 #else /* !CONFIG_BT_CTLR_ADV_EXT */
146 #define BT_SCAN_AUX_TICKER_NODES 0
147 #define BT_SCAN_SYNC_TICKER_NODES 0
148 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
149 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
150 #else
151 #define BT_SCAN_TICKER_NODES 0
152 #define BT_SCAN_AUX_TICKER_NODES 0
153 #define BT_SCAN_SYNC_TICKER_NODES 0
154 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
155 #endif
156
157 #if defined(CONFIG_BT_CONN)
158 #define BT_CONN_TICKER_NODES ((TICKER_ID_CONN_LAST) - (TICKER_ID_CONN_BASE) + 1)
159 #else
160 #define BT_CONN_TICKER_NODES 0
161 #endif
162
163 #if defined(CONFIG_BT_CTLR_CONN_ISO)
164 #define BT_CIG_TICKER_NODES ((TICKER_ID_CONN_ISO_LAST) - \
165 (TICKER_ID_CONN_ISO_BASE) + 1 + \
166 (TICKER_ID_CONN_ISO_RESUME_LAST) - \
167 (TICKER_ID_CONN_ISO_RESUME_BASE) + 1)
168
169 #else
170 #define BT_CIG_TICKER_NODES 0
171 #endif
172
173 #if defined(CONFIG_BT_CTLR_USER_EXT)
174 #define USER_TICKER_NODES CONFIG_BT_CTLR_USER_TICKER_ID_RANGE
175 #else
176 #define USER_TICKER_NODES 0
177 #endif
178
179
180 #if defined(CONFIG_BT_CTLR_COEX_TICKER)
181 #define COEX_TICKER_NODES 1
182 /* No. of tickers reserved for coex drivers */
183 #else
184 #define COEX_TICKER_NODES 0
185 #endif
186
187
188 #if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
189 #define FLASH_TICKER_NODES 2 /* No. of tickers reserved for flash
190 * driver
191 */
192 #define TICKER_USER_ULL_HIGH_FLASH_OPS 1 /* No. of additional ticker ULL_HIGH
193 * context operations
194 */
195 #define TICKER_USER_THREAD_FLASH_OPS 1 /* No. of additional ticker thread
196 * context operations
197 */
198 #else
199 #define FLASH_TICKER_NODES 0
200 #define TICKER_USER_ULL_HIGH_FLASH_OPS 0
201 #define TICKER_USER_THREAD_FLASH_OPS 0
202 #endif
203
204 /* Define ticker nodes */
205 /* NOTE: FLASH_TICKER_NODES shall be after Link Layer's list of ticker id
206 * allocations; refer to ll_timeslice_ticker_id_get() for how the ticker id
207 * used by the flash driver is returned.
208 */
209 #define TICKER_NODES (TICKER_ID_ULL_BASE + \
210 BT_ADV_TICKER_NODES + \
211 BT_ADV_AUX_TICKER_NODES + \
212 BT_ADV_SYNC_TICKER_NODES + \
213 BT_ADV_ISO_TICKER_NODES + \
214 BT_SCAN_TICKER_NODES + \
215 BT_SCAN_AUX_TICKER_NODES + \
216 BT_SCAN_SYNC_TICKER_NODES + \
217 BT_SCAN_SYNC_ISO_TICKER_NODES + \
218 BT_CONN_TICKER_NODES + \
219 BT_CIG_TICKER_NODES + \
220 USER_TICKER_NODES + \
221 FLASH_TICKER_NODES + \
222 COEX_TICKER_NODES)
223
224 /* When both central and peripheral roles are supported, connectable
225 * advertising and the initiator each need one Rx node to generate a
226 * connection complete event, hence the count is set conditionally.
227 */
228 #if defined(CONFIG_BT_MAX_CONN)
229 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
230 #define BT_CTLR_MAX_CONNECTABLE (1U + MIN(((CONFIG_BT_MAX_CONN) - 1U), \
231 (BT_CTLR_ADV_SET)))
232 #else
233 #define BT_CTLR_MAX_CONNECTABLE MAX(1U, (BT_CTLR_ADV_SET))
234 #endif
235 #define BT_CTLR_MAX_CONN CONFIG_BT_MAX_CONN
236 #else
237 #define BT_CTLR_MAX_CONNECTABLE 0
238 #define BT_CTLR_MAX_CONN 0
239 #endif
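
/* Editor's illustration (hypothetical Kconfig values): with CONFIG_BT_MAX_CONN
 * set to 3, both CONFIG_BT_CENTRAL and CONFIG_BT_PERIPHERAL enabled, and
 * BT_CTLR_ADV_SET equal to 1, the above evaluates to
 * BT_CTLR_MAX_CONNECTABLE = 1 + MIN(3 - 1, 1) = 2, i.e. one Rx node for the
 * connectable advertiser and one for the initiator.
 */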
240
241 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
242 #if defined(CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX)
243 /* Note: Need node for PDU and CTE sample */
244 #if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
245 #define BT_CTLR_ADV_EXT_RX_CNT (MIN(CONFIG_BT_CTLR_SCAN_AUX_CHAIN_COUNT, \
246 CONFIG_BT_PER_ADV_SYNC_MAX) * \
247 CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
248 #else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
249 #define BT_CTLR_ADV_EXT_RX_CNT (CONFIG_BT_CTLR_SCAN_AUX_SET * \
250 CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
251 #endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
252 #else /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
253 #define BT_CTLR_ADV_EXT_RX_CNT 1
254 #endif /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
255 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
256 #define BT_CTLR_ADV_EXT_RX_CNT 0
257 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
258
259 #if !defined(TICKER_USER_LLL_VENDOR_OPS)
260 #define TICKER_USER_LLL_VENDOR_OPS 0
261 #endif /* TICKER_USER_LLL_VENDOR_OPS */
262
263 #if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
264 #define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
265 #endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
266
267 #if !defined(TICKER_USER_ULL_LOW_VENDOR_OPS)
268 #define TICKER_USER_ULL_LOW_VENDOR_OPS 0
269 #endif /* TICKER_USER_ULL_LOW_VENDOR_OPS */
270
271 #if !defined(TICKER_USER_THREAD_VENDOR_OPS)
272 #define TICKER_USER_THREAD_VENDOR_OPS 0
273 #endif /* TICKER_USER_THREAD_VENDOR_OPS */
274
275 /* Define ticker user operations */
276 #if defined(CONFIG_BT_CTLR_LOW_LAT) && \
277 (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
278 /* NOTE: When the ticker job is disabled inside radio events, all advertising,
279 * scanning, and peripheral latency-cancel ticker operations will be deferred,
280 * requiring an increased ticker thread context operation queue count.
281 */
282 #define TICKER_USER_THREAD_OPS (BT_CTLR_ADV_SET + BT_CTLR_SCAN_SET + \
283 BT_CTLR_MAX_CONN + \
284 TICKER_USER_THREAD_VENDOR_OPS + \
285 TICKER_USER_THREAD_FLASH_OPS + \
286 1)
287 #else /* !CONFIG_BT_CTLR_LOW_LAT */
288 /* NOTE: As ticker job is not disabled inside radio events, no need for extra
289 * thread operations queue element for flash driver.
290 */
291 #define TICKER_USER_THREAD_OPS (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
292 #endif /* !CONFIG_BT_CTLR_LOW_LAT */
293
294 #define TICKER_USER_ULL_LOW_OPS (1 + TICKER_USER_ULL_LOW_VENDOR_OPS + 1)
295
296 /* NOTE: Extended Advertising needs one extra ticker operation being enqueued
297 * for scheduling the auxiliary PDU reception while there can already
298 * be three other operations being enqueued.
299 *
300 * This value also covers the case where an initiator with 1M and Coded PHY
301 * scan window is stopping the two scan tickers, stopping one scan stop
302 * ticker and starting one new ticker for establishing an ACL connection.
303 */
304 #if defined(CONFIG_BT_CTLR_ADV_EXT)
305 #define TICKER_USER_ULL_HIGH_OPS (4 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
306 TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
307 #else /* !CONFIG_BT_CTLR_ADV_EXT */
308 #define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
309 TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
310 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
311
312 #define TICKER_USER_LLL_OPS (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
313
314 #define TICKER_USER_OPS (TICKER_USER_LLL_OPS + \
315 TICKER_USER_ULL_HIGH_OPS + \
316 TICKER_USER_ULL_LOW_OPS + \
317 TICKER_USER_THREAD_OPS)
318
319 /* Memory for ticker nodes/instances */
320 static uint8_t MALIGN(4) ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
321
322 /* Memory for users/contexts operating on ticker module */
323 static uint8_t MALIGN(4) ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE];
324
325 /* Memory for user/context simultaneous API operations */
326 static uint8_t MALIGN(4) ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE];
327
328 /* Semaphore to wakeup thread on ticker API callback */
329 static struct k_sem sem_ticker_api_cb;
330
331 /* Semaphore to wakeup thread on Rx-ed objects */
332 static struct k_sem *sem_recv;
333
334 /* Declare prepare-event FIFO: mfifo_prep.
335 * Queue of struct lll_event.
336 */
337 static MFIFO_DEFINE(prep, sizeof(struct lll_event), EVENT_PIPELINE_MAX);
338
339 /* Declare done-event RXFIFO. This is a composite pool-backed MFIFO for rx_nodes.
340 * The declaration constructs the following data structures:
341 * - mfifo_done: FIFO with pointers to struct node_rx_event_done
342 * - mem_done: Backing data pool for struct node_rx_event_done elements
343 * - mem_link_done: Pool of memq_link_t elements
344 *
345 * Queue of pointers to struct node_rx_event_done.
346 * The actual backing behind these pointers is mem_done.
347 *
348 * When there are radio events with time reservations lower than the preemption
349 * timeout of 1.5 ms, the pipeline has to account for the maximum radio events
350 * that can be enqueued during the preempt timeout duration. All these enqueued
351 * events could be aborted in case of late scheduling, needing as many done
352 * event buffers.
353 *
354 * During continuous scanning, there can be 1 active radio event, 1 scan resume
355 * and 1 new scan prepare. If there is a peripheral prepare in addition, then
356 * due to late scheduling all of these may abort, needing 4 done buffers.
357 *
358 * If there are additional peripheral prepares enqueued, which are apart by
359 * their time reservations, these are not yet late and hence no more additional
360 * done buffers are needed.
361 *
362 * If Extended Scanning is supported, then an additional auxiliary scan event's
363 * prepare could be enqueued in the pipeline during the preemption duration.
364 *
365 * If Extended Scanning with Coded PHY is supported, then an additional 1 resume
366 * prepare could be enqueued in the pipeline during the preemption duration.
367 */
368 #if !defined(VENDOR_EVENT_DONE_MAX)
369 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
370 #if defined(CONFIG_BT_CTLR_PHY_CODED)
371 #define EVENT_DONE_MAX 6
372 #else /* !CONFIG_BT_CTLR_PHY_CODED */
373 #define EVENT_DONE_MAX 5
374 #endif /* !CONFIG_BT_CTLR_PHY_CODED */
375 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
376 #define EVENT_DONE_MAX 4
377 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
378 #else
379 #define EVENT_DONE_MAX VENDOR_EVENT_DONE_MAX
380 #endif
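
/* Editor's illustration, restating the derivation above: with Extended
 * Scanning on Coded PHY the worst case during the preempt timeout is
 * 1 active radio event + 1 scan resume + 1 new scan prepare +
 * 1 peripheral prepare + 1 auxiliary scan prepare + 1 additional resume
 * prepare = 6 done event buffers, matching EVENT_DONE_MAX above.
 */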
381
382 /* Maximum time allowed for completing synchronous LLL disabling via
383 * ull_disable.
384 */
385 #define ULL_DISABLE_TIMEOUT K_MSEC(1000)
386
387 static RXFIFO_DEFINE(done, sizeof(struct node_rx_event_done),
388 EVENT_DONE_MAX, 0U);
389
390 /* Minimum number of node rx for ULL to LL/HCI thread per connection.
391 * Increasing this in multiples of the maximum simultaneous connection count
392 * permits simultaneous parallel PHY Update or Connection Update procedures
393 * amongst active connections.
394 * A minimum of 2 node rx per connection is reserved because:
395 * Central and peripheral always use two new nodes for handling completion
396 * notifications, one for PHY Update complete and another for Data Length
397 * Update complete.
398 */
399 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY)
400 #define LL_PDU_RX_CNT (2 * (CONFIG_BT_CTLR_LLCP_CONN))
401 #elif defined(CONFIG_BT_CONN)
402 #define LL_PDU_RX_CNT (CONFIG_BT_CTLR_LLCP_CONN)
403 #else
404 #define LL_PDU_RX_CNT 0
405 #endif
406
407 /* No. of node rx for LLL to ULL.
408 * Reserve 3, 1 for adv data, 1 for scan response and 1 for empty PDU reception.
409 */
410 #define PDU_RX_CNT (3 + BT_CTLR_ADV_EXT_RX_CNT + CONFIG_BT_CTLR_RX_BUFFERS)
411
412 /* Part sum of LLL to ULL and ULL to LL/HCI thread node rx count.
413 * Will be used below in allocating node rx pool.
414 */
415 #define RX_CNT (PDU_RX_CNT + LL_PDU_RX_CNT)
416
417 static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
418
419 #if defined(CONFIG_BT_RX_USER_PDU_LEN)
420 #define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
421 #else
422 #define PDU_RX_USER_PDU_OCTETS_MAX 0
423 #endif
424
425 #define PDU_ADV_SIZE MAX(PDU_AC_LL_SIZE_MAX, \
426 (PDU_AC_LL_HEADER_SIZE + LL_EXT_OCTETS_RX_MAX))
427
428 #define PDU_DATA_SIZE MAX((PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX), \
429 (PDU_BIS_LL_HEADER_SIZE + LL_BIS_OCTETS_RX_MAX))
430
431 #define PDU_CTRL_SIZE (PDU_DC_LL_HEADER_SIZE + PDU_DC_CTRL_RX_SIZE_MAX)
432
433 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
434
435 #define PDU_RX_NODE_POOL_ELEMENT_SIZE MROUND(NODE_RX_HEADER_SIZE + \
436 MAX(MAX(PDU_ADV_SIZE, \
437 MAX(PDU_DATA_SIZE, \
438 PDU_CTRL_SIZE)), \
439 PDU_RX_USER_PDU_OCTETS_MAX))
440
441 #if defined(CONFIG_BT_CTLR_ADV_ISO_SET)
442 #define BT_CTLR_ADV_ISO_SET CONFIG_BT_CTLR_ADV_ISO_SET
443 #else
444 #define BT_CTLR_ADV_ISO_SET 0
445 #endif
446
447 #if defined(CONFIG_BT_PER_ADV_SYNC_MAX)
448 #define BT_CTLR_SCAN_SYNC_SET CONFIG_BT_PER_ADV_SYNC_MAX
449 #else
450 #define BT_CTLR_SCAN_SYNC_SET 0
451 #endif
452
453 #if defined(CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET)
454 #define BT_CTLR_SCAN_SYNC_ISO_SET CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET
455 #else
456 #define BT_CTLR_SCAN_SYNC_ISO_SET 0
457 #endif
458
459 #define PDU_RX_POOL_SIZE (PDU_RX_NODE_POOL_ELEMENT_SIZE * \
460 (RX_CNT + BT_CTLR_MAX_CONNECTABLE + \
461 BT_CTLR_ADV_SET + BT_CTLR_SCAN_SYNC_SET))
462
463 /* Macros for encoding number of completed packets.
464 *
465 * If the pointer is numerically below 0x100, the pointer is treated as either
466 * data or control PDU.
467 *
468 * NOTE: For any architecture which would map RAM below address 0x100, this will
469 * not work.
470 */
471 #define IS_NODE_TX_PTR(_p) ((uint32_t)(_p) & ~0xFFUL)
472 #define IS_NODE_TX_DATA(_p) ((uint32_t)(_p) == 0x01UL)
473 #define IS_NODE_TX_CTRL(_p) ((uint32_t)(_p) == 0x02UL)
474 #define NODE_TX_DATA_SET(_p) ((_p) = (void *)0x01UL)
475 #define NODE_TX_CTRL_SET(_p) ((_p) = (void *)0x02UL)
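
/* Editor's sketch (illustrative only; variable names are hypothetical): once a
 * data or control PDU has been released, its slot in the ack FIFO no longer
 * holds a real pointer but one of the markers above, e.g.:
 *
 *     void *p = node_tx;             // IS_NODE_TX_PTR(p) is true
 *     NODE_TX_DATA_SET(p);           // slot now encodes a completed data PDU
 *     if (!IS_NODE_TX_PTR(p) && IS_NODE_TX_DATA(p)) {
 *             // count one completed data packet towards HCI
 *     }
 */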
476
477 /* Macros for encoding number of ISO SDU fragments in the enqueued TX node
478 * pointer. This is needed to ensure only a single release of the node and link
479 * in tx_cmplt_get, even when called several times. At all times, the number of
480 * fragments must be available for HCI complete-counting.
481 *
482 * If the pointer is numerically below 0x100, the pointer is treated as a
483 * one-byte fragment count.
484 *
485 * NOTE: For any architecture which would map RAM below address 0x100, this will
486 * not work.
487 */
488 #define NODE_TX_FRAGMENTS_GET(_p) ((uint32_t)(_p) & 0xFFUL)
489 #define NODE_TX_FRAGMENTS_SET(_p, _cmplt) ((_p) = (void *)(uint32_t)(_cmplt))
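
/* Editor's sketch (illustrative only): the enqueued Tx node pointer is
 * overwritten with the SDU fragment count so that repeated calls to
 * tx_cmplt_get() keep reporting the count without releasing the node again:
 *
 *     NODE_TX_FRAGMENTS_SET(tx->node, sdu_fragments);
 *     ...
 *     cmplt += NODE_TX_FRAGMENTS_GET(tx->node);
 */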
490
491 static struct {
492 void *free;
493 uint8_t pool[PDU_RX_POOL_SIZE];
494 } mem_pdu_rx;
495
496 /* NOTE: Two memq_link structures are reserved in the case of periodic sync,
497 * one each for sync established and sync lost respectively. Whereas, in
498 * comparison, a connection uses the incoming Rx-ed CONNECT_IND PDU to
499 * piggy-back generation of the connection complete event, and hence only
500 * one link is reserved for the generation of the disconnection event (which
501 * can happen due to supervision timeout and other reasons that don't have an
502 * incoming Rx-ed PDU).
503 */
504 #define LINK_RX_POOL_SIZE \
505 (sizeof(memq_link_t) * \
506 (RX_CNT + 2 + BT_CTLR_MAX_CONN + BT_CTLR_ADV_SET + \
507 (BT_CTLR_ADV_ISO_SET * 2) + (BT_CTLR_SCAN_SYNC_SET * 2) + \
508 (BT_CTLR_SCAN_SYNC_ISO_SET * 2) + \
509 (IQ_REPORT_CNT)))
510 static struct {
511 uint16_t quota_pdu; /* Number of un-utilized buffers */
512
513 void *free;
514 uint8_t pool[LINK_RX_POOL_SIZE];
515 } mem_link_rx;
516
517 static MEMQ_DECLARE(ull_rx);
518 static MEMQ_DECLARE(ll_rx);
519
520 #if defined(CONFIG_BT_CTLR_ISO) || \
521 defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) || \
522 defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
523 #define ULL_TIME_WRAPPING_POINT_US (HAL_TICKER_TICKS_TO_US_64BIT(HAL_TICKER_CNTR_MASK))
524 #define ULL_TIME_SPAN_FULL_US (ULL_TIME_WRAPPING_POINT_US + 1)
525 #endif /* CONFIG_BT_CTLR_ISO ||
526 * CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER ||
527 * CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER
528 */
529
530 #if defined(CONFIG_BT_CONN)
531 static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
532
533 static void *mark_update;
534 #endif /* CONFIG_BT_CONN */
535
536 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
537 #if defined(CONFIG_BT_CONN)
538 #define BT_CTLR_TX_BUFFERS (CONFIG_BT_BUF_ACL_TX_COUNT + LLCP_TX_CTRL_BUF_COUNT)
539 #else
540 #define BT_CTLR_TX_BUFFERS 0
541 #endif /* CONFIG_BT_CONN */
542
543 static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
544 BT_CTLR_TX_BUFFERS + BT_CTLR_ISO_TX_BUFFERS);
545 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
546
547 static void *mark_disable;
548
549 static inline int init_reset(void);
550 static void perform_lll_reset(void *param);
551 static inline void *mark_set(void **m, void *param);
552 static inline void *mark_unset(void **m, void *param);
553 static inline void *mark_get(void *m);
554 static void rx_replenish_all(void);
555 #if defined(CONFIG_BT_CONN) || \
556 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
557 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
558 defined(CONFIG_BT_CTLR_ADV_ISO)
559 static void rx_release_replenish(struct node_rx_hdr *rx);
560 static void rx_link_dequeue_release_quota_inc(memq_link_t *link);
561 #endif /* CONFIG_BT_CONN ||
562 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
563 * CONFIG_BT_CTLR_ADV_PERIODIC ||
564 * CONFIG_BT_CTLR_ADV_ISO
565 */
566 static void rx_demux(void *param);
567 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
568 static void rx_demux_yield(void);
569 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
570 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
571 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last);
572 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
573 memq_link_t *link,
574 struct node_tx *node_tx);
575 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
576 static inline void rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx_hdr);
577 static inline void rx_demux_event_done(memq_link_t *link,
578 struct node_rx_event_done *done);
579 static void ll_rx_link_quota_inc(void);
580 static void ll_rx_link_quota_dec(void);
581 static void disabled_cb(void *param);
582
583 int ll_init(struct k_sem *sem_rx)
584 {
585 static bool mayfly_initialized;
586 int err;
587
588 /* Store the semaphore to be used to wakeup Thread context */
589 sem_recv = sem_rx;
590
591 /* Initialize counter */
592 /* TODO: Bind and use counter driver? */
593 cntr_init();
594
595 /* Initialize mayfly. It may be done only once due to mayfly design.
596 *
597 * On init, the mayfly memq head and tail are assigned a link instance
598 * that is used during the enqueue operation. A new link provided on
599 * enqueue is added as the tail and will be used in a future enqueue. On
600 * dequeue, the link that was used for storage of the job is released and
601 * stored back in the job it was related to. The job may store the initial
602 * link. If mayfly is re-initialized but the job objects are not, there is
603 * a risk that an enqueued job will point to the same link that is in the
604 * memq just after re-initialization. After an enqueue operation with that
605 * link, head and tail still point to the same link object, so the memq is
606 * considered empty.
607 */
608 if (!mayfly_initialized) {
609 mayfly_init();
610 mayfly_initialized = true;
611 }
612
613
614 /* Initialize Ticker */
615 ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS;
616 ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS;
617 ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS;
618 ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_THREAD_OPS;
619
620 err = ticker_init(TICKER_INSTANCE_ID_CTLR,
621 TICKER_NODES, &ticker_nodes[0],
622 MAYFLY_CALLER_COUNT, &ticker_users[0],
623 TICKER_USER_OPS, &ticker_user_ops[0],
624 hal_ticker_instance0_caller_id_get,
625 hal_ticker_instance0_sched,
626 hal_ticker_instance0_trigger_set);
627 LL_ASSERT(!err);
628
629 /* Initialize semaphore for ticker API blocking wait */
630 k_sem_init(&sem_ticker_api_cb, 0, 1);
631
632 /* Initialize LLL */
633 err = lll_init();
634 if (err) {
635 return err;
636 }
637
638 /* Initialize ULL internals */
639 /* TODO: globals? */
640
641 /* Common to init and reset */
642 err = init_reset();
643 if (err) {
644 return err;
645 }
646
647 #if defined(CONFIG_BT_BROADCASTER)
648 err = lll_adv_init();
649 if (err) {
650 return err;
651 }
652
653 err = ull_adv_init();
654 if (err) {
655 return err;
656 }
657 #endif /* CONFIG_BT_BROADCASTER */
658
659 #if defined(CONFIG_BT_OBSERVER)
660 err = lll_scan_init();
661 if (err) {
662 return err;
663 }
664
665 err = ull_scan_init();
666 if (err) {
667 return err;
668 }
669 #endif /* CONFIG_BT_OBSERVER */
670
671 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
672 err = lll_sync_init();
673 if (err) {
674 return err;
675 }
676
677 err = ull_sync_init();
678 if (err) {
679 return err;
680 }
681
682 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
683 err = ull_sync_iso_init();
684 if (err) {
685 return err;
686 }
687 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
688 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
689
690 #if defined(CONFIG_BT_CONN)
691 err = lll_conn_init();
692 if (err) {
693 return err;
694 }
695
696 err = ull_conn_init();
697 if (err) {
698 return err;
699 }
700 #endif /* CONFIG_BT_CONN */
701
702 #if defined(CONFIG_BT_CTLR_DF)
703 err = ull_df_init();
704 if (err) {
705 return err;
706 }
707 #endif
708
709 #if defined(CONFIG_BT_CTLR_ISO)
710 err = ull_iso_init();
711 if (err) {
712 return err;
713 }
714 #endif /* CONFIG_BT_CTLR_ISO */
715
716 #if defined(CONFIG_BT_CTLR_CONN_ISO)
717 err = ull_conn_iso_init();
718 if (err) {
719 return err;
720 }
721 #endif /* CONFIG_BT_CTLR_CONN_ISO */
722
723 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
724 err = ull_peripheral_iso_init();
725 if (err) {
726 return err;
727 }
728 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
729
730 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
731 err = ull_central_iso_init();
732 if (err) {
733 return err;
734 }
735 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
736
737 #if defined(CONFIG_BT_CTLR_ADV_ISO)
738 err = ull_adv_iso_init();
739 if (err) {
740 return err;
741 }
742 #endif /* CONFIG_BT_CTLR_ADV_ISO */
743
744 #if defined(CONFIG_BT_CTLR_DF)
745 err = lll_df_init();
746 if (err) {
747 return err;
748 }
749 #endif
750
751 #if defined(CONFIG_BT_CTLR_USER_EXT)
752 err = ull_user_init();
753 if (err) {
754 return err;
755 }
756 #endif /* CONFIG_BT_CTLR_USER_EXT */
757
758 /* reset filter accept list, resolving list and initialise RPA timeout */
759 if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
760 ull_filter_reset(true);
761 }
762
763 #if defined(CONFIG_BT_CTLR_TEST)
764 err = mem_ut();
765 if (err) {
766 return err;
767 }
768
769 err = ecb_ut();
770 if (err) {
771 return err;
772 }
773
774 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
775 lll_chan_sel_2_ut();
776 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
777 #endif /* CONFIG_BT_CTLR_TEST */
778
779 return 0;
780 }
781
782 int ll_deinit(void)
783 {
784 ll_reset();
785 return lll_deinit();
786 }
787
788 void ll_reset(void)
789 {
790 int err;
791
792 /* Note: The sequence of reset control flow is as follows:
793 * - Reset ULL context, i.e. stop ULL scheduling, abort LLL events etc.
794 * - Reset LLL context, i.e. post LLL event abort, let LLL cleanup its
795 * variables, if any.
796 * - Reset ULL static variables (which otherwise were mem-zeroed in case
797 * of a power-on reset, wherein architecture startup mem-zeroes .bss
798 * sections).
799 * - Initialize ULL context variable, similar to on-power-up.
800 */
801
802 #if defined(CONFIG_BT_BROADCASTER)
803 #if defined(CONFIG_BT_CTLR_ADV_ISO)
804 /* Reset adv iso sets */
805 err = ull_adv_iso_reset();
806 LL_ASSERT(!err);
807 #endif /* CONFIG_BT_CTLR_ADV_ISO */
808
809 /* Reset adv state */
810 err = ull_adv_reset();
811 LL_ASSERT(!err);
812 #endif /* CONFIG_BT_BROADCASTER */
813
814 #if defined(CONFIG_BT_OBSERVER)
815 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
816 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
817 /* Reset sync iso sets */
818 err = ull_sync_iso_reset();
819 LL_ASSERT(!err);
820 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
821
822 /* Reset periodic sync sets */
823 err = ull_sync_reset();
824 LL_ASSERT(!err);
825 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
826
827 /* Reset scan state */
828 err = ull_scan_reset();
829 LL_ASSERT(!err);
830 #endif /* CONFIG_BT_OBSERVER */
831
832 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
833 err = ull_peripheral_iso_reset();
834 LL_ASSERT(!err);
835 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
836
837 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
838 err = ull_central_iso_reset();
839 LL_ASSERT(!err);
840 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
841
842 #if defined(CONFIG_BT_CTLR_CONN_ISO)
843 err = ull_conn_iso_reset();
844 LL_ASSERT(!err);
845 #endif /* CONFIG_BT_CTLR_CONN_ISO */
846
847 #if defined(CONFIG_BT_CTLR_ISO)
848 err = ull_iso_reset();
849 LL_ASSERT(!err);
850 #endif /* CONFIG_BT_CTLR_ISO */
851
852 #if defined(CONFIG_BT_CONN)
853 /* Reset conn role */
854 err = ull_conn_reset();
855 LL_ASSERT(!err);
856
857 MFIFO_INIT(tx_ack);
858 #endif /* CONFIG_BT_CONN */
859
860 /* reset filter accept list and resolving list */
861 if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
862 ull_filter_reset(false);
863 }
864
865 /* Re-initialize ULL internals */
866
867 /* Re-initialize the prep mfifo */
868 MFIFO_INIT(prep);
869
870 /* Re-initialize the free rx mfifo */
871 MFIFO_INIT(pdu_rx_free);
872
873 #if defined(CONFIG_BT_CONN)
874 /* Re-initialize the free ll rx mfifo */
875 MFIFO_INIT(ll_pdu_rx_free);
876 #endif /* CONFIG_BT_CONN */
877
878 /* Reset LLL via mayfly */
879 {
880 static memq_link_t link;
881 static struct mayfly mfy = {0, 0, &link, NULL,
882 perform_lll_reset};
883 uint32_t retval;
884
885 /* NOTE: If Zero Latency Interrupt is used, then LLL context
886 * will be the highest priority IRQ in the system, hence
887 * mayfly_enqueue will run the callee inline (vectoring
888 * to the callee function) within this function. Otherwise,
889 * a semaphore is used to wait for perform_lll_reset to
890 * complete.
891 */
892
893 #if !defined(CONFIG_BT_CTLR_ZLI)
894 struct k_sem sem;
895
896 k_sem_init(&sem, 0, 1);
897 mfy.param = &sem;
898 #endif /* !CONFIG_BT_CTLR_ZLI */
899
900 retval = mayfly_enqueue(TICKER_USER_ID_THREAD,
901 TICKER_USER_ID_LLL, 0, &mfy);
902 LL_ASSERT(!retval);
903
904 #if !defined(CONFIG_BT_CTLR_ZLI)
905 /* LLL reset must complete before returning - wait for
906 * reset completion in LLL mayfly thread
907 */
908 k_sem_take(&sem, K_FOREVER);
909 #endif /* !CONFIG_BT_CTLR_ZLI */
910 }
911
912 #if defined(CONFIG_BT_BROADCASTER)
913 /* Finalize after adv state LLL context reset */
914 err = ull_adv_reset_finalize();
915 LL_ASSERT(!err);
916 #endif /* CONFIG_BT_BROADCASTER */
917
918 /* Reset/End DTM Tx or Rx commands */
919 if (IS_ENABLED(CONFIG_BT_CTLR_DTM)) {
920 uint16_t num_rx;
921
922 (void)ll_test_end(&num_rx);
923 ARG_UNUSED(num_rx);
924 }
925
926 /* Common to init and reset */
927 err = init_reset();
928 LL_ASSERT(!err);
929
930 #if defined(CONFIG_BT_CTLR_DF)
931 /* Direction Finding has to be reset after ull init_reset call because
932 * it uses mem_link_rx for node_rx_iq_report. The mem_link_rx is reset
933 * in common ull init_reset.
934 */
935 err = ull_df_reset();
936 LL_ASSERT(!err);
937 #endif
938
939 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
940 ll_feat_reset();
941 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
942
943 /* clear static random address */
944 (void)ll_addr_set(1U, NULL);
945 }
946
947 /**
948 * @brief Peek the next node_rx to send up to Host
949 * @details Tightly coupled with prio_recv_thread()
950 * Execution context: Controller thread
951 *
952 * @param[out] node_rx Pointer to rx node at head of queue
953 * @param[out] handle Connection handle
954 * @return Number of completed Tx packets
955 */
956 uint8_t ll_rx_get(void **node_rx, uint16_t *handle)
957 {
958 struct node_rx_pdu *rx;
959 memq_link_t *link;
960 uint8_t cmplt = 0U;
961
962 #if defined(CONFIG_BT_CONN) || \
963 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
964 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
965 defined(CONFIG_BT_CTLR_ADV_ISO)
966 ll_rx_get_again:
967 #endif /* CONFIG_BT_CONN ||
968 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
969 * CONFIG_BT_CTLR_ADV_PERIODIC ||
970 * CONFIG_BT_CTLR_ADV_ISO
971 */
972
973 *node_rx = NULL;
974
975 link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx);
976 if (link) {
977 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
978 cmplt = tx_cmplt_get(handle, &mfifo_fifo_tx_ack.f, rx->hdr.ack_last);
979 if (!cmplt) {
980 uint8_t f, cmplt_prev, cmplt_curr;
981 uint16_t h;
982
983 cmplt_curr = 0U;
984 f = mfifo_fifo_tx_ack.f;
985 do {
986 cmplt_prev = cmplt_curr;
987 cmplt_curr = tx_cmplt_get(&h, &f,
988 mfifo_fifo_tx_ack.l);
989 } while ((cmplt_prev != 0U) ||
990 (cmplt_prev != cmplt_curr));
991 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
992
993 if (0) {
994 #if defined(CONFIG_BT_CONN) || \
995 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT))
996 /* Do not send up buffers to Host thread that are
997 * marked for release
998 */
999 } else if (rx->hdr.type == NODE_RX_TYPE_RELEASE) {
1000 rx_link_dequeue_release_quota_inc(link);
1001 rx_release_replenish((struct node_rx_hdr *)rx);
1002
1003 goto ll_rx_get_again;
1004 #endif /* CONFIG_BT_CONN ||
1005 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT)
1006 */
1007
1008 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1009 } else if (rx->hdr.type == NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE) {
1010 const uint8_t report_cnt = 1U;
1011
1012 (void)memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head, NULL);
1013 ll_rx_link_release(link);
1014 ull_iq_report_link_inc_quota(report_cnt);
1015 ull_df_iq_report_mem_release(rx);
1016 ull_df_rx_iq_report_alloc(report_cnt);
1017
1018 goto ll_rx_get_again;
1019 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1020
1021 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1022 } else if (rx->hdr.type == NODE_RX_TYPE_SYNC_CHM_COMPLETE) {
1023 rx_link_dequeue_release_quota_inc(link);
1024
1025 /* Remove Channel Map Update Indication from
1026 * ACAD.
1027 */
1028 ull_adv_sync_chm_complete(rx);
1029
1030 rx_release_replenish((struct node_rx_hdr *)rx);
1031
1032 goto ll_rx_get_again;
1033 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1034
1035 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1036 } else if (rx->hdr.type == NODE_RX_TYPE_BIG_CHM_COMPLETE) {
1037 rx_link_dequeue_release_quota_inc(link);
1038
1039 /* Update Channel Map in BIGInfo present in
1040 * Periodic Advertising PDU.
1041 */
1042 ull_adv_iso_chm_complete(rx);
1043
1044 rx_release_replenish((struct node_rx_hdr *)rx);
1045
1046 goto ll_rx_get_again;
1047 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1048 }
1049
1050 *node_rx = rx;
1051
1052 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1053 }
1054 } else {
1055 cmplt = tx_cmplt_get(handle, &mfifo_fifo_tx_ack.f,
1056 mfifo_fifo_tx_ack.l);
1057 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1058 }
1059
1060 return cmplt;
1061 }
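
/* Editor's sketch (illustrative only; a hypothetical HCI-side receive loop):
 * how ll_rx_get(), ll_rx_dequeue() and ll_rx_mem_release() are intended to be
 * paired by the controller/HCI thread.
 *
 *     void *node = NULL;
 *     uint16_t handle;
 *
 *     (void)ll_rx_get(&node, &handle);
 *     if (node) {
 *             struct node_rx_pdu *rx = node;
 *
 *             ll_rx_dequeue();               // commit the peek
 *             // ... encode an HCI event from rx ...
 *             rx->hdr.next = NULL;           // release only this node
 *             ll_rx_mem_release(&node);
 *     }
 */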
1062
1063 /**
1064 * @brief Commit the dequeue from memq_ll_rx, where ll_rx_get() did the peek
1065 * @details Execution context: Controller thread
1066 */
1067 void ll_rx_dequeue(void)
1068 {
1069 struct node_rx_pdu *rx = NULL;
1070 memq_link_t *link;
1071
1072 link = memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head,
1073 (void **)&rx);
1074 LL_ASSERT(link);
1075
1076 ll_rx_link_release(link);
1077
1078 /* handle object specific clean up */
1079 switch (rx->hdr.type) {
1080 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1081 #if defined(CONFIG_BT_OBSERVER)
1082 case NODE_RX_TYPE_EXT_1M_REPORT:
1083 case NODE_RX_TYPE_EXT_2M_REPORT:
1084 case NODE_RX_TYPE_EXT_CODED_REPORT:
1085 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1086 case NODE_RX_TYPE_SYNC_REPORT:
1087 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1088 {
1089 struct node_rx_pdu *rx_curr;
1090 struct pdu_adv *adv;
1091 uint8_t loop = PDU_RX_POOL_SIZE / PDU_RX_NODE_POOL_ELEMENT_SIZE;
1092
1093 adv = (struct pdu_adv *)rx->pdu;
1094 if (adv->type != PDU_ADV_TYPE_EXT_IND) {
1095 break;
1096 }
1097
1098 rx_curr = rx->rx_ftr.extra;
1099 while (rx_curr) {
1100 memq_link_t *link_free;
1101
1102 LL_ASSERT(loop);
1103 loop--;
1104
1105 link_free = rx_curr->hdr.link;
1106 rx_curr = rx_curr->rx_ftr.extra;
1107
1108 ll_rx_link_release(link_free);
1109 }
1110 }
1111 break;
1112
1113 case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1114 {
1115 ull_scan_term_dequeue(rx->hdr.handle);
1116 }
1117 break;
1118 #endif /* CONFIG_BT_OBSERVER */
1119
1120 #if defined(CONFIG_BT_BROADCASTER)
1121 case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1122 {
1123 struct ll_adv_set *adv;
1124 struct lll_adv_aux *lll_aux;
1125
1126 adv = ull_adv_set_get(rx->hdr.handle);
1127 LL_ASSERT(adv);
1128
1129 lll_aux = adv->lll.aux;
1130 if (lll_aux) {
1131 struct ll_adv_aux_set *aux;
1132
1133 aux = HDR_LLL2ULL(lll_aux);
1134
1135 aux->is_started = 0U;
1136 }
1137
1138 #if defined(CONFIG_BT_PERIPHERAL)
1139 struct lll_conn *lll_conn = adv->lll.conn;
1140
1141 if (!lll_conn) {
1142 adv->is_enabled = 0U;
1143
1144 break;
1145 }
1146
1147 LL_ASSERT(!lll_conn->link_tx_free);
1148
1149 memq_link_t *memq_link = memq_deinit(&lll_conn->memq_tx.head,
1150 &lll_conn->memq_tx.tail);
1151 LL_ASSERT(memq_link);
1152
1153 lll_conn->link_tx_free = memq_link;
1154
1155 struct ll_conn *conn = HDR_LLL2ULL(lll_conn);
1156
1157 ll_conn_release(conn);
1158 adv->lll.conn = NULL;
1159
1160 ll_rx_release(adv->node_rx_cc_free);
1161 adv->node_rx_cc_free = NULL;
1162
1163 ll_rx_link_release(adv->link_cc_free);
1164 adv->link_cc_free = NULL;
1165 #endif /* CONFIG_BT_PERIPHERAL */
1166
1167 adv->is_enabled = 0U;
1168 }
1169 break;
1170 #endif /* CONFIG_BT_BROADCASTER */
1171 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1172
1173 #if defined(CONFIG_BT_CONN)
1174 case NODE_RX_TYPE_CONNECTION:
1175 {
1176 struct node_rx_cc *cc = (void *)rx->pdu;
1177 struct node_rx_ftr *ftr = &(rx->rx_ftr);
1178
1179 if (0) {
1180
1181 #if defined(CONFIG_BT_PERIPHERAL)
1182 } else if ((cc->status == BT_HCI_ERR_ADV_TIMEOUT) || cc->role) {
1183 struct ll_adv_set *adv;
1184 struct lll_adv *lll;
1185
1186 /* Get reference to ULL context */
1187 lll = ftr->param;
1188 adv = HDR_LLL2ULL(lll);
1189
1190 if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1191 struct lll_conn *conn_lll;
1192 struct ll_conn *conn;
1193 memq_link_t *memq_link;
1194
1195 conn_lll = lll->conn;
1196 LL_ASSERT(conn_lll);
1197 lll->conn = NULL;
1198
1199 LL_ASSERT(!conn_lll->link_tx_free);
1200 memq_link = memq_deinit(&conn_lll->memq_tx.head,
1201 &conn_lll->memq_tx.tail);
1202 LL_ASSERT(memq_link);
1203 conn_lll->link_tx_free = memq_link;
1204
1205 conn = HDR_LLL2ULL(conn_lll);
1206 ll_conn_release(conn);
1207 } else {
1208 /* Release un-utilized node rx */
1209 if (adv->node_rx_cc_free) {
1210 void *rx_free;
1211
1212 rx_free = adv->node_rx_cc_free;
1213 adv->node_rx_cc_free = NULL;
1214
1215 ll_rx_release(rx_free);
1216 }
1217 }
1218
1219 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1220 if (lll->aux) {
1221 struct ll_adv_aux_set *aux;
1222
1223 aux = HDR_LLL2ULL(lll->aux);
1224 aux->is_started = 0U;
1225 }
1226
1227 /* If Extended Advertising Commands are used, reset
1228 * is_enabled when the advertising set terminated event
1229 * is dequeued. Otherwise, when legacy advertising
1230 * commands are used, reset is_enabled here.
1231 */
1232 if (!lll->node_rx_adv_term) {
1233 adv->is_enabled = 0U;
1234 }
1235 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1236 adv->is_enabled = 0U;
1237 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1238
1239 #else /* !CONFIG_BT_PERIPHERAL */
1240 ARG_UNUSED(cc);
1241 #endif /* !CONFIG_BT_PERIPHERAL */
1242
1243 #if defined(CONFIG_BT_CENTRAL)
1244 } else {
1245 struct ll_scan_set *scan = HDR_LLL2ULL(ftr->param);
1246
1247 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
1248 struct ll_scan_set *scan_other =
1249 ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
1250
1251 if (scan_other) {
1252 if (scan_other == scan) {
1253 scan_other = ull_scan_is_enabled_get(SCAN_HANDLE_1M);
1254 }
1255
1256 if (scan_other) {
1257 scan_other->lll.conn = NULL;
1258 scan_other->is_enabled = 0U;
1259 }
1260 }
1261 #endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */
1262
1263 scan->lll.conn = NULL;
1264 scan->is_enabled = 0U;
1265 #else /* !CONFIG_BT_CENTRAL */
1266 } else {
1267 LL_ASSERT(0);
1268 #endif /* !CONFIG_BT_CENTRAL */
1269 }
1270
1271 if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
1272 uint8_t bm;
1273
1274 /* FIXME: use the correct adv and scan set to get
1275 * enabled status bitmask
1276 */
1277 bm = (IS_ENABLED(CONFIG_BT_OBSERVER)?(ull_scan_is_enabled(0) << 1):0) |
1278 (IS_ENABLED(CONFIG_BT_BROADCASTER)?ull_adv_is_enabled(0):0);
1279
1280 if (!bm) {
1281 ull_filter_adv_scan_state_cb(0);
1282 }
1283 }
1284 }
1285 break;
1286
1287 case NODE_RX_TYPE_TERMINATE:
1288 case NODE_RX_TYPE_DC_PDU:
1289 #endif /* CONFIG_BT_CONN */
1290
1291 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1292 case NODE_RX_TYPE_BIG_COMPLETE:
1293 case NODE_RX_TYPE_BIG_TERMINATE:
1294 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1295
1296 #if defined(CONFIG_BT_OBSERVER)
1297 case NODE_RX_TYPE_REPORT:
1298
1299 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1300 /* fall through */
1301 case NODE_RX_TYPE_SYNC:
1302 case NODE_RX_TYPE_SYNC_LOST:
1303 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1304 /* fall through */
1305 case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
1306 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1307 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1308 /* fall through */
1309 case NODE_RX_TYPE_SYNC_ISO:
1310 case NODE_RX_TYPE_SYNC_ISO_LOST:
1311 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1312 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1313 #endif /* CONFIG_BT_OBSERVER */
1314
1315 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1316 case NODE_RX_TYPE_SCAN_REQ:
1317 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1318
1319 #if defined(CONFIG_BT_CONN)
1320 case NODE_RX_TYPE_CONN_UPDATE:
1321 case NODE_RX_TYPE_ENC_REFRESH:
1322
1323 #if defined(CONFIG_BT_CTLR_LE_PING)
1324 case NODE_RX_TYPE_APTO:
1325 #endif /* CONFIG_BT_CTLR_LE_PING */
1326
1327 case NODE_RX_TYPE_CHAN_SEL_ALGO:
1328
1329 #if defined(CONFIG_BT_CTLR_PHY)
1330 case NODE_RX_TYPE_PHY_UPDATE:
1331 #endif /* CONFIG_BT_CTLR_PHY */
1332
1333 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1334 case NODE_RX_TYPE_RSSI:
1335 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1336 #endif /* CONFIG_BT_CONN */
1337
1338 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1339 case NODE_RX_TYPE_PROFILE:
1340 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1341
1342 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1343 case NODE_RX_TYPE_ADV_INDICATION:
1344 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1345
1346 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1347 case NODE_RX_TYPE_SCAN_INDICATION:
1348 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1349
1350 #if defined(CONFIG_BT_HCI_MESH_EXT)
1351 case NODE_RX_TYPE_MESH_ADV_CPLT:
1352 case NODE_RX_TYPE_MESH_REPORT:
1353 #endif /* CONFIG_BT_HCI_MESH_EXT */
1354
1355 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1356 case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1357 __fallthrough;
1358 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1359
1360 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1361 case NODE_RX_TYPE_CIS_REQUEST:
1362 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1363
1364 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1365 case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
1366 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1367
1368 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1369 case NODE_RX_TYPE_CIS_ESTABLISHED:
1370 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1371
1372 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1373 case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
1374 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1375
1376 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1377 case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
1378 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1379
1380 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
1381 case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
1382 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
1383
1384 /* Ensure that at least one 'case' statement is present for this
1385 * code block.
1386 */
1387 case NODE_RX_TYPE_NONE:
1388 LL_ASSERT(rx->hdr.type != NODE_RX_TYPE_NONE);
1389 break;
1390
1391 default:
1392 LL_ASSERT(0);
1393 break;
1394 }
1395
1396 /* FIXME: clean up when porting Mesh Ext. */
1397 if (0) {
1398 #if defined(CONFIG_BT_HCI_MESH_EXT)
1399 } else if (rx->hdr.type == NODE_RX_TYPE_MESH_ADV_CPLT) {
1400 struct ll_adv_set *adv;
1401 struct ll_scan_set *scan;
1402
1403 adv = ull_adv_is_enabled_get(0);
1404 LL_ASSERT(adv);
1405 adv->is_enabled = 0U;
1406
1407 scan = ull_scan_is_enabled_get(0);
1408 LL_ASSERT(scan);
1409
1410 scan->is_enabled = 0U;
1411
1412 ll_adv_scan_state_cb(0);
1413 #endif /* CONFIG_BT_HCI_MESH_EXT */
1414 }
1415 }
1416
1417 void ll_rx_mem_release(void **node_rx)
1418 {
1419 struct node_rx_pdu *rx;
1420
1421 rx = *node_rx;
1422 while (rx) {
1423 struct node_rx_pdu *rx_free;
1424
1425 rx_free = rx;
1426 rx = rx->hdr.next;
1427
1428 switch (rx_free->hdr.type) {
1429 #if defined(CONFIG_BT_BROADCASTER)
1430 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1431 case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1432 ll_rx_release(rx_free);
1433 break;
1434
1435 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1436 case NODE_RX_TYPE_BIG_COMPLETE:
1437 /* Nothing to release */
1438 break;
1439
1440 case NODE_RX_TYPE_BIG_TERMINATE:
1441 {
1442 struct ll_adv_iso_set *adv_iso = rx_free->rx_ftr.param;
1443
1444 ull_adv_iso_stream_release(adv_iso);
1445 }
1446 break;
1447 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1448 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1449 #endif /* CONFIG_BT_BROADCASTER */
1450
1451 #if defined(CONFIG_BT_OBSERVER)
1452 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1453 case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1454 {
1455 ll_rx_release(rx_free);
1456 }
1457 break;
1458 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1459 #endif /* CONFIG_BT_OBSERVER */
1460
1461 #if defined(CONFIG_BT_CONN)
1462 case NODE_RX_TYPE_CONNECTION:
1463 {
1464 struct node_rx_cc *cc =
1465 (void *)rx_free->pdu;
1466
1467 if (0) {
1468
1469 #if defined(CONFIG_BT_PERIPHERAL)
1470 } else if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1471 ll_rx_release(rx_free);
1472
1473 break;
1474 #endif /* !CONFIG_BT_PERIPHERAL */
1475
1476 #if defined(CONFIG_BT_CENTRAL)
1477 } else if (cc->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
1478 ull_central_cleanup(rx_free);
1479
1480 #if defined(CONFIG_BT_CTLR_PRIVACY)
1481 #if defined(CONFIG_BT_BROADCASTER)
1482 if (!ull_adv_is_enabled_get(0))
1483 #endif /* CONFIG_BT_BROADCASTER */
1484 {
1485 ull_filter_adv_scan_state_cb(0);
1486 }
1487 #endif /* CONFIG_BT_CTLR_PRIVACY */
1488 break;
1489 #endif /* CONFIG_BT_CENTRAL */
1490
1491 } else {
1492 LL_ASSERT(!cc->status);
1493 }
1494 }
1495
1496 __fallthrough;
1497 case NODE_RX_TYPE_DC_PDU:
1498 #endif /* CONFIG_BT_CONN */
1499
1500 #if defined(CONFIG_BT_OBSERVER)
1501 case NODE_RX_TYPE_REPORT:
1502
1503 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1504 __fallthrough;
1505 case NODE_RX_TYPE_EXT_1M_REPORT:
1506 case NODE_RX_TYPE_EXT_2M_REPORT:
1507 case NODE_RX_TYPE_EXT_CODED_REPORT:
1508 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1509 case NODE_RX_TYPE_SYNC_REPORT:
1510 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1511 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1512 #endif /* CONFIG_BT_OBSERVER */
1513
1514 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1515 case NODE_RX_TYPE_SCAN_REQ:
1516 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1517
1518 #if defined(CONFIG_BT_CONN)
1519 case NODE_RX_TYPE_CONN_UPDATE:
1520 case NODE_RX_TYPE_ENC_REFRESH:
1521
1522 #if defined(CONFIG_BT_CTLR_LE_PING)
1523 case NODE_RX_TYPE_APTO:
1524 #endif /* CONFIG_BT_CTLR_LE_PING */
1525
1526 case NODE_RX_TYPE_CHAN_SEL_ALGO:
1527
1528 #if defined(CONFIG_BT_CTLR_PHY)
1529 case NODE_RX_TYPE_PHY_UPDATE:
1530 #endif /* CONFIG_BT_CTLR_PHY */
1531
1532 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1533 case NODE_RX_TYPE_RSSI:
1534 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1535 #endif /* CONFIG_BT_CONN */
1536
1537 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1538 case NODE_RX_TYPE_PROFILE:
1539 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1540
1541 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1542 case NODE_RX_TYPE_ADV_INDICATION:
1543 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1544
1545 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1546 case NODE_RX_TYPE_SCAN_INDICATION:
1547 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1548
1549 #if defined(CONFIG_BT_HCI_MESH_EXT)
1550 case NODE_RX_TYPE_MESH_ADV_CPLT:
1551 case NODE_RX_TYPE_MESH_REPORT:
1552 #endif /* CONFIG_BT_HCI_MESH_EXT */
1553
1554 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1555 case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1556 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1557
1558 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1559 case NODE_RX_TYPE_CIS_REQUEST:
1560 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1561
1562 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1563 case NODE_RX_TYPE_CIS_ESTABLISHED:
1564 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1565
1566 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1567 case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
1568 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1569
1570 #if defined(CONFIG_BT_CTLR_ISO)
1571 case NODE_RX_TYPE_ISO_PDU:
1572 #endif
1573
1574 /* Ensure that at least one 'case' statement is present for this
1575 * code block.
1576 */
1577 case NODE_RX_TYPE_NONE:
1578 LL_ASSERT(rx_free->hdr.type != NODE_RX_TYPE_NONE);
1579 ll_rx_link_quota_inc();
1580 ll_rx_release(rx_free);
1581 break;
1582
1583 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1584 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1585 case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
1586 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1587 case NODE_RX_TYPE_SYNC:
1588 {
1589 struct node_rx_sync *se =
1590 (void *)rx_free->pdu;
1591 uint8_t status = se->status;
1592
1593 /* Below status codes use node_rx_sync_estab, hence
1594 * release the node_rx memory and release sync context
1595 * if sync establishment failed.
1596 */
1597 if ((status == BT_HCI_ERR_SUCCESS) ||
1598 (status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE) ||
1599 (status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB)) {
1600 struct ll_sync_set *sync;
1601
1602 /* pick the sync context before node_rx
1603 * release.
1604 */
1605 sync = (void *)rx_free->rx_ftr.param;
1606
1607 ll_rx_release(rx_free);
1608
1609 ull_sync_setup_reset(sync);
1610
1611 if (status != BT_HCI_ERR_SUCCESS) {
1612 memq_link_t *link_sync_lost;
1613
1614 link_sync_lost =
1615 sync->node_rx_lost.rx.hdr.link;
1616 ll_rx_link_release(link_sync_lost);
1617
1618 ull_sync_release(sync);
1619 }
1620
1621 break;
1622 } else {
1623 LL_ASSERT(status == BT_HCI_ERR_OP_CANCELLED_BY_HOST);
1624
1625 /* Fall through and release sync context */
1626 }
1627 }
1628 /* Pass through */
1629
1630 case NODE_RX_TYPE_SYNC_LOST:
1631 {
1632 struct ll_sync_set *sync =
1633 (void *)rx_free->rx_ftr.param;
1634
1635 ull_sync_release(sync);
1636 }
1637 break;
1638
1639 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1640 case NODE_RX_TYPE_SYNC_ISO:
1641 {
1642 struct node_rx_sync_iso *se =
1643 (void *)rx_free->pdu;
1644
1645 if (!se->status) {
1646 ll_rx_release(rx_free);
1647
1648 break;
1649 }
1650 }
1651 /* Pass through */
1652
1653 case NODE_RX_TYPE_SYNC_ISO_LOST:
1654 {
1655 struct ll_sync_iso_set *sync_iso =
1656 (void *)rx_free->rx_ftr.param;
1657
1658 ull_sync_iso_stream_release(sync_iso);
1659 }
1660 break;
1661 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1662 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1663
1664 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
1665 defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
1666 case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
1667 case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
1668 case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
1669 {
1670 const uint8_t report_cnt = 1U;
1671
1672 ull_iq_report_link_inc_quota(report_cnt);
1673 ull_df_iq_report_mem_release(rx_free);
1674 ull_df_rx_iq_report_alloc(report_cnt);
1675 }
1676 break;
1677 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
1678
1679 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_CONN_ISO)
1680 case NODE_RX_TYPE_TERMINATE:
1681 {
1682 if (IS_ACL_HANDLE(rx_free->hdr.handle)) {
1683 struct ll_conn *conn;
1684 memq_link_t *link;
1685
1686 conn = ll_conn_get(rx_free->hdr.handle);
1687
1688 LL_ASSERT(!conn->lll.link_tx_free);
1689 link = memq_deinit(&conn->lll.memq_tx.head,
1690 &conn->lll.memq_tx.tail);
1691 LL_ASSERT(link);
1692 conn->lll.link_tx_free = link;
1693
1694 ll_conn_release(conn);
1695 } else if (IS_CIS_HANDLE(rx_free->hdr.handle)) {
1696 ll_rx_link_quota_inc();
1697 ll_rx_release(rx_free);
1698 }
1699 }
1700 break;
1701 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_CONN_ISO */
1702
1703 case NODE_RX_TYPE_EVENT_DONE:
1704 default:
1705 LL_ASSERT(0);
1706 break;
1707 }
1708 }
1709
1710 *node_rx = rx;
1711
1712 rx_replenish_all();
1713 }
1714
1715 static void ll_rx_link_quota_update(int8_t delta)
1716 {
1717 LL_ASSERT(delta <= 0 || mem_link_rx.quota_pdu < RX_CNT);
1718 mem_link_rx.quota_pdu += delta;
1719 }
1720
1721 static void ll_rx_link_quota_inc(void)
1722 {
1723 ll_rx_link_quota_update(1);
1724 }
1725
1726 static void ll_rx_link_quota_dec(void)
1727 {
1728 ll_rx_link_quota_update(-1);
1729 }
1730
1731 void *ll_rx_link_alloc(void)
1732 {
1733 return mem_acquire(&mem_link_rx.free);
1734 }
1735
1736 void ll_rx_link_release(memq_link_t *link)
1737 {
1738 mem_release(link, &mem_link_rx.free);
1739 }
1740
1741 void *ll_rx_alloc(void)
1742 {
1743 return mem_acquire(&mem_pdu_rx.free);
1744 }
1745
1746 void ll_rx_release(void *node_rx)
1747 {
1748 mem_release(node_rx, &mem_pdu_rx.free);
1749 }
1750
1751 void ll_rx_put(memq_link_t *link, void *rx)
1752 {
1753 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1754 struct node_rx_hdr *rx_hdr = rx;
1755
1756 /* Serialize Tx ack with Rx enqueue by storing reference to
1757 * last element index in Tx ack FIFO.
1758 */
1759 rx_hdr->ack_last = mfifo_fifo_tx_ack.l;
1760 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1761
1762 /* Enqueue the Rx object */
1763 memq_enqueue(link, rx, &memq_ll_rx.tail);
1764 }
1765
1766 /**
1767 * @brief Permit another loop in the controller thread (prio_recv_thread)
1768 * @details Execution context: ULL mayfly
1769 */
1770 void ll_rx_sched(void)
1771 {
1772 /* sem_recv references the same semaphore (sem_prio_recv)
1773 * in prio_recv_thread
1774 */
1775 k_sem_give(sem_recv);
1776 }
1777
1778 void ll_rx_put_sched(memq_link_t *link, void *rx)
1779 {
1780 ll_rx_put(link, rx);
1781 ll_rx_sched();
1782 }
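
/* Editor's sketch (illustrative only; a hypothetical ULL-context producer):
 * typical hand-off of an Rx node towards the LL/HCI thread using the
 * allocation and enqueue helpers above.
 *
 *     memq_link_t *link = ll_rx_link_alloc();
 *     struct node_rx_pdu *rx = ll_rx_alloc();
 *
 *     // ... fill rx->hdr.type, rx->hdr.handle and the PDU payload ...
 *
 *     rx->hdr.link = link;
 *     ll_rx_put_sched(link, rx);     // enqueue and wake the receive thread
 */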
1783
1784 #if defined(CONFIG_BT_CONN)
1785 void *ll_pdu_rx_alloc_peek(uint8_t count)
1786 {
1787 if (count > MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) {
1788 return NULL;
1789 }
1790
1791 return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
1792 }
1793
1794 void *ll_pdu_rx_alloc(void)
1795 {
1796 return MFIFO_DEQUEUE(ll_pdu_rx_free);
1797 }
1798 #endif /* CONFIG_BT_CONN */
1799
1800 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1801 void ll_tx_ack_put(uint16_t handle, struct node_tx *node_tx)
1802 {
1803 struct lll_tx *tx;
1804 uint8_t idx;
1805
1806 idx = MFIFO_ENQUEUE_GET(tx_ack, (void **)&tx);
1807 LL_ASSERT(tx);
1808
1809 tx->handle = handle;
1810 tx->node = node_tx;
1811
1812 MFIFO_ENQUEUE(tx_ack, idx);
1813 }
1814 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
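
/* Editor's sketch (illustrative only): on Tx acknowledgment the acknowledged
 * node is queued via ll_tx_ack_put() above, and later drained by
 * tx_cmplt_get() when ll_rx_get() reports the number of completed packets to
 * the HCI layer:
 *
 *     ll_tx_ack_put(conn_handle, node_tx);          // producer, LLL context
 *     ...
 *     cmplt = tx_cmplt_get(&handle, &first, last);  // consumer, thread side
 */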
1815
1816 void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
1817 uint8_t * const ticker_id)
1818 {
1819 *instance_index = TICKER_INSTANCE_ID_CTLR;
1820 *ticker_id = (TICKER_NODES - FLASH_TICKER_NODES - COEX_TICKER_NODES);
1821 }
1822
1823 void ll_coex_ticker_id_get(uint8_t * const instance_index,
1824 uint8_t * const ticker_id)
1825 {
1826 *instance_index = TICKER_INSTANCE_ID_CTLR;
1827 *ticker_id = (TICKER_NODES - COEX_TICKER_NODES);
1828 }
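
/* Editor's illustration: with FLASH_TICKER_NODES = 2 and COEX_TICKER_NODES = 1,
 * the flash driver is handed ticker id (TICKER_NODES - 3), the first of its
 * two reserved nodes, and the coex driver is handed (TICKER_NODES - 1),
 * consistent with the note above that these nodes are placed after the Link
 * Layer's own ticker id allocations.
 */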
1829
1830 void ll_radio_state_abort(void)
1831 {
1832 static memq_link_t link;
1833 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1834 uint32_t ret;
1835
1836 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1837 &mfy);
1838 LL_ASSERT(!ret);
1839 }
1840
1841 uint32_t ll_radio_state_is_idle(void)
1842 {
1843 return lll_radio_is_idle();
1844 }
1845
1846 void ull_ticker_status_give(uint32_t status, void *param)
1847 {
1848 *((uint32_t volatile *)param) = status;
1849
1850 k_sem_give(&sem_ticker_api_cb);
1851 }
1852
1853 /**
1854 * @brief Take the ticker API semaphore (if applicable) and wait for operation
1855 * complete.
1856 *
1857 * Waits for ticker operation to complete by taking ticker API semaphore,
1858 * unless the operation was executed inline due to same-priority caller/
1859 * callee id.
1860 *
1861 * In case of asynchronous ticker operation (caller priority !=
1862 * callee priority), the function grabs the semaphore and waits for
1863 * ull_ticker_status_give, which assigns the ret_cb variable and releases
1864 * the semaphore.
1865 *
1866 * In case of synchronous ticker operation, the result is already known at
1867 * entry, and semaphore is only taken if ret_cb has been updated. This is done
1868 * to balance take/give counts. If *ret_cb is still TICKER_STATUS_BUSY, but
1869 * ret is not, the ticker operation has failed early, and no callback will be
1870 * invoked. In this case the semaphore shall not be taken.
1871 *
1872 * @param ret Return value from ticker API call:
1873 * TICKER_STATUS_BUSY: Ticker operation is queued
1874 * TICKER_STATUS_SUCCESS: Operation completed OK
1875 * TICKER_STATUS_FAILURE: Operation failed
1876 *
1877 * @param ret_cb Pointer to user data passed to ticker operation
1878 * callback, which holds the operation result. Value
1879 * upon entry:
1880 * TICKER_STATUS_BUSY: Ticker has not yet called CB
1881 * TICKER_STATUS_SUCCESS: Operation completed OK via CB
1882 * TICKER_STATUS_FAILURE: Operation failed via CB
1883 *
1884 * NOTE: For correct operation, *ret_cb must be initialized
1885 * to TICKER_STATUS_BUSY before initiating the ticker API call.
1886 *
1887 * @return uint32_t Returns result of completed ticker operation
1888 */
1889 uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
1890 {
1891 if ((ret == TICKER_STATUS_BUSY) || (*ret_cb != TICKER_STATUS_BUSY)) {
1892 /* Operation is either pending or completed via callback
1893 * prior to this function call. Take the semaphore and wait,
1894 * or take it to balance take/give counting.
1895 */
1896 k_sem_take(&sem_ticker_api_cb, K_FOREVER);
1897 return *ret_cb;
1898 }
1899
1900 return ret;
1901 }
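
/* Illustrative sketch (not in the original file): the call pattern described
 * in the comment above, as also used by ull_ticker_stop_with_mark() below.
 * ret_cb must be initialized to TICKER_STATUS_BUSY before the ticker API
 * call so that the semaphore take/give accounting stays balanced. The
 * function name and `ticker_handle` parameter are hypothetical.
 */
#if 0
static uint32_t example_ticker_stop_sync(uint8_t ticker_handle)
{
	uint32_t volatile ret_cb = TICKER_STATUS_BUSY;
	uint32_t ret;

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  ticker_handle, ull_ticker_status_give,
			  (void *)&ret_cb);

	/* Blocks (or merely balances the semaphore) until the result is known */
	return ull_ticker_status_take(ret, &ret_cb);
}
#endif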
1902
1903 void *ull_disable_mark(void *param)
1904 {
1905 return mark_set(&mark_disable, param);
1906 }
1907
1908 void *ull_disable_unmark(void *param)
1909 {
1910 return mark_unset(&mark_disable, param);
1911 }
1912
1913 void *ull_disable_mark_get(void)
1914 {
1915 return mark_get(mark_disable);
1916 }
1917
1918 /**
1919 * @brief Stops a specified ticker using the ull_disable_(un)mark functions.
1920 *
1921 * @param ticker_handle The handle of the ticker.
1922 * @param param The object to mark.
1923 * @param lll_disable Optional object when calling @ref ull_disable
1924 *
1925 * @return 0 on success, else a negative errno value.
1926 */
1927 int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param,
1928 void *lll_disable)
1929 {
1930 uint32_t volatile ret_cb;
1931 uint32_t ret;
1932 void *mark;
1933 int err;
1934
1935 mark = ull_disable_mark(param);
1936 if (mark != param) {
1937 return -ENOLCK;
1938 }
1939
1940 ret_cb = TICKER_STATUS_BUSY;
1941 ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1942 ticker_handle, ull_ticker_status_give,
1943 (void *)&ret_cb);
1944 ret = ull_ticker_status_take(ret, &ret_cb);
1945 if (ret) {
1946 mark = ull_disable_unmark(param);
1947 if (mark != param) {
1948 return -ENOLCK;
1949 }
1950
1951 return -EALREADY;
1952 }
1953
1954 err = ull_disable(lll_disable);
1955
1956 mark = ull_disable_unmark(param);
1957 if (mark != param) {
1958 return -ENOLCK;
1959 }
1960
1961 if (err && (err != -EALREADY)) {
1962 return err;
1963 }
1964
1965 return 0;
1966 }
1967
1968 #if defined(CONFIG_BT_CONN)
1969 void *ull_update_mark(void *param)
1970 {
1971 return mark_set(&mark_update, param);
1972 }
1973
1974 void *ull_update_unmark(void *param)
1975 {
1976 return mark_unset(&mark_update, param);
1977 }
1978
1979 void *ull_update_mark_get(void)
1980 {
1981 return mark_get(mark_update);
1982 }
1983 #endif /* CONFIG_BT_CONN */
1984
1985 int ull_disable(void *lll)
1986 {
1987 static memq_link_t link;
1988 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1989 struct ull_hdr *hdr;
1990 struct k_sem sem;
1991 uint32_t ret;
1992
1993 hdr = HDR_LLL2ULL(lll);
1994 if (!ull_ref_get(hdr)) {
1995 return -EALREADY;
1996 }
1997 cpu_dmb(); /* Ensure synchronized data access */
1998
1999 k_sem_init(&sem, 0, 1);
2000
2001 hdr->disabled_param = &sem;
2002 hdr->disabled_cb = disabled_cb;
2003
2004 cpu_dmb(); /* Ensure synchronized data access */
2005
2006 /* ULL_HIGH can run after we have called `ull_ref_get` and it can
2007 * decrement the ref count. Hence, handle this race condition by
2008 * ensuring that `disabled_cb` has been set while the ref count is still
2009 * set.
2010 * No need to call `lll_disable` and take the semaphore thereafter if
2011 * reference count is zero.
2012 * If the `sem` is given when reference count was decremented, we do not
2013 * care.
2014 */
2015 if (!ull_ref_get(hdr)) {
2016 return -EALREADY;
2017 }
2018
2019 mfy.param = lll;
2020 ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0,
2021 &mfy);
2022 LL_ASSERT(!ret);
2023
2024 return k_sem_take(&sem, ULL_DISABLE_TIMEOUT);
2025 }
2026
2027 void *ull_pdu_rx_alloc_peek(uint8_t count)
2028 {
2029 if (count > MFIFO_AVAIL_COUNT_GET(pdu_rx_free)) {
2030 return NULL;
2031 }
2032
2033 return MFIFO_DEQUEUE_PEEK(pdu_rx_free);
2034 }
2035
2036 void *ull_pdu_rx_alloc_peek_iter(uint8_t *idx)
2037 {
2038 return *(void **)MFIFO_DEQUEUE_ITER_GET(pdu_rx_free, idx);
2039 }
2040
2041 void *ull_pdu_rx_alloc(void)
2042 {
2043 return MFIFO_DEQUEUE(pdu_rx_free);
2044 }
2045
2046 void ull_rx_put(memq_link_t *link, void *rx)
2047 {
2048 #if defined(CONFIG_BT_CONN)
2049 struct node_rx_hdr *rx_hdr = rx;
2050
2051 /* Serialize Tx ack with Rx enqueue by storing reference to
2052 * last element index in Tx ack FIFO.
2053 */
2054 rx_hdr->ack_last = ull_conn_ack_last_idx_get();
2055 #endif /* CONFIG_BT_CONN */
2056
2057 /* Enqueue the Rx object */
2058 memq_enqueue(link, rx, &memq_ull_rx.tail);
2059 }
2060
2061 void ull_rx_sched(void)
2062 {
2063 static memq_link_t link;
2064 static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2065
2066 /* Kick the ULL (using the mayfly, tailchain it) */
2067 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
2068 }
2069
2070 void ull_rx_put_sched(memq_link_t *link, void *rx)
2071 {
2072 ull_rx_put(link, rx);
2073 ull_rx_sched();
2074 }
2075
2076 struct lll_event *ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb,
2077 lll_abort_cb_t abort_cb,
2078 struct lll_prepare_param *prepare_param,
2079 lll_prepare_cb_t prepare_cb,
2080 uint8_t is_resume)
2081 {
2082 struct lll_event *e;
2083 uint8_t idx;
2084
2085 idx = MFIFO_ENQUEUE_GET(prep, (void **)&e);
2086 if (!e) {
2087 return NULL;
2088 }
2089
2090 memcpy(&e->prepare_param, prepare_param, sizeof(e->prepare_param));
2091 e->prepare_cb = prepare_cb;
2092 e->is_abort_cb = is_abort_cb;
2093 e->abort_cb = abort_cb;
2094 e->is_resume = is_resume;
2095 e->is_aborted = 0U;
2096
2097 MFIFO_ENQUEUE(prep, idx);
2098
2099 return e;
2100 }
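
/* Illustrative sketch (not in the original file): re-queueing a role's
 * prepare parameters as a resume event so that ull_prepare_dequeue()
 * dispatches it once the radio is free. The function name and the
 * role_is_abort_cb/role_abort_cb/role_prepare_cb callbacks are hypothetical
 * placeholders for a role's actual LLL callbacks.
 */
#if 0
static void example_enqueue_resume(struct lll_prepare_param *p_orig)
{
	struct lll_event *e;

	e = ull_prepare_enqueue(role_is_abort_cb, role_abort_cb, p_orig,
				role_prepare_cb, 1U /* is_resume */);
	LL_ASSERT(e);
}
#endif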
2101
2102 void *ull_prepare_dequeue_get(void)
2103 {
2104 return MFIFO_DEQUEUE_GET(prep);
2105 }
2106
2107 void *ull_prepare_dequeue_iter(uint8_t *idx)
2108 {
2109 return MFIFO_DEQUEUE_ITER_GET(prep, idx);
2110 }
2111
2112 void ull_prepare_dequeue(uint8_t caller_id)
2113 {
2114 void *param_normal_head = NULL;
2115 void *param_normal_next = NULL;
2116 void *param_resume_head = NULL;
2117 void *param_resume_next = NULL;
2118 struct lll_event *next;
2119 uint8_t loop;
2120
2121 /* Development assertion check to ensure the below loop processing
2122 * has a limit.
2123 *
2124 * Only 2 scanners and 1 advertiser (directed adv) get enqueued back:
2125 *
2126 * Already in queue max 7 (EVENT_PIPELINE_MAX):
2127 * - 2 continuous scan prepare in queue (1M and Coded PHY)
2128 * - 2 continuous scan resume in queue (1M and Coded PHY)
2129 * - 1 directed adv prepare
2130 * - 1 directed adv resume
2131 * - 1 any other role with time reservation
2132 *
2133 * The loop removes the duplicates (scan and advertiser) that have the
2134 * is_aborted flag set, in 7 iterations:
2135 * - 1 scan prepare (1M)
2136 * - 1 scan prepare (Coded PHY)
2137 * - 1 directed adv prepare
2138 *
2139 * and has enqueued the following in these 7 iterations:
2140 * - 1 scan resume (1M)
2141 * - 1 scan resume (Coded PHY)
2142 * - 1 directed adv resume
2143 *
2144 * Hence, it should be (EVENT_PIPELINE_MAX + 3U) iterations max.
2145 */
2146 loop = (EVENT_PIPELINE_MAX + 3U);
2147
2148 next = ull_prepare_dequeue_get();
2149 while (next) {
2150 void *param = next->prepare_param.param;
2151 uint8_t is_aborted = next->is_aborted;
2152 uint8_t is_resume = next->is_resume;
2153
2154 /* Assert if we exceed iterations processing the prepare queue
2155 */
2156 LL_ASSERT(loop);
2157 loop--;
2158
2159 /* Let LLL invoke the `prepare` interface if the radio is not in
2160 * active use. Otherwise, enqueue at the end of the prepare pipeline queue.
2161 */
2162 if (!is_aborted) {
2163 static memq_link_t link;
2164 static struct mayfly mfy = {0, 0, &link, NULL,
2165 lll_resume};
2166 uint32_t ret;
2167
2168 mfy.param = next;
2169 ret = mayfly_enqueue(caller_id, TICKER_USER_ID_LLL, 0,
2170 &mfy);
2171 LL_ASSERT(!ret);
2172 }
2173
2174 MFIFO_DEQUEUE(prep);
2175
2176 /* Check for any more prepare elements in queue */
2177 next = ull_prepare_dequeue_get();
2178 if (!next) {
2179 break;
2180 }
2181
2182 /* A valid prepare element either had its `prepare` invoked or was
2183 * enqueued back into the prepare pipeline.
2184 */
2185 if (!is_aborted) {
2186 /* If the prepare element was not a resume event, it either
2187 * used the radio or was enqueued back into the prepare
2188 * pipeline with a preempt timeout set.
2189 *
2190 * Remember the first encountered and the next element
2191 * in the prepare pipeline so that we do not infinitely
2192 * loop through the resume events in prepare pipeline.
2193 */
2194 if (!is_resume) {
2195 if (!param_normal_head) {
2196 param_normal_head = param;
2197 } else if (!param_normal_next) {
2198 param_normal_next = param;
2199 }
2200 } else {
2201 if (!param_resume_head) {
2202 param_resume_head = param;
2203 } else if (!param_resume_next) {
2204 param_resume_next = param;
2205 }
2206 }
2207
2208 /* Stop traversing the prepare pipeline when we reach
2209 * back to the first or next event where we
2210 * initially started processing the prepare pipeline.
2211 */
2212 if (!next->is_aborted &&
2213 ((!next->is_resume &&
2214 ((next->prepare_param.param ==
2215 param_normal_head) ||
2216 (next->prepare_param.param ==
2217 param_normal_next))) ||
2218 (next->is_resume &&
2219 !param_normal_next &&
2220 ((next->prepare_param.param ==
2221 param_resume_head) ||
2222 (next->prepare_param.param ==
2223 param_resume_next))))) {
2224 break;
2225 }
2226 }
2227 }
2228 }
2229
2230 struct event_done_extra *ull_event_done_extra_get(void)
2231 {
2232 struct node_rx_event_done *evdone;
2233
2234 evdone = MFIFO_DEQUEUE_PEEK(done);
2235 if (!evdone) {
2236 return NULL;
2237 }
2238
2239 return &evdone->extra;
2240 }
2241
2242 struct event_done_extra *ull_done_extra_type_set(uint8_t type)
2243 {
2244 struct event_done_extra *extra;
2245
2246 extra = ull_event_done_extra_get();
2247 if (!extra) {
2248 return NULL;
2249 }
2250
2251 extra->type = type;
2252
2253 return extra;
2254 }
2255
2256 void *ull_event_done(void *param)
2257 {
2258 struct node_rx_event_done *evdone;
2259 memq_link_t *link;
2260
2261 /* Obtain new node that signals "Done of an RX-event".
2262 * Obtain this by dequeuing from the global 'mfifo_done' queue.
2263 * Note that 'mfifo_done' is a queue of pointers, not of
2264 * struct node_rx_event_done
2265 */
2266 evdone = MFIFO_DEQUEUE(done);
2267 if (!evdone) {
2268 /* Not fatal if we cannot obtain a node, though
2269 * we will lose the packets in the software stack.
2270 * If this happens during a Connection Update, it could cause LSTO
2271 */
2272 return NULL;
2273 }
2274
2275 link = evdone->hdr.link;
2276 evdone->hdr.link = NULL;
2277
2278 evdone->hdr.type = NODE_RX_TYPE_EVENT_DONE;
2279 evdone->param = param;
2280
2281 ull_rx_put_sched(link, evdone);
2282
2283 return evdone;
2284 }
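
/* Illustrative sketch (not in the original file): a typical event-close
 * sequence in a role's done path. The extra type is set on the peeked done
 * node first, then ull_event_done() dequeues it and pushes it through the
 * ULL Rx path. The function name and `role_context` are hypothetical;
 * EVENT_DONE_EXTRA_TYPE_CONN is one of the types demuxed later in this file.
 */
#if 0
static void example_event_close(void *role_context)
{
	struct event_done_extra *extra;

	/* Tag the done node at the head of the done FIFO... */
	extra = ull_done_extra_type_set(EVENT_DONE_EXTRA_TYPE_CONN);
	LL_ASSERT(extra);

	/* ...role specific fields of *extra would be filled here... */

	/* ...then dequeue it and schedule it towards rx_demux_event_done() */
	(void)ull_event_done(role_context);
}
#endif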
2285
2286 #if defined(CONFIG_BT_PERIPHERAL) || defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2287 /**
2288 * @brief Extract timing from completed event
2289 *
2290 * @param[in]  done               Done event containing fresh timing information
2291 * @param[out] ticks_drift_plus   Positive part of drift uncertainty window
2292 * @param[out] ticks_drift_minus  Negative part of drift uncertainty window
2293 */
2294 void ull_drift_ticks_get(struct node_rx_event_done *done,
2295 uint32_t *ticks_drift_plus,
2296 uint32_t *ticks_drift_minus)
2297 {
2298 uint32_t start_to_address_expected_us;
2299 uint32_t start_to_address_actual_us;
2300 uint32_t window_widening_event_us;
2301 uint32_t preamble_to_addr_us;
2302
2303 start_to_address_actual_us =
2304 done->extra.drift.start_to_address_actual_us;
2305 window_widening_event_us =
2306 done->extra.drift.window_widening_event_us;
2307 preamble_to_addr_us =
2308 done->extra.drift.preamble_to_addr_us;
2309
2310 start_to_address_expected_us = EVENT_JITTER_US +
2311 EVENT_TICKER_RES_MARGIN_US +
2312 window_widening_event_us +
2313 preamble_to_addr_us;
2314
2315 if (start_to_address_actual_us <= start_to_address_expected_us) {
2316 *ticks_drift_plus =
2317 HAL_TICKER_US_TO_TICKS(window_widening_event_us);
2318 *ticks_drift_minus =
2319 HAL_TICKER_US_TO_TICKS((start_to_address_expected_us -
2320 start_to_address_actual_us));
2321 } else {
2322 *ticks_drift_plus =
2323 HAL_TICKER_US_TO_TICKS(start_to_address_actual_us);
2324 *ticks_drift_minus =
2325 HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US +
2326 EVENT_TICKER_RES_MARGIN_US +
2327 preamble_to_addr_us);
2328 }
2329 }
2330 #endif /* CONFIG_BT_PERIPHERAL || CONFIG_BT_CTLR_SYNC_PERIODIC */
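
/*
 * Worked example for ull_drift_ticks_get() (illustrative only; the actual
 * EVENT_JITTER_US and EVENT_TICKER_RES_MARGIN_US values come from the build):
 *
 *   window_widening_event_us = 50, preamble_to_addr_us = 40,
 *   EVENT_JITTER_US + EVENT_TICKER_RES_MARGIN_US = 46 (assumed)
 *
 *   start_to_address_expected_us = 46 + 50 + 40 = 136
 *
 *   If start_to_address_actual_us = 100 (packet arrived earlier than the
 *   worst case), then:
 *     ticks_drift_plus  = HAL_TICKER_US_TO_TICKS(50)
 *     ticks_drift_minus = HAL_TICKER_US_TO_TICKS(136 - 100) = ticks of 36 us
 *
 *   If start_to_address_actual_us = 150 (later than expected), then:
 *     ticks_drift_plus  = HAL_TICKER_US_TO_TICKS(150)
 *     ticks_drift_minus = HAL_TICKER_US_TO_TICKS(46 + 40) = ticks of 86 us
 */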
2331
2332 static inline int init_reset(void)
2333 {
2334 memq_link_t *link;
2335
2336 /* Initialize and allocate done pool */
2337 RXFIFO_INIT_ALLOC(done);
2338
2339 /* Initialize rx pool. */
2340 mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
2341 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE),
2342 &mem_pdu_rx.free);
2343
2344 /* Initialize rx link pool. */
2345 mem_init(mem_link_rx.pool, sizeof(memq_link_t),
2346 sizeof(mem_link_rx.pool) / sizeof(memq_link_t),
2347 &mem_link_rx.free);
2348
2349 /* Acquire a link to initialize ull rx memq */
2350 link = mem_acquire(&mem_link_rx.free);
2351 LL_ASSERT(link);
2352
2353 /* Initialize ull rx memq */
2354 MEMQ_INIT(ull_rx, link);
2355
2356 /* Acquire a link to initialize ll rx memq */
2357 link = mem_acquire(&mem_link_rx.free);
2358 LL_ASSERT(link);
2359
2360 /* Initialize ll rx memq */
2361 MEMQ_INIT(ll_rx, link);
2362
2363 /* Allocate rx free buffers */
2364 mem_link_rx.quota_pdu = RX_CNT;
2365 rx_replenish_all();
2366
2367 #if (defined(CONFIG_BT_BROADCASTER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
2368 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2369 defined(CONFIG_BT_CTLR_SYNC_PERIODIC) || \
2370 defined(CONFIG_BT_CONN)
2371 /* Initialize channel map */
2372 ull_chan_reset();
2373 #endif /* (CONFIG_BT_BROADCASTER && CONFIG_BT_CTLR_ADV_EXT) ||
2374 * CONFIG_BT_CTLR_ADV_PERIODIC ||
2375 * CONFIG_BT_CTLR_SYNC_PERIODIC ||
2376 * CONFIG_BT_CONN
2377 */
2378
2379 return 0;
2380 }
2381
2382 static void perform_lll_reset(void *param)
2383 {
2384 int err;
2385
2386 /* Reset LLL */
2387 err = lll_reset();
2388 LL_ASSERT(!err);
2389
2390 #if defined(CONFIG_BT_BROADCASTER)
2391 /* Reset adv state */
2392 err = lll_adv_reset();
2393 LL_ASSERT(!err);
2394 #endif /* CONFIG_BT_BROADCASTER */
2395
2396 #if defined(CONFIG_BT_OBSERVER)
2397 /* Reset scan state */
2398 err = lll_scan_reset();
2399 LL_ASSERT(!err);
2400 #endif /* CONFIG_BT_OBSERVER */
2401
2402 #if defined(CONFIG_BT_CONN)
2403 /* Reset conn role */
2404 err = lll_conn_reset();
2405 LL_ASSERT(!err);
2406 #endif /* CONFIG_BT_CONN */
2407
2408 #if defined(CONFIG_BT_CTLR_DF)
2409 err = lll_df_reset();
2410 LL_ASSERT(!err);
2411 #endif /* CONFIG_BT_CTLR_DF */
2412
2413 #if !defined(CONFIG_BT_CTLR_ZLI)
2414 k_sem_give(param);
2415 #endif /* !CONFIG_BT_CTLR_ZLI */
2416 }
2417
2418 static inline void *mark_set(void **m, void *param)
2419 {
2420 if (!*m) {
2421 *m = param;
2422 }
2423
2424 return *m;
2425 }
2426
2427 static inline void *mark_unset(void **m, void *param)
2428 {
2429 if (*m && *m == param) {
2430 *m = NULL;
2431
2432 return param;
2433 }
2434
2435 return NULL;
2436 }
2437
2438 static inline void *mark_get(void *m)
2439 {
2440 return m;
2441 }
2442
2443 static void rx_replenish(uint8_t max)
2444 {
2445 uint8_t idx;
2446
2447 if (max > mem_link_rx.quota_pdu) {
2448 max = mem_link_rx.quota_pdu;
2449 }
2450
2451 while (max && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
2452 memq_link_t *link;
2453 struct node_rx_hdr *rx;
2454
2455 link = mem_acquire(&mem_link_rx.free);
2456 if (!link) {
2457 return;
2458 }
2459
2460 rx = mem_acquire(&mem_pdu_rx.free);
2461 if (!rx) {
2462 ll_rx_link_release(link);
2463 return;
2464 }
2465
2466 rx->link = link;
2467
2468 MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);
2469
2470 ll_rx_link_quota_dec();
2471
2472 max--;
2473 }
2474
2475 #if defined(CONFIG_BT_CONN)
2476 if (!max) {
2477 return;
2478 }
2479
2480 /* Replenish the ULL to LL/HCI free Rx PDU queue after LLL to ULL free
2481 * Rx PDU queue has been filled.
2482 */
2483 while (mem_link_rx.quota_pdu &&
2484 MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
2485 memq_link_t *link;
2486 struct node_rx_hdr *rx;
2487
2488 link = mem_acquire(&mem_link_rx.free);
2489 if (!link) {
2490 return;
2491 }
2492
2493 rx = mem_acquire(&mem_pdu_rx.free);
2494 if (!rx) {
2495 ll_rx_link_release(link);
2496 return;
2497 }
2498
2499 link->mem = NULL;
2500 rx->link = link;
2501
2502 MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);
2503
2504 ll_rx_link_quota_dec();
2505 }
2506 #endif /* CONFIG_BT_CONN */
2507 }
2508
2509 static void rx_replenish_all(void)
2510 {
2511 rx_replenish(UINT8_MAX);
2512 }
2513
2514 #if defined(CONFIG_BT_CONN) || \
2515 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
2516 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2517 defined(CONFIG_BT_CTLR_ADV_ISO)
2518
2519 static void rx_replenish_one(void)
2520 {
2521 rx_replenish(1U);
2522 }
2523
2524 static void rx_release_replenish(struct node_rx_hdr *rx)
2525 {
2526 ll_rx_release(rx);
2527 rx_replenish_one();
2528 }
2529
2530 static void rx_link_dequeue_release_quota_inc(memq_link_t *link)
2531 {
2532 (void)memq_dequeue(memq_ll_rx.tail,
2533 &memq_ll_rx.head, NULL);
2534 ll_rx_link_release(link);
2535 ll_rx_link_quota_inc();
2536 }
2537 #endif /* CONFIG_BT_CONN ||
2538 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
2539 * CONFIG_BT_CTLR_ADV_PERIODIC ||
2540 * CONFIG_BT_CTLR_ADV_ISO
2541 */
2542
2543 static void rx_demux(void *param)
2544 {
2545 memq_link_t *link;
2546
2547 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2548 do {
2549 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2550 struct node_rx_hdr *rx;
2551
2552 link = memq_peek(memq_ull_rx.head, memq_ull_rx.tail,
2553 (void **)&rx);
2554 if (link) {
2555 #if defined(CONFIG_BT_CONN)
2556 struct node_tx *node_tx;
2557 memq_link_t *link_tx;
2558 uint16_t handle; /* Handle to Ack TX */
2559 #endif /* CONFIG_BT_CONN */
2560
2561 LL_ASSERT(rx);
2562
2563 #if defined(CONFIG_BT_CONN)
2564 link_tx = ull_conn_ack_by_last_peek(rx->ack_last,
2565 &handle, &node_tx);
2566 if (link_tx) {
2567 rx_demux_conn_tx_ack(rx->ack_last, handle,
2568 link_tx, node_tx);
2569 } else
2570 #endif /* CONFIG_BT_CONN */
2571 {
2572 rx_demux_rx(link, rx);
2573 }
2574
2575 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2576 rx_demux_yield();
2577 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2578
2579 #if defined(CONFIG_BT_CONN)
2580 } else {
2581 struct node_tx *node_tx;
2582 uint8_t ack_last;
2583 uint16_t handle;
2584
2585 link = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2586 if (link) {
2587 rx_demux_conn_tx_ack(ack_last, handle,
2588 link, node_tx);
2589
2590 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2591 rx_demux_yield();
2592 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2593
2594 }
2595 #endif /* CONFIG_BT_CONN */
2596 }
2597
2598 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2599 } while (link);
2600 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2601 }
2602
2603 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2604 static void rx_demux_yield(void)
2605 {
2606 static memq_link_t link;
2607 static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2608 struct node_rx_hdr *rx;
2609 memq_link_t *link_peek;
2610
2611 link_peek = memq_peek(memq_ull_rx.head, memq_ull_rx.tail, (void **)&rx);
2612 if (!link_peek) {
2613 #if defined(CONFIG_BT_CONN)
2614 struct node_tx *node_tx;
2615 uint8_t ack_last;
2616 uint16_t handle;
2617
2618 link_peek = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2619 if (!link_peek) {
2620 return;
2621 }
2622 #else /* !CONFIG_BT_CONN */
2623 return;
2624 #endif /* !CONFIG_BT_CONN */
2625 }
2626
2627 /* Kick the ULL (using the mayfly, tailchain it) */
2628 mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_HIGH, 1,
2629 &mfy);
2630 }
2631 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2632
2633 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
2634 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
2635 {
2636 struct lll_tx *tx;
2637 uint8_t cmplt;
2638 uint8_t next;
2639
2640 next = *first;
2641 tx = mfifo_dequeue_iter_get(mfifo_fifo_tx_ack.m, mfifo_tx_ack.s,
2642 mfifo_tx_ack.n, mfifo_fifo_tx_ack.f, last,
2643 &next);
2644 if (!tx) {
2645 return 0;
2646 }
2647
2648 *handle = tx->handle;
2649 cmplt = 0U;
2650 do {
2651 if (false) {
2652 #if defined(CONFIG_BT_CTLR_ADV_ISO) || \
2653 defined(CONFIG_BT_CTLR_CONN_ISO)
2654 } else if (IS_CIS_HANDLE(tx->handle) ||
2655 IS_ADV_ISO_HANDLE(tx->handle)) {
2656 struct node_tx_iso *tx_node;
2657 uint8_t sdu_fragments;
2658
2659 /* NOTE: tx_cmplt_get() is permitted to be called
2660 * multiple times before the tx_ack queue which is
2661 * associated with Rx queue is changed by the
2662 * dequeue of Rx node.
2663 *
2664 * Tx node is released early without waiting for
2665 * any dependency on Rx queue. Released Tx node
2666 * reference is overloaded to store the Tx
2667 * fragments count.
2668 *
2669 * A hack is used here that depends on the fact
2670 * that memory addresses have a value greater than
2671 * 0xFF, to determine if a node Tx has been
2672 * released in a prior iteration of this function.
2673 */
2674
2675 /* We must count each SDU HCI fragment */
2676 tx_node = tx->node;
2677 if (IS_NODE_TX_PTR(tx_node)) {
2678 /* We count each SDU fragment completed
2679 * by this PDU.
2680 */
2681 sdu_fragments = tx_node->sdu_fragments;
2682
2683 /* Replace node reference with fragments
2684 * count
2685 */
2686 NODE_TX_FRAGMENTS_SET(tx->node, sdu_fragments);
2687
2688 /* Release the node as it is a reference and not a
2689 * fragments count.
2690 */
2691 ll_iso_link_tx_release(tx_node->link);
2692 ll_iso_tx_mem_release(tx_node);
2693 } else {
2694 /* Get SDU fragments count from the encoded
2695 * node reference value.
2696 */
2697 sdu_fragments = NODE_TX_FRAGMENTS_GET(tx_node);
2698 }
2699
2700 /* Accumulate the tx acknowledgements */
2701 cmplt += sdu_fragments;
2702
2703 goto next_ack;
2704 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2705
2706 #if defined(CONFIG_BT_CONN)
2707 } else {
2708 struct node_tx *tx_node;
2709 struct pdu_data *p;
2710
2711 /* NOTE: tx_cmplt_get() is permitted to be called
2712 * multiple times before the tx_ack queue which is
2713 * associated with Rx queue is changed by the
2714 * dequeue of Rx node.
2715 *
2716 * Tx node is released early without waiting for
2717 * any dependency on Rx queue. Released Tx node
2718 * reference is overloaded to store whether
2719 * packet with data or control was released.
2720 *
2721 * A hack is used here that depends on the fact
2722 * that memory addresses have a value greater than
2723 * 0xFF, to determine if a node Tx has been
2724 * released in a prior iteration of this function.
2725 */
2726 tx_node = tx->node;
2727 p = (void *)tx_node->pdu;
2728 if (!tx_node ||
2729 (IS_NODE_TX_PTR(tx_node) &&
2730 (p->ll_id == PDU_DATA_LLID_DATA_START ||
2731 p->ll_id == PDU_DATA_LLID_DATA_CONTINUE)) ||
2732 (!IS_NODE_TX_PTR(tx_node) &&
2733 IS_NODE_TX_DATA(tx_node))) {
2734 /* data packet, hence count num cmplt */
2735 NODE_TX_DATA_SET(tx->node);
2736 cmplt++;
2737 } else {
2738 /* ctrl packet or flushed, hence don't count num
2739 * cmplt
2740 */
2741 NODE_TX_CTRL_SET(tx->node);
2742 }
2743
2744 if (IS_NODE_TX_PTR(tx_node)) {
2745 ll_tx_mem_release(tx_node);
2746 }
2747 #endif /* CONFIG_BT_CONN */
2748
2749 }
2750
2751 #if defined(CONFIG_BT_CTLR_ADV_ISO) || \
2752 defined(CONFIG_BT_CTLR_CONN_ISO)
2753 next_ack:
2754 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2755
2756 *first = next;
2757 tx = mfifo_dequeue_iter_get(mfifo_fifo_tx_ack.m, mfifo_tx_ack.s,
2758 mfifo_tx_ack.n, mfifo_fifo_tx_ack.f,
2759 last, &next);
2760 } while (tx && tx->handle == *handle);
2761
2762 return cmplt;
2763 }
2764
2765 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
2766 memq_link_t *link,
2767 struct node_tx *node_tx)
2768 {
2769 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2770 do {
2771 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2772 /* Dequeue node */
2773 ull_conn_ack_dequeue();
2774
2775 /* Process Tx ack */
2776 ull_conn_tx_ack(handle, link, node_tx);
2777
2778 /* Release link mem */
2779 ull_conn_link_tx_release(link);
2780
2781 /* check for more rx ack */
2782 link = ull_conn_ack_by_last_peek(ack_last, &handle, &node_tx);
2783
2784 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2785 if (!link)
2786 #else /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2787 } while (link);
2788 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2789
2790 {
2791 /* trigger thread to call ll_rx_get() */
2792 ll_rx_sched();
2793 }
2794 }
2795 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
2796
2797 /**
2798 * @brief Dispatch rx objects
2799 * @details Rx objects are only peeked, not dequeued yet.
2800 * Execution context: ULL high priority Mayfly
2801 */
2802 static inline void rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx)
2803 {
2804 /* Demux Rx objects */
2805 switch (rx->type) {
2806 case NODE_RX_TYPE_EVENT_DONE:
2807 {
2808 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2809 rx_demux_event_done(link, (struct node_rx_event_done *)rx);
2810 }
2811 break;
2812
2813 #if defined(CONFIG_BT_OBSERVER)
2814 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2815 case NODE_RX_TYPE_EXT_1M_REPORT:
2816 case NODE_RX_TYPE_EXT_CODED_REPORT:
2817 case NODE_RX_TYPE_EXT_AUX_REPORT:
2818 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2819 case NODE_RX_TYPE_SYNC_REPORT:
2820 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2821 {
2822 struct pdu_adv *adv;
2823
2824 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2825
2826 adv = (void *)((struct node_rx_pdu *)rx)->pdu;
2827 if (adv->type != PDU_ADV_TYPE_EXT_IND) {
2828 ll_rx_put_sched(link, rx);
2829 break;
2830 }
2831
2832 ull_scan_aux_setup(link, (struct node_rx_pdu *)rx);
2833 }
2834 break;
2835
2836 case NODE_RX_TYPE_EXT_AUX_RELEASE:
2837 {
2838 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2839 ull_scan_aux_release(link, (struct node_rx_pdu *)rx);
2840 }
2841 break;
2842 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2843 case NODE_RX_TYPE_SYNC:
2844 {
2845 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2846 ull_sync_established_report(link, (struct node_rx_pdu *)rx);
2847 }
2848 break;
2849 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
2850 case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
2851 {
2852 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2853 ll_rx_put_sched(link, rx);
2854 }
2855 break;
2856 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
2857 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2858 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2859 #endif /* CONFIG_BT_OBSERVER */
2860
2861 #if defined(CONFIG_BT_CTLR_CONN_ISO)
2862 case NODE_RX_TYPE_CIS_ESTABLISHED:
2863 {
2864 struct ll_conn *conn;
2865
2866 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2867
2868 conn = ll_conn_get(rx->handle);
2869 if (ull_cp_cc_awaiting_established(conn)) {
2870 ull_cp_cc_established(conn, BT_HCI_ERR_SUCCESS);
2871 }
2872
2873 rx->type = NODE_RX_TYPE_RELEASE;
2874 ll_rx_put_sched(link, rx);
2875 }
2876 break;
2877 #endif /* CONFIG_BT_CTLR_CONN_ISO */
2878
2879 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
2880 defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
2881 case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
2882 case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
2883 case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
2884 case NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE:
2885 {
2886 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2887 ll_rx_put_sched(link, rx);
2888 }
2889 break;
2890 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX */
2891
2892 #if defined(CONFIG_BT_CONN)
2893 case NODE_RX_TYPE_CONNECTION:
2894 {
2895 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2896 ull_conn_setup(link, (struct node_rx_pdu *)rx);
2897 }
2898 break;
2899
2900 case NODE_RX_TYPE_DC_PDU:
2901 {
2902 ull_conn_rx(link, (struct node_rx_pdu **)&rx);
2903
2904 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2905
2906 /* Only schedule node if not marked as retain by LLCP */
2907 if (rx && rx->type != NODE_RX_TYPE_RETAIN) {
2908 ll_rx_put_sched(link, rx);
2909 }
2910 }
2911 break;
2912
2913 case NODE_RX_TYPE_TERMINATE:
2914 #endif /* CONFIG_BT_CONN */
2915
2916 #if defined(CONFIG_BT_OBSERVER) || \
2917 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2918 defined(CONFIG_BT_CTLR_BROADCAST_ISO) || \
2919 defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
2920 defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
2921 defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
2922 defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
2923 defined(CONFIG_BT_CONN)
2924
2925 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
2926 case NODE_RX_TYPE_SYNC_CHM_COMPLETE:
2927 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
2928
2929 #if defined(CONFIG_BT_CTLR_ADV_ISO)
2930 case NODE_RX_TYPE_BIG_CHM_COMPLETE:
2931 case NODE_RX_TYPE_BIG_TERMINATE:
2932 #endif /* CONFIG_BT_CTLR_ADV_ISO */
2933
2934 #if defined(CONFIG_BT_OBSERVER)
2935 case NODE_RX_TYPE_REPORT:
2936 #endif /* CONFIG_BT_OBSERVER */
2937
2938 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
2939 case NODE_RX_TYPE_SCAN_REQ:
2940 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
2941
2942 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
2943 case NODE_RX_TYPE_PROFILE:
2944 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
2945
2946 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
2947 case NODE_RX_TYPE_ADV_INDICATION:
2948 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
2949
2950 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
2951 case NODE_RX_TYPE_SCAN_INDICATION:
2952 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
2953
2954 case NODE_RX_TYPE_RELEASE:
2955 {
2956 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2957 ll_rx_put_sched(link, rx);
2958 }
2959 break;
2960 #endif /* CONFIG_BT_OBSERVER ||
2961 * CONFIG_BT_CTLR_ADV_PERIODIC ||
2962 * CONFIG_BT_CTLR_BROADCAST_ISO ||
2963 * CONFIG_BT_CTLR_SCAN_REQ_NOTIFY ||
2964 * CONFIG_BT_CTLR_PROFILE_ISR ||
2965 * CONFIG_BT_CTLR_ADV_INDICATION ||
2966 * CONFIG_BT_CTLR_SCAN_INDICATION ||
2967 * CONFIG_BT_CONN
2968 */
2969
2970 default:
2971 {
2972 #if defined(CONFIG_BT_CTLR_USER_EXT)
2973 /* Try proprietary demuxing */
2974 rx_demux_rx_proprietary(link, rx, memq_ull_rx.tail,
2975 &memq_ull_rx.head);
2976 #else
2977 LL_ASSERT(0);
2978 #endif /* CONFIG_BT_CTLR_USER_EXT */
2979 }
2980 break;
2981 }
2982 }
2983
2984 static inline void rx_demux_event_done(memq_link_t *link,
2985 struct node_rx_event_done *done)
2986 {
2987 struct ull_hdr *ull_hdr;
2988 void *release;
2989
2990 /* Decrement prepare reference if ULL will not resume */
2991 ull_hdr = done->param;
2992 if (ull_hdr) {
2993 LL_ASSERT(ull_ref_get(ull_hdr));
2994 ull_ref_dec(ull_hdr);
2995 }
2996
2997 /* Process role dependent event done */
2998 switch (done->extra.type) {
2999 #if defined(CONFIG_BT_CONN)
3000 case EVENT_DONE_EXTRA_TYPE_CONN:
3001 ull_conn_done(done);
3002 break;
3003 #endif /* CONFIG_BT_CONN */
3004
3005 #if defined(CONFIG_BT_BROADCASTER)
3006 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
3007 defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
3008 case EVENT_DONE_EXTRA_TYPE_ADV:
3009 ull_adv_done(done);
3010 break;
3011
3012 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3013 case EVENT_DONE_EXTRA_TYPE_ADV_AUX:
3014 ull_adv_aux_done(done);
3015 break;
3016
3017 #if defined(CONFIG_BT_CTLR_ADV_ISO)
3018 case EVENT_DONE_EXTRA_TYPE_ADV_ISO_COMPLETE:
3019 ull_adv_iso_done_complete(done);
3020 break;
3021
3022 case EVENT_DONE_EXTRA_TYPE_ADV_ISO_TERMINATE:
3023 ull_adv_iso_done_terminate(done);
3024 break;
3025 #endif /* CONFIG_BT_CTLR_ADV_ISO */
3026 #endif /* CONFIG_BT_CTLR_ADV_EXT */
3027 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
3028 #endif /* CONFIG_BT_BROADCASTER */
3029
3030 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3031 #if defined(CONFIG_BT_OBSERVER)
3032 case EVENT_DONE_EXTRA_TYPE_SCAN:
3033 ull_scan_done(done);
3034 break;
3035
3036 case EVENT_DONE_EXTRA_TYPE_SCAN_AUX:
3037 ull_scan_aux_done(done);
3038 break;
3039
3040 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
3041 case EVENT_DONE_EXTRA_TYPE_SYNC:
3042 ull_sync_done(done);
3043 break;
3044
3045 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
3046 case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_ESTAB:
3047 ull_sync_iso_estab_done(done);
3048 break;
3049
3050 case EVENT_DONE_EXTRA_TYPE_SYNC_ISO:
3051 ull_sync_iso_done(done);
3052 break;
3053
3054 case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_TERMINATE:
3055 ull_sync_iso_done_terminate(done);
3056 break;
3057 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
3058 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
3059 #endif /* CONFIG_BT_OBSERVER */
3060 #endif /* CONFIG_BT_CTLR_ADV_EXT */
3061
3062 #if defined(CONFIG_BT_CTLR_CONN_ISO)
3063 case EVENT_DONE_EXTRA_TYPE_CIS:
3064 ull_conn_iso_done(done);
3065 break;
3066 #endif /* CONFIG_BT_CTLR_CONN_ISO */
3067
3068 #if defined(CONFIG_BT_CTLR_USER_EXT)
3069 case EVENT_DONE_EXTRA_TYPE_USER_START
3070 ... EVENT_DONE_EXTRA_TYPE_USER_END:
3071 ull_proprietary_done(done);
3072 break;
3073 #endif /* CONFIG_BT_CTLR_USER_EXT */
3074
3075 case EVENT_DONE_EXTRA_TYPE_NONE:
3076 /* ignore */
3077 break;
3078
3079 default:
3080 LL_ASSERT(0);
3081 break;
3082 }
3083
3084 /* Release done */
3085 done->extra.type = 0U;
3086 release = RXFIFO_RELEASE(done, link, done);
3087 LL_ASSERT(release == done);
3088
3089 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
3090 /* dequeue prepare pipeline */
3091 ull_prepare_dequeue(TICKER_USER_ID_ULL_HIGH);
3092
3093 /* LLL done synchronize count */
3094 lll_done_ull_inc();
3095 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
3096
3097 /* If disable initiated, signal the semaphore */
3098 if (ull_hdr && !ull_ref_get(ull_hdr) && ull_hdr->disabled_cb) {
3099 ull_hdr->disabled_cb(ull_hdr->disabled_param);
3100 }
3101 }
3102
3103 static void disabled_cb(void *param)
3104 {
3105 k_sem_give(param);
3106 }
3107
3108 /**
3109 * @brief Support function for RXFIFO_ALLOC macro
3110 * @details This function allocates up to 'max' number of MFIFO elements by
3111 * enqueuing pointers to memory elements with associated memq links.
3112 */
3113 void ull_rxfifo_alloc(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
3114 void *mem_free, void *link_free, uint8_t max)
3115 {
3116 uint8_t idx;
3117
3118 while ((max--) && mfifo_enqueue_idx_get(n, f, *l, &idx)) {
3119 memq_link_t *link;
3120 struct node_rx_hdr *rx;
3121
3122 link = mem_acquire(link_free);
3123 if (!link) {
3124 break;
3125 }
3126
3127 rx = mem_acquire(mem_free);
3128 if (!rx) {
3129 mem_release(link, link_free);
3130 break;
3131 }
3132
3133 link->mem = NULL;
3134 rx->link = link;
3135
3136 mfifo_by_idx_enqueue(m, s, idx, rx, l);
3137 }
3138 }
3139
3140 /**
3141 * @brief Support function for RXFIFO_RELEASE macro
3142 * @details This function releases a node by returning it to the FIFO.
3143 */
3144 void *ull_rxfifo_release(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
3145 memq_link_t *link, struct node_rx_hdr *rx)
3146 {
3147 uint8_t idx;
3148
3149 if (!mfifo_enqueue_idx_get(n, f, *l, &idx)) {
3150 return NULL;
3151 }
3152
3153 rx->link = link;
3154
3155 mfifo_by_idx_enqueue(m, s, idx, rx, l);
3156
3157 return rx;
3158 }
3159
3160 #if defined(CONFIG_BT_CTLR_ISO) || \
3161 defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) || \
3162 defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
3163 /**
3164 * @brief Wraps given time within the range of 0 to ULL_TIME_WRAPPING_POINT_US
3165 * @param time_now_us Current time, in microseconds
3166 * @param time_diff_us Time difference (signed), in microseconds
3167 * @return Wrapped time after difference
3168 */
3169 uint32_t ull_get_wrapped_time_us(uint32_t time_now_us, int32_t time_diff_us)
3170 {
3171 LL_ASSERT(time_now_us <= ULL_TIME_WRAPPING_POINT_US);
3172
3173 uint32_t result = ((uint64_t)time_now_us + ULL_TIME_SPAN_FULL_US + time_diff_us) %
3174 ((uint64_t)ULL_TIME_SPAN_FULL_US);
3175
3176 return result;
3177 }
3178 #endif /* CONFIG_BT_CTLR_ISO ||
3179 * CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER ||
3180 * CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER
3181 */
3182