1 /*
2 * Copyright (c) 2017-2021 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stddef.h>
8 #include <stdbool.h>
9 #include <errno.h>
10
11 #include <zephyr.h>
12 #include <soc.h>
13 #include <device.h>
14 #include <drivers/entropy.h>
15 #include <bluetooth/hci.h>
16
17 #include "hal/cpu.h"
18 #include "hal/ccm.h"
19 #include "hal/cntr.h"
20 #include "hal/ticker.h"
21
22 #include "util/util.h"
23 #include "util/mem.h"
24 #include "util/mfifo.h"
25 #include "util/memq.h"
26 #include "util/mayfly.h"
27
28 #include "ticker/ticker.h"
29
30 #include "pdu.h"
31
32 #include "lll.h"
33 #include "lll/lll_vendor.h"
34 #include "lll/lll_adv_types.h"
35 #include "lll_adv.h"
36 #include "lll/lll_adv_pdu.h"
37 #include "lll_chan.h"
38 #include "lll_scan.h"
39 #include "lll/lll_df_types.h"
40 #include "lll_sync.h"
41 #include "lll_sync_iso.h"
42 #include "lll_conn.h"
43 #include "lll_df.h"
44
45 #include "ull_adv_types.h"
46 #include "ull_scan_types.h"
47 #include "ull_sync_types.h"
48 #include "ull_conn_types.h"
49 #include "ull_filter.h"
50 #include "ull_df_types.h"
51 #include "ull_df_internal.h"
52
53 #include "isoal.h"
54 #include "ull_internal.h"
55 #include "ull_iso_internal.h"
56 #include "ull_adv_internal.h"
57 #include "ull_scan_internal.h"
58 #include "ull_sync_internal.h"
59 #include "ull_sync_iso_internal.h"
60 #include "ull_central_internal.h"
61 #include "ull_conn_internal.h"
62 #include "lll_conn_iso.h"
63 #include "ull_conn_iso_types.h"
64 #include "ull_iso_types.h"
65 #include "ull_central_iso_internal.h"
66
67 #include "ull_conn_iso_internal.h"
68 #include "ull_peripheral_iso_internal.h"
69
70 #if defined(CONFIG_BT_CTLR_USER_EXT)
71 #include "ull_vendor.h"
72 #endif /* CONFIG_BT_CTLR_USER_EXT */
73
74 #include "ll.h"
75 #include "ll_feat.h"
76 #include "ll_test.h"
77 #include "ll_settings.h"
78
79 #define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
80 #define LOG_MODULE_NAME bt_ctlr_ull
81 #include "common/log.h"
82 #include "hal/debug.h"
83
84 #if defined(CONFIG_BT_BROADCASTER)
85 #define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1)
86 #if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
87 #define BT_ADV_AUX_TICKER_NODES ((TICKER_ID_ADV_AUX_LAST) - \
88 (TICKER_ID_ADV_AUX_BASE) + 1)
89 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
90 #define BT_ADV_SYNC_TICKER_NODES ((TICKER_ID_ADV_SYNC_LAST) - \
91 (TICKER_ID_ADV_SYNC_BASE) + 1)
92 #if defined(CONFIG_BT_CTLR_ADV_ISO)
93 #define BT_ADV_ISO_TICKER_NODES ((TICKER_ID_ADV_ISO_LAST) - \
94 (TICKER_ID_ADV_ISO_BASE) + 1)
95 #else /* !CONFIG_BT_CTLR_ADV_ISO */
96 #define BT_ADV_ISO_TICKER_NODES 0
97 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
98 #else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
99 #define BT_ADV_SYNC_TICKER_NODES 0
100 #define BT_ADV_ISO_TICKER_NODES 0
101 #endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
102 #else /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
103 #define BT_ADV_AUX_TICKER_NODES 0
104 #define BT_ADV_SYNC_TICKER_NODES 0
105 #define BT_ADV_ISO_TICKER_NODES 0
106 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
107 #else /* !CONFIG_BT_BROADCASTER */
108 #define BT_ADV_TICKER_NODES 0
109 #define BT_ADV_AUX_TICKER_NODES 0
110 #define BT_ADV_SYNC_TICKER_NODES 0
111 #define BT_ADV_ISO_TICKER_NODES 0
112 #endif /* !CONFIG_BT_BROADCASTER */
113
114 #if defined(CONFIG_BT_OBSERVER)
115 #define BT_SCAN_TICKER_NODES ((TICKER_ID_SCAN_LAST) - (TICKER_ID_SCAN_STOP) + 1)
116 #if defined(CONFIG_BT_CTLR_ADV_EXT)
117 #define BT_SCAN_AUX_TICKER_NODES ((TICKER_ID_SCAN_AUX_LAST) - \
118 (TICKER_ID_SCAN_AUX_BASE) + 1)
119 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
120 #define BT_SCAN_SYNC_TICKER_NODES ((TICKER_ID_SCAN_SYNC_LAST) - \
121 (TICKER_ID_SCAN_SYNC_BASE) + 1)
122 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
123 #define BT_SCAN_SYNC_ISO_TICKER_NODES ((TICKER_ID_SCAN_SYNC_ISO_LAST) - \
124 (TICKER_ID_SCAN_SYNC_ISO_BASE) + 1)
125 #else /* !CONFIG_BT_CTLR_SYNC_ISO */
126 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
127 #endif /* !CONFIG_BT_CTLR_SYNC_ISO */
128 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
129 #define BT_SCAN_SYNC_TICKER_NODES 0
130 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
131 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
132 #else /* !CONFIG_BT_CTLR_ADV_EXT */
133 #define BT_SCAN_AUX_TICKER_NODES 0
134 #define BT_SCAN_SYNC_TICKER_NODES 0
135 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
136 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
137 #else
138 #define BT_SCAN_TICKER_NODES 0
139 #define BT_SCAN_AUX_TICKER_NODES 0
140 #define BT_SCAN_SYNC_TICKER_NODES 0
141 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
142 #endif
143
144 #if defined(CONFIG_BT_CONN)
145 #define BT_CONN_TICKER_NODES ((TICKER_ID_CONN_LAST) - (TICKER_ID_CONN_BASE) + 1)
146 #else
147 #define BT_CONN_TICKER_NODES 0
148 #endif
149
150 #if defined(CONFIG_BT_CTLR_CONN_ISO)
151 #define BT_CIG_TICKER_NODES ((TICKER_ID_CONN_ISO_LAST) - \
152 (TICKER_ID_CONN_ISO_BASE) + 1 + \
153 (TICKER_ID_CONN_ISO_RESUME_LAST) - \
154 (TICKER_ID_CONN_ISO_RESUME_BASE) + 1)
155
156 #else
157 #define BT_CIG_TICKER_NODES 0
158 #endif
159
160 #if defined(CONFIG_BT_CTLR_USER_EXT)
161 #define USER_TICKER_NODES CONFIG_BT_CTLR_USER_TICKER_ID_RANGE
162 #else
163 #define USER_TICKER_NODES 0
164 #endif
165
166 #if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
167 #define FLASH_TICKER_NODES 2 /* No. of tickers reserved for flash
168 * driver
169 */
170 #define TICKER_USER_ULL_HIGH_FLASH_OPS 1 /* No. of additional ticker ULL_HIGH
171 * context operations
172 */
173 #define TICKER_USER_THREAD_FLASH_OPS 1 /* No. of additional ticker thread
174 * context operations
175 */
176 #else
177 #define FLASH_TICKER_NODES 0
178 #define TICKER_USER_ULL_HIGH_FLASH_OPS 0
179 #define TICKER_USER_THREAD_FLASH_OPS 0
180 #endif
181
182 /* Define ticker nodes */
183 /* NOTE: FLASH_TICKER_NODES shall be placed after the Link Layer's list of
184  * ticker id allocations; refer to ll_timeslice_ticker_id_get for how the
185  * ticker id used by the flash driver is returned.
186  */
187 #define TICKER_NODES (TICKER_ID_ULL_BASE + \
188 BT_ADV_TICKER_NODES + \
189 BT_ADV_AUX_TICKER_NODES + \
190 BT_ADV_SYNC_TICKER_NODES + \
191 BT_ADV_ISO_TICKER_NODES + \
192 BT_SCAN_TICKER_NODES + \
193 BT_SCAN_AUX_TICKER_NODES + \
194 BT_SCAN_SYNC_TICKER_NODES + \
195 BT_SCAN_SYNC_ISO_TICKER_NODES + \
196 BT_CONN_TICKER_NODES + \
197 BT_CIG_TICKER_NODES + \
198 USER_TICKER_NODES + \
199 FLASH_TICKER_NODES)
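/* Illustrative note (editorial, not from the original source): because
 * FLASH_TICKER_NODES is the last term in the sum above, the flash driver's
 * reserved tickers occupy the highest ids, i.e. from
 * (TICKER_NODES - FLASH_TICKER_NODES) up to (TICKER_NODES - 1);
 * ll_timeslice_ticker_id_get() later in this file returns the first of
 * these as the flash driver's ticker id.
 */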
200
201 /* When both central and peripheral roles are supported, one Rx node each is
202  * needed by connectable advertising and by the initiator to generate the
203  * connection complete event, hence the count is set conditionally.
204  */
205 #if defined(CONFIG_BT_MAX_CONN)
206 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
207 #define BT_CTLR_MAX_CONNECTABLE 2
208 #else
209 #define BT_CTLR_MAX_CONNECTABLE 1
210 #endif
211 #define BT_CTLR_MAX_CONN CONFIG_BT_MAX_CONN
212 #else
213 #define BT_CTLR_MAX_CONNECTABLE 0
214 #define BT_CTLR_MAX_CONN 0
215 #endif
216
217 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
218 #if defined(CONFIG_BT_CTLR_DF_CTE_RX)
219 /* Note: Need node for PDU and CTE sample */
220 #define BT_CTLR_ADV_EXT_RX_CNT (CONFIG_BT_CTLR_SCAN_AUX_SET * \
221 CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
222 #else
223 /* Note: Assume up to 7 PDUs per advertising train (max data length) */
224 #define BT_CTLR_ADV_EXT_RX_CNT (CONFIG_BT_CTLR_SCAN_AUX_SET * 7)
225 #endif
226 #else
227 #define BT_CTLR_ADV_EXT_RX_CNT 0
228 #endif
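/* Worked example (illustrative, with assumed Kconfig values): with
 * CONFIG_BT_CTLR_SCAN_AUX_SET = 1 and CTE sampling disabled, the count
 * above evaluates to 1 * 7 = 7 rx nodes; with CONFIG_BT_CTLR_DF_CTE_RX
 * enabled and, say, CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX = 8, it is
 * 1 * 8 * 2 = 16 (one node for the PDU plus one for the CTE samples).
 */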
229
230 #if !defined(TICKER_USER_LLL_VENDOR_OPS)
231 #define TICKER_USER_LLL_VENDOR_OPS 0
232 #endif /* TICKER_USER_LLL_VENDOR_OPS */
233
234 #if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
235 #define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
236 #endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
237
238 #if !defined(TICKER_USER_THREAD_VENDOR_OPS)
239 #define TICKER_USER_THREAD_VENDOR_OPS 0
240 #endif /* TICKER_USER_THREAD_VENDOR_OPS */
241
242 /* Define ticker user operations */
243 #if defined(CONFIG_BT_CTLR_LOW_LAT) && \
244 (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
245 /* NOTE: When ticker job is disabled inside radio events then all advertising,
246 * scanning, and peripheral latency cancel ticker operations will be deferred,
247 * requiring increased ticker thread context operation queue count.
248 */
249 #define TICKER_USER_THREAD_OPS (BT_CTLR_ADV_SET + BT_CTLR_SCAN_SET + \
250 BT_CTLR_MAX_CONN + \
251 TICKER_USER_THREAD_VENDOR_OPS + \
252 TICKER_USER_THREAD_FLASH_OPS + \
253 1)
254 #else /* !CONFIG_BT_CTLR_LOW_LAT */
255 /* NOTE: As ticker job is not disabled inside radio events, no need for extra
256 * thread operations queue element for flash driver.
257 */
258 #define TICKER_USER_THREAD_OPS (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
259 #endif /* !CONFIG_BT_CTLR_LOW_LAT */
260
261 #define TICKER_USER_ULL_LOW_OPS (1 + 1)
262
263 /* NOTE: When ULL_LOW priority is configured to lower than ULL_HIGH, then extra
264 * ULL_HIGH operations queue elements are required to buffer the
265 * requested ticker operations.
266 */
267 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_ADV_EXT) && \
268 defined(CONFIG_BT_CTLR_PHY_CODED)
269 #define TICKER_USER_ULL_HIGH_OPS (4 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
270 TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
271 #else /* !CONFIG_BT_CENTRAL || !CONFIG_BT_CTLR_ADV_EXT ||
272 * !CONFIG_BT_CTLR_PHY_CODED
273 */
274 #define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
275 TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
276 #endif /* !CONFIG_BT_CENTRAL || !CONFIG_BT_CTLR_ADV_EXT ||
277 * !CONFIG_BT_CTLR_PHY_CODED
278 */
279
280 #define TICKER_USER_LLL_OPS (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
281
282 #define TICKER_USER_OPS (TICKER_USER_LLL_OPS + \
283 TICKER_USER_ULL_HIGH_OPS + \
284 TICKER_USER_ULL_LOW_OPS + \
285 TICKER_USER_THREAD_OPS)
286
287 /* Memory for ticker nodes/instances */
288 static uint8_t MALIGN(4) ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
289
290 /* Memory for users/contexts operating on ticker module */
291 static uint8_t MALIGN(4) ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE];
292
293 /* Memory for user/context simultaneous API operations */
294 static uint8_t MALIGN(4) ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE];
295
296 /* Semaphore to wake up the thread on ticker API callback */
297 static struct k_sem sem_ticker_api_cb;
298
299 /* Semaphore to wake up the thread on Rx-ed objects */
300 static struct k_sem *sem_recv;
301
302 /* Declare prepare-event FIFO: mfifo_prep.
303  * Queue of struct lll_event
304 */
305 static MFIFO_DEFINE(prep, sizeof(struct lll_event), EVENT_PIPELINE_MAX);
306
307 /* Declare done-event FIFO: mfifo_done.
308 * Queue of pointers to struct node_rx_event_done.
309 * The actual backing behind these pointers is mem_done.
310 *
311 * When there are radio events with time reservations lower than the preemption
312 * timeout of 1.5 ms, the pipeline has to account for the maximum radio events
313 * that can be enqueued during the preempt timeout duration. All these enqueued
314 * events could be aborted in case of late scheduling, needing as many done
315 * event buffers.
316 *
317 * During continuous scanning, there can be 1 active radio event, 1 scan resume
318 * and 1 new scan prepare. If there are peripheral prepares in addition, and due
319 * to late scheduling all these will abort needing 4 done buffers.
320 *
321 * If there are additional peripheral prepares enqueued, which are apart by
322 * their time reservations, these are not yet late and hence no more additional
323 * done buffers are needed.
324 *
325 * If Extended Scanning is supported, then an additional auxiliary scan event's
326 * prepare could be enqueued in the pipeline during the preemption duration.
327 */
328 #if !defined(VENDOR_EVENT_DONE_MAX)
329 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
330 #define EVENT_DONE_MAX 5
331 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
332 #define EVENT_DONE_MAX 4
333 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
334 #else
335 #define EVENT_DONE_MAX VENDOR_EVENT_DONE_MAX
336 #endif
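/* Illustrative breakdown of the counts above, following the note before
 * these defines: 1 active radio event + 1 scan resume + 1 new scan prepare
 * + 1 peripheral prepare = 4 done buffers; with Extended Scanning, one
 * additional auxiliary scan prepare may be enqueued during the preempt
 * timeout, giving 5.
 */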
337
338 static MFIFO_DEFINE(done, sizeof(struct node_rx_event_done *), EVENT_DONE_MAX);
339
340 /* Backing storage for elements in mfifo_done */
341 static struct {
342 void *free;
343 uint8_t pool[sizeof(struct node_rx_event_done) * EVENT_DONE_MAX];
344 } mem_done;
345
346 static struct {
347 void *free;
348 uint8_t pool[sizeof(memq_link_t) *
349 (EVENT_DONE_MAX + EVENT_DONE_LINK_CNT)];
350 } mem_link_done;
351
352 /* Minimum number of node rx for ULL to LL/HCI thread per connection.
353  * Increasing this by a multiple of the max. simultaneous connection count
354  * will permit simultaneous parallel PHY Update or Connection Update
355  * procedures amongst active connections.
356  * The minimum of 2 node rx that can be reserved happens when a local,
357  * central-initiated PHY Update reserves 2 node rx: one for the PHY Update
358  * complete and another for the Data Length Update complete notification.
359  * Otherwise, a peripheral only needs 1 additional node rx to generate the
360  * Data Length Update complete when the PHY Update completes; the node rx
361  * for the PHY Update complete is the received PHY Update Ind PDU itself.
362  */
363 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_PHY) && \
364 defined(CONFIG_BT_CTLR_DATA_LENGTH)
365 #define LL_PDU_RX_CNT (2 * (CONFIG_BT_CTLR_LLCP_CONN))
366 #elif defined(CONFIG_BT_CONN)
367 #define LL_PDU_RX_CNT (CONFIG_BT_CTLR_LLCP_CONN)
368 #else
369 #define LL_PDU_RX_CNT 0
370 #endif
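/* Worked example (illustrative, assuming CONFIG_BT_CTLR_LLCP_CONN = 3): a
 * central build with CONFIG_BT_CTLR_PHY and CONFIG_BT_CTLR_DATA_LENGTH gets
 * LL_PDU_RX_CNT = 2 * 3 = 6, i.e. the two reserved node rx per connection
 * described in the note above; a peripheral-only build gets 3.
 */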
371
372 /* No. of node rx for LLL to ULL.
373 * Reserve 3, 1 for adv data, 1 for scan response and 1 for empty PDU reception.
374 */
375 #define PDU_RX_CNT (3 + BT_CTLR_ADV_EXT_RX_CNT + CONFIG_BT_CTLR_RX_BUFFERS)
376
377 /* Part sum of LLL to ULL and ULL to LL/HCI thread node rx count.
378 * Will be used below in allocating node rx pool.
379 */
380 #define RX_CNT (PDU_RX_CNT + LL_PDU_RX_CNT)
381
382 static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
383
384 #if defined(CONFIG_BT_RX_USER_PDU_LEN)
385 #define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
386 #else
387 #define PDU_RX_USER_PDU_OCTETS_MAX 0
388 #endif
389
390 #define PDU_ADV_SIZE MAX(PDU_AC_LL_SIZE_MAX, \
391 (PDU_AC_LL_HEADER_SIZE + LL_EXT_OCTETS_RX_MAX))
392
393 #define PDU_DATA_SIZE MAX((PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX), \
394 (PDU_BIS_LL_HEADER_SIZE + LL_BIS_OCTETS_RX_MAX))
395
396 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
397
398 #define PDU_RX_NODE_POOL_ELEMENT_SIZE MROUND(NODE_RX_HEADER_SIZE + \
399 MAX(MAX(PDU_ADV_SIZE, \
400 PDU_DATA_SIZE), \
401 PDU_RX_USER_PDU_OCTETS_MAX))
402
403 #if defined(CONFIG_BT_PER_ADV_SYNC_MAX)
404 #define BT_CTLR_SCAN_SYNC_SET CONFIG_BT_PER_ADV_SYNC_MAX
405 #else
406 #define BT_CTLR_SCAN_SYNC_SET 0
407 #endif
408
409 #if defined(CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET)
410 #define BT_CTLR_SCAN_SYNC_ISO_SET CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET
411 #else
412 #define BT_CTLR_SCAN_SYNC_ISO_SET 0
413 #endif
414
415 #define PDU_RX_POOL_SIZE (PDU_RX_NODE_POOL_ELEMENT_SIZE * \
416 (RX_CNT + BT_CTLR_MAX_CONNECTABLE + \
417 BT_CTLR_ADV_SET + BT_CTLR_SCAN_SYNC_SET))
418
419 static struct {
420 void *free;
421 uint8_t pool[PDU_RX_POOL_SIZE];
422 } mem_pdu_rx;
423
424 /* NOTE: Two memq_link structures are reserved in the case of periodic sync,
425  * one each for the sync established and sync lost events respectively.
426  * Whereas, in comparison, a connection uses the incoming Rx-ed CONNECT_IND
427  * PDU to piggyback generation of the connection complete event, and hence
428  * only one link is reserved for the generation of the disconnection event
429  * (which can happen due to supervision timeout and other reasons that do
430  * not have an incoming Rx-ed PDU).
431  */
432 #define LINK_RX_POOL_SIZE \
433 (sizeof(memq_link_t) * \
434 (RX_CNT + 2 + BT_CTLR_MAX_CONN + BT_CTLR_ADV_SET + \
435 (BT_CTLR_SCAN_SYNC_SET * 2) + (BT_CTLR_SCAN_SYNC_ISO_SET * 2) + \
436 (IQ_REPORT_CNT)))
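/* Illustrative breakdown of the link pool sizing above: RX_CNT links for
 * the LLL-to-ULL and ULL-to-thread paths, 2 additional reserved links,
 * 1 link per connection for the disconnection event, 1 per advertising
 * set, 2 per periodic sync set (sync established and sync lost, per the
 * note above), 2 per sync ISO set, and IQ_REPORT_CNT links for IQ sample
 * reports.
 */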
437 static struct {
438 uint8_t quota_pdu; /* Number of un-utilized buffers */
439
440 void *free;
441 uint8_t pool[LINK_RX_POOL_SIZE];
442 } mem_link_rx;
443
444 static MEMQ_DECLARE(ull_rx);
445 static MEMQ_DECLARE(ll_rx);
446 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
447 static MEMQ_DECLARE(ull_done);
448 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
449
450 #if defined(CONFIG_BT_CONN)
451 static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
452 static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
453 CONFIG_BT_BUF_ACL_TX_COUNT);
454
455 static void *mark_update;
456 #endif /* CONFIG_BT_CONN */
457
458 static void *mark_disable;
459
460 static inline int init_reset(void);
461 static void perform_lll_reset(void *param);
462 static inline void *mark_set(void **m, void *param);
463 static inline void *mark_unset(void **m, void *param);
464 static inline void *mark_get(void *m);
465 static inline void done_alloc(void);
466 static inline void rx_alloc(uint8_t max);
467 static void rx_demux(void *param);
468 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
469 static void rx_demux_yield(void);
470 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
471 #if defined(CONFIG_BT_CONN)
472 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last);
473 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
474 memq_link_t *link,
475 struct node_tx *node_tx);
476 #endif /* CONFIG_BT_CONN */
477 static inline int rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx);
478 static inline void rx_demux_event_done(memq_link_t *link,
479 struct node_rx_hdr *rx);
480 static inline void ll_rx_link_inc_quota(int8_t delta);
481 static void disabled_cb(void *param);
482 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
483 static void ull_done(void *param);
484 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
485
486 int ll_init(struct k_sem *sem_rx)
487 {
488 int err;
489
490 /* Store the semaphore to be used to wakeup Thread context */
491 sem_recv = sem_rx;
492
493 /* Initialize counter */
494 /* TODO: Bind and use counter driver? */
495 cntr_init();
496
497 /* Initialize Mayfly */
498 mayfly_init();
499
500 /* Initialize Ticker */
501 ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS;
502 ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS;
503 ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS;
504 ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_THREAD_OPS;
505
506 err = ticker_init(TICKER_INSTANCE_ID_CTLR,
507 TICKER_NODES, &ticker_nodes[0],
508 MAYFLY_CALLER_COUNT, &ticker_users[0],
509 TICKER_USER_OPS, &ticker_user_ops[0],
510 hal_ticker_instance0_caller_id_get,
511 hal_ticker_instance0_sched,
512 hal_ticker_instance0_trigger_set);
513 LL_ASSERT(!err);
514
515 /* Initialize semaphore for ticker API blocking wait */
516 k_sem_init(&sem_ticker_api_cb, 0, 1);
517
518 /* Initialize LLL */
519 err = lll_init();
520 if (err) {
521 return err;
522 }
523
524 /* Initialize ULL internals */
525 /* TODO: globals? */
526
527 /* Common to init and reset */
528 err = init_reset();
529 if (err) {
530 return err;
531 }
532
533 #if defined(CONFIG_BT_BROADCASTER)
534 err = lll_adv_init();
535 if (err) {
536 return err;
537 }
538
539 err = ull_adv_init();
540 if (err) {
541 return err;
542 }
543 #endif /* CONFIG_BT_BROADCASTER */
544
545 #if defined(CONFIG_BT_OBSERVER)
546 err = lll_scan_init();
547 if (err) {
548 return err;
549 }
550
551 err = ull_scan_init();
552 if (err) {
553 return err;
554 }
555 #endif /* CONFIG_BT_OBSERVER */
556
557 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
558 err = lll_sync_init();
559 if (err) {
560 return err;
561 }
562
563 err = ull_sync_init();
564 if (err) {
565 return err;
566 }
567
568 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
569 err = ull_sync_iso_init();
570 if (err) {
571 return err;
572 }
573 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
574 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
575
576 #if defined(CONFIG_BT_CONN)
577 err = lll_conn_init();
578 if (err) {
579 return err;
580 }
581
582 err = ull_conn_init();
583 if (err) {
584 return err;
585 }
586 #endif /* CONFIG_BT_CONN */
587
588 #if defined(CONFIG_BT_CTLR_DF)
589 err = ull_df_init();
590 if (err) {
591 return err;
592 }
593 #endif
594
595 #if defined(CONFIG_BT_CTLR_ISO)
596 err = ull_iso_init();
597 if (err) {
598 return err;
599 }
600 #endif /* CONFIG_BT_CTLR_ISO */
601
602 #if defined(CONFIG_BT_CTLR_CONN_ISO)
603 err = ull_conn_iso_init();
604 if (err) {
605 return err;
606 }
607 #endif /* CONFIG_BT_CTLR_CONN_ISO */
608
609 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
610 err = ull_peripheral_iso_init();
611 if (err) {
612 return err;
613 }
614 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
615
616 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
617 err = ull_central_iso_init();
618 if (err) {
619 return err;
620 }
621 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
622
623 #if defined(CONFIG_BT_CTLR_ADV_ISO)
624 err = ull_adv_iso_init();
625 if (err) {
626 return err;
627 }
628 #endif /* CONFIG_BT_CTLR_ADV_ISO */
629
630 #if defined(CONFIG_BT_CTLR_DF)
631 err = lll_df_init();
632 if (err) {
633 return err;
634 }
635 #endif
636
637 #if defined(CONFIG_BT_CTLR_USER_EXT)
638 err = ull_user_init();
639 if (err) {
640 return err;
641 }
642 #endif /* CONFIG_BT_CTLR_USER_EXT */
643
644 /* Reset filter accept list, resolving list and initialize RPA timeout */
645 if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
646 ull_filter_reset(true);
647 }
648
649 #if defined(CONFIG_BT_CTLR_TEST)
650 lll_chan_sel_2_ut();
651 #endif /* CONFIG_BT_CTLR_TEST */
652
653 return 0;
654 }
655
656 void ll_reset(void)
657 {
658 int err;
659
660 /* Note: The sequence of reset control flow is as follows:
661 * - Reset ULL context, i.e. stop ULL scheduling, abort LLL events etc.
662 * - Reset LLL context, i.e. post LLL event abort, let LLL cleanup its
663 * variables, if any.
664 * - Reset ULL static variables (which would otherwise only be mem-zeroed
665 *   on a power-on reset, wherein the architecture startup mem-zeroes the
666 *   .bss section).
667 * - Initialize ULL context variable, similar to on-power-up.
668 */
669
670 #if defined(CONFIG_BT_BROADCASTER)
671 /* Reset adv state */
672 err = ull_adv_reset();
673 LL_ASSERT(!err);
674 #endif /* CONFIG_BT_BROADCASTER */
675
676 #if defined(CONFIG_BT_OBSERVER)
677 /* Reset scan state */
678 err = ull_scan_reset();
679 LL_ASSERT(!err);
680 #endif /* CONFIG_BT_OBSERVER */
681
682 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
683 /* Reset periodic sync sets */
684 err = ull_sync_reset();
685 LL_ASSERT(!err);
686 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
687 /* Reset periodic sync ISO sets */
688 err = ull_sync_iso_reset();
689 LL_ASSERT(!err);
690 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
691 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
692
693 #if defined(CONFIG_BT_CTLR_ISO)
694 err = ull_iso_reset();
695 LL_ASSERT(!err);
696 #endif /* CONFIG_BT_CTLR_ISO */
697
698 #if defined(CONFIG_BT_CTLR_CONN_ISO)
699 err = ull_conn_iso_reset();
700 LL_ASSERT(!err);
701 #endif /* CONFIG_BT_CTLR_CONN_ISO */
702
703 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
704 err = ull_peripheral_iso_reset();
705 LL_ASSERT(!err);
706 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
707
708 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
709 err = ull_central_iso_reset();
710 LL_ASSERT(!err);
711 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
712
713 #if defined(CONFIG_BT_CTLR_ADV_ISO)
714 /* Reset advertising ISO sets */
715 err = ull_adv_iso_reset();
716 LL_ASSERT(!err);
717 #endif /* CONFIG_BT_CTLR_ADV_ISO */
718
719 #if defined(CONFIG_BT_CONN)
720 /* Reset conn role */
721 err = ull_conn_reset();
722 LL_ASSERT(!err);
723
724 MFIFO_INIT(tx_ack);
725 #endif /* CONFIG_BT_CONN */
726
727 /* reset filter accept list and resolving list */
728 if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
729 ull_filter_reset(false);
730 }
731
732 /* Re-initialize ULL internals */
733
734 /* Re-initialize the prep mfifo */
735 MFIFO_INIT(prep);
736
737 /* Re-initialize the free done mfifo */
738 MFIFO_INIT(done);
739
740 /* Re-initialize the free rx mfifo */
741 MFIFO_INIT(pdu_rx_free);
742
743 #if defined(CONFIG_BT_CONN)
744 /* Re-initialize the free ll rx mfifo */
745 MFIFO_INIT(ll_pdu_rx_free);
746 #endif /* CONFIG_BT_CONN */
747
748 /* Reset LLL via mayfly */
749 {
750 static memq_link_t link;
751 static struct mayfly mfy = {0, 0, &link, NULL,
752 perform_lll_reset};
753 uint32_t retval;
754
755 /* NOTE: If Zero Latency Interrupt is used, then the LLL context
756 * will be the highest priority IRQ in the system, hence
757 * mayfly_enqueue will run the callee inline (i.e. vector to the
758 * callee function) from within this function. Otherwise, a
759 * semaphore is used to wait for perform_lll_reset to
760 * complete.
761 */
762
763 #if !defined(CONFIG_BT_CTLR_ZLI)
764 struct k_sem sem;
765
766 k_sem_init(&sem, 0, 1);
767 mfy.param = &sem;
768 #endif /* !CONFIG_BT_CTLR_ZLI */
769
770 retval = mayfly_enqueue(TICKER_USER_ID_THREAD,
771 TICKER_USER_ID_LLL, 0, &mfy);
772 LL_ASSERT(!retval);
773
774 #if !defined(CONFIG_BT_CTLR_ZLI)
775 /* LLL reset must complete before returning - wait for
776 * reset completion in LLL mayfly thread
777 */
778 k_sem_take(&sem, K_FOREVER);
779 #endif /* !CONFIG_BT_CTLR_ZLI */
780 }
781
782 #if defined(CONFIG_BT_BROADCASTER)
783 /* Finalize after adv state LLL context reset */
784 err = ull_adv_reset_finalize();
785 LL_ASSERT(!err);
786 #endif /* CONFIG_BT_BROADCASTER */
787
788 /* Reset/End DTM Tx or Rx commands */
789 if (IS_ENABLED(CONFIG_BT_CTLR_DTM)) {
790 uint16_t num_rx;
791
792 (void)ll_test_end(&num_rx);
793 ARG_UNUSED(num_rx);
794 }
795
796 /* Common to init and reset */
797 err = init_reset();
798 LL_ASSERT(!err);
799
800 #if defined(CONFIG_BT_CTLR_DF)
801 /* Direction Finding has to be reset after the ull init_reset call because
802 * it uses mem_link_rx for node_rx_iq_report. The mem_link_rx is reset
803 * in the common ull init_reset.
804 */
805 err = ull_df_reset();
806 LL_ASSERT(!err);
807 #endif
808 }
809
810 /**
811 * @brief Peek the next node_rx to send up to Host
812 * @details Tightly coupled with prio_recv_thread()
813 * Execution context: Controller thread
814 *
815 * @param node_rx[out] Pointer to rx node at head of queue
816 * @param handle[out] Connection handle
817 * @return Number of completed Tx nodes
818 */
819 uint8_t ll_rx_get(void **node_rx, uint16_t *handle)
820 {
821 struct node_rx_hdr *rx;
822 memq_link_t *link;
823 uint8_t cmplt = 0U;
824
825 #if defined(CONFIG_BT_CONN) || \
826 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
827 defined(CONFIG_BT_CTLR_ADV_PERIODIC)
828 ll_rx_get_again:
829 #endif /* CONFIG_BT_CONN ||
830 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
831 * CONFIG_BT_CTLR_ADV_PERIODIC
832 */
833
834 *node_rx = NULL;
835
836 link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx);
837 if (link) {
838 #if defined(CONFIG_BT_CONN)
839 cmplt = tx_cmplt_get(handle, &mfifo_tx_ack.f, rx->ack_last);
840 if (!cmplt) {
841 uint8_t f, cmplt_prev, cmplt_curr;
842 uint16_t h;
843
844 cmplt_curr = 0U;
845 f = mfifo_tx_ack.f;
846 do {
847 cmplt_prev = cmplt_curr;
848 cmplt_curr = tx_cmplt_get(&h, &f,
849 mfifo_tx_ack.l);
850 } while ((cmplt_prev != 0U) ||
851 (cmplt_prev != cmplt_curr));
852 #endif /* CONFIG_BT_CONN */
853
854 if (0) {
855 #if defined(CONFIG_BT_CONN) || \
856 (defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT))
857 /* Do not send up buffers to Host thread that are
858 * marked for release
859 */
860 } else if (rx->type == NODE_RX_TYPE_RELEASE) {
861 (void)memq_dequeue(memq_ll_rx.tail,
862 &memq_ll_rx.head, NULL);
863 mem_release(link, &mem_link_rx.free);
864
865 ll_rx_link_inc_quota(1);
866
867 mem_release(rx, &mem_pdu_rx.free);
868
869 rx_alloc(1);
870
871 goto ll_rx_get_again;
872 #endif /* CONFIG_BT_CONN ||
873 * (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT)
874 */
875
876 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
877 } else if (rx->type == NODE_RX_TYPE_SYNC_CHM_COMPLETE) {
878 (void)memq_dequeue(memq_ll_rx.tail,
879 &memq_ll_rx.head, NULL);
880 mem_release(link, &mem_link_rx.free);
881
882 ll_rx_link_inc_quota(1);
883
884 /* Remove Channel Map Update Indication from
885 * ACAD.
886 */
887 ull_adv_sync_chm_complete(rx);
888
889 mem_release(rx, &mem_pdu_rx.free);
890
891 rx_alloc(1);
892
893 goto ll_rx_get_again;
894 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
895 }
896
897 *node_rx = rx;
898
899 #if defined(CONFIG_BT_CONN)
900 }
901 } else {
902 cmplt = tx_cmplt_get(handle, &mfifo_tx_ack.f, mfifo_tx_ack.l);
903 #endif /* CONFIG_BT_CONN */
904 }
905
906 return cmplt;
907 }
908
909 /**
910 * @brief Commit the dequeue from memq_ll_rx, where ll_rx_get() did the peek
911 * @details Execution context: Controller thread
912 */
913 void ll_rx_dequeue(void)
914 {
915 struct node_rx_hdr *rx = NULL;
916 memq_link_t *link;
917
918 link = memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head,
919 (void **)&rx);
920 LL_ASSERT(link);
921
922 mem_release(link, &mem_link_rx.free);
923
924 /* handle object specific clean up */
925 switch (rx->type) {
926 #if defined(CONFIG_BT_CTLR_ADV_EXT)
927 #if defined(CONFIG_BT_OBSERVER)
928 case NODE_RX_TYPE_EXT_1M_REPORT:
929 case NODE_RX_TYPE_EXT_2M_REPORT:
930 case NODE_RX_TYPE_EXT_CODED_REPORT:
931 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
932 case NODE_RX_TYPE_SYNC_REPORT:
933 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
934 {
935 struct node_rx_hdr *rx_curr;
936 struct pdu_adv *adv;
937
938 adv = (void *)((struct node_rx_pdu *)rx)->pdu;
939 if (adv->type != PDU_ADV_TYPE_EXT_IND) {
940 break;
941 }
942
943 rx_curr = rx->rx_ftr.extra;
944 while (rx_curr) {
945 memq_link_t *link_free;
946
947 link_free = rx_curr->link;
948 rx_curr = rx_curr->rx_ftr.extra;
949
950 mem_release(link_free, &mem_link_rx.free);
951 }
952 }
953 break;
954
955 case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
956 {
957 ull_scan_term_dequeue(rx->handle);
958 }
959 break;
960 #endif /* CONFIG_BT_OBSERVER */
961
962 #if defined(CONFIG_BT_BROADCASTER)
963 case NODE_RX_TYPE_EXT_ADV_TERMINATE:
964 {
965 struct ll_adv_set *adv;
966 struct lll_adv_aux *lll_aux;
967
968 adv = ull_adv_set_get(rx->handle);
969 LL_ASSERT(adv);
970
971 lll_aux = adv->lll.aux;
972 if (lll_aux) {
973 struct ll_adv_aux_set *aux;
974
975 aux = HDR_LLL2ULL(lll_aux);
976
977 aux->is_started = 0U;
978 }
979
980 #if defined(CONFIG_BT_PERIPHERAL)
981 struct lll_conn *lll_conn = adv->lll.conn;
982
983 if (!lll_conn) {
984 adv->is_enabled = 0U;
985
986 break;
987 }
988
989 LL_ASSERT(!lll_conn->link_tx_free);
990
991 memq_link_t *link = memq_deinit(&lll_conn->memq_tx.head,
992 &lll_conn->memq_tx.tail);
993 LL_ASSERT(link);
994
995 lll_conn->link_tx_free = link;
996
997 struct ll_conn *conn = HDR_LLL2ULL(lll_conn);
998
999 ll_conn_release(conn);
1000 adv->lll.conn = NULL;
1001
1002 ll_rx_release(adv->node_rx_cc_free);
1003 adv->node_rx_cc_free = NULL;
1004
1005 ll_rx_link_release(adv->link_cc_free);
1006 adv->link_cc_free = NULL;
1007 #endif /* CONFIG_BT_PERIPHERAL */
1008
1009 adv->is_enabled = 0U;
1010 }
1011 break;
1012 #endif /* CONFIG_BT_BROADCASTER */
1013 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1014
1015 #if defined(CONFIG_BT_CONN)
1016 case NODE_RX_TYPE_CONNECTION:
1017 {
1018 struct node_rx_cc *cc = (void *)((struct node_rx_pdu *)rx)->pdu;
1019 struct node_rx_ftr *ftr = &(rx->rx_ftr);
1020
1021 if (0) {
1022
1023 #if defined(CONFIG_BT_PERIPHERAL)
1024 } else if ((cc->status == BT_HCI_ERR_ADV_TIMEOUT) || cc->role) {
1025 struct ll_adv_set *adv;
1026 struct lll_adv *lll;
1027
1028 /* Get reference to ULL context */
1029 lll = ftr->param;
1030 adv = HDR_LLL2ULL(lll);
1031
1032 if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1033 struct lll_conn *conn_lll;
1034 struct ll_conn *conn;
1035 memq_link_t *link;
1036
1037 conn_lll = lll->conn;
1038 LL_ASSERT(conn_lll);
1039 lll->conn = NULL;
1040
1041 LL_ASSERT(!conn_lll->link_tx_free);
1042 link = memq_deinit(&conn_lll->memq_tx.head,
1043 &conn_lll->memq_tx.tail);
1044 LL_ASSERT(link);
1045 conn_lll->link_tx_free = link;
1046
1047 conn = HDR_LLL2ULL(conn_lll);
1048 ll_conn_release(conn);
1049 } else {
1050 /* Release un-utilized node rx */
1051 if (adv->node_rx_cc_free) {
1052 void *rx_free;
1053
1054 rx_free = adv->node_rx_cc_free;
1055 adv->node_rx_cc_free = NULL;
1056
1057 mem_release(rx_free, &mem_pdu_rx.free);
1058 }
1059 }
1060
1061 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1062 if (lll->aux) {
1063 struct ll_adv_aux_set *aux;
1064
1065 aux = HDR_LLL2ULL(lll->aux);
1066 aux->is_started = 0U;
1067 }
1068 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1069
1070 adv->is_enabled = 0U;
1071 #else /* !CONFIG_BT_PERIPHERAL */
1072 ARG_UNUSED(cc);
1073 #endif /* !CONFIG_BT_PERIPHERAL */
1074
1075 #if defined(CONFIG_BT_CENTRAL)
1076 } else {
1077 struct ll_scan_set *scan = HDR_LLL2ULL(ftr->param);
1078
1079 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
1080 struct ll_scan_set *scan_other =
1081 ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
1082
1083 if (scan_other) {
1084 if (scan_other == scan) {
1085 scan_other = ull_scan_is_enabled_get(SCAN_HANDLE_1M);
1086 }
1087
1088 if (scan_other) {
1089 scan_other->lll.conn = NULL;
1090 scan_other->is_enabled = 0U;
1091 }
1092 }
1093 #endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */
1094
1095 scan->lll.conn = NULL;
1096 scan->is_enabled = 0U;
1097 #else /* !CONFIG_BT_CENTRAL */
1098 } else {
1099 LL_ASSERT(0);
1100 #endif /* !CONFIG_BT_CENTRAL */
1101 }
1102
1103 if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
1104 uint8_t bm;
1105
1106 /* FIXME: use the correct adv and scan set to get
1107 * enabled status bitmask
1108 */
1109 bm = (IS_ENABLED(CONFIG_BT_OBSERVER) &&
1110 (ull_scan_is_enabled(0) << 1)) |
1111 (IS_ENABLED(CONFIG_BT_BROADCASTER) &&
1112 ull_adv_is_enabled(0));
1113
1114 if (!bm) {
1115 ull_filter_adv_scan_state_cb(0);
1116 }
1117 }
1118 }
1119 break;
1120
1121 case NODE_RX_TYPE_TERMINATE:
1122 case NODE_RX_TYPE_DC_PDU:
1123 #endif /* CONFIG_BT_CONN */
1124
1125 #if defined(CONFIG_BT_OBSERVER)
1126 case NODE_RX_TYPE_REPORT:
1127
1128 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1129 /* fall through */
1130 case NODE_RX_TYPE_SYNC:
1131 case NODE_RX_TYPE_SYNC_LOST:
1132 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1133 #endif /* CONFIG_BT_OBSERVER */
1134
1135 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1136 case NODE_RX_TYPE_SCAN_REQ:
1137 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1138
1139 #if defined(CONFIG_BT_CONN)
1140 case NODE_RX_TYPE_CONN_UPDATE:
1141 case NODE_RX_TYPE_ENC_REFRESH:
1142
1143 #if defined(CONFIG_BT_CTLR_LE_PING)
1144 case NODE_RX_TYPE_APTO:
1145 #endif /* CONFIG_BT_CTLR_LE_PING */
1146
1147 case NODE_RX_TYPE_CHAN_SEL_ALGO:
1148
1149 #if defined(CONFIG_BT_CTLR_PHY)
1150 case NODE_RX_TYPE_PHY_UPDATE:
1151 #endif /* CONFIG_BT_CTLR_PHY */
1152
1153 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1154 case NODE_RX_TYPE_RSSI:
1155 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1156 #endif /* CONFIG_BT_CONN */
1157
1158 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1159 case NODE_RX_TYPE_PROFILE:
1160 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1161
1162 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1163 case NODE_RX_TYPE_ADV_INDICATION:
1164 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1165
1166 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1167 case NODE_RX_TYPE_SCAN_INDICATION:
1168 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1169
1170 #if defined(CONFIG_BT_HCI_MESH_EXT)
1171 case NODE_RX_TYPE_MESH_ADV_CPLT:
1172 case NODE_RX_TYPE_MESH_REPORT:
1173 #endif /* CONFIG_BT_HCI_MESH_EXT */
1174
1175 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1176 case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1177 __fallthrough;
1178 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1179
1180 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1181 case NODE_RX_TYPE_CIS_REQUEST:
1182 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1183
1184 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1185 case NODE_RX_TYPE_CIS_ESTABLISHED:
1186 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1187
1188 #if defined(CONFIG_BT_CTLR_ISO)
1189 case NODE_RX_TYPE_ISO_PDU:
1190 #endif
1191
1192 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1193 case NODE_RX_TYPE_IQ_SAMPLE_REPORT:
1194 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1195
1196 /* Ensure that at least one 'case' statement is present for this
1197 * code block.
1198 */
1199 case NODE_RX_TYPE_NONE:
1200 LL_ASSERT(rx->type != NODE_RX_TYPE_NONE);
1201 break;
1202
1203 default:
1204 LL_ASSERT(0);
1205 break;
1206 }
1207
1208 /* FIXME: clean up when porting Mesh Ext. */
1209 if (0) {
1210 #if defined(CONFIG_BT_HCI_MESH_EXT)
1211 } else if (rx->type == NODE_RX_TYPE_MESH_ADV_CPLT) {
1212 struct ll_adv_set *adv;
1213 struct ll_scan_set *scan;
1214
1215 adv = ull_adv_is_enabled_get(0);
1216 LL_ASSERT(adv);
1217 adv->is_enabled = 0U;
1218
1219 scan = ull_scan_is_enabled_get(0);
1220 LL_ASSERT(scan);
1221
1222 scan->is_enabled = 0U;
1223
1224 ll_adv_scan_state_cb(0);
1225 #endif /* CONFIG_BT_HCI_MESH_EXT */
1226 }
1227 }
1228
1229 void ll_rx_mem_release(void **node_rx)
1230 {
1231 struct node_rx_hdr *rx;
1232
1233 rx = *node_rx;
1234 while (rx) {
1235 struct node_rx_hdr *rx_free;
1236
1237 rx_free = rx;
1238 rx = rx->next;
1239
1240 switch (rx_free->type) {
1241 #if defined(CONFIG_BT_BROADCASTER)
1242 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1243 case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1244 mem_release(rx_free, &mem_pdu_rx.free);
1245 break;
1246 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1247 #endif /* CONFIG_BT_BROADCASTER */
1248
1249 #if defined(CONFIG_BT_OBSERVER)
1250 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1251 case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1252 {
1253 mem_release(rx_free, &mem_pdu_rx.free);
1254 }
1255 break;
1256 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1257 #endif /* CONFIG_BT_OBSERVER */
1258
1259 #if defined(CONFIG_BT_CONN)
1260 case NODE_RX_TYPE_CONNECTION:
1261 {
1262 struct node_rx_cc *cc =
1263 (void *)((struct node_rx_pdu *)rx_free)->pdu;
1264
1265 if (0) {
1266
1267 #if defined(CONFIG_BT_PERIPHERAL)
1268 } else if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1269 mem_release(rx_free, &mem_pdu_rx.free);
1270
1271 break;
1272 #endif /* CONFIG_BT_PERIPHERAL */
1273
1274 #if defined(CONFIG_BT_CENTRAL)
1275 } else if (cc->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
1276 ull_central_cleanup(rx_free);
1277
1278 #if defined(CONFIG_BT_CTLR_PRIVACY)
1279 #if defined(CONFIG_BT_BROADCASTER)
1280 if (!ull_adv_is_enabled_get(0))
1281 #endif /* CONFIG_BT_BROADCASTER */
1282 {
1283 ull_filter_adv_scan_state_cb(0);
1284 }
1285 #endif /* CONFIG_BT_CTLR_PRIVACY */
1286 break;
1287 #endif /* CONFIG_BT_CENTRAL */
1288
1289 } else {
1290 LL_ASSERT(!cc->status);
1291 }
1292 }
1293
1294 __fallthrough;
1295 case NODE_RX_TYPE_DC_PDU:
1296 #endif /* CONFIG_BT_CONN */
1297
1298 #if defined(CONFIG_BT_OBSERVER)
1299 case NODE_RX_TYPE_REPORT:
1300
1301 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1302 __fallthrough;
1303 case NODE_RX_TYPE_EXT_1M_REPORT:
1304 case NODE_RX_TYPE_EXT_2M_REPORT:
1305 case NODE_RX_TYPE_EXT_CODED_REPORT:
1306 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1307 case NODE_RX_TYPE_SYNC_REPORT:
1308 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1309 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1310 #endif /* CONFIG_BT_OBSERVER */
1311
1312 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1313 case NODE_RX_TYPE_SCAN_REQ:
1314 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1315
1316 #if defined(CONFIG_BT_CONN)
1317 case NODE_RX_TYPE_CONN_UPDATE:
1318 case NODE_RX_TYPE_ENC_REFRESH:
1319
1320 #if defined(CONFIG_BT_CTLR_LE_PING)
1321 case NODE_RX_TYPE_APTO:
1322 #endif /* CONFIG_BT_CTLR_LE_PING */
1323
1324 case NODE_RX_TYPE_CHAN_SEL_ALGO:
1325
1326 #if defined(CONFIG_BT_CTLR_PHY)
1327 case NODE_RX_TYPE_PHY_UPDATE:
1328 #endif /* CONFIG_BT_CTLR_PHY */
1329
1330 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1331 case NODE_RX_TYPE_RSSI:
1332 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1333 #endif /* CONFIG_BT_CONN */
1334
1335 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1336 case NODE_RX_TYPE_PROFILE:
1337 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1338
1339 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1340 case NODE_RX_TYPE_ADV_INDICATION:
1341 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1342
1343 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1344 case NODE_RX_TYPE_SCAN_INDICATION:
1345 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1346
1347 #if defined(CONFIG_BT_HCI_MESH_EXT)
1348 case NODE_RX_TYPE_MESH_ADV_CPLT:
1349 case NODE_RX_TYPE_MESH_REPORT:
1350 #endif /* CONFIG_BT_HCI_MESH_EXT */
1351
1352 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1353 case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1354 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1355
1356 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1357 case NODE_RX_TYPE_CIS_REQUEST:
1358 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1359
1360 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1361 case NODE_RX_TYPE_CIS_ESTABLISHED:
1362 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1363
1364 #if defined(CONFIG_BT_CTLR_ISO)
1365 case NODE_RX_TYPE_ISO_PDU:
1366 #endif
1367
1368 /* Ensure that at least one 'case' statement is present for this
1369 * code block.
1370 */
1371 case NODE_RX_TYPE_NONE:
1372 LL_ASSERT(rx_free->type != NODE_RX_TYPE_NONE);
1373 ll_rx_link_inc_quota(1);
1374 mem_release(rx_free, &mem_pdu_rx.free);
1375 break;
1376
1377 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1378 case NODE_RX_TYPE_SYNC:
1379 {
1380 struct node_rx_sync *se =
1381 (void *)((struct node_rx_pdu *)rx_free)->pdu;
1382
1383 if (!se->status) {
1384 mem_release(rx_free, &mem_pdu_rx.free);
1385
1386 break;
1387 }
1388 }
1389 /* Pass through */
1390
1391 case NODE_RX_TYPE_SYNC_LOST:
1392 {
1393 struct ll_sync_set *sync =
1394 (void *)rx_free->rx_ftr.param;
1395
1396 sync->timeout_reload = 0U;
1397
1398 ull_sync_release(sync);
1399 }
1400 break;
1401 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1402 case NODE_RX_TYPE_IQ_SAMPLE_REPORT:
1403 {
1404 ull_iq_report_link_inc_quota(1);
1405 ull_df_iq_report_mem_release(rx_free);
1406 ull_df_rx_iq_report_alloc(1);
1407 }
1408 break;
1409 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1410 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1411
1412 #if defined(CONFIG_BT_CONN)
1413 case NODE_RX_TYPE_TERMINATE:
1414 {
1415 struct ll_conn *conn;
1416 memq_link_t *link;
1417
1418 conn = ll_conn_get(rx_free->handle);
1419
1420 LL_ASSERT(!conn->lll.link_tx_free);
1421 link = memq_deinit(&conn->lll.memq_tx.head,
1422 &conn->lll.memq_tx.tail);
1423 LL_ASSERT(link);
1424 conn->lll.link_tx_free = link;
1425
1426 ll_conn_release(conn);
1427 }
1428 break;
1429 #endif /* CONFIG_BT_CONN */
1430
1431 case NODE_RX_TYPE_EVENT_DONE:
1432 default:
1433 LL_ASSERT(0);
1434 break;
1435 }
1436 }
1437
1438 *node_rx = rx;
1439
1440 rx_alloc(UINT8_MAX);
1441 }
1442
1443 static inline void ll_rx_link_inc_quota(int8_t delta)
1444 {
1445 LL_ASSERT(delta <= 0 || mem_link_rx.quota_pdu < RX_CNT);
1446 mem_link_rx.quota_pdu += delta;
1447 }
1448
1449 void *ll_rx_link_alloc(void)
1450 {
1451 return mem_acquire(&mem_link_rx.free);
1452 }
1453
1454 void ll_rx_link_release(void *link)
1455 {
1456 mem_release(link, &mem_link_rx.free);
1457 }
1458
1459 void *ll_rx_alloc(void)
1460 {
1461 return mem_acquire(&mem_pdu_rx.free);
1462 }
1463
1464 void ll_rx_release(void *node_rx)
1465 {
1466 mem_release(node_rx, &mem_pdu_rx.free);
1467 }
1468
1469 void ll_rx_put(memq_link_t *link, void *rx)
1470 {
1471 #if defined(CONFIG_BT_CONN)
1472 struct node_rx_hdr *rx_hdr = rx;
1473
1474 /* Serialize Tx ack with Rx enqueue by storing reference to
1475 * last element index in Tx ack FIFO.
1476 */
1477 rx_hdr->ack_last = mfifo_tx_ack.l;
1478 #endif /* CONFIG_BT_CONN */
1479
1480 /* Enqueue the Rx object */
1481 memq_enqueue(link, rx, &memq_ll_rx.tail);
1482 }
1483
1484 /**
1485 * @brief Permit another loop in the controller thread (prio_recv_thread)
1486 * @details Execution context: ULL mayfly
1487 */
1488 void ll_rx_sched(void)
1489 {
1490 /* sem_recv references the same semaphore (sem_prio_recv)
1491 * in prio_recv_thread
1492 */
1493 k_sem_give(sem_recv);
1494 }
1495
1496 #if defined(CONFIG_BT_CONN)
1497 void *ll_pdu_rx_alloc_peek(uint8_t count)
1498 {
1499 if (count > MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) {
1500 return NULL;
1501 }
1502
1503 return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
1504 }
1505
1506 void *ll_pdu_rx_alloc(void)
1507 {
1508 return MFIFO_DEQUEUE(ll_pdu_rx_free);
1509 }
1510
1511 void ll_tx_ack_put(uint16_t handle, struct node_tx *node_tx)
1512 {
1513 struct lll_tx *tx;
1514 uint8_t idx;
1515
1516 idx = MFIFO_ENQUEUE_GET(tx_ack, (void **)&tx);
1517 LL_ASSERT(tx);
1518
1519 tx->handle = handle;
1520 tx->node = node_tx;
1521
1522 MFIFO_ENQUEUE(tx_ack, idx);
1523 }
1524 #endif /* CONFIG_BT_CONN */
1525
1526 void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
1527 uint8_t * const ticker_id)
1528 {
1529 *instance_index = TICKER_INSTANCE_ID_CTLR;
1530 *ticker_id = (TICKER_NODES - FLASH_TICKER_NODES);
1531 }
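/* Illustrative note: with CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER enabled,
 * FLASH_TICKER_NODES is 2, so the value returned above is the first of the
 * two ticker ids reserved at the end of the id range by the TICKER_NODES
 * definition near the top of this file.
 */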
1532
1533 void ll_radio_state_abort(void)
1534 {
1535 static memq_link_t link;
1536 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1537 uint32_t ret;
1538
1539 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1540 &mfy);
1541 LL_ASSERT(!ret);
1542 }
1543
1544 uint32_t ll_radio_state_is_idle(void)
1545 {
1546 return lll_radio_is_idle();
1547 }
1548
1549 void ull_ticker_status_give(uint32_t status, void *param)
1550 {
1551 *((uint32_t volatile *)param) = status;
1552
1553 k_sem_give(&sem_ticker_api_cb);
1554 }
1555
1556 uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
1557 {
1558 if (ret == TICKER_STATUS_BUSY) {
1559 /* TODO: Enable ticker job in case of CONFIG_BT_CTLR_LOW_LAT */
1560 } else {
1561 /* Check for ticker operation enqueue failed, in which case
1562 * function return value (ret) will be TICKER_STATUS_FAILURE
1563 * and callback return value (ret_cb) will remain as
1564 * TICKER_STATUS_BUSY.
1565 * This assert check will avoid waiting forever to take the
1566 * semaphore that will never be given when the ticker operation
1567 * callback does not get called due to enqueue failure.
1568 */
1569 LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
1570 (*ret_cb != TICKER_STATUS_BUSY));
1571 }
1572
1573 k_sem_take(&sem_ticker_api_cb, K_FOREVER);
1574
1575 return *ret_cb;
1576 }
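/* Typical usage of the give/take pair above (the same pattern is used in
 * ull_ticker_stop_with_mark() below):
 *
 *   uint32_t volatile ret_cb = TICKER_STATUS_BUSY;
 *   uint32_t ret;
 *
 *   ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
 *                     ticker_handle, ull_ticker_status_give,
 *                     (void *)&ret_cb);
 *   ret = ull_ticker_status_take(ret, &ret_cb);
 *
 * The thread blocks on sem_ticker_api_cb until the ticker operation
 * callback has run, then the callback status is returned.
 */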
1577
1578 void *ull_disable_mark(void *param)
1579 {
1580 return mark_set(&mark_disable, param);
1581 }
1582
1583 void *ull_disable_unmark(void *param)
1584 {
1585 return mark_unset(&mark_disable, param);
1586 }
1587
1588 void *ull_disable_mark_get(void)
1589 {
1590 return mark_get(mark_disable);
1591 }
1592
1593 /**
1594 * @brief Stops a specified ticker using the ull_disable_(un)mark functions.
1595 *
1596 * @param ticker_handle The handle of the ticker.
1597 * @param param The object to mark.
1598 * @param lll_disable Optional object when calling @ref ull_disable
1599 *
1600 * @return 0 if success, else ERRNO.
1601 */
1602 int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param,
1603 void *lll_disable)
1604 {
1605 uint32_t volatile ret_cb;
1606 uint32_t ret;
1607 void *mark;
1608
1609 mark = ull_disable_mark(param);
1610 if (mark != param) {
1611 return -ENOLCK;
1612 }
1613
1614 ret_cb = TICKER_STATUS_BUSY;
1615 ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1616 ticker_handle, ull_ticker_status_give,
1617 (void *)&ret_cb);
1618 ret = ull_ticker_status_take(ret, &ret_cb);
1619 if (ret) {
1620 mark = ull_disable_unmark(param);
1621 if (mark != param) {
1622 return -ENOLCK;
1623 }
1624
1625 return -EALREADY;
1626 }
1627
1628 ret = ull_disable(lll_disable);
1629 if (ret) {
1630 return -EBUSY;
1631 }
1632
1633 mark = ull_disable_unmark(param);
1634 if (mark != param) {
1635 return -ENOLCK;
1636 }
1637
1638 return 0;
1639 }
1640
1641 #if defined(CONFIG_BT_CONN)
1642 void *ull_update_mark(void *param)
1643 {
1644 return mark_set(&mark_update, param);
1645 }
1646
1647 void *ull_update_unmark(void *param)
1648 {
1649 return mark_unset(&mark_update, param);
1650 }
1651
1652 void *ull_update_mark_get(void)
1653 {
1654 return mark_get(mark_update);
1655 }
1656 #endif /* CONFIG_BT_CONN */
1657
1658 int ull_disable(void *lll)
1659 {
1660 static memq_link_t link;
1661 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1662 struct ull_hdr *hdr;
1663 struct k_sem sem;
1664 uint32_t ret;
1665
1666 hdr = HDR_LLL2ULL(lll);
1667 if (!hdr || !ull_ref_get(hdr)) {
1668 return 0;
1669 }
1670
1671 k_sem_init(&sem, 0, 1);
1672
1673 hdr->disabled_param = &sem;
1674 hdr->disabled_cb = disabled_cb;
1675
1676 /* ULL_HIGH can run after we have called `ull_ref_get` and it can
1677 * decrement the ref count. Hence, handle this race condition by
1678 * ensuring that `disabled_cb` has been set while the ref count is still
1679 * set.
1680 * There is no need to call `lll_disable` and take the semaphore
1681 * thereafter if the reference count is already zero.
1682 * If `sem` has been given because the reference count was decremented
1683 * meanwhile, we do not care.
1684 */
1685 if (!ull_ref_get(hdr)) {
1686 return 0;
1687 }
1688
1689 mfy.param = lll;
1690 ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0,
1691 &mfy);
1692 LL_ASSERT(!ret);
1693
1694 return k_sem_take(&sem, K_FOREVER);
1695 }
1696
1697 void *ull_pdu_rx_alloc_peek(uint8_t count)
1698 {
1699 if (count > MFIFO_AVAIL_COUNT_GET(pdu_rx_free)) {
1700 return NULL;
1701 }
1702
1703 return MFIFO_DEQUEUE_PEEK(pdu_rx_free);
1704 }
1705
1706 void *ull_pdu_rx_alloc_peek_iter(uint8_t *idx)
1707 {
1708 return *(void **)MFIFO_DEQUEUE_ITER_GET(pdu_rx_free, idx);
1709 }
1710
1711 void *ull_pdu_rx_alloc(void)
1712 {
1713 return MFIFO_DEQUEUE(pdu_rx_free);
1714 }
1715
1716 void ull_rx_put(memq_link_t *link, void *rx)
1717 {
1718 #if defined(CONFIG_BT_CONN)
1719 struct node_rx_hdr *rx_hdr = rx;
1720
1721 /* Serialize Tx ack with Rx enqueue by storing reference to
1722 * last element index in Tx ack FIFO.
1723 */
1724 rx_hdr->ack_last = ull_conn_ack_last_idx_get();
1725 #endif /* CONFIG_BT_CONN */
1726
1727 /* Enqueue the Rx object */
1728 memq_enqueue(link, rx, &memq_ull_rx.tail);
1729 }
1730
1731 void ull_rx_sched(void)
1732 {
1733 static memq_link_t link;
1734 static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
1735
1736 /* Kick the ULL (using the mayfly, tailchain it) */
1737 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
1738 }
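/* Typical usage of the pair above from LLL/ULL context (see
 * ull_event_done() later in this file for an in-file example): enqueue the
 * rx node, then kick the ULL_HIGH demux mayfly:
 *
 *   ull_rx_put(link, rx);
 *   ull_rx_sched();
 */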
1739
1740 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1741 void ull_rx_put_done(memq_link_t *link, void *done)
1742 {
1743 /* Enqueue the done object */
1744 memq_enqueue(link, done, &memq_ull_done.tail);
1745 }
1746
1747 void ull_rx_sched_done(void)
1748 {
1749 static memq_link_t link;
1750 static struct mayfly mfy = {0, 0, &link, NULL, ull_done};
1751
1752 /* Kick the ULL (using the mayfly, tailchain it) */
1753 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
1754 }
1755 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
1756
1757 struct lll_event *ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb,
1758 lll_abort_cb_t abort_cb,
1759 struct lll_prepare_param *prepare_param,
1760 lll_prepare_cb_t prepare_cb,
1761 uint8_t is_resume)
1762 {
1763 struct lll_event *e;
1764 uint8_t idx;
1765
1766 idx = MFIFO_ENQUEUE_GET(prep, (void **)&e);
1767 if (!e) {
1768 return NULL;
1769 }
1770
1771 memcpy(&e->prepare_param, prepare_param, sizeof(e->prepare_param));
1772 e->prepare_cb = prepare_cb;
1773 e->is_abort_cb = is_abort_cb;
1774 e->abort_cb = abort_cb;
1775 e->is_resume = is_resume;
1776 e->is_aborted = 0U;
1777
1778 MFIFO_ENQUEUE(prep, idx);
1779
1780 return e;
1781 }
1782
1783 void *ull_prepare_dequeue_get(void)
1784 {
1785 return MFIFO_DEQUEUE_GET(prep);
1786 }
1787
1788 void *ull_prepare_dequeue_iter(uint8_t *idx)
1789 {
1790 return MFIFO_DEQUEUE_ITER_GET(prep, idx);
1791 }
1792
1793 void ull_prepare_dequeue(uint8_t caller_id)
1794 {
1795 struct lll_event *next;
1796
1797 next = ull_prepare_dequeue_get();
1798 while (next) {
1799 uint8_t is_aborted = next->is_aborted;
1800 uint8_t is_resume = next->is_resume;
1801
1802 if (!is_aborted) {
1803 static memq_link_t link;
1804 static struct mayfly mfy = {0, 0, &link, NULL,
1805 lll_resume};
1806 uint32_t ret;
1807
1808 mfy.param = next;
1809 ret = mayfly_enqueue(caller_id, TICKER_USER_ID_LLL, 0,
1810 &mfy);
1811 LL_ASSERT(!ret);
1812 }
1813
1814 MFIFO_DEQUEUE(prep);
1815
1816 next = ull_prepare_dequeue_get();
1817
1818 if (!next || (!is_aborted && (!is_resume || next->is_resume))) {
1819 break;
1820 }
1821 }
1822 }
1823
1824 struct event_done_extra *ull_event_done_extra_get(void)
1825 {
1826 struct node_rx_event_done *evdone;
1827
1828 evdone = MFIFO_DEQUEUE_PEEK(done);
1829 if (!evdone) {
1830 return NULL;
1831 }
1832
1833 return &evdone->extra;
1834 }
1835
1836 struct event_done_extra *ull_done_extra_type_set(uint8_t type)
1837 {
1838 struct event_done_extra *extra;
1839
1840 extra = ull_event_done_extra_get();
1841 if (!extra) {
1842 return NULL;
1843 }
1844
1845 extra->type = type;
1846
1847 return extra;
1848 }
1849
1850 void *ull_event_done(void *param)
1851 {
1852 struct node_rx_event_done *evdone;
1853 memq_link_t *link;
1854
1855 /* Obtain new node that signals "Done of an RX-event".
1856 * Obtain this by dequeuing from the global 'mfifo_done' queue.
1857 * Note that 'mfifo_done' is a queue of pointers, not of
1858 * struct node_rx_event_done
1859 */
1860 evdone = MFIFO_DEQUEUE(done);
1861 if (!evdone) {
1862 /* Not fatal if we cannot obtain a node, though
1863 * we will lose the packets in the software stack.
1864 * If this happens during a Connection Update, it could cause LSTO.
1865 */
1866 return NULL;
1867 }
1868
1869 link = evdone->hdr.link;
1870 evdone->hdr.link = NULL;
1871
1872 evdone->hdr.type = NODE_RX_TYPE_EVENT_DONE;
1873 evdone->param = param;
1874
1875 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1876 ull_rx_put_done(link, evdone);
1877 ull_rx_sched_done();
1878 #else
1879 ull_rx_put(link, evdone);
1880 ull_rx_sched();
1881 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
1882
1883 return evdone;
1884 }
1885
1886 #if defined(CONFIG_BT_PERIPHERAL) || defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1887 /**
1888 * @brief Extract timing from completed event
1889 *
1890 * @param node_rx_event_done[in] Done event containing fresh timing information
1891 * @param ticks_drift_plus[out] Positive part of drift uncertainty window
1892 * @param ticks_drift_minus[out] Negative part of drift uncertainty window
1893 */
1894 void ull_drift_ticks_get(struct node_rx_event_done *done,
1895 uint32_t *ticks_drift_plus,
1896 uint32_t *ticks_drift_minus)
1897 {
1898 uint32_t start_to_address_expected_us;
1899 uint32_t start_to_address_actual_us;
1900 uint32_t window_widening_event_us;
1901 uint32_t preamble_to_addr_us;
1902
1903 start_to_address_actual_us =
1904 done->extra.drift.start_to_address_actual_us;
1905 window_widening_event_us =
1906 done->extra.drift.window_widening_event_us;
1907 preamble_to_addr_us =
1908 done->extra.drift.preamble_to_addr_us;
1909
1910 start_to_address_expected_us = EVENT_JITTER_US +
1911 EVENT_TICKER_RES_MARGIN_US +
1912 window_widening_event_us +
1913 preamble_to_addr_us;
1914
1915 if (start_to_address_actual_us <= start_to_address_expected_us) {
1916 *ticks_drift_plus =
1917 HAL_TICKER_US_TO_TICKS(window_widening_event_us);
1918 *ticks_drift_minus =
1919 HAL_TICKER_US_TO_TICKS((start_to_address_expected_us -
1920 start_to_address_actual_us));
1921 } else {
1922 *ticks_drift_plus =
1923 HAL_TICKER_US_TO_TICKS(start_to_address_actual_us);
1924 *ticks_drift_minus =
1925 HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US +
1926 EVENT_TICKER_RES_MARGIN_US +
1927 preamble_to_addr_us);
1928 }
1929 }
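/* Illustrative sketch (hypothetical numbers): with a window widening of
 * 50 us for the event and an access address observed 10 us earlier than
 * the expected anchor point (actual == expected - 10 us), the first branch
 * applies and yields ticks_drift_plus = HAL_TICKER_US_TO_TICKS(50) and
 * ticks_drift_minus = HAL_TICKER_US_TO_TICKS(10).
 */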
1930 #endif /* CONFIG_BT_PERIPHERAL || CONFIG_BT_CTLR_SYNC_PERIODIC */
1931
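/**
 * @brief Initialize the common ULL memory pools and queues
 *
 * Initializes the done event and Rx PDU pools together with their link
 * pools, sets up the ULL and LL Rx memory queues, and pre-allocates the
 * free Rx buffers.
 */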
1932 static inline int init_reset(void)
1933 {
1934 memq_link_t *link;
1935
1936 /* Initialize done pool. */
1937 mem_init(mem_done.pool, sizeof(struct node_rx_event_done),
1938 EVENT_DONE_MAX, &mem_done.free);
1939
1940 /* Initialize done link pool. */
1941 mem_init(mem_link_done.pool, sizeof(memq_link_t), EVENT_DONE_MAX +
1942 EVENT_DONE_LINK_CNT, &mem_link_done.free);
1943
1944 /* Allocate done buffers */
1945 done_alloc();
1946
1947 /* Initialize rx pool. */
1948 mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
1949 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE),
1950 &mem_pdu_rx.free);
1951
1952 /* Initialize rx link pool. */
1953 mem_init(mem_link_rx.pool, sizeof(memq_link_t),
1954 sizeof(mem_link_rx.pool) / sizeof(memq_link_t),
1955 &mem_link_rx.free);
1956
1957 /* Acquire a link to initialize ull rx memq */
1958 link = mem_acquire(&mem_link_rx.free);
1959 LL_ASSERT(link);
1960
1961 /* Initialize ull rx memq */
1962 MEMQ_INIT(ull_rx, link);
1963
1964 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1965 /* Acquire a link to initialize ull done memq */
1966 link = mem_acquire(&mem_link_done.free);
1967 LL_ASSERT(link);
1968
1969 /* Initialize ull done memq */
1970 MEMQ_INIT(ull_done, link);
1971 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
1972
1973 /* Acquire a link to initialize ll rx memq */
1974 link = mem_acquire(&mem_link_rx.free);
1975 LL_ASSERT(link);
1976
1977 /* Initialize ll rx memq */
1978 MEMQ_INIT(ll_rx, link);
1979
1980 /* Allocate rx free buffers */
1981 mem_link_rx.quota_pdu = RX_CNT;
1982 rx_alloc(UINT8_MAX);
1983
1984 return 0;
1985 }
1986
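/**
 * @brief Reset the LLL and the configured role specific LLL state
 *
 * Resets the LLL core and, depending on configuration, the advertiser,
 * scanner, connection and direction finding LLL state. When Zero Latency
 * IRQs are not used, the semaphore passed as param is given on completion.
 */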
1987 static void perform_lll_reset(void *param)
1988 {
1989 int err;
1990
1991 /* Reset LLL */
1992 err = lll_reset();
1993 LL_ASSERT(!err);
1994
1995 #if defined(CONFIG_BT_BROADCASTER)
1996 /* Reset adv state */
1997 err = lll_adv_reset();
1998 LL_ASSERT(!err);
1999 #endif /* CONFIG_BT_BROADCASTER */
2000
2001 #if defined(CONFIG_BT_OBSERVER)
2002 /* Reset scan state */
2003 err = lll_scan_reset();
2004 LL_ASSERT(!err);
2005 #endif /* CONFIG_BT_OBSERVER */
2006
2007 #if defined(CONFIG_BT_CONN)
2008 /* Reset conn role */
2009 err = lll_conn_reset();
2010 LL_ASSERT(!err);
2011 #endif /* CONFIG_BT_CONN */
2012
2013 #if defined(CONFIG_BT_CTLR_DF)
2014 err = lll_df_reset();
2015 LL_ASSERT(!err);
2016 #endif /* CONFIG_BT_CTLR_DF */
2017
2018 #if !defined(CONFIG_BT_CTLR_ZLI)
2019 k_sem_give(param);
2020 #endif /* !CONFIG_BT_CTLR_ZLI */
2021 }
2022
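/**
 * @brief Set a mark if it is not already set
 *
 * The mark_* helpers implement a single-slot marker used to flag a context
 * (for example one that is being disabled) so that other code can check
 * for it.
 *
 * @return The current mark value; equals param when the mark was taken.
 */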
2023 static inline void *mark_set(void **m, void *param)
2024 {
2025 if (!*m) {
2026 *m = param;
2027 }
2028
2029 return *m;
2030 }
2031
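/**
 * @brief Clear a mark, but only if it was set to the given context
 *
 * @return param when the mark was cleared, NULL otherwise.
 */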
2032 static inline void *mark_unset(void **m, void *param)
2033 {
2034 if (*m && *m == param) {
2035 *m = NULL;
2036
2037 return param;
2038 }
2039
2040 return NULL;
2041 }
2042
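/**
 * @brief Return the currently marked context, NULL if none
 */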
2043 static inline void *mark_get(void *m)
2044 {
2045 return m;
2046 }
2047
2048 /**
2049 * @brief Allocate buffers for done events
2050 */
2051 static inline void done_alloc(void)
2052 {
2053 uint8_t idx;
2054
2055 /* mfifo_done is a queue of pointers */
2056 while (MFIFO_ENQUEUE_IDX_GET(done, &idx)) {
2057 memq_link_t *link;
2058 struct node_rx_hdr *rx;
2059
2060 link = mem_acquire(&mem_link_done.free);
2061 if (!link) {
2062 break;
2063 }
2064
2065 rx = mem_acquire(&mem_done.free);
2066 if (!rx) {
2067 mem_release(link, &mem_link_done.free);
2068 break;
2069 }
2070
2071 rx->link = link;
2072
2073 MFIFO_BY_IDX_ENQUEUE(done, idx, rx);
2074 }
2075 }
2076
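/**
 * @brief Return a processed done event buffer to the done mfifo
 *
 * Restores the link in the done event header and enqueues the buffer back
 * into the done mfifo so it can be reused for a subsequent event done.
 *
 * @return done when the buffer could be re-enqueued, NULL if no mfifo index
 *         was available.
 */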
2077 static inline void *done_release(memq_link_t *link,
2078 struct node_rx_event_done *done)
2079 {
2080 uint8_t idx;
2081
2082 if (!MFIFO_ENQUEUE_IDX_GET(done, &idx)) {
2083 return NULL;
2084 }
2085
2086 done->hdr.link = link;
2087
2088 MFIFO_BY_IDX_ENQUEUE(done, idx, done);
2089
2090 return done;
2091 }
2092
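/**
 * @brief Allocate and enqueue free Rx PDU buffers
 *
 * Fills the LLL to ULL free Rx PDU fifo with up to max buffers, limited by
 * the available Rx link quota, and, when connections are supported, also
 * replenishes the ULL to LL/HCI free Rx PDU fifo.
 *
 * @param max Maximum number of buffers to allocate towards LLL; pass
 *            UINT8_MAX to allocate as many as the quota allows.
 */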
2093 static inline void rx_alloc(uint8_t max)
2094 {
2095 uint8_t idx;
2096
2097 if (max > mem_link_rx.quota_pdu) {
2098 max = mem_link_rx.quota_pdu;
2099 }
2100
2101 while (max && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
2102 memq_link_t *link;
2103 struct node_rx_hdr *rx;
2104
2105 link = mem_acquire(&mem_link_rx.free);
2106 if (!link) {
2107 return;
2108 }
2109
2110 rx = mem_acquire(&mem_pdu_rx.free);
2111 if (!rx) {
2112 mem_release(link, &mem_link_rx.free);
2113 return;
2114 }
2115
2116 rx->link = link;
2117
2118 MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);
2119
2120 ll_rx_link_inc_quota(-1);
2121
2122 max--;
2123 }
2124
2125 #if defined(CONFIG_BT_CONN)
2126 if (!max) {
2127 return;
2128 }
2129
2130 /* Replenish the ULL to LL/HCI free Rx PDU queue after the LLL to
2131 * ULL free Rx PDU queue has been filled.
2132 */
2133 while (mem_link_rx.quota_pdu &&
2134 MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
2135 memq_link_t *link;
2136 struct node_rx_hdr *rx;
2137
2138 link = mem_acquire(&mem_link_rx.free);
2139 if (!link) {
2140 return;
2141 }
2142
2143 rx = mem_acquire(&mem_pdu_rx.free);
2144 if (!rx) {
2145 mem_release(link, &mem_link_rx.free);
2146 return;
2147 }
2148
2149 link->mem = NULL;
2150 rx->link = link;
2151
2152 MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);
2153
2154 ll_rx_link_inc_quota(-1);
2155 }
2156 #endif /* CONFIG_BT_CONN */
2157 }
2158
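/**
 * @brief Demultiplex Rx nodes and connection Tx acknowledgments from LLL
 *
 * Executed as a mayfly in the ULL high priority context. In low latency
 * ULL configurations a single node is processed per invocation and
 * rx_demux_yield() tail-chains the next one; otherwise the queues are
 * drained in a loop.
 */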
2159 static void rx_demux(void *param)
2160 {
2161 memq_link_t *link;
2162
2163 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2164 do {
2165 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2166 struct node_rx_hdr *rx;
2167
2168 link = memq_peek(memq_ull_rx.head, memq_ull_rx.tail,
2169 (void **)&rx);
2170 if (link) {
2171 #if defined(CONFIG_BT_CONN)
2172 struct node_tx *node_tx;
2173 memq_link_t *link_tx;
2174 uint16_t handle; /* Handle to Ack TX */
2175 #endif /* CONFIG_BT_CONN */
2176 int nack = 0;
2177
2178 LL_ASSERT(rx);
2179
2180 #if defined(CONFIG_BT_CONN)
2181 link_tx = ull_conn_ack_by_last_peek(rx->ack_last,
2182 &handle, &node_tx);
2183 if (link_tx) {
2184 rx_demux_conn_tx_ack(rx->ack_last, handle,
2185 link_tx, node_tx);
2186 } else
2187 #endif /* CONFIG_BT_CONN */
2188 {
2189 nack = rx_demux_rx(link, rx);
2190 }
2191
2192 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2193 if (!nack) {
2194 rx_demux_yield();
2195 }
2196 #else /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
2197 if (nack) {
2198 break;
2199 }
2200 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
2201
2202 #if defined(CONFIG_BT_CONN)
2203 } else {
2204 struct node_tx *node_tx;
2205 uint8_t ack_last;
2206 uint16_t handle;
2207
2208 link = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2209 if (link) {
2210 rx_demux_conn_tx_ack(ack_last, handle,
2211 link, node_tx);
2212
2213 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2214 rx_demux_yield();
2215 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2216
2217 }
2218 #endif /* CONFIG_BT_CONN */
2219 }
2220
2221 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2222 } while (link);
2223 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2224 }
2225
2226 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
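/**
 * @brief Yield the ULL high priority context between Rx nodes
 *
 * If further Rx nodes or Tx acknowledgments are pending, re-enqueue the
 * rx_demux mayfly onto itself (tail-chained) instead of processing them
 * inline, so that other ULL high priority operations are not starved.
 */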
2227 static void rx_demux_yield(void)
2228 {
2229 static memq_link_t link;
2230 static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2231 struct node_rx_hdr *rx;
2232 memq_link_t *link_peek;
2233
2234 link_peek = memq_peek(memq_ull_rx.head, memq_ull_rx.tail, (void **)&rx);
2235 if (!link_peek) {
2236 #if defined(CONFIG_BT_CONN)
2237 struct node_tx *node_tx;
2238 uint8_t ack_last;
2239 uint16_t handle;
2240
2241 link_peek = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2242 if (!link_peek) {
2243 return;
2244 }
2245 #else /* !CONFIG_BT_CONN */
2246 return;
2247 #endif /* !CONFIG_BT_CONN */
2248 }
2249
2250 /* Kick the ULL (using the mayfly, tailchain it) */
2251 mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_HIGH, 1,
2252 &mfy);
2253 }
2254 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2255
2256 #if defined(CONFIG_BT_CONN)
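/**
 * @brief Count completed Tx data PDUs for one connection handle
 *
 * Iterates the Tx acknowledgment fifo from first towards last, counting
 * acknowledged data PDUs (start and continuation fragments) for the handle
 * found first, and releases their Tx buffers.
 *
 * @return Number of completed data PDUs for *handle.
 */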
2257 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
2258 {
2259 struct lll_tx *tx;
2260 uint8_t cmplt;
2261
2262 tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s,
2263 mfifo_tx_ack.n, mfifo_tx_ack.f, last,
2264 first);
2265 if (!tx) {
2266 return 0;
2267 }
2268
2269 *handle = tx->handle;
2270 cmplt = 0U;
2271 do {
2272 struct node_tx *node_tx;
2273 struct pdu_data *p;
2274
2275 node_tx = tx->node;
2276 p = (void *)node_tx->pdu;
2277 if (!node_tx || (node_tx == (void *)1) ||
2278 (((uint32_t)node_tx & ~3) &&
2279 (p->ll_id == PDU_DATA_LLID_DATA_START ||
2280 p->ll_id == PDU_DATA_LLID_DATA_CONTINUE))) {
2281 /* data packet, hence count num cmplt */
2282 tx->node = (void *)1;
2283 cmplt++;
2284 } else {
2285 /* Ctrl packet or flushed, hence don't count num cmplt */
2286 tx->node = (void *)2;
2287 }
2288
2289 if (((uint32_t)node_tx & ~3)) {
2290 ll_tx_mem_release(node_tx);
2291 }
2292
2293 tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s,
2294 mfifo_tx_ack.n, mfifo_tx_ack.f,
2295 last, first);
2296 } while (tx && tx->handle == *handle);
2297
2298 return cmplt;
2299 }
2300
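/**
 * @brief Process connection Tx acknowledgments queued by LLL
 *
 * Dequeues each acknowledgment, lets the connection layer process it,
 * releases the Tx link memory, and finally schedules the thread context so
 * that it calls ll_rx_get() for any resulting Rx.
 */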
2301 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
2302 memq_link_t *link,
2303 struct node_tx *node_tx)
2304 {
2305 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2306 do {
2307 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2308 /* Dequeue node */
2309 ull_conn_ack_dequeue();
2310
2311 /* Process Tx ack */
2312 ull_conn_tx_ack(handle, link, node_tx);
2313
2314 /* Release link mem */
2315 ull_conn_link_tx_release(link);
2316
2317 /* check for more rx ack */
2318 link = ull_conn_ack_by_last_peek(ack_last, &handle, &node_tx);
2319
2320 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2321 if (!link)
2322 #else /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2323 } while (link);
2324 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2325
2326 {
2327 /* trigger thread to call ll_rx_get() */
2328 ll_rx_sched();
2329 }
2330 }
2331 #endif /* CONFIG_BT_CONN */
2332
2333 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
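/**
 * @brief Process queued event done nodes
 *
 * Drains the ULL done memory queue and dispatches each node to
 * rx_demux_event_done(). Only used when the low latency ULL variant is not
 * enabled; with low latency ULL, done events travel through the regular
 * ULL Rx queue instead.
 */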
2334 static void ull_done(void *param)
2335 {
2336 memq_link_t *link;
2337 struct node_rx_hdr *done;
2338
2339 do {
2340 link = memq_peek(memq_ull_done.head, memq_ull_done.tail,
2341 (void **)&done);
2342
2343 if (link) {
2344 /* Process done event */
2345 (void)memq_dequeue(memq_ull_done.tail,
2346 &memq_ull_done.head, NULL);
2347 rx_demux_event_done(link, done);
2348 }
2349 } while (link);
2350 }
2351 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2352
2353 /**
2354 * @brief Dispatch rx objects
2355 * @details Rx objects are only peeked, not dequeued yet.
2356 * Execution context: ULL high priority Mayfly
2357 */
2358 static inline int rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx)
2359 {
2360 /* Demux Rx objects */
2361 switch (rx->type) {
2362 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2363 case NODE_RX_TYPE_EVENT_DONE:
2364 {
2365 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2366 rx_demux_event_done(link, rx);
2367 }
2368 break;
2369 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2370
2371 #if defined(CONFIG_BT_OBSERVER)
2372 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2373 case NODE_RX_TYPE_EXT_1M_REPORT:
2374 case NODE_RX_TYPE_EXT_CODED_REPORT:
2375 case NODE_RX_TYPE_EXT_AUX_REPORT:
2376 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2377 case NODE_RX_TYPE_SYNC_REPORT:
2378 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2379 {
2380 struct pdu_adv *adv;
2381
2382 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2383
2384 adv = (void *)((struct node_rx_pdu *)rx)->pdu;
2385 if (adv->type != PDU_ADV_TYPE_EXT_IND) {
2386 ll_rx_put(link, rx);
2387 ll_rx_sched();
2388 break;
2389 }
2390
2391 ull_scan_aux_setup(link, rx);
2392 }
2393 break;
2394
2395 case NODE_RX_TYPE_EXT_AUX_RELEASE:
2396 {
2397 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2398 ull_scan_aux_release(link, rx);
2399 }
2400 break;
2401 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2402 case NODE_RX_TYPE_SYNC:
2403 {
2404 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2405 ull_sync_established_report(link, rx);
2406 }
2407 break;
2408 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
2409 case NODE_RX_TYPE_IQ_SAMPLE_REPORT: {
2410 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2411 ll_rx_put(link, rx);
2412 ll_rx_sched();
2413 }
2414 break;
2415 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2416 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2417 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2418 #endif /* CONFIG_BT_OBSERVER */
2419
2420 #if defined(CONFIG_BT_CONN)
2421 case NODE_RX_TYPE_CONNECTION:
2422 {
2423 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2424 ull_conn_setup(link, rx);
2425 }
2426 break;
2427
2428 case NODE_RX_TYPE_DC_PDU:
2429 {
2430 int nack;
2431
2432 nack = ull_conn_rx(link, (void *)&rx);
2433 if (nack) {
2434 return nack;
2435 }
2436
2437 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2438
2439 if (rx) {
2440 ll_rx_put(link, rx);
2441 ll_rx_sched();
2442 }
2443 }
2444 break;
2445
2446 case NODE_RX_TYPE_TERMINATE:
2447 #endif /* CONFIG_BT_CONN */
2448
2449 #if defined(CONFIG_BT_OBSERVER) || \
2450 defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2451 defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
2452 defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
2453 defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
2454 defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
2455 defined(CONFIG_BT_CONN)
2456
2457 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
2458 case NODE_RX_TYPE_SYNC_CHM_COMPLETE:
2459 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
2460
2461 #if defined(CONFIG_BT_OBSERVER)
2462 case NODE_RX_TYPE_REPORT:
2463 #endif /* CONFIG_BT_OBSERVER */
2464
2465 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
2466 case NODE_RX_TYPE_SCAN_REQ:
2467 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
2468
2469 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
2470 case NODE_RX_TYPE_PROFILE:
2471 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
2472
2473 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
2474 case NODE_RX_TYPE_ADV_INDICATION:
2475 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
2476
2477 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
2478 case NODE_RX_TYPE_SCAN_INDICATION:
2479 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
2480
2481 case NODE_RX_TYPE_RELEASE:
2482 {
2483 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2484 ll_rx_put(link, rx);
2485 ll_rx_sched();
2486 }
2487 break;
2488 #endif /* CONFIG_BT_OBSERVER ||
2489 * CONFIG_BT_CTLR_ADV_PERIODIC ||
2490 * CONFIG_BT_CTLR_SCAN_REQ_NOTIFY ||
2491 * CONFIG_BT_CTLR_PROFILE_ISR ||
2492 * CONFIG_BT_CTLR_ADV_INDICATION ||
2493 * CONFIG_BT_CTLR_SCAN_INDICATION ||
2494 * CONFIG_BT_CONN
2495 */
2496
2497 #if defined(CONFIG_BT_CTLR_ISO)
2498 case NODE_RX_TYPE_ISO_PDU:
2499 {
2500 /* Remove from receive-queue; ULL has received this now */
2501 (void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2502
2503 #if defined(CONFIG_BT_CTLR_CONN_ISO)
2504 struct node_rx_pdu *rx_pdu = (struct node_rx_pdu *)rx;
2505 struct ll_conn_iso_stream *cis =
2506 ll_conn_iso_stream_get(rx_pdu->hdr.handle);
2507 struct ll_iso_datapath *dp = cis->datapath_out;
2508 isoal_sink_handle_t sink = dp->sink_hdl;
2509
2510 if (dp->path_id != BT_HCI_DATAPATH_ID_HCI) {
2511 /* For a vendor-specific datapath, pass the PDU to the ISO-AL
2512 * here; for an HCI destination it is passed in the HCI
2513 * context instead.
2514 */
2515 struct isoal_pdu_rx pckt_meta = {
2516 .meta = &rx_pdu->hdr.rx_iso_meta,
2517 .pdu = (union isoal_pdu *) &rx_pdu->pdu[0]
2518 };
2519
2520 /* Pass the ISO PDU through ISO-AL */
2521 isoal_status_t err =
2522 isoal_rx_pdu_recombine(sink, &pckt_meta);
2523
2524 LL_ASSERT(err == ISOAL_STATUS_OK); /* TODO handle err */
2525 }
2526 #endif /* CONFIG_BT_CTLR_CONN_ISO */
2527
2528 /* Let ISO PDU start its long journey upwards */
2529 ll_rx_put(link, rx);
2530 ll_rx_sched();
2531 }
2532 break;
2533 #endif /* CONFIG_BT_CTLR_ISO */
2534
2535 default:
2536 {
2537 #if defined(CONFIG_BT_CTLR_USER_EXT)
2538 /* Try proprietary demuxing */
2539 rx_demux_rx_proprietary(link, rx, memq_ull_rx.tail,
2540 &memq_ull_rx.head);
2541 #else
2542 LL_ASSERT(0);
2543 #endif /* CONFIG_BT_CTLR_USER_EXT */
2544 }
2545 break;
2546 }
2547
2548 return 0;
2549 }
2550
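/**
 * @brief Dispatch an event done node to its role specific handler
 *
 * Decrements the prepare reference of the originating ULL context, invokes
 * the handler matching the done extra type, releases the done buffer back
 * to the done mfifo and, if a disable was requested and no references
 * remain, invokes the context's disabled callback.
 */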
2551 static inline void rx_demux_event_done(memq_link_t *link,
2552 struct node_rx_hdr *rx)
2553 {
2554 struct node_rx_event_done *done = (void *)rx;
2555 struct ull_hdr *ull_hdr;
2556 void *release;
2557
2558 /* Decrement prepare reference if ULL will not resume */
2559 ull_hdr = done->param;
2560 if (ull_hdr) {
2561 LL_ASSERT(ull_ref_get(ull_hdr));
2562 ull_ref_dec(ull_hdr);
2563 }
2564
2565 /* Process role dependent event done */
2566 switch (done->extra.type) {
2567 #if defined(CONFIG_BT_CONN)
2568 case EVENT_DONE_EXTRA_TYPE_CONN:
2569 ull_conn_done(done);
2570 break;
2571 #endif /* CONFIG_BT_CONN */
2572
2573 #if defined(CONFIG_BT_BROADCASTER)
2574 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
2575 defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
2576 case EVENT_DONE_EXTRA_TYPE_ADV:
2577 ull_adv_done(done);
2578 break;
2579
2580 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2581 case EVENT_DONE_EXTRA_TYPE_ADV_AUX:
2582 ull_adv_aux_done(done);
2583 break;
2584 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2585 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
2586 #endif /* CONFIG_BT_BROADCASTER */
2587
2588 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2589 #if defined(CONFIG_BT_OBSERVER)
2590 case EVENT_DONE_EXTRA_TYPE_SCAN:
2591 ull_scan_done(done);
2592 break;
2593
2594 case EVENT_DONE_EXTRA_TYPE_SCAN_AUX:
2595 ull_scan_aux_done(done);
2596 break;
2597
2598 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2599 case EVENT_DONE_EXTRA_TYPE_SYNC:
2600 ull_sync_done(done);
2601 break;
2602 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2603 #endif /* CONFIG_BT_OBSERVER */
2604 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2605
2606 #if defined(CONFIG_BT_CTLR_CONN_ISO)
2607 case EVENT_DONE_EXTRA_TYPE_CIS:
2608 ull_conn_iso_done(done);
2609 break;
2610 #endif /* CONFIG_BT_CTLR_CONN_ISO */
2611
2612 #if defined(CONFIG_BT_CTLR_USER_EXT)
2613 case EVENT_DONE_EXTRA_TYPE_USER_START
2614 ... EVENT_DONE_EXTRA_TYPE_USER_END:
2615 ull_proprietary_done(done);
2616 break;
2617 #endif /* CONFIG_BT_CTLR_USER_EXT */
2618
2619 case EVENT_DONE_EXTRA_TYPE_NONE:
2620 /* ignore */
2621 break;
2622
2623 default:
2624 LL_ASSERT(0);
2625 break;
2626 }
2627
2628 /* release done */
2629 done->extra.type = 0U;
2630 release = done_release(link, done);
2631 LL_ASSERT(release == done);
2632
2633 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
2634 /* dequeue prepare pipeline */
2635 ull_prepare_dequeue(TICKER_USER_ID_ULL_HIGH);
2636
2637 /* LLL done synchronized */
2638 lll_done_sync();
2639 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
2640
2641 /* If a disable was initiated, invoke the disabled callback */
2642 if (ull_hdr && !ull_ref_get(ull_hdr) && ull_hdr->disabled_cb) {
2643 ull_hdr->disabled_cb(ull_hdr->disabled_param);
2644 }
2645 }
2646
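/**
 * @brief Default disabled callback that gives the semaphore passed as param
 */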
2647 static void disabled_cb(void *param)
2648 {
2649 k_sem_give(param);
2650 }
2651