1 /*
2 * Copyright (c) 2020 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/types.h>
8 #include <zephyr/ztest.h>
9
10 #include <zephyr/bluetooth/hci.h>
11
12 #include "hal/cpu_vendor_hal.h"
13 #include "hal/ccm.h"
14
15 #include "util/mem.h"
16 #include "util/mfifo.h"
17 #include "util/memq.h"
18 #include "util/dbuf.h"
19 #include "util.h"
20
21 #include "pdu_df.h"
22 #include "lll/pdu_vendor.h"
23 #include "pdu.h"
24 #include "ll.h"
25 #include "ll_feat.h"
26 #include "ll_settings.h"
27 #include "lll.h"
28 #include "lll/lll_vendor.h"
29 #include "lll/lll_adv_types.h"
30 #include "lll_adv.h"
31 #include "lll/lll_adv_pdu.h"
32 #include "lll_scan.h"
33 #include "lll_sync.h"
34 #include "lll/lll_df_types.h"
35 #include "lll_conn.h"
36
37 #include "ull_conn_internal.h"
38
#define EVENT_DONE_MAX 3
/* Backing storage for event-done nodes; `free` heads the free list
 * maintained by util/mem (initialized in init_reset()).
 */
static struct {
	void *free;
	uint8_t pool[sizeof(struct node_rx_event_done) * EVENT_DONE_MAX];
} mem_done;

/* Backing storage for the memq links paired with event-done nodes. */
static struct {
	void *free;
	uint8_t pool[sizeof(memq_link_t) * EVENT_DONE_MAX];
} mem_link_done;
50
#if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* One extra buffer when both PHY update and data length update are enabled */
#define LL_PDU_RX_CNT (3 + 128)
#else
#define LL_PDU_RX_CNT (2 + 128)
#endif

/* Count of rx node buffers for non-connection (advertising/scanning) PDUs */
#define PDU_RX_CNT (CONFIG_BT_CTLR_RX_BUFFERS + 3)
/* Total rx buffer quota shared by connection and non-connection allocation */
#define RX_CNT (PDU_RX_CNT + LL_PDU_RX_CNT)

/* FIFO of pointers to free non-connection rx nodes, refilled by rx_alloc() */
static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
61
#if defined(CONFIG_BT_RX_USER_PDU_LEN)
#define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
#else
#define PDU_RX_USER_PDU_OCTETS_MAX 0
#endif
/* Bytes preceding the pdu[] payload inside struct node_rx_pdu */
#define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
#define NODE_RX_STRUCT_OVERHEAD (NODE_RX_HEADER_SIZE)

/* Largest advertising PDU this pool must be able to hold */
#define PDU_ADV_SIZE MAX(PDU_AC_LL_SIZE_MAX, \
			 (PDU_AC_LL_HEADER_SIZE + LL_EXT_OCTETS_RX_MAX))

/* Largest data-channel PDU this pool must be able to hold */
#define PDU_DATA_SIZE (PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX)

/* Pool element size: header plus the worst-case PDU, rounded for alignment */
#define PDU_RX_NODE_POOL_ELEMENT_SIZE \
	MROUND(NODE_RX_STRUCT_OVERHEAD + \
	       MAX(MAX(PDU_ADV_SIZE, PDU_DATA_SIZE), PDU_RX_USER_PDU_OCTETS_MAX))

/* Arbitrarily large pool for the test double; pool exhaustion is not a
 * scenario these mocks are meant to exercise.
 */
#define PDU_RX_POOL_SIZE 16384

static struct {
	void *free;
	uint8_t pool[PDU_RX_POOL_SIZE];
} mem_pdu_rx;

/* Arbitrarily large link pool, matching PDU_RX_POOL_SIZE above; sized to
 * never run out during unit tests.
 */
#define LINK_RX_POOL_SIZE 16384
static struct {
	uint8_t quota_pdu; /* Number of un-utilized buffers */

	void *free;
	uint8_t pool[LINK_RX_POOL_SIZE];
} mem_link_rx;
99
/* Memory queues between LLL->ULL and ULL->LL (thread); declared but only
 * initialized here — the mocks below do not route traffic through them.
 */
static MEMQ_DECLARE(ull_rx);
static MEMQ_DECLARE(ll_rx);

#if defined(CONFIG_BT_CONN)
/* FIFO of pointers to free connection rx nodes, refilled by rx_alloc() */
static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
#endif /* CONFIG_BT_CONN */

/* Queue that ll_rx_put() appends to; tests drain it to inspect rx nodes.
 * Defined here unless the ztest build provides it elsewhere.
 */
#ifdef ZTEST_UNITTEST
extern sys_slist_t ut_rx_q;
#else
sys_slist_t ut_rx_q;
#endif

static inline int init_reset(void);
static inline void rx_alloc(uint8_t max);
static inline void ll_rx_link_inc_quota(int8_t delta);
116
/* Reset the LL test double: re-create the free-buffer FIFOs and
 * re-initialize all pools and queues.
 *
 * Note: ll_pdu_rx_free is only defined when CONFIG_BT_CONN is enabled
 * (see its MFIFO_DEFINE), so its init must carry the same guard;
 * unconditional use would not compile without CONFIG_BT_CONN.
 */
void ll_reset(void)
{
#if defined(CONFIG_BT_CONN)
	MFIFO_INIT(ll_pdu_rx_free);
#endif /* CONFIG_BT_CONN */

	init_reset();
}
122
/* Release a chain of rx nodes back to the rx pdu pool.
 *
 * @param node_rx In/out: head of a node_rx_hdr list linked via ->next.
 *                On return *node_rx is NULL (the whole list is consumed).
 *
 * Each released node returns one unit of rx buffer quota; the freed
 * memory is then re-distributed into the free FIFOs via rx_alloc().
 * Unknown node types trip an assertion.
 */
void ll_rx_mem_release(void **node_rx)
{
	struct node_rx_hdr *rx;

	rx = *node_rx;
	while (rx) {
		struct node_rx_hdr *rx_free;

		/* Detach the head before releasing it */
		rx_free = rx;
		rx = rx->next;

		switch (rx_free->type) {
		case NODE_RX_TYPE_DC_PDU:
		case NODE_RX_TYPE_CONN_UPDATE:
		case NODE_RX_TYPE_ENC_REFRESH:
		case NODE_RX_TYPE_PHY_UPDATE:
		case NODE_RX_TYPE_CIS_REQUEST:
		case NODE_RX_TYPE_CIS_ESTABLISHED:

			/* Return quota and put the node back in the pdu pool */
			ll_rx_link_inc_quota(1);
			mem_release(rx_free, &mem_pdu_rx.free);
			break;
		default:
			__ASSERT(0, "Tried to release unknown rx node type");
			break;
		}
	}

	/* Loop exits with rx == NULL; report the fully-consumed list */
	*node_rx = rx;

	/* Refill the free-buffer FIFOs from the returned quota */
	rx_alloc(UINT8_MAX);
}
155
/* Adjust the remaining rx buffer quota: a positive delta returns quota,
 * a negative delta consumes it.
 */
static inline void ll_rx_link_inc_quota(int8_t delta)
{
	mem_link_rx.quota_pdu = (uint8_t)(mem_link_rx.quota_pdu + delta);
}
160
ll_rx_link_alloc(void)161 void *ll_rx_link_alloc(void)
162 {
163 return mem_acquire(&mem_link_rx.free);
164 }
165
ll_rx_link_release(void * link)166 void ll_rx_link_release(void *link)
167 {
168 mem_release(link, &mem_link_rx.free);
169 }
170
ll_rx_alloc(void)171 void *ll_rx_alloc(void)
172 {
173 return mem_acquire(&mem_pdu_rx.free);
174 }
175
ll_rx_release(void * node_rx)176 void ll_rx_release(void *node_rx)
177 {
178 mem_release(node_rx, &mem_pdu_rx.free);
179 }
180
/* Mock ll_rx_put: append the node to the unit-test queue so the test can
 * inspect it; @p link is unused by this double.
 */
void ll_rx_put(memq_link_t *link, void *rx)
{
	const struct node_rx_hdr *hdr = rx;

	/* Nodes marked for release are dropped rather than queued */
	if (hdr->type == NODE_RX_TYPE_RELEASE) {
		return;
	}

	sys_slist_append(&ut_rx_q, (sys_snode_t *)rx);
}
188
/* Stub: rx processing is driven directly by the tests, so scheduling is a
 * no-op in this test double.
 */
void ll_rx_sched(void)
{
}
192
/* Queue an rx node, then kick the (stubbed) rx scheduler. */
void ll_rx_put_sched(memq_link_t *link, void *rx)
{
	ll_rx_put(link, rx);
	ll_rx_sched();
}
198
/* Peek at the next free connection rx buffer, but only when at least
 * @p count buffers are currently available; NULL otherwise.
 */
void *ll_pdu_rx_alloc_peek(uint8_t count)
{
	if (MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free) < count) {
		return NULL;
	}

	return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
}
207
ll_pdu_rx_alloc(void)208 void *ll_pdu_rx_alloc(void)
209 {
210 return MFIFO_DEQUEUE(ll_pdu_rx_free);
211 }
212
/* Stub: Tx acknowledgements are not processed by this test double. */
void ll_tx_ack_put(uint16_t handle, struct node_tx *node)
{
}
216
/* Stub: ticker operation completion signalling is a no-op here. */
void ull_ticker_status_give(uint32_t status, void *param)
{
}
220
/* Mock: return the callback status immediately instead of waiting for the
 * ticker operation to complete; @p ret is ignored.
 */
uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
{
	uint32_t status = *ret_cb;

	return status;
}
225
/* Stub: disable-marking is not tracked; always returns NULL. */
void *ull_disable_mark(void *param)
{
	return NULL;
}
230
/* Stub: disable-marking is not tracked; always returns NULL. */
void *ull_disable_unmark(void *param)
{
	return NULL;
}
235
/* Stub: no object is ever marked for disable; always returns NULL. */
void *ull_disable_mark_get(void)
{
	return NULL;
}
240
/* Stub: pretend the ticker was stopped successfully (always returns 0). */
int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param, void *lll_disable)
{
	return 0;
}
245
/* Stub: update-marking is not tracked; always returns NULL. */
void *ull_update_mark(void *param)
{
	return NULL;
}
250
/* Stub: update-marking is not tracked; always returns NULL. */
void *ull_update_unmark(void *param)
{
	return NULL;
}
255
/* Stub: no object is ever marked for update; always returns NULL. */
void *ull_update_mark_get(void)
{
	return NULL;
}
260
ull_disable(void * lll)261 int ull_disable(void *lll)
262 {
263 return 0;
264 }
265
/* Stub: no ULL-level rx buffers are provided; always returns NULL. */
void *ull_pdu_rx_alloc(void)
{
	return NULL;
}
270
/* Stub: ULL rx queueing is a no-op; the node is silently dropped. */
void ull_rx_put(memq_link_t *link, void *rx)
{
}
274
/* Stub: ULL rx scheduling is a no-op in this test double. */
void ull_rx_sched(void)
{
}
278
/* Stub: combined ULL rx put+sched is a no-op; the node is dropped. */
void ull_rx_put_sched(memq_link_t *link, void *rx)
{
}
282
/* Forward declaration */
struct node_rx_event_done;

/* Stub: clock drift is not modelled; the drift outputs are left untouched,
 * so callers see whatever values they passed in.
 */
void ull_drift_ticks_get(struct node_rx_event_done *done, uint32_t *ticks_drift_plus,
			 uint32_t *ticks_drift_minus)
{
}
289
/* (Re-)initialize every memory pool and queue used by this test double,
 * then pre-fill the free rx buffer FIFOs.
 *
 * @return 0 (always succeeds).
 */
static inline int init_reset(void)
{
	memq_link_t *link;

	/* Initialize done pool. */
	mem_init(mem_done.pool, sizeof(struct node_rx_event_done), EVENT_DONE_MAX, &mem_done.free);

	/* Initialize done link pool. */
	mem_init(mem_link_done.pool, sizeof(memq_link_t), EVENT_DONE_MAX, &mem_link_done.free);

	/* Initialize rx pool. */
	mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
		 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE), &mem_pdu_rx.free);

	/* Initialize rx link pool. */
	mem_init(mem_link_rx.pool, sizeof(memq_link_t),
		 sizeof(mem_link_rx.pool) / sizeof(memq_link_t), &mem_link_rx.free);

	/* Acquire a link to initialize ull rx memq */
	link = mem_acquire(&mem_link_rx.free);

	/* Initialize ull rx memq */
	MEMQ_INIT(ull_rx, link);

	/* Acquire a link to initialize ll rx memq */
	link = mem_acquire(&mem_link_rx.free);

	/* Initialize ll rx memq */
	MEMQ_INIT(ll_rx, link);

	/* Grant the full buffer quota, then allocate rx free buffers */
	mem_link_rx.quota_pdu = RX_CNT;
	rx_alloc(UINT8_MAX);

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	/* Reset CPR mutex */
	cpr_active_reset();
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

	return 0;
}
331
/* Refill the free rx buffer FIFOs from the pdu and link pools.
 *
 * @param max Upper bound on non-connection buffers allocated in this call;
 *            connection (LL PDU) buffers are refilled first, greedily,
 *            while buffer quota remains.
 *
 * Stops early if either pool runs dry; a link acquired without a matching
 * pdu buffer is returned to its pool so nothing leaks.
 */
static inline void rx_alloc(uint8_t max)
{
	uint8_t idx;

#if defined(CONFIG_BT_CONN)
	/* Fill the connection rx free FIFO while quota and FIFO space last */
	while (mem_link_rx.quota_pdu && MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_rx.free);
		if (!link) {
			break;
		}

		rx = mem_acquire(&mem_pdu_rx.free);
		if (!rx) {
			/* No pdu buffer: give the link back and stop */
			mem_release(link, &mem_link_rx.free);
			break;
		}

		link->mem = NULL;
		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);

		/* One buffer consumed from the shared quota */
		ll_rx_link_inc_quota(-1);
	}
#endif /* CONFIG_BT_CONN */

	/* Clamp the request to the remaining quota */
	if (max > mem_link_rx.quota_pdu) {
		max = mem_link_rx.quota_pdu;
	}

	/* Fill the non-connection rx free FIFO up to the clamped maximum */
	while ((max--) && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_rx.free);
		if (!link) {
			break;
		}

		rx = mem_acquire(&mem_pdu_rx.free);
		if (!rx) {
			/* No pdu buffer: give the link back and stop */
			mem_release(link, &mem_link_rx.free);
			break;
		}

		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);

		/* One buffer consumed from the shared quota */
		ll_rx_link_inc_quota(-1);
	}
}
387
#if defined(CONFIG_BT_CTLR_ISO) || \
	defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) || \
	defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* Stub: wrap-around time arithmetic is not modelled; always returns 0
 * regardless of the inputs.
 */
uint32_t ull_get_wrapped_time_us(uint32_t time_now_us, int32_t time_diff_us)
{
	return 0;
}
#endif /* CONFIG_BT_CTLR_ISO ||
	* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER ||
	* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER
	*/
399