1 /*
2 * Copyright (c) 2020 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/types.h>
8 #include <zephyr/devicetree.h>
9 #include <zephyr/ztest.h>
10
11 #include <zephyr/bluetooth/hci.h>
12
13 #include "hal/cpu_vendor_hal.h"
14 #include "hal/ccm.h"
15
16 #include "util/mem.h"
17 #include "util/mfifo.h"
18 #include "util/memq.h"
19 #include "util/dbuf.h"
20 #include "util.h"
21
22 #include "pdu_df.h"
23 #include "lll/pdu_vendor.h"
24 #include "pdu.h"
25 #include "ll.h"
26 #include "ll_feat.h"
27 #include "ll_settings.h"
28 #include "lll.h"
29 #include "lll/lll_vendor.h"
30 #include "lll/lll_adv_types.h"
31 #include "lll_adv.h"
32 #include "lll/lll_adv_pdu.h"
33 #include "lll_scan.h"
34 #include "lll_sync.h"
35 #include "lll/lll_df_types.h"
36 #include "lll_conn.h"
37
38 #include "ull_conn_internal.h"
39
#define EVENT_DONE_MAX 3
/* Backing storage for elements in mfifo_done */
static struct {
	void *free;
	uint8_t pool[sizeof(struct node_rx_event_done) * EVENT_DONE_MAX];
} mem_done;

/* Pool of memq links, one per event-done node above. */
static struct {
	void *free;
	uint8_t pool[sizeof(memq_link_t) * EVENT_DONE_MAX];
} mem_link_done;
51
/* Connection data PDU rx buffer count; one extra control buffer is
 * reserved when both PHY update and data length update are enabled.
 */
#if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_DATA_LENGTH)
#define LL_PDU_RX_CNT (3 + 128)
#else
#define LL_PDU_RX_CNT (2 + 128)
#endif

/* Non-connection rx buffer count and the total across both FIFOs. */
#define PDU_RX_CNT (CONFIG_BT_CTLR_RX_BUFFERS + 3)
#define RX_CNT (PDU_RX_CNT + LL_PDU_RX_CNT)

/* FIFO of free (non-connection) rx PDU node pointers. */
static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);

#if defined(CONFIG_BT_RX_USER_PDU_LEN)
#define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
#else
#define PDU_RX_USER_PDU_OCTETS_MAX 0
#endif
/* Bytes preceding the PDU payload inside struct node_rx_pdu. */
#define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
#define NODE_RX_STRUCT_OVERHEAD (NODE_RX_HEADER_SIZE)

/* Largest advertising-channel PDU that may be received. */
#define PDU_ADV_SIZE MAX(PDU_AC_LL_SIZE_MAX, \
			 (PDU_AC_LL_HEADER_SIZE + LL_EXT_OCTETS_RX_MAX))

/* Largest data-channel PDU that may be received. */
#define PDU_DATA_SIZE (PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX)

/* Rounded-up pool element size, large enough for any rx node variant. */
#define PDU_RX_NODE_POOL_ELEMENT_SIZE \
	MROUND(NODE_RX_STRUCT_OVERHEAD + \
	       MAX(MAX(PDU_ADV_SIZE, PDU_DATA_SIZE), PDU_RX_USER_PDU_OCTETS_MAX))
79
/*
 * Arbitrary, generously sized pool; the tests never exercise
 * pool exhaustion, so exact sizing is not important.
 */
#define PDU_RX_POOL_SIZE 16384

/* Pool of rx PDU nodes, carved into PDU_RX_NODE_POOL_ELEMENT_SIZE
 * chunks by init_reset().
 */
static struct {
	void *free;
	uint8_t pool[PDU_RX_POOL_SIZE];
} mem_pdu_rx;

/*
 * Arbitrary, generously sized pool (see PDU_RX_POOL_SIZE above).
 */
#define LINK_RX_POOL_SIZE 16384
/* Pool of memq links for rx nodes, plus the buffer quota counter. */
static struct {
	uint8_t quota_pdu; /* Number of un-utilized buffers */

	void *free;
	uint8_t pool[LINK_RX_POOL_SIZE];
} mem_link_rx;

/* Queues between LLL and ULL, and between ULL and the LL thread. */
static MEMQ_DECLARE(ull_rx);
static MEMQ_DECLARE(ll_rx);

#if defined(CONFIG_BT_CONN)
/* FIFO of free connection data PDU rx node pointers. */
static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
#endif /* CONFIG_BT_CONN */

/* Unit-test rx queue: ll_rx_put() appends nodes here so the test
 * code can inspect what the mocked controller emitted.
 */
#ifdef ZTEST_UNITTEST
extern sys_slist_t ut_rx_q;
#else
sys_slist_t ut_rx_q;
#endif
113
114 static inline int init_reset(void);
115 static inline void rx_alloc(uint8_t max);
116 static inline void ll_rx_link_inc_quota(int8_t delta);
117
/* Reset the mocked lower link layer.
 *
 * Re-initializes the connection rx free FIFO (when built in) and all
 * memory pools/queues via init_reset().
 */
void ll_reset(void)
{
#if defined(CONFIG_BT_CONN)
	/* ll_pdu_rx_free is only defined under CONFIG_BT_CONN (see its
	 * MFIFO_DEFINE above); guard the init the same way so the file
	 * still builds with connections disabled.
	 */
	MFIFO_INIT(ll_pdu_rx_free);
#endif /* CONFIG_BT_CONN */

	init_reset();
}
123
ll_rx_mem_release(void ** node_rx)124 void ll_rx_mem_release(void **node_rx)
125 {
126 struct node_rx_hdr *rx;
127
128 rx = *node_rx;
129 while (rx) {
130 struct node_rx_hdr *rx_free;
131
132 rx_free = rx;
133 rx = rx->next;
134
135 switch (rx_free->type) {
136 case NODE_RX_TYPE_DC_PDU:
137 case NODE_RX_TYPE_CONN_UPDATE:
138 case NODE_RX_TYPE_ENC_REFRESH:
139 case NODE_RX_TYPE_PHY_UPDATE:
140 case NODE_RX_TYPE_CIS_REQUEST:
141 case NODE_RX_TYPE_CIS_ESTABLISHED:
142
143 ll_rx_link_inc_quota(1);
144 mem_release(rx_free, &mem_pdu_rx.free);
145 break;
146 default:
147 __ASSERT(0, "Tried to release unknown rx node type");
148 break;
149 }
150 }
151
152 *node_rx = rx;
153
154 rx_alloc(UINT8_MAX);
155 }
156
/* Adjust the remaining rx buffer quota.
 *
 * @param delta Signed change: +1 when a buffer is released back,
 *              -1 when one is handed out (see rx_alloc()).
 */
static inline void ll_rx_link_inc_quota(int8_t delta)
{
	mem_link_rx.quota_pdu += delta;
}
161
/* Allocate a memq link from the rx link pool (NULL when exhausted). */
void *ll_rx_link_alloc(void)
{
	return mem_acquire(&mem_link_rx.free);
}

/* Return a memq link to the rx link pool. */
void ll_rx_link_release(void *link)
{
	mem_release(link, &mem_link_rx.free);
}

/* Allocate an rx PDU node from the rx node pool (NULL when exhausted). */
void *ll_rx_alloc(void)
{
	return mem_acquire(&mem_pdu_rx.free);
}

/* Return an rx PDU node to the rx node pool. */
void ll_rx_release(void *node_rx)
{
	mem_release(node_rx, &mem_pdu_rx.free);
}
181
/* Hand an rx node to the host side.
 *
 * In this mock the node is appended to the unit-test queue ut_rx_q
 * instead of a memq; nodes marked NODE_RX_TYPE_RELEASE are dropped.
 * The link argument is unused here.
 */
void ll_rx_put(memq_link_t *link, void *rx)
{
	struct node_rx_hdr *hdr = (struct node_rx_hdr *)rx;

	if (hdr->type == NODE_RX_TYPE_RELEASE) {
		/* Only put/sched if node was not marked for release */
		return;
	}

	sys_slist_append(&ut_rx_q, (sys_snode_t *)rx);
}
189
/* Stub: the real implementation wakes the LL thread; tests read
 * ut_rx_q directly, so there is nothing to schedule here.
 */
void ll_rx_sched(void)
{
}

/* Queue an rx node and (nominally) schedule its processing. */
void ll_rx_put_sched(memq_link_t *link, void *rx)
{
	ll_rx_put(link, rx);
	ll_rx_sched();
}
199
ll_pdu_rx_alloc_peek(uint8_t count)200 void *ll_pdu_rx_alloc_peek(uint8_t count)
201 {
202 if (count > MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) {
203 return NULL;
204 }
205
206 return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
207 }
208
/* Dequeue one free connection rx buffer (NULL when none available). */
void *ll_pdu_rx_alloc(void)
{
	return MFIFO_DEQUEUE(ll_pdu_rx_free);
}
213
/* Stub: tx acknowledgements are not tracked by this mock. */
void ll_tx_ack_put(uint16_t handle, struct node_tx *node)
{
}

/* Stub: ticker status completion is not exercised in unit tests. */
void ull_ticker_status_give(uint32_t status, void *param)
{
}

/* Return the status previously stored in *ret_cb; unlike the real
 * implementation this never blocks waiting for the ticker operation.
 */
uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
{
	return *ret_cb;
}

/* Stub: disable-marking is a no-op; always report "not marked". */
void *ull_disable_mark(void *param)
{
	return NULL;
}

/* Stub: see ull_disable_mark(). */
void *ull_disable_unmark(void *param)
{
	return NULL;
}

/* Stub: no object is ever marked for disable in this mock. */
void *ull_disable_mark_get(void)
{
	return NULL;
}

/* Stub: pretend stopping the ticker always succeeds. */
int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param, void *lll_disable)
{
	return 0;
}
246
/* Stub: update-marking is a no-op; always report "not marked". */
void *ull_update_mark(void *param)
{
	return NULL;
}

/* Stub: see ull_update_mark(). */
void *ull_update_unmark(void *param)
{
	return NULL;
}

/* Stub: no object is ever marked for update in this mock. */
void *ull_update_mark_get(void)
{
	return NULL;
}

/* Stub: pretend disabling an LLL context always succeeds. */
int ull_disable(void *lll)
{
	return 0;
}

/* Stub: ULL-side rx node allocation is not used by these tests. */
void *ull_pdu_rx_alloc(void)
{
	return NULL;
}

/* Stub: the ULL rx path is bypassed; nodes go through ll_rx_put(). */
void ull_rx_put(memq_link_t *link, void *rx)
{
}

/* Stub: nothing to schedule. */
void ull_rx_sched(void)
{
}

/* Stub: see ull_rx_put() / ull_rx_sched(). */
void ull_rx_put_sched(memq_link_t *link, void *rx)
{
}

/* Forward declaration */
struct node_rx_event_done;
/* Stub: no drift compensation; the output parameters are left
 * untouched, so callers see whatever they passed in.
 */
void ull_drift_ticks_get(struct node_rx_event_done *done, uint32_t *ticks_drift_plus,
			 uint32_t *ticks_drift_minus)
{
}
290
/* (Re)initialize every pool and queue used by the mock.
 *
 * Carves the static byte arrays into free lists, initializes the
 * ull_rx/ll_rx memqs, resets the rx buffer quota to RX_CNT and
 * pre-fills the free-buffer FIFOs.
 *
 * @return Always 0; no failure path is exercised here.
 */
static inline int init_reset(void)
{
	memq_link_t *link;

	/* Initialize done pool. */
	mem_init(mem_done.pool, sizeof(struct node_rx_event_done), EVENT_DONE_MAX, &mem_done.free);

	/* Initialize done link pool. */
	mem_init(mem_link_done.pool, sizeof(memq_link_t), EVENT_DONE_MAX, &mem_link_done.free);

	/* Initialize rx pool. */
	mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
		 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE), &mem_pdu_rx.free);

	/* Initialize rx link pool. */
	mem_init(mem_link_rx.pool, sizeof(memq_link_t),
		 sizeof(mem_link_rx.pool) / sizeof(memq_link_t), &mem_link_rx.free);

	/* Acquire a link to initialize ull rx memq.
	 * NOTE(review): mem_acquire() results are not NULL-checked; the
	 * pools are sized so this cannot fail in these tests.
	 */
	link = mem_acquire(&mem_link_rx.free);

	/* Initialize ull rx memq */
	MEMQ_INIT(ull_rx, link);

	/* Acquire a link to initialize ll rx memq */
	link = mem_acquire(&mem_link_rx.free);

	/* Initialize ll rx memq */
	MEMQ_INIT(ll_rx, link);

	/* Allocate rx free buffers */
	mem_link_rx.quota_pdu = RX_CNT;
	rx_alloc(UINT8_MAX);

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	/* Reset CPR mutex */
	cpr_active_reset();
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

	return 0;
}
332
/* Refill the free rx buffer FIFOs from the link and node pools.
 *
 * @param max Upper bound on how many non-connection buffers to enqueue
 *            into pdu_rx_free (pass UINT8_MAX for "as many as quota
 *            allows"). The connection FIFO (ll_pdu_rx_free) is filled
 *            first, limited only by quota and pool space.
 *
 * Each enqueued buffer consumes one link, one rx node and one unit of
 * quota; on partial failure the already-acquired link is released so
 * link and node allocations stay paired.
 */
static inline void rx_alloc(uint8_t max)
{
	uint8_t idx;

#if defined(CONFIG_BT_CONN)
	/* Fill the connection rx FIFO while quota and FIFO slots remain. */
	while (mem_link_rx.quota_pdu && MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_rx.free);
		if (!link) {
			break;
		}

		rx = mem_acquire(&mem_pdu_rx.free);
		if (!rx) {
			/* Keep link/node allocation paired. */
			mem_release(link, &mem_link_rx.free);
			break;
		}

		link->mem = NULL;
		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);

		ll_rx_link_inc_quota(-1);
	}
#endif /* CONFIG_BT_CONN */

	/* Clamp the request to the remaining quota. */
	if (max > mem_link_rx.quota_pdu) {
		max = mem_link_rx.quota_pdu;
	}

	/* Fill the non-connection rx FIFO up to `max` buffers. */
	while ((max--) && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_rx.free);
		if (!link) {
			break;
		}

		rx = mem_acquire(&mem_pdu_rx.free);
		if (!rx) {
			/* Keep link/node allocation paired. */
			mem_release(link, &mem_link_rx.free);
			break;
		}

		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);

		ll_rx_link_inc_quota(-1);
	}
}
388
#if defined(CONFIG_BT_CTLR_ISO) || \
	defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) || \
	defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* Stub: wrap-around time arithmetic is not needed by these tests. */
uint32_t ull_get_wrapped_time_us(uint32_t time_now_us, int32_t time_diff_us)
{
	return 0;
}
#endif /* CONFIG_BT_CTLR_ISO ||
	* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER ||
	* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER
	*/
400