1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stddef.h>
8
9 #include <soc.h>
10 #include <zephyr/kernel.h>
11 #include <zephyr/sys/byteorder.h>
12 #include <zephyr/bluetooth/hci_types.h>
13 #include <zephyr/bluetooth/buf.h>
14 #include <zephyr/sys/util_macro.h>
15
16 #include "hal/cpu.h"
17 #include "hal/ccm.h"
18 #include "hal/ticker.h"
19
20 #include "util/util.h"
21 #include "util/mem.h"
22 #include "util/memq.h"
23 #include "util/mfifo.h"
24 #include "util/mayfly.h"
25 #include "util/dbuf.h"
26
27 #include "ticker/ticker.h"
28
29 #include "pdu_df.h"
30 #include "lll/pdu_vendor.h"
31 #include "pdu.h"
32
33 #include "lll.h"
34 #include "lll/lll_adv_types.h"
35 #include "lll_adv.h"
36 #include "lll/lll_adv_pdu.h"
37 #include "lll_adv_iso.h"
38 #include "lll/lll_df_types.h"
39 #include "lll_sync.h"
40 #include "lll_sync_iso.h"
41 #include "lll_conn.h"
42 #include "lll_conn_iso.h"
43 #include "lll_iso_tx.h"
44 #include "lll/lll_vendor.h"
45
46 #include "ll_sw/ull_tx_queue.h"
47
48 #include "isoal.h"
49
50 #include "ull_adv_types.h"
51 #include "ull_sync_types.h"
52 #include "ull_conn_types.h"
53 #include "ull_iso_types.h"
54 #include "ull_conn_iso_types.h"
55 #include "ull_llcp.h"
56
57 #include "ull_internal.h"
58 #include "ull_adv_internal.h"
59 #include "ull_conn_internal.h"
60 #include "ull_iso_internal.h"
61 #include "ull_sync_iso_internal.h"
62 #include "ull_conn_iso_internal.h"
63
64 #include "ll_feat.h"
65
66 #include "hal/debug.h"
67
68 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
69 #include <zephyr/logging/log.h>
70 LOG_MODULE_REGISTER(bt_ctlr_ull_iso);
71
72 #if defined(CONFIG_BT_CTLR_CONN_ISO_STREAMS)
73 #define BT_CTLR_CONN_ISO_STREAMS CONFIG_BT_CTLR_CONN_ISO_STREAMS
74 #else /* !CONFIG_BT_CTLR_CONN_ISO_STREAMS */
75 #define BT_CTLR_CONN_ISO_STREAMS 0
76 #endif /* !CONFIG_BT_CTLR_CONN_ISO_STREAMS */
77
78 #if defined(CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT)
79 #define BT_CTLR_ADV_ISO_STREAMS (CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT)
80 #else /* !CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT */
81 #define BT_CTLR_ADV_ISO_STREAMS 0
82 #endif /* CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT */
83
84 #if defined(CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT)
85 #define BT_CTLR_SYNC_ISO_STREAMS (CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT)
86 #else /* !CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT */
87 #define BT_CTLR_SYNC_ISO_STREAMS 0
88 #endif /* CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT */
89
90 static int init_reset(void);
91
92 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
93 static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer);
94 static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
95 const size_t offset,
96 const uint8_t *sdu_payload,
97 const size_t consume_len);
98 static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
99 const uint16_t handle);
100 static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
101 const uint16_t handle,
102 const isoal_status_t status);
103 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
104
105 /* Allocate data path pools for RX/TX directions for each stream */
106 #define BT_CTLR_ISO_STREAMS ((2 * (BT_CTLR_CONN_ISO_STREAMS)) + \
107 BT_CTLR_ADV_ISO_STREAMS + \
108 BT_CTLR_SYNC_ISO_STREAMS)
109 #if BT_CTLR_ISO_STREAMS
110 static struct ll_iso_datapath datapath_pool[BT_CTLR_ISO_STREAMS];
111 #endif /* BT_CTLR_ISO_STREAMS */
112
113 static void *datapath_free;
114
115 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
116 static void ticker_resume_op_cb(uint32_t status, void *param);
117 static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
118 uint32_t remainder, uint16_t lazy, uint8_t force,
119 void *param);
120
121 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
122 /* ISO LL conformance tests require a PDU size of maximum 251 bytes + header */
123 #define ISO_RX_BUFFER_SIZE (2 + 251)
124
125 /* Declare the ISO rx node RXFIFO. This is a composite pool-backed MFIFO for
126 * rx_nodes. The declaration constructs the following data structures:
127 * - mfifo_iso_rx: FIFO with pointers to PDU buffers
128 * - mem_iso_rx: Backing data pool for PDU buffer elements
129 * - mem_link_iso_rx: Pool of memq_link_t elements
130 *
131 * One extra rx buffer is reserved for empty ISO PDU reception.
132 * Two extra links are reserved for use by the ll_iso_rx and ull_iso_rx memq.
133 */
134 static RXFIFO_DEFINE(iso_rx, ((NODE_RX_HEADER_SIZE) + (ISO_RX_BUFFER_SIZE)),
135 (CONFIG_BT_CTLR_ISO_RX_BUFFERS + 1U), 2U);
136
137 static MEMQ_DECLARE(ll_iso_rx);
138 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
139 static MEMQ_DECLARE(ull_iso_rx);
140 static void iso_rx_demux(void *param);
141 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
142 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
143
144 #define ISO_TEST_PACKET_COUNTER_SIZE 4U
145
146 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
147 void ll_iso_link_tx_release(void *link);
148 void ll_iso_tx_mem_release(void *node_tx);
149
150 #define NODE_TX_BUFFER_SIZE MROUND(offsetof(struct node_tx_iso, pdu) + \
151 offsetof(struct pdu_iso, payload) + \
152 MAX(LL_BIS_OCTETS_TX_MAX, \
153 LL_CIS_OCTETS_TX_MAX))
154
155 #define ISO_TEST_TX_BUFFER_SIZE 32U
156
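/* TX ISO PDU node pool and its matching memq link pool. One link is
 * allocated per TX node so each PDU produced by the ISO-AL source can be
 * enqueued towards LLL and later acknowledged/released independently.
 */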
157 static struct {
158 void *free;
159 uint8_t pool[NODE_TX_BUFFER_SIZE * BT_CTLR_ISO_TX_PDU_BUFFERS];
160 } mem_iso_tx;
161
162 static struct {
163 void *free;
164 uint8_t pool[sizeof(memq_link_t) * BT_CTLR_ISO_TX_PDU_BUFFERS];
165 } mem_link_iso_tx;
166
167 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
168
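/* Backs the HCI LE Read ISO TX Sync command: looks up the ISO-AL source
 * attached to the handle's input data path and reports the packet
 * sequence number, timestamp and time offset tracked for that source.
 */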
169 uint8_t ll_read_iso_tx_sync(uint16_t handle, uint16_t *seq,
170 uint32_t *timestamp, uint32_t *offset)
171 {
172 if (IS_CIS_HANDLE(handle)) {
173 struct ll_iso_datapath *dp = NULL;
174 struct ll_conn_iso_stream *cis;
175
176 cis = ll_conn_iso_stream_get(handle);
177
178 if (cis) {
179 dp = cis->hdr.datapath_in;
180 }
181
182 if (dp &&
183 isoal_tx_get_sync_info(dp->source_hdl, seq,
184 timestamp, offset) == ISOAL_STATUS_OK) {
185 return BT_HCI_ERR_SUCCESS;
186 }
187
188 return BT_HCI_ERR_CMD_DISALLOWED;
189
190 } else if (IS_ADV_ISO_HANDLE(handle)) {
191 const struct lll_adv_iso_stream *adv_stream;
192 uint16_t stream_handle;
193
194 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
195 adv_stream = ull_adv_iso_stream_get(stream_handle);
196 if (!adv_stream || !adv_stream->dp ||
197 isoal_tx_get_sync_info(adv_stream->dp->source_hdl, seq,
198 timestamp, offset) != ISOAL_STATUS_OK) {
199 return BT_HCI_ERR_CMD_DISALLOWED;
200 }
201
202 return BT_HCI_ERR_SUCCESS;
203
204 } else if (IS_SYNC_ISO_HANDLE(handle)) {
205 return BT_HCI_ERR_CMD_DISALLOWED;
206 }
207
208 return BT_HCI_ERR_UNKNOWN_CONN_ID;
209 }
210
211 static inline bool path_is_vendor_specific(uint8_t path_id)
212 {
213 return (path_id >= BT_HCI_DATAPATH_ID_VS &&
214 path_id <= BT_HCI_DATAPATH_ID_VS_END);
215 }
216
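/* Backs the HCI LE Setup ISO Data Path command. The handle is resolved to
 * a CIS, broadcast (ADV) ISO stream or synchronized (SYNC) ISO stream, the
 * relevant timing parameters are collected, and an ISO-AL sink
 * (Controller to Host) or source (Host to Controller) is created and
 * enabled for the stream.
 */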
217 uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
218 uint8_t coding_format, uint16_t company_id,
219 uint16_t vs_codec_id, uint32_t controller_delay,
220 uint8_t codec_config_len, uint8_t *codec_config)
221 {
222 struct lll_sync_iso_stream *sync_stream = NULL;
223 struct lll_adv_iso_stream *adv_stream = NULL;
224 struct ll_conn_iso_stream *cis = NULL;
225 struct ll_iso_datapath *dp;
226 uint32_t stream_sync_delay;
227 uint32_t group_sync_delay;
228 uint8_t flush_timeout;
229 uint16_t iso_interval;
230 uint32_t sdu_interval;
231 uint8_t burst_number;
232 uint8_t max_octets;
233 uint8_t framed;
234 uint8_t role;
235
236 ARG_UNUSED(controller_delay);
237 ARG_UNUSED(codec_config);
238
239 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && IS_CIS_HANDLE(handle)) {
240 struct ll_conn_iso_group *cig;
241 struct ll_conn *conn;
242
243 /* If the Host attempts to set a data path with a Connection
244 * Handle that does not exist or that is not for a CIS or a BIS,
245 * the Controller shall return the error code Unknown Connection
246 * Identifier (0x02)
247 */
248 cis = ll_conn_iso_stream_get(handle);
249 if (!cis || !cis->group) {
250 /* CIS does not belong to a CIG */
251 return BT_HCI_ERR_UNKNOWN_CONN_ID;
252 }
253
254 conn = ll_connected_get(cis->lll.acl_handle);
255 if (conn) {
256 /* If we're still waiting for accept/response from
257 * host, path setup is premature and we must return
258 * disallowed status.
259 */
260 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
261 const uint8_t cis_waiting = ull_cp_cc_awaiting_reply(conn);
262
263 if (cis_waiting) {
264 return BT_HCI_ERR_CMD_DISALLOWED;
265 }
266 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
267 }
268
269 if ((path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR && cis->hdr.datapath_in) ||
270 (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST && cis->hdr.datapath_out)) {
271 /* Data path has been set up, can only do setup once */
272 return BT_HCI_ERR_CMD_DISALLOWED;
273 }
274
275 cig = cis->group;
276
277 role = cig->lll.role;
278 iso_interval = cig->iso_interval;
279 group_sync_delay = cig->sync_delay;
280 stream_sync_delay = cis->sync_delay;
281 framed = cis->framed;
282
283 if (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) {
284 /* Create sink for RX data path */
285 burst_number = cis->lll.rx.bn;
286 flush_timeout = cis->lll.rx.ft;
287 max_octets = cis->lll.rx.max_pdu;
288
289 if (role) {
290 /* peripheral */
291 sdu_interval = cig->c_sdu_interval;
292 } else {
293 /* central */
294 sdu_interval = cig->p_sdu_interval;
295 }
296 } else {
297 /* path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR */
298 burst_number = cis->lll.tx.bn;
299 flush_timeout = cis->lll.tx.ft;
300 max_octets = cis->lll.tx.max_pdu;
301
302 if (role) {
303 /* peripheral */
304 sdu_interval = cig->p_sdu_interval;
305 } else {
306 /* central */
307 sdu_interval = cig->c_sdu_interval;
308 }
309 }
310 #if defined(CONFIG_BT_CTLR_ADV_ISO)
311 } else if (IS_ADV_ISO_HANDLE(handle)) {
312 struct ll_adv_iso_set *adv_iso;
313 struct lll_adv_iso *lll_iso;
314 uint16_t stream_handle;
315
316 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
317 adv_stream = ull_adv_iso_stream_get(stream_handle);
318 if (!adv_stream || adv_stream->dp) {
319 return BT_HCI_ERR_CMD_DISALLOWED;
320 }
321
322 adv_iso = ull_adv_iso_by_stream_get(stream_handle);
323 lll_iso = &adv_iso->lll;
324
325 role = ISOAL_ROLE_BROADCAST_SOURCE;
326 iso_interval = lll_iso->iso_interval;
327 sdu_interval = lll_iso->sdu_interval;
328 burst_number = lll_iso->bn;
329 flush_timeout = 0U; /* Not used for Broadcast ISO */
330 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
331 lll_iso->nse, lll_iso->sub_interval,
332 lll_iso->phy, lll_iso->max_pdu,
333 lll_iso->enc);
334 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
335 framed = lll_iso->framing;
336 max_octets = lll_iso->max_pdu;
337 #endif /* CONFIG_BT_CTLR_ADV_ISO */
338
339 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
340 } else if (IS_SYNC_ISO_HANDLE(handle)) {
341 struct ll_sync_iso_set *sync_iso;
342 struct lll_sync_iso *lll_iso;
343 uint16_t stream_handle;
344
345 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
346 sync_stream = ull_sync_iso_stream_get(stream_handle);
347 if (!sync_stream || sync_stream->dp) {
348 return BT_HCI_ERR_CMD_DISALLOWED;
349 }
350
351 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
352 lll_iso = &sync_iso->lll;
353
354 role = ISOAL_ROLE_BROADCAST_SINK;
355 iso_interval = lll_iso->iso_interval;
356 sdu_interval = lll_iso->sdu_interval;
357 burst_number = lll_iso->bn;
358
359 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
360 lll_iso->nse, lll_iso->sub_interval,
361 lll_iso->phy, lll_iso->max_pdu,
362 lll_iso->enc);
363 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
364 framed = lll_iso->framing;
365 max_octets = lll_iso->max_pdu;
366 flush_timeout = 0U; /* Not used for Broadcast ISO */
367 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
368
369 } else {
370 return BT_HCI_ERR_UNKNOWN_CONN_ID;
371 }
372
373 if (path_is_vendor_specific(path_id) &&
374 (!IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) ||
375 !ll_data_path_configured(path_dir, path_id))) {
376 /* Data path must be configured prior to setup */
377 return BT_HCI_ERR_CMD_DISALLOWED;
378 }
379
380 /* If Codec_Configuration_Length non-zero and Codec_ID set to
381 * transparent air mode, the Controller shall return the error code
382 * Invalid HCI Command Parameters (0x12).
383 */
384 if (codec_config_len &&
385 (vs_codec_id == BT_HCI_CODING_FORMAT_TRANSPARENT)) {
386 return BT_HCI_ERR_INVALID_PARAM;
387 }
388
389 /* Allocate and configure datapath */
390 dp = mem_acquire(&datapath_free);
391 if (!dp) {
392 return BT_HCI_ERR_CMD_DISALLOWED;
393 }
394
395 dp->path_dir = path_dir;
396 dp->path_id = path_id;
397 dp->coding_format = coding_format;
398 dp->company_id = company_id;
399
400 /* TODO dp->sync_delay = controller_delay; ?*/
401
402 if (false) {
403
404 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
405 } else if ((path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) &&
406 (cis || sync_stream)) {
407 isoal_sink_handle_t sink_handle;
408 isoal_status_t err;
409
410 if (path_id == BT_HCI_DATAPATH_ID_HCI) {
411 /* Not vendor specific, thus alloc and emit functions
412 * known
413 */
414 err = isoal_sink_create(handle, role, framed,
415 burst_number, flush_timeout,
416 sdu_interval, iso_interval,
417 stream_sync_delay,
418 group_sync_delay,
419 sink_sdu_alloc_hci,
420 sink_sdu_emit_hci,
421 sink_sdu_write_hci,
422 &sink_handle);
423 } else {
424 /* Set up vendor specific data path */
425 isoal_sink_sdu_alloc_cb sdu_alloc;
426 isoal_sink_sdu_emit_cb sdu_emit;
427 isoal_sink_sdu_write_cb sdu_write;
428
429 /* Request vendor sink callbacks for path */
430 if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) &&
431 ll_data_path_sink_create(handle, dp, &sdu_alloc,
432 &sdu_emit, &sdu_write)) {
433 err = isoal_sink_create(handle, role, framed,
434 burst_number,
435 flush_timeout,
436 sdu_interval,
437 iso_interval,
438 stream_sync_delay,
439 group_sync_delay,
440 sdu_alloc, sdu_emit,
441 sdu_write,
442 &sink_handle);
443 } else {
444 ull_iso_datapath_release(dp);
445
446 return BT_HCI_ERR_CMD_DISALLOWED;
447 }
448 }
449
450 if (!err) {
451 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && cis != NULL) {
452 cis->hdr.datapath_out = dp;
453 }
454
455 if (sync_stream) {
456 sync_stream->dp = dp;
457 }
458
459 dp->sink_hdl = sink_handle;
460 isoal_sink_enable(sink_handle);
461 } else {
462 ull_iso_datapath_release(dp);
463
464 return BT_HCI_ERR_CMD_DISALLOWED;
465 }
466 #else /* !CONFIG_BT_CTLR_SYNC_ISO && !CONFIG_BT_CTLR_CONN_ISO */
467 ARG_UNUSED(sync_stream);
468 #endif /* !CONFIG_BT_CTLR_SYNC_ISO && !CONFIG_BT_CTLR_CONN_ISO */
469
470 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
471 } else if ((path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR) &&
472 (cis || adv_stream)) {
473 isoal_source_handle_t source_handle;
474 isoal_status_t err;
475
476 /* Create source for TX data path */
477 isoal_source_pdu_alloc_cb pdu_alloc;
478 isoal_source_pdu_write_cb pdu_write;
479 isoal_source_pdu_emit_cb pdu_emit;
480 isoal_source_pdu_release_cb pdu_release;
481
482 if (path_is_vendor_specific(path_id)) {
483 if (!IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) ||
484 !ll_data_path_source_create(handle, dp,
485 &pdu_alloc, &pdu_write,
486 &pdu_emit,
487 &pdu_release)) {
488 ull_iso_datapath_release(dp);
489
490 return BT_HCI_ERR_CMD_DISALLOWED;
491 }
492 } else {
493 /* Set default callbacks when not vendor specific
494 			 * or when the vendor-specific path uses the same callbacks.
495 */
496 pdu_alloc = ll_iso_pdu_alloc;
497 pdu_write = ll_iso_pdu_write;
498 pdu_emit = ll_iso_pdu_emit;
499 pdu_release = ll_iso_pdu_release;
500 }
501
502 err = isoal_source_create(handle, role, framed, burst_number,
503 flush_timeout, max_octets,
504 sdu_interval, iso_interval,
505 stream_sync_delay, group_sync_delay,
506 pdu_alloc, pdu_write, pdu_emit,
507 pdu_release, &source_handle);
508
509 if (!err) {
510 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && cis != NULL) {
511 cis->hdr.datapath_in = dp;
512 }
513
514 if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && adv_stream != NULL) {
515 adv_stream->dp = dp;
516 }
517
518 dp->source_hdl = source_handle;
519 isoal_source_enable(source_handle);
520 } else {
521 ull_iso_datapath_release(dp);
522
523 return BT_HCI_ERR_CMD_DISALLOWED;
524 }
525
526 #else /* !CONFIG_BT_CTLR_ADV_ISO && !CONFIG_BT_CTLR_CONN_ISO */
527 ARG_UNUSED(adv_stream);
528 #endif /* !CONFIG_BT_CTLR_ADV_ISO && !CONFIG_BT_CTLR_CONN_ISO */
529
530 } else {
531 return BT_HCI_ERR_CMD_DISALLOWED;
532 }
533
534 return BT_HCI_ERR_SUCCESS;
535 }
536
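/* Backs the HCI LE Remove ISO Data Path command: destroys the ISO-AL sink
 * and/or source for the requested direction(s) and returns the datapath
 * object to the pool.
 */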
537 uint8_t ll_remove_iso_path(uint16_t handle, uint8_t path_dir)
538 {
539 /* If the Host issues this command with a Connection_Handle that does
540 * not exist or is not for a CIS or a BIS, the Controller shall return
541 * the error code Unknown Connection Identifier (0x02).
542 */
543 if (false) {
544
545 #if defined(CONFIG_BT_CTLR_CONN_ISO)
546 } else if (IS_CIS_HANDLE(handle)) {
547 struct ll_conn_iso_stream *cis;
548 struct ll_iso_stream_hdr *hdr;
549 struct ll_iso_datapath *dp;
550
551 cis = ll_conn_iso_stream_get(handle);
552 hdr = &cis->hdr;
553
554 if (path_dir & BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR)) {
555 dp = hdr->datapath_in;
556 if (dp) {
557 isoal_source_destroy(dp->source_hdl);
558
559 hdr->datapath_in = NULL;
560 ull_iso_datapath_release(dp);
561 } else {
562 /* Datapath was not previously set up */
563 return BT_HCI_ERR_CMD_DISALLOWED;
564 }
565 }
566
567 if (path_dir & BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST)) {
568 dp = hdr->datapath_out;
569 if (dp) {
570 isoal_sink_destroy(dp->sink_hdl);
571
572 hdr->datapath_out = NULL;
573 ull_iso_datapath_release(dp);
574 } else {
575 /* Datapath was not previously set up */
576 return BT_HCI_ERR_CMD_DISALLOWED;
577 }
578 }
579 #endif /* CONFIG_BT_CTLR_CONN_ISO */
580
581 #if defined(CONFIG_BT_CTLR_ADV_ISO)
582 } else if (IS_ADV_ISO_HANDLE(handle)) {
583 struct lll_adv_iso_stream *adv_stream;
584 struct ll_iso_datapath *dp;
585 uint16_t stream_handle;
586
587 if (!(path_dir & BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR))) {
588 return BT_HCI_ERR_CMD_DISALLOWED;
589 }
590
591 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
592 adv_stream = ull_adv_iso_stream_get(stream_handle);
593 if (!adv_stream) {
594 return BT_HCI_ERR_CMD_DISALLOWED;
595 }
596
597 dp = adv_stream->dp;
598 if (dp) {
599 adv_stream->dp = NULL;
600 isoal_source_destroy(dp->source_hdl);
601 ull_iso_datapath_release(dp);
602 } else {
603 /* Datapath was not previously set up */
604 return BT_HCI_ERR_CMD_DISALLOWED;
605 }
606 #endif /* CONFIG_BT_CTLR_ADV_ISO */
607
608 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
609 } else if (IS_SYNC_ISO_HANDLE(handle)) {
610 struct lll_sync_iso_stream *sync_stream;
611 struct ll_iso_datapath *dp;
612 uint16_t stream_handle;
613
614 if (!(path_dir & BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST))) {
615 return BT_HCI_ERR_CMD_DISALLOWED;
616 }
617
618 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
619 sync_stream = ull_sync_iso_stream_get(stream_handle);
620 if (!sync_stream) {
621 return BT_HCI_ERR_CMD_DISALLOWED;
622 }
623
624 dp = sync_stream->dp;
625 if (dp) {
626 sync_stream->dp = NULL;
627 isoal_sink_destroy(dp->sink_hdl);
628 ull_iso_datapath_release(dp);
629 } else {
630 /* Datapath was not previously set up */
631 return BT_HCI_ERR_CMD_DISALLOWED;
632 }
633 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
634
635 } else {
636 return BT_HCI_ERR_CMD_DISALLOWED;
637 }
638
639 return 0;
640 }
641
642 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
643 /* The sdu_alloc function is called before combining PDUs into an SDU. Here we
644  * store the payload number associated with the first PDU, for the unframed use case.
645 */
646 static isoal_status_t ll_iso_test_sdu_alloc(const struct isoal_sink *sink_ctx,
647 const struct isoal_pdu_rx *valid_pdu,
648 struct isoal_sdu_buffer *sdu_buffer)
649 {
650 uint16_t handle;
651
652 handle = sink_ctx->session.handle;
653
654 if (IS_CIS_HANDLE(handle)) {
655 if (!sink_ctx->session.framed) {
656 struct ll_conn_iso_stream *cis;
657
658 cis = ll_iso_stream_connected_get(sink_ctx->session.handle);
659 LL_ASSERT(cis);
660
661 /* For unframed, SDU counter is the payload number */
662 cis->hdr.test_mode.rx.sdu_counter =
663 (uint32_t)valid_pdu->meta->payload_number;
664 }
665 } else if (IS_SYNC_ISO_HANDLE(handle)) {
666 if (!sink_ctx->session.framed) {
667 struct lll_sync_iso_stream *sync_stream;
668 uint16_t stream_handle;
669
670 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
671 sync_stream = ull_sync_iso_stream_get(stream_handle);
672 LL_ASSERT(sync_stream);
673
674 sync_stream->test_mode->sdu_counter =
675 (uint32_t)valid_pdu->meta->payload_number;
676 }
677 }
678
679 return sink_sdu_alloc_hci(sink_ctx, valid_pdu, sdu_buffer);
680 }
681
682 /* The sdu_emit function is called whenever an SDU is combined and ready to be sent
683 * further in the data path. This injected implementation performs statistics on
684 * the SDU and then discards it.
685 */
686 static isoal_status_t ll_iso_test_sdu_emit(const struct isoal_sink *sink_ctx,
687 const struct isoal_emitted_sdu_frag *sdu_frag,
688 const struct isoal_emitted_sdu *sdu)
689 {
690 struct ll_iso_rx_test_mode *test_mode_rx;
691 isoal_sdu_len_t length;
692 isoal_status_t status;
693 struct net_buf *buf;
694 uint32_t sdu_counter;
695 uint16_t max_sdu;
696 uint16_t handle;
697 uint8_t framed;
698
699 handle = sink_ctx->session.handle;
700 buf = (struct net_buf *)sdu_frag->sdu.contents.dbuf;
701
702 if (IS_CIS_HANDLE(handle)) {
703 struct ll_conn_iso_stream *cis;
704
705 cis = ll_iso_stream_connected_get(sink_ctx->session.handle);
706 LL_ASSERT(cis);
707
708 test_mode_rx = &cis->hdr.test_mode.rx;
709 max_sdu = cis->c_max_sdu;
710 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
711 } else if (IS_SYNC_ISO_HANDLE(handle)) {
712 struct lll_sync_iso_stream *sync_stream;
713 struct ll_sync_iso_set *sync_iso;
714 uint16_t stream_handle;
715
716 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
717 sync_stream = ull_sync_iso_stream_get(stream_handle);
718 LL_ASSERT(sync_stream);
719
720 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
721
722 test_mode_rx = sync_stream->test_mode;
723 max_sdu = sync_iso->lll.max_sdu;
724 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
725 } else {
726 /* Handle is out of range */
727 status = ISOAL_STATUS_ERR_SDU_EMIT;
728 net_buf_unref(buf);
729
730 return status;
731 }
732
733 length = sink_ctx->sdu_production.sdu_written;
734 framed = sink_ctx->session.framed;
735
736 /* In BT_HCI_ISO_TEST_ZERO_SIZE_SDU mode, all SDUs must have length 0 and there is
737 * no sdu_counter field. In the other modes, the first 4 bytes must contain a
738 * packet counter, which is used as SDU counter. The sdu_counter is extracted
739 * regardless of mode as a sanity check, unless the length does not allow it.
740 */
741 if (length >= ISO_TEST_PACKET_COUNTER_SIZE) {
742 sdu_counter = sys_get_le32(buf->data);
743 } else {
744 sdu_counter = 0U;
745 }
746
747 switch (sdu_frag->sdu.status) {
748 case ISOAL_SDU_STATUS_VALID:
749 if (framed && test_mode_rx->sdu_counter == 0U) {
750 /* BT 5.3, Vol 6, Part B, section 7.2:
751 * When using framed PDUs the expected value of the SDU counter
752 * shall be initialized with the value of the SDU counter of the
753 * first valid received SDU.
754 */
755 test_mode_rx->sdu_counter = sdu_counter;
756 }
757
758 switch (test_mode_rx->payload_type) {
759 case BT_HCI_ISO_TEST_ZERO_SIZE_SDU:
760 if (length == 0) {
761 test_mode_rx->received_cnt++;
762 } else {
763 test_mode_rx->failed_cnt++;
764 }
765 break;
766
767 case BT_HCI_ISO_TEST_VARIABLE_SIZE_SDU:
768 if ((length >= ISO_TEST_PACKET_COUNTER_SIZE) &&
769 (length <= max_sdu) &&
770 (sdu_counter == test_mode_rx->sdu_counter)) {
771 test_mode_rx->received_cnt++;
772 } else {
773 test_mode_rx->failed_cnt++;
774 }
775 break;
776
777 case BT_HCI_ISO_TEST_MAX_SIZE_SDU:
778 if ((length == max_sdu) &&
779 (sdu_counter == test_mode_rx->sdu_counter)) {
780 test_mode_rx->received_cnt++;
781 } else {
782 test_mode_rx->failed_cnt++;
783 }
784 break;
785
786 default:
787 LL_ASSERT(0);
788 return ISOAL_STATUS_ERR_SDU_EMIT;
789 }
790 break;
791
792 case ISOAL_SDU_STATUS_ERRORS:
793 case ISOAL_SDU_STATUS_LOST_DATA:
794 test_mode_rx->missed_cnt++;
795 break;
796 }
797
798 /* In framed mode, we may start incrementing the SDU counter when rx_sdu_counter
799 * becomes non zero (initial state), or in case of zero-based counting, if zero
800 * is actually the first valid SDU counter received.
801 */
802 if (framed && (test_mode_rx->sdu_counter ||
803 (sdu_frag->sdu.status == ISOAL_SDU_STATUS_VALID))) {
804 test_mode_rx->sdu_counter++;
805 }
806
807 status = ISOAL_STATUS_OK;
808 net_buf_unref(buf);
809
810 return status;
811 }
812
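/* Backs the HCI LE ISO Receive Test command: allocates an HCI-bound
 * datapath whose sink uses the test-mode alloc/emit callbacks above, so
 * received SDUs are only counted and then discarded.
 */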
813 uint8_t ll_iso_receive_test(uint16_t handle, uint8_t payload_type)
814 {
815 struct ll_iso_rx_test_mode *test_mode_rx;
816 isoal_sink_handle_t sink_handle;
817 struct ll_iso_datapath *dp;
818 uint32_t sdu_interval;
819 isoal_status_t err;
820
821 struct ll_iso_datapath **stream_dp;
822
823 uint32_t stream_sync_delay;
824 uint32_t group_sync_delay;
825 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
826 uint16_t stream_handle;
827 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
828 uint16_t iso_interval;
829 uint8_t framed;
830 uint8_t role;
831 uint8_t ft;
832 uint8_t bn;
833
834 if (IS_CIS_HANDLE(handle)) {
835 struct ll_conn_iso_stream *cis;
836 struct ll_conn_iso_group *cig;
837
838 cis = ll_iso_stream_connected_get(handle);
839 if (!cis) {
840 /* CIS is not connected */
841 return BT_HCI_ERR_UNKNOWN_CONN_ID;
842 }
843
844 if (cis->lll.rx.bn == 0) {
845 /* CIS is not configured for RX */
846 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
847 }
848
849 test_mode_rx = &cis->hdr.test_mode.rx;
850 stream_dp = &cis->hdr.datapath_out;
851 cig = cis->group;
852
853 if (cig->lll.role == BT_HCI_ROLE_PERIPHERAL) {
854 /* peripheral */
855 sdu_interval = cig->c_sdu_interval;
856 } else {
857 /* central */
858 sdu_interval = cig->p_sdu_interval;
859 }
860
861 role = cig->lll.role;
862 framed = cis->framed;
863 bn = cis->lll.rx.bn;
864 ft = cis->lll.rx.ft;
865 iso_interval = cig->iso_interval;
866 stream_sync_delay = cis->sync_delay;
867 group_sync_delay = cig->sync_delay;
868 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
869 } else if (IS_SYNC_ISO_HANDLE(handle)) {
870 /* Get the sync stream from the handle */
871 struct lll_sync_iso_stream *sync_stream;
872 struct ll_sync_iso_set *sync_iso;
873 struct lll_sync_iso *lll_iso;
874
875 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
876 sync_stream = ull_sync_iso_stream_get(stream_handle);
877 if (!sync_stream) {
878 return BT_HCI_ERR_UNKNOWN_CONN_ID;
879 }
880
881 if (sync_stream->dp) {
882 /* Data path already set up */
883 return BT_HCI_ERR_CMD_DISALLOWED;
884 }
885
886 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
887 lll_iso = &sync_iso->lll;
888
889 test_mode_rx = sync_stream->test_mode;
890 stream_dp = &sync_stream->dp;
891
892 /* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
893 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing
894 * + (NSE – 1) × Sub_Interval + MPT.
895 */
896 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
897 lll_iso->nse, lll_iso->sub_interval,
898 lll_iso->phy, lll_iso->max_pdu,
899 lll_iso->enc);
900 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
901
902 role = ISOAL_ROLE_BROADCAST_SINK;
903 framed = lll_iso->framing;
904 bn = lll_iso->bn;
905 ft = 0;
906 sdu_interval = lll_iso->sdu_interval;
907 iso_interval = lll_iso->iso_interval;
908 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
909 } else {
910 /* Handle is out of range */
911 return BT_HCI_ERR_UNKNOWN_CONN_ID;
912 }
913
914 if (*stream_dp) {
915 /* Data path already set up */
916 return BT_HCI_ERR_CMD_DISALLOWED;
917 }
918
919 if (payload_type > BT_HCI_ISO_TEST_MAX_SIZE_SDU) {
920 return BT_HCI_ERR_INVALID_LL_PARAM;
921 }
922
923 /* Allocate and configure test datapath */
924 dp = mem_acquire(&datapath_free);
925 if (!dp) {
926 return BT_HCI_ERR_CMD_DISALLOWED;
927 }
928
929 dp->path_dir = BT_HCI_DATAPATH_DIR_CTLR_TO_HOST;
930 dp->path_id = BT_HCI_DATAPATH_ID_HCI;
931
932 *stream_dp = dp;
933 memset(test_mode_rx, 0, sizeof(struct ll_iso_rx_test_mode));
934
935 err = isoal_sink_create(handle, role, framed, bn, ft,
936 sdu_interval, iso_interval,
937 stream_sync_delay, group_sync_delay,
938 ll_iso_test_sdu_alloc,
939 ll_iso_test_sdu_emit,
940 sink_sdu_write_hci, &sink_handle);
941 if (err) {
942 		/* Error creating test sink - cleanup sink and
943 * datapath
944 */
945 isoal_sink_destroy(sink_handle);
946 ull_iso_datapath_release(dp);
947 *stream_dp = NULL;
948
949 return BT_HCI_ERR_CMD_DISALLOWED;
950 }
951
952 dp->sink_hdl = sink_handle;
953 isoal_sink_enable(sink_handle);
954
955 /* Enable Receive Test Mode */
956 test_mode_rx->enabled = 1;
957 test_mode_rx->payload_type = payload_type;
958
959 return BT_HCI_ERR_SUCCESS;
960 }
961
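/* Backs the HCI LE ISO Read Test Counters command: returns the SDU
 * statistics gathered while the receive test is active.
 */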
962 uint8_t ll_iso_read_test_counters(uint16_t handle, uint32_t *received_cnt,
963 uint32_t *missed_cnt,
964 uint32_t *failed_cnt)
965 {
966 struct ll_iso_rx_test_mode *test_mode_rx;
967
968 *received_cnt = 0U;
969 *missed_cnt = 0U;
970 *failed_cnt = 0U;
971
972 if (IS_CIS_HANDLE(handle)) {
973 struct ll_conn_iso_stream *cis;
974
975 cis = ll_iso_stream_connected_get(handle);
976 if (!cis) {
977 /* CIS is not connected */
978 return BT_HCI_ERR_UNKNOWN_CONN_ID;
979 }
980
981 test_mode_rx = &cis->hdr.test_mode.rx;
982
983 } else if (IS_SYNC_ISO_HANDLE(handle)) {
984 /* Get the sync stream from the handle */
985 struct lll_sync_iso_stream *sync_stream;
986 uint16_t stream_handle;
987
988 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
989 sync_stream = ull_sync_iso_stream_get(stream_handle);
990 if (!sync_stream) {
991 return BT_HCI_ERR_UNKNOWN_CONN_ID;
992 }
993
994 test_mode_rx = sync_stream->test_mode;
995
996 } else {
997 /* Handle is out of range */
998 return BT_HCI_ERR_UNKNOWN_CONN_ID;
999 }
1000
1001 if (!test_mode_rx->enabled) {
1002 /* ISO receive Test is not active */
1003 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1004 }
1005
1006 /* Return SDU statistics */
1007 *received_cnt = test_mode_rx->received_cnt;
1008 *missed_cnt = test_mode_rx->missed_cnt;
1009 *failed_cnt = test_mode_rx->failed_cnt;
1010
1011 return BT_HCI_ERR_SUCCESS;
1012 }
1013
1014 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
1015 uint8_t ll_read_iso_link_quality(uint16_t handle,
1016 uint32_t *tx_unacked_packets,
1017 uint32_t *tx_flushed_packets,
1018 uint32_t *tx_last_subevent_packets,
1019 uint32_t *retransmitted_packets,
1020 uint32_t *crc_error_packets,
1021 uint32_t *rx_unreceived_packets,
1022 uint32_t *duplicate_packets)
1023 {
1024 uint8_t status;
1025
1026 *tx_unacked_packets = 0;
1027 *tx_flushed_packets = 0;
1028 *tx_last_subevent_packets = 0;
1029 *retransmitted_packets = 0;
1030 *crc_error_packets = 0;
1031 *rx_unreceived_packets = 0;
1032 *duplicate_packets = 0;
1033
1034 status = BT_HCI_ERR_SUCCESS;
1035
1036 if (IS_CIS_HANDLE(handle)) {
1037 struct ll_conn_iso_stream *cis;
1038
1039 cis = ll_iso_stream_connected_get(handle);
1040
1041 if (!cis) {
1042 /* CIS is not connected */
1043 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1044 }
1045
1046 *tx_unacked_packets = cis->hdr.link_quality.tx_unacked_packets;
1047 *tx_flushed_packets = cis->hdr.link_quality.tx_flushed_packets;
1048 *tx_last_subevent_packets = cis->hdr.link_quality.tx_last_subevent_packets;
1049 *retransmitted_packets = cis->hdr.link_quality.retransmitted_packets;
1050 *crc_error_packets = cis->hdr.link_quality.crc_error_packets;
1051 *rx_unreceived_packets = cis->hdr.link_quality.rx_unreceived_packets;
1052 *duplicate_packets = cis->hdr.link_quality.duplicate_packets;
1053
1054 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1055 /* FIXME: Implement for sync receiver */
1056 status = BT_HCI_ERR_CMD_DISALLOWED;
1057 } else {
1058 /* Handle is out of range */
1059 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
1060 }
1061
1062 return status;
1063 }
1064 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
1065
1066 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1067
1068 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1069 static isoal_status_t ll_iso_test_pdu_release(struct node_tx_iso *node_tx,
1070 const uint16_t handle,
1071 const isoal_status_t status)
1072 {
1073 /* Release back to memory pool */
1074 if (node_tx->link) {
1075 ll_iso_link_tx_release(node_tx->link);
1076 }
1077 ll_iso_tx_mem_release(node_tx);
1078
1079 return ISOAL_STATUS_OK;
1080 }
1081
1082 #if defined(CONFIG_BT_CTLR_CONN_ISO)
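/* Generate one test-mode SDU for the given CIS and push its fragments
 * through the ISO-AL source. The payload length depends on the configured
 * test payload type and, except in zero-length mode, the first four bytes
 * carry the SDU counter, mirroring SDUs that would otherwise arrive over
 * HCI.
 */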
1083 void ll_iso_transmit_test_send_sdu(uint16_t handle, uint32_t ticks_at_expire)
1084 {
1085 isoal_source_handle_t source_handle;
1086 struct isoal_sdu_tx sdu;
1087 isoal_status_t err;
1088 uint8_t tx_buffer[ISO_TEST_TX_BUFFER_SIZE];
1089 uint64_t next_payload_number;
1090 uint16_t remaining_tx;
1091 uint32_t sdu_counter;
1092
1093 if (IS_CIS_HANDLE(handle)) {
1094 struct ll_conn_iso_stream *cis;
1095 struct ll_conn_iso_group *cig;
1096 uint32_t rand_max_sdu;
1097 uint8_t event_offset;
1098 uint8_t max_sdu;
1099 uint8_t rand_8;
1100
1101 cis = ll_iso_stream_connected_get(handle);
1102 LL_ASSERT(cis);
1103
1104 if (!cis->hdr.test_mode.tx.enabled) {
1105 /* Transmit Test Mode not enabled */
1106 return;
1107 }
1108
1109 cig = cis->group;
1110 source_handle = cis->hdr.datapath_in->source_hdl;
1111
1112 max_sdu = IS_PERIPHERAL(cig) ? cis->p_max_sdu : cis->c_max_sdu;
1113
1114 switch (cis->hdr.test_mode.tx.payload_type) {
1115 case BT_HCI_ISO_TEST_ZERO_SIZE_SDU:
1116 remaining_tx = 0;
1117 break;
1118
1119 case BT_HCI_ISO_TEST_VARIABLE_SIZE_SDU:
1120 /* Randomize the length [4..max_sdu] */
1121 lll_rand_get(&rand_8, sizeof(rand_8));
1122 rand_max_sdu = rand_8 * (max_sdu - ISO_TEST_PACKET_COUNTER_SIZE);
1123 remaining_tx = ISO_TEST_PACKET_COUNTER_SIZE + (rand_max_sdu >> 8);
1124 break;
1125
1126 case BT_HCI_ISO_TEST_MAX_SIZE_SDU:
1127 LL_ASSERT(max_sdu > ISO_TEST_PACKET_COUNTER_SIZE);
1128 remaining_tx = max_sdu;
1129 break;
1130
1131 default:
1132 LL_ASSERT(0);
1133 return;
1134 }
1135
1136 if (remaining_tx > ISO_TEST_TX_BUFFER_SIZE) {
1137 sdu.sdu_state = BT_ISO_START;
1138 } else {
1139 sdu.sdu_state = BT_ISO_SINGLE;
1140 }
1141
1142 /* Configure SDU similarly to one delivered via HCI */
1143 sdu.packet_sn = 0;
1144 sdu.dbuf = tx_buffer;
1145
1146 /* We must ensure sufficient time for ISO-AL to fragment SDU and
1147 * deliver PDUs to the TX queue. By checking ull_ref_get, we
1148 * know if we are within the subevents of an ISO event. If so,
1149 * we can assume that we have enough time to deliver in the next
1150 * ISO event. If we're not active within the ISO event, we don't
1151 * know if there is enough time to deliver in the next event,
1152 * and for safety we set the target to current event + 2.
1153 *
1154 * For FT > 1, we have the opportunity to retransmit in later
1155 * event(s), in which case we have the option to target an
1156 * earlier event (this or next) because being late does not
1157 * instantly flush the payload.
1158 */
1159 event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
1160 if (cis->lll.tx.ft > 1) {
1161 /* FT > 1, target an earlier event */
1162 event_offset -= 1;
1163 }
1164
1165 sdu.grp_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
1166 (event_offset * cig->iso_interval *
1167 ISO_INT_UNIT_US));
1168 sdu.target_event = cis->lll.event_count + event_offset;
1169 sdu.iso_sdu_length = remaining_tx;
1170
1171 /* Send all SDU fragments */
1172 do {
1173 sdu.cntr_time_stamp = HAL_TICKER_TICKS_TO_US(ticks_at_expire);
1174 sdu.time_stamp = sdu.cntr_time_stamp;
1175 sdu.size = MIN(remaining_tx, ISO_TEST_TX_BUFFER_SIZE);
1176 memset(tx_buffer, 0, sdu.size);
1177
1178 			/* If this is the first fragment of an SDU, inject the SDU
1179 * counter.
1180 */
1181 if ((sdu.size >= ISO_TEST_PACKET_COUNTER_SIZE) &&
1182 ((sdu.sdu_state == BT_ISO_START) || (sdu.sdu_state == BT_ISO_SINGLE))) {
1183 if (cis->framed) {
1184 sdu_counter = (uint32_t)cis->hdr.test_mode.tx.sdu_counter;
1185 } else {
1186 /* Unframed. Get the next payload counter.
1187 *
1188 * BT 5.3, Vol 6, Part B, Section 7.1:
1189 * When using unframed PDUs, the SDU counter shall be equal
1190 * to the payload counter.
1191 */
1192 isoal_tx_unframed_get_next_payload_number(source_handle,
1193 &sdu,
1194 &next_payload_number);
1195 sdu_counter = (uint32_t)next_payload_number;
1196 }
1197
1198 sys_put_le32(sdu_counter, tx_buffer);
1199 }
1200
1201 /* Send to ISOAL */
1202 err = isoal_tx_sdu_fragment(source_handle, &sdu);
1203 LL_ASSERT(!err);
1204
1205 remaining_tx -= sdu.size;
1206
1207 if (remaining_tx > ISO_TEST_TX_BUFFER_SIZE) {
1208 sdu.sdu_state = BT_ISO_CONT;
1209 } else {
1210 sdu.sdu_state = BT_ISO_END;
1211 }
1212 } while (remaining_tx);
1213
1214 cis->hdr.test_mode.tx.sdu_counter++;
1215
1216 } else if (IS_ADV_ISO_HANDLE(handle)) {
1217 /* FIXME: Implement for broadcaster */
1218 } else {
1219 LL_ASSERT(0);
1220 }
1221 }
1222 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1223
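/* Backs the HCI LE ISO Transmit Test command (CIS handles only for now):
 * creates an ISO-AL source with the test PDU release callback and enables
 * TX test mode on the stream.
 */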
1224 uint8_t ll_iso_transmit_test(uint16_t handle, uint8_t payload_type)
1225 {
1226 isoal_source_handle_t source_handle;
1227 struct ll_iso_datapath *dp;
1228 uint32_t sdu_interval;
1229 isoal_status_t err;
1230 uint8_t status;
1231
1232 status = BT_HCI_ERR_SUCCESS;
1233
1234 if (IS_CIS_HANDLE(handle)) {
1235 struct ll_conn_iso_stream *cis;
1236 struct ll_conn_iso_group *cig;
1237
1238 cis = ll_iso_stream_connected_get(handle);
1239 if (!cis) {
1240 /* CIS is not connected */
1241 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1242 }
1243
1244 if (cis->lll.tx.bn == 0U) {
1245 /* CIS is not configured for TX */
1246 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1247 }
1248
1249 if (cis->hdr.datapath_in) {
1250 /* Data path already set up */
1251 return BT_HCI_ERR_CMD_DISALLOWED;
1252 }
1253
1254 if (payload_type > BT_HCI_ISO_TEST_MAX_SIZE_SDU) {
1255 return BT_HCI_ERR_INVALID_LL_PARAM;
1256 }
1257
1258 /* Allocate and configure test datapath */
1259 dp = mem_acquire(&datapath_free);
1260 if (!dp) {
1261 return BT_HCI_ERR_CMD_DISALLOWED;
1262 }
1263
1264 dp->path_dir = BT_HCI_DATAPATH_DIR_HOST_TO_CTLR;
1265 dp->path_id = BT_HCI_DATAPATH_ID_HCI;
1266
1267 cis->hdr.datapath_in = dp;
1268 cig = cis->group;
1269
1270 sdu_interval = IS_PERIPHERAL(cig) ? cig->p_sdu_interval : cig->c_sdu_interval;
1271
1272 /* Setup the test source */
1273 err = isoal_source_create(handle, cig->lll.role, cis->framed,
1274 cis->lll.tx.bn, cis->lll.tx.ft,
1275 cis->lll.tx.max_pdu, sdu_interval,
1276 cig->iso_interval, cis->sync_delay,
1277 cig->sync_delay, ll_iso_pdu_alloc,
1278 ll_iso_pdu_write, ll_iso_pdu_emit,
1279 ll_iso_test_pdu_release,
1280 &source_handle);
1281
1282 if (err) {
1283 /* Error creating test source - cleanup source and datapath */
1284 isoal_source_destroy(source_handle);
1285 ull_iso_datapath_release(dp);
1286 cis->hdr.datapath_in = NULL;
1287
1288 return BT_HCI_ERR_CMD_DISALLOWED;
1289 }
1290
1291 dp->source_hdl = source_handle;
1292 isoal_source_enable(source_handle);
1293
1294 /* Enable Transmit Test Mode */
1295 cis->hdr.test_mode.tx.enabled = 1;
1296 cis->hdr.test_mode.tx.payload_type = payload_type;
1297
1298 } else if (IS_ADV_ISO_HANDLE(handle)) {
1299 struct lll_adv_iso_stream *stream;
1300 uint16_t stream_handle;
1301
1302 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1303 stream = ull_adv_iso_stream_get(stream_handle);
1304 if (!stream) {
1305 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1306 }
1307
1308 /* FIXME: Implement use of common header in stream to enable code sharing
1309 * between CIS and BIS for test commands (and other places).
1310 */
1311 status = BT_HCI_ERR_CMD_DISALLOWED;
1312 } else {
1313 /* Handle is out of range */
1314 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
1315 }
1316
1317 return status;
1318 }
1319 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1320
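/* Backs the HCI LE ISO Test End command: tears down the test datapath(s),
 * returns the RX statistics and disables test mode.
 */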
1321 uint8_t ll_iso_test_end(uint16_t handle, uint32_t *received_cnt,
1322 uint32_t *missed_cnt, uint32_t *failed_cnt)
1323 {
1324 *received_cnt = 0U;
1325 *missed_cnt = 0U;
1326 *failed_cnt = 0U;
1327
1328 if (IS_CIS_HANDLE(handle)) {
1329 struct ll_conn_iso_stream *cis;
1330
1331 cis = ll_iso_stream_connected_get(handle);
1332 if (!cis) {
1333 /* CIS is not connected */
1334 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1335 }
1336
1337 if (!cis->hdr.test_mode.rx.enabled && !cis->hdr.test_mode.tx.enabled) {
1338 /* Test Mode is not active */
1339 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1340 }
1341
1342 if (cis->hdr.test_mode.rx.enabled) {
1343 isoal_sink_destroy(cis->hdr.datapath_out->sink_hdl);
1344 ull_iso_datapath_release(cis->hdr.datapath_out);
1345 cis->hdr.datapath_out = NULL;
1346
1347 /* Return SDU statistics */
1348 *received_cnt = cis->hdr.test_mode.rx.received_cnt;
1349 *missed_cnt = cis->hdr.test_mode.rx.missed_cnt;
1350 *failed_cnt = cis->hdr.test_mode.rx.failed_cnt;
1351 }
1352
1353 if (cis->hdr.test_mode.tx.enabled) {
1354 /* Tear down source and datapath */
1355 isoal_source_destroy(cis->hdr.datapath_in->source_hdl);
1356 ull_iso_datapath_release(cis->hdr.datapath_in);
1357 cis->hdr.datapath_in = NULL;
1358 }
1359
1360 /* Disable Test Mode */
1361 (void)memset(&cis->hdr.test_mode, 0U, sizeof(cis->hdr.test_mode));
1362
1363 } else if (IS_ADV_ISO_HANDLE(handle)) {
1364 /* FIXME: Implement for broadcaster */
1365 return BT_HCI_ERR_CMD_DISALLOWED;
1366
1367 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1368 struct lll_sync_iso_stream *sync_stream;
1369 uint16_t stream_handle;
1370
1371 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
1372 sync_stream = ull_sync_iso_stream_get(stream_handle);
1373 if (!sync_stream) {
1374 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1375 }
1376
1377 if (!sync_stream->test_mode->enabled || !sync_stream->dp) {
1378 /* Test Mode is not active */
1379 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1380 }
1381
1382 isoal_sink_destroy(sync_stream->dp->sink_hdl);
1383 ull_iso_datapath_release(sync_stream->dp);
1384 sync_stream->dp = NULL;
1385
1386 /* Return SDU statistics */
1387 *received_cnt = sync_stream->test_mode->received_cnt;
1388 *missed_cnt = sync_stream->test_mode->missed_cnt;
1389 *failed_cnt = sync_stream->test_mode->failed_cnt;
1390
1391 		(void)memset(sync_stream->test_mode, 0U, sizeof(*sync_stream->test_mode));
1392
1393 } else {
1394 /* Handle is out of range */
1395 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1396 }
1397
1398 return BT_HCI_ERR_SUCCESS;
1399 }
1400
1401 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1402 void *ll_iso_tx_mem_acquire(void)
1403 {
1404 return mem_acquire(&mem_iso_tx.free);
1405 }
1406
1407 void ll_iso_tx_mem_release(void *node_tx)
1408 {
1409 mem_release(node_tx, &mem_iso_tx.free);
1410 }
1411
1412 int ll_iso_tx_mem_enqueue(uint16_t handle, void *node_tx, void *link)
1413 {
1414 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) &&
1415 IS_CIS_HANDLE(handle)) {
1416 struct ll_conn_iso_stream *cis;
1417
1418 cis = ll_conn_iso_stream_get(handle);
1419 memq_enqueue(link, node_tx, &cis->lll.memq_tx.tail);
1420
1421 } else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) &&
1422 IS_ADV_ISO_HANDLE(handle)) {
1423 struct lll_adv_iso_stream *stream;
1424 uint16_t stream_handle;
1425
1426 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1427 stream = ull_adv_iso_stream_get(stream_handle);
1428 memq_enqueue(link, node_tx, &stream->memq_tx.tail);
1429
1430 } else {
1431 return -EINVAL;
1432 }
1433
1434 return 0;
1435 }
1436 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1437
1438 int ull_iso_init(void)
1439 {
1440 int err;
1441
1442 err = init_reset();
1443 if (err) {
1444 return err;
1445 }
1446
1447 return 0;
1448 }
1449
1450 int ull_iso_reset(void)
1451 {
1452 int err;
1453
1454 err = init_reset();
1455 if (err) {
1456 return err;
1457 }
1458
1459 return 0;
1460 }
1461
1462 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
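/* Called on ISO TX PDU acknowledgment. For a CIS with an input datapath
 * the node is handed to the ISO-AL source for release; otherwise it is
 * passed to the host TX-ack path (or to the vendor data path hook when
 * that is enabled).
 */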
1463 void ull_iso_lll_ack_enqueue(uint16_t handle, struct node_tx_iso *node_tx)
1464 {
1465 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && IS_CIS_HANDLE(handle)) {
1466 struct ll_conn_iso_stream *cis;
1467 struct ll_iso_datapath *dp;
1468
1469 cis = ll_conn_iso_stream_get(handle);
1470 dp = cis->hdr.datapath_in;
1471
1472 if (dp) {
1473 isoal_tx_pdu_release(dp->source_hdl, node_tx);
1474 } else {
1475 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
1476 /* Possible race with Data Path remove - handle release in vendor
1477 * function.
1478 */
1479 ll_data_path_tx_pdu_release(handle, node_tx);
1480 #else
1481 /* FIXME: ll_tx_ack_put is not LLL callable as it is
1482 * used by ACL connections in ULL context to dispatch
1483 * ack.
1484 */
1485 ll_tx_ack_put(handle, (void *)node_tx);
1486 ll_rx_sched();
1487 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1488 }
1489 } else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && IS_ADV_ISO_HANDLE(handle)) {
1490 /* Process as TX ack. TODO: Can be unified with CIS and use
1491 * ISOAL.
1492 */
1493 /* FIXME: ll_tx_ack_put is not LLL callable as it is
1494 * used by ACL connections in ULL context to dispatch
1495 * ack.
1496 */
1497 ll_tx_ack_put(handle, (void *)node_tx);
1498 ll_rx_sched();
1499 } else {
1500 LL_ASSERT(0);
1501 }
1502 }
1503
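/* Called ahead of an ISO event so the ISO-AL TX source associated with the
 * handle can prepare its payloads for the given event count.
 */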
1504 void ull_iso_lll_event_prepare(uint16_t handle, uint64_t event_count)
1505 {
1506 if (IS_CIS_HANDLE(handle)) {
1507 struct ll_iso_datapath *dp = NULL;
1508 struct ll_conn_iso_stream *cis;
1509
1510 cis = ll_iso_stream_connected_get(handle);
1511
1512 if (cis) {
1513 dp = cis->hdr.datapath_in;
1514 }
1515
1516 if (dp) {
1517 isoal_tx_event_prepare(dp->source_hdl, event_count);
1518 }
1519 } else if (IS_ADV_ISO_HANDLE(handle)) {
1520 struct ll_iso_datapath *dp = NULL;
1521 struct lll_adv_iso_stream *stream;
1522 uint16_t stream_handle;
1523
1524 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1525 stream = ull_adv_iso_stream_get(stream_handle);
1526
1527 if (stream) {
1528 dp = stream->dp;
1529 }
1530
1531 if (dp) {
1532 isoal_tx_event_prepare(dp->source_hdl, event_count);
1533 }
1534 } else {
1535 LL_ASSERT(0);
1536 }
1537 }
1538 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1539
1540 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
1541 uint32_t ull_iso_big_sync_delay(uint8_t num_bis, uint32_t bis_spacing, uint8_t nse,
1542 uint32_t sub_interval, uint8_t phy, uint8_t max_pdu, bool enc)
1543 {
1544 /* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
1545 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing + (NSE – 1) × Sub_Interval + MPT.
1546 */
1547 return (num_bis - 1) * bis_spacing + (nse - 1) * sub_interval +
1548 BYTES2US(PDU_OVERHEAD_SIZE(phy) + max_pdu + (enc ? 4 : 0), phy);
1549 }
1550 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1551
1552 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1553 void *ull_iso_pdu_rx_alloc_peek(uint8_t count)
1554 {
1555 if (count > MFIFO_AVAIL_COUNT_GET(iso_rx)) {
1556 return NULL;
1557 }
1558
1559 return MFIFO_DEQUEUE_PEEK(iso_rx);
1560 }
1561
1562 void *ull_iso_pdu_rx_alloc(void)
1563 {
1564 return MFIFO_DEQUEUE(iso_rx);
1565 }
1566
1567 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
1568 void ull_iso_rx_put(memq_link_t *link, void *rx)
1569 {
1570 /* Enqueue the Rx object */
1571 memq_enqueue(link, rx, &memq_ull_iso_rx.tail);
1572 }
1573
1574 void ull_iso_rx_sched(void)
1575 {
1576 static memq_link_t link;
1577 static struct mayfly mfy = {0, 0, &link, NULL, iso_rx_demux};
1578
1579 /* Kick the ULL (using the mayfly, tailchain it) */
1580 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
1581 }
1582
1583 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1584 static void iso_rx_cig_ref_point_update(struct ll_conn_iso_group *cig,
1585 const struct ll_conn_iso_stream *cis,
1586 const struct node_rx_iso_meta *meta)
1587 {
1588 uint32_t cig_sync_delay;
1589 uint32_t cis_sync_delay;
1590 uint64_t event_count;
1591 uint8_t burst_number;
1592 uint8_t role;
1593
1594 role = cig->lll.role;
1595 cig_sync_delay = cig->sync_delay;
1596 cis_sync_delay = cis->sync_delay;
1597 burst_number = cis->lll.rx.bn;
1598 event_count = cis->lll.event_count;
1599
1600 if (role) {
1601 /* Peripheral */
1602
1603 /* Check if this is the first payload received for this cis in
1604 * this event
1605 */
1606 if (meta->payload_number == (burst_number * event_count)) {
1607 /* Update the CIG reference point based on the CIS
1608 * anchor point
1609 */
1610 cig->cig_ref_point = isoal_get_wrapped_time_us(meta->timestamp,
1611 cis_sync_delay - cig_sync_delay);
1612 }
1613 }
1614 }
1615 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1616
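/* ULL_HIGH mayfly that drains the ull_iso_rx queue: release nodes are
 * forwarded straight to the host queue, while ISO PDUs first update the
 * CIG reference point (CIS) and, for vendor data paths, are recombined
 * into SDUs by the ISO-AL before being passed on to ll_iso_rx.
 */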
1617 static void iso_rx_demux(void *param)
1618 {
1619 #if defined(CONFIG_BT_CTLR_CONN_ISO) || \
1620 defined(CONFIG_BT_CTLR_SYNC_ISO)
1621 struct ll_iso_datapath *dp;
1622 #endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1623 struct node_rx_pdu *rx_pdu;
1624 struct node_rx_hdr *rx;
1625 memq_link_t *link;
1626 uint16_t handle;
1627
1628 do {
1629 link = memq_peek(memq_ull_iso_rx.head, memq_ull_iso_rx.tail,
1630 (void **)&rx);
1631 if (link) {
1632 /* Demux Rx objects */
1633 switch (rx->type) {
1634 case NODE_RX_TYPE_RELEASE:
1635 (void)memq_dequeue(memq_ull_iso_rx.tail,
1636 &memq_ull_iso_rx.head, NULL);
1637 ll_iso_rx_put(link, rx);
1638 ll_rx_sched();
1639 break;
1640
1641 case NODE_RX_TYPE_ISO_PDU:
1642 /* Remove from receive-queue; ULL has received this now */
1643 (void)memq_dequeue(memq_ull_iso_rx.tail, &memq_ull_iso_rx.head,
1644 NULL);
1645
1646 rx_pdu = (struct node_rx_pdu *)rx;
1647 handle = rx_pdu->hdr.handle;
1648 dp = NULL;
1649
1650 if (false) {
1651 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1652 } else if (IS_CIS_HANDLE(handle)) {
1653 struct ll_conn_iso_stream *cis;
1654 struct ll_conn_iso_group *cig;
1655
1656 cis = ll_conn_iso_stream_get(handle);
1657 cig = cis->group;
1658 dp = cis->hdr.datapath_out;
1659
1660 iso_rx_cig_ref_point_update(cig, cis,
1661 &rx_pdu->hdr.rx_iso_meta);
1662 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1663 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1664 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1665 struct lll_sync_iso_stream *sync_stream;
1666 uint16_t stream_handle;
1667
1668 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
1669 sync_stream = ull_sync_iso_stream_get(stream_handle);
1670 dp = sync_stream ? sync_stream->dp : NULL;
1671 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1672 }
1673
1674 #if defined(CONFIG_BT_CTLR_CONN_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
1675 if (dp && dp->path_id != BT_HCI_DATAPATH_ID_HCI) {
1676 /* If vendor specific datapath pass to ISO AL here,
1677 * in case of HCI destination it will be passed in
1678 * HCI context.
1679 */
1680 struct isoal_pdu_rx pckt_meta = {
1681 .meta = &rx_pdu->rx_iso_meta,
1682 .pdu = (struct pdu_iso *)&rx_pdu->pdu[0]
1683 };
1684
1685 /* Pass the ISO PDU through ISO-AL */
1686 const isoal_status_t err =
1687 isoal_rx_pdu_recombine(dp->sink_hdl, &pckt_meta);
1688
1689 LL_ASSERT(err == ISOAL_STATUS_OK); /* TODO handle err */
1690 }
1691 #endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1692
1693 /* Let ISO PDU start its long journey upwards */
1694 ll_iso_rx_put(link, rx);
1695 ll_rx_sched();
1696 break;
1697
1698 default:
1699 LL_ASSERT(0);
1700 break;
1701 }
1702 }
1703 } while (link);
1704 }
1705 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1706
1707 void ll_iso_rx_put(memq_link_t *link, void *rx)
1708 {
1709 /* Enqueue the Rx object */
1710 memq_enqueue(link, rx, &memq_ll_iso_rx.tail);
1711 }
1712
1713 void *ll_iso_rx_get(void)
1714 {
1715 struct node_rx_hdr *rx;
1716 memq_link_t *link;
1717
1718 link = memq_peek(memq_ll_iso_rx.head, memq_ll_iso_rx.tail, (void **)&rx);
1719 while (link) {
1720 /* Do not send up buffers to Host thread that are
1721 * marked for release
1722 */
1723 if (rx->type == NODE_RX_TYPE_RELEASE) {
1724 (void)memq_dequeue(memq_ll_iso_rx.tail,
1725 &memq_ll_iso_rx.head, NULL);
1726 mem_release(link, &mem_link_iso_rx.free);
1727 mem_release(rx, &mem_pool_iso_rx.free);
1728 RXFIFO_ALLOC(iso_rx, 1);
1729
1730 link = memq_peek(memq_ll_iso_rx.head, memq_ll_iso_rx.tail, (void **)&rx);
1731 continue;
1732 }
1733 return rx;
1734 }
1735
1736 return NULL;
1737 }
1738
1739 void ll_iso_rx_dequeue(void)
1740 {
1741 struct node_rx_hdr *rx = NULL;
1742 memq_link_t *link;
1743
1744 link = memq_dequeue(memq_ll_iso_rx.tail, &memq_ll_iso_rx.head,
1745 (void **)&rx);
1746 LL_ASSERT(link);
1747
1748 mem_release(link, &mem_link_iso_rx.free);
1749
1750 /* Handle object specific clean up */
1751 switch (rx->type) {
1752 case NODE_RX_TYPE_ISO_PDU:
1753 break;
1754 default:
1755 LL_ASSERT(0);
1756 break;
1757 }
1758 }
1759
1760 void ll_iso_rx_mem_release(void **node_rx)
1761 {
1762 struct node_rx_hdr *rx;
1763
1764 rx = *node_rx;
1765 while (rx) {
1766 struct node_rx_hdr *rx_free;
1767
1768 rx_free = rx;
1769 rx = rx->next;
1770
1771 switch (rx_free->type) {
1772 case NODE_RX_TYPE_ISO_PDU:
1773 mem_release(rx_free, &mem_pool_iso_rx.free);
1774 break;
1775 default:
1776 /* Ignore other types as node may have been initialized due to
1777 * race with HCI reset.
1778 */
1779 break;
1780 }
1781 }
1782
1783 *node_rx = rx;
1784
1785 RXFIFO_ALLOC(iso_rx, UINT8_MAX);
1786 }
1787 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1788
1789 struct ll_iso_datapath *ull_iso_datapath_alloc(void)
1790 {
1791 return mem_acquire(&datapath_free);
1792 }
1793
1794 void ull_iso_datapath_release(struct ll_iso_datapath *dp)
1795 {
1796 mem_release(dp, &datapath_free);
1797 }
1798
1799 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1800 void ll_iso_link_tx_release(void *link)
1801 {
1802 mem_release(link, &mem_link_iso_tx.free);
1803 }
1804
1805 /**
1806 * Allocate a PDU from the LL and store the details in the given buffer. Allocation
1807 * is not expected to fail as there must always be sufficient PDU buffers. Any
1808 * failure will trigger the assert.
1809 * @param[in] pdu_buffer Buffer to store PDU details in
1810 * @return Error status of operation
1811 */
1812 static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer)
1813 {
1814 struct node_tx_iso *node_tx;
1815
1816 node_tx = ll_iso_tx_mem_acquire();
1817 if (!node_tx) {
1818 LOG_ERR("Tx Buffer Overflow");
1819 /* TODO: Report overflow to HCI and remove assert
1820 * data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO)
1821 */
1822 LL_ASSERT(0);
1823 return ISOAL_STATUS_ERR_PDU_ALLOC;
1824 }
1825
1826 node_tx->link = NULL;
1827
1828 /* node_tx handle will be required to emit the PDU later */
1829 pdu_buffer->handle = (void *)node_tx;
1830 pdu_buffer->pdu = (void *)node_tx->pdu;
1831
1832 /* Use TX buffer size as the limit here. Actual size will be decided in
1833 * the ISOAL based on the minimum of the buffer size and the respective
1834 * Max_PDU_C_To_P or Max_PDU_P_To_C.
1835 */
1836 pdu_buffer->size = MAX(LL_BIS_OCTETS_TX_MAX, LL_CIS_OCTETS_TX_MAX);
1837
1838 return ISOAL_STATUS_OK;
1839 }
1840
1841 /**
1842 * Write the given SDU payload to the target PDU buffer at the given offset.
1843 * @param[in,out] pdu_buffer Target PDU buffer
1844 * @param[in] pdu_offset Offset / current write position within PDU
1845 * @param[in] sdu_payload Location of source data
1846 * @param[in] consume_len Length of data to copy
1847 * @return Error status of write operation
1848 */
ll_iso_pdu_write(struct isoal_pdu_buffer * pdu_buffer,const size_t pdu_offset,const uint8_t * sdu_payload,const size_t consume_len)1849 static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
1850 const size_t pdu_offset,
1851 const uint8_t *sdu_payload,
1852 const size_t consume_len)
1853 {
1854 ARG_UNUSED(pdu_offset);
1855 ARG_UNUSED(consume_len);
1856
1857 LL_ASSERT(pdu_buffer);
1858 LL_ASSERT(pdu_buffer->pdu);
1859 LL_ASSERT(sdu_payload);
1860
1861 if ((pdu_offset + consume_len) > pdu_buffer->size) {
1862 /* Exceeded PDU buffer */
1863 return ISOAL_STATUS_ERR_UNSPECIFIED;
1864 }
1865
1866 /* Copy source to destination at given offset */
1867 memcpy(&pdu_buffer->pdu->payload[pdu_offset], sdu_payload, consume_len);
1868
1869 return ISOAL_STATUS_OK;
1870 }
1871
1872 /**
1873 * Emit the encoded node to the transmission queue
1874 * @param node_tx TX node to enqueue
1875 * @param handle CIS/BIS handle
1876 * @return Error status of enqueue operation
1877 */
ll_iso_pdu_emit(struct node_tx_iso * node_tx,const uint16_t handle)1878 static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
1879 const uint16_t handle)
1880 {
1881 memq_link_t *link;
1882
1883 link = mem_acquire(&mem_link_iso_tx.free);
1884 LL_ASSERT(link);
1885
1886 if (ll_iso_tx_mem_enqueue(handle, node_tx, link)) {
1887 return ISOAL_STATUS_ERR_PDU_EMIT;
1888 }
1889
1890 return ISOAL_STATUS_OK;
1891 }
1892
1893 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1894 /**
1895 * Release the given payload back to the memory pool.
1896 * @param node_tx TX node to release or forward
1897 * @param handle CIS/BIS handle
1898 * @param status Reason for release
1899 * @return Error status of release operation
1900 */
ll_iso_pdu_release(struct node_tx_iso * node_tx,const uint16_t handle,const isoal_status_t status)1901 static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
1902 const uint16_t handle,
1903 const isoal_status_t status)
1904 {
1905 if (status == ISOAL_STATUS_OK) {
1906 /* Process as TX ack, we are in LLL execution context here.
1907 * status == ISOAL_STATUS_OK when an ISO PDU has been acked.
1908 *
1909 * Call Path:
1910 * ull_iso_lll_ack_enqueue() --> isoal_tx_pdu_release() -->
1911 * pdu_release() == ll_iso_pdu_release() (this function).
1912 */
1913 /* FIXME: ll_tx_ack_put is not LLL callable as it is used by
1914 * ACL connections in ULL context to dispatch ack.
1915 */
1916 ll_tx_ack_put(handle, (void *)node_tx);
1917 ll_rx_sched();
1918 } else {
1919 /* Release back to memory pool, we are in Thread context
1920 * Callers:
1921 * isoal_source_deallocate() with ISOAL_STATUS_ERR_PDU_EMIT
1922 * isoal_tx_pdu_emit with status != ISOAL_STATUS_OK
1923 */
1924 if (node_tx->link) {
1925 ll_iso_link_tx_release(node_tx->link);
1926 }
1927 ll_iso_tx_mem_release(node_tx);
1928 }
1929
1930 return ISOAL_STATUS_OK;
1931 }
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

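/* Initialize/reset the ISO subsystem: Rx FIFO and memqs, Tx buffer and link
 * pools, the ISO datapath pool, and the ISO Adaptation Layer.
 */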
static int init_reset(void)
{
#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	memq_link_t *link;

	RXFIFO_INIT(iso_rx);

	/* Acquire a link to initialize ull rx memq */
	link = mem_acquire(&mem_link_iso_rx.free);
	LL_ASSERT(link);

#if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
	/* Initialize ull rx memq */
	MEMQ_INIT(ull_iso_rx, link);
#endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */

	/* Acquire a link to initialize ll_iso_rx memq */
	link = mem_acquire(&mem_link_iso_rx.free);
	LL_ASSERT(link);

	/* Initialize ll_iso_rx memq */
	MEMQ_INIT(ll_iso_rx, link);

	RXFIFO_ALLOC(iso_rx, UINT8_MAX);
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	/* Initialize tx pool. */
	mem_init(mem_iso_tx.pool, NODE_TX_BUFFER_SIZE, BT_CTLR_ISO_TX_PDU_BUFFERS,
		 &mem_iso_tx.free);

	/* Initialize tx link pool. */
	mem_init(mem_link_iso_tx.pool, sizeof(memq_link_t), BT_CTLR_ISO_TX_PDU_BUFFERS,
		 &mem_link_iso_tx.free);
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if BT_CTLR_ISO_STREAMS
	/* Initialize ISO Datapath pool */
	mem_init(datapath_pool, sizeof(struct ll_iso_datapath),
		 sizeof(datapath_pool) / sizeof(struct ll_iso_datapath), &datapath_free);
#endif /* BT_CTLR_ISO_STREAMS */

	/* Initialize ISO Adaptation Layer */
	isoal_init();

	return 0;
}

#if defined(CONFIG_BT_CTLR_CONN_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
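/**
 * Start a single-shot resume ticker for a CIS or BIS stream. The requested
 * resume timeout is compensated for event start overhead, ticker resolution
 * margin and, in the peripheral role, the radio Rx ready delay.
 * @param[in] resume_event   LLL event resumed on ticker expiry
 * @param[in] group_handle   CIG/BIG handle used to derive the ticker id
 * @param[in] stream_handle  CIS/BIS handle
 * @param[in] role           HCI role; peripheral adds the Rx ready delay
 * @param[in] ticks_anchor   Anchor point for the ticker start
 * @param[in] resume_timeout Resume timeout in microseconds
 */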
void ull_iso_resume_ticker_start(struct lll_event *resume_event,
				 uint16_t group_handle,
				 uint16_t stream_handle,
				 uint8_t role,
				 uint32_t ticks_anchor,
				 uint32_t resume_timeout)
{
	uint32_t resume_delay_us;
	int32_t resume_offset_us;
	uint8_t ticker_id = 0;
	uint32_t ret;

	resume_delay_us = EVENT_OVERHEAD_START_US;
	resume_delay_us += EVENT_TICKER_RES_MARGIN_US;

	if (0) {
#if defined(CONFIG_BT_CTLR_CONN_ISO)
	} else if (IS_CIS_HANDLE(stream_handle)) {
		ticker_id = TICKER_ID_CONN_ISO_RESUME_BASE + group_handle;
#endif /* CONFIG_BT_CTLR_CONN_ISO */
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	} else if (IS_SYNC_ISO_HANDLE(stream_handle)) {
		ticker_id = TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE + group_handle;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
	} else {
		LL_ASSERT(0);
	}

	if (role == BT_HCI_ROLE_PERIPHERAL) {
		/* Add peripheral specific delay */
		if (0) {
#if defined(CONFIG_BT_CTLR_PHY)
		} else {
			uint8_t phy = 0;

			if (0) {
#if defined(CONFIG_BT_CTLR_CONN_ISO)
			} else if (IS_CIS_HANDLE(stream_handle)) {
				struct ll_conn_iso_stream *cis;
				struct ll_conn *conn;

				cis = ll_conn_iso_stream_get(stream_handle);
				conn = ll_conn_get(cis->lll.acl_handle);
				phy = conn->lll.phy_rx;
#endif /* CONFIG_BT_CTLR_CONN_ISO */
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
			} else if (IS_SYNC_ISO_HANDLE(stream_handle)) {
				struct ll_sync_iso_set *sync_iso;
				uint16_t stream_idx;

				stream_idx = LL_BIS_SYNC_IDX_FROM_HANDLE(stream_handle);
				sync_iso = ull_sync_iso_by_stream_get(stream_idx);
				phy = sync_iso->lll.phy;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
			} else {
				LL_ASSERT(0);
			}

			resume_delay_us +=
				lll_radio_rx_ready_delay_get(phy, PHY_FLAGS_S8);
#else
		} else {
			resume_delay_us += lll_radio_rx_ready_delay_get(0, 0);
#endif /* CONFIG_BT_CTLR_PHY */
		}
	}

	resume_offset_us = (int32_t)(resume_timeout - resume_delay_us);
	LL_ASSERT(resume_offset_us >= 0);

	/* Setup resume timeout as single-shot */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   ticker_id,
			   ticks_anchor,
			   HAL_TICKER_US_TO_TICKS(resume_offset_us),
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   ticker_resume_cb, resume_event,
			   ticker_resume_op_cb, NULL);

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

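/* Ticker operation callback for the resume ticker start; the start operation
 * is expected to succeed.
 */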
static void ticker_resume_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

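/* Ticker expiry callback for the resume ticker: append the expiry timing to
 * the resume event's prepare parameters and enqueue a mayfly towards LLL to
 * execute lll_resume.
 */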
static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			     uint32_t remainder, uint16_t lazy, uint8_t force,
			     void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_resume};
	struct lll_event *resume_event;
	uint32_t ret;

	ARG_UNUSED(ticks_drift);
	LL_ASSERT(lazy == 0);

	resume_event = param;

	/* Append timing parameters */
	resume_event->prepare_param.ticks_at_expire = ticks_at_expire;
	resume_event->prepare_param.remainder = remainder;
	resume_event->prepare_param.lazy = 0;
	resume_event->prepare_param.force = force;
	mfy.param = resume_event;

	/* Kick LLL resume */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);

	LL_ASSERT(!ret);
}
#endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
