1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <soc.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 #include <zephyr/bluetooth/buf.h>
12
13 #include "hal/cpu.h"
14 #include "hal/ccm.h"
15 #include "hal/ticker.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/mfifo.h"
21 #include "util/mayfly.h"
22 #include "util/dbuf.h"
23
24 #include "pdu_df.h"
25 #include "lll/pdu_vendor.h"
26 #include "pdu.h"
27
28 #include "lll.h"
29 #include "lll/lll_adv_types.h"
30 #include "lll_adv.h"
31 #include "lll/lll_adv_pdu.h"
32 #include "lll_adv_iso.h"
33 #include "lll/lll_df_types.h"
34 #include "lll_sync.h"
35 #include "lll_sync_iso.h"
36 #include "lll_conn.h"
37 #include "lll_conn_iso.h"
38 #include "lll_iso_tx.h"
39
40 #include "ll_sw/ull_tx_queue.h"
41
42 #include "isoal.h"
43
44 #include "ull_adv_types.h"
45 #include "ull_sync_types.h"
46 #include "ull_conn_types.h"
47 #include "ull_iso_types.h"
48 #include "ull_conn_iso_types.h"
49 #include "ull_llcp.h"
50
51 #include "ull_internal.h"
52 #include "ull_adv_internal.h"
53 #include "ull_conn_internal.h"
54 #include "ull_iso_internal.h"
55 #include "ull_sync_iso_internal.h"
56 #include "ull_conn_iso_internal.h"
57
58 #include "ll_feat.h"
59
60 #include "hal/debug.h"
61
62 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
63 #include <zephyr/logging/log.h>
64 LOG_MODULE_REGISTER(bt_ctlr_ull_iso);
65
66 #if defined(CONFIG_BT_CTLR_CONN_ISO_STREAMS)
67 #define BT_CTLR_CONN_ISO_STREAMS CONFIG_BT_CTLR_CONN_ISO_STREAMS
68 #else /* !CONFIG_BT_CTLR_CONN_ISO_STREAMS */
69 #define BT_CTLR_CONN_ISO_STREAMS 0
70 #endif /* !CONFIG_BT_CTLR_CONN_ISO_STREAMS */
71
72 #if defined(CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT)
73 #define BT_CTLR_ADV_ISO_STREAMS (CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT)
74 #else /* !CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT */
75 #define BT_CTLR_ADV_ISO_STREAMS 0
76 #endif /* CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT */
77
78 #if defined(CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT)
79 #define BT_CTLR_SYNC_ISO_STREAMS (CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT)
80 #else /* !CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT */
81 #define BT_CTLR_SYNC_ISO_STREAMS 0
82 #endif /* CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT */
83
84 static int init_reset(void);
85
86 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
87 static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer);
88 static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
89 const size_t offset,
90 const uint8_t *sdu_payload,
91 const size_t consume_len);
92 static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
93 const uint16_t handle);
94 static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
95 const uint16_t handle,
96 const isoal_status_t status);
97 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
98
99 /* Allocate data path pools for RX/TX directions for each stream */
100 #define BT_CTLR_ISO_STREAMS ((2 * (BT_CTLR_CONN_ISO_STREAMS)) + \
101 BT_CTLR_ADV_ISO_STREAMS + \
102 BT_CTLR_SYNC_ISO_STREAMS)
103 #if BT_CTLR_ISO_STREAMS
104 static struct ll_iso_datapath datapath_pool[BT_CTLR_ISO_STREAMS];
105 #endif /* BT_CTLR_ISO_STREAMS */
106
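/* Head of the free list backing datapath_pool; data paths are acquired with
 * mem_acquire() and returned with mem_release() on this list.
 */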
107 static void *datapath_free;
108
109 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
110 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
111 /* ISO LL conformance tests require a maximum PDU size of 251 bytes + header */
112 #define ISO_RX_BUFFER_SIZE (2 + 251)
113
114 /* Declare the ISO rx node RXFIFO. This is a composite pool-backed MFIFO for
115 * rx_nodes. The declaration constructs the following data structures:
116 * - mfifo_iso_rx: FIFO with pointers to PDU buffers
117 * - mem_iso_rx: Backing data pool for PDU buffer elements
118 * - mem_link_iso_rx: Pool of memq_link_t elements
119 *
120 * One extra rx buffer is reserved for empty ISO PDU reception.
121 * Two extra links are reserved for use by the ll_iso_rx and ull_iso_rx memq.
122 */
123 static RXFIFO_DEFINE(iso_rx, ((NODE_RX_HEADER_SIZE) + (ISO_RX_BUFFER_SIZE)),
124 (CONFIG_BT_CTLR_ISO_RX_BUFFERS + 1U), 2U);
125
126 static MEMQ_DECLARE(ll_iso_rx);
127 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
128 static MEMQ_DECLARE(ull_iso_rx);
129 static void iso_rx_demux(void *param);
130 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
131 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
132
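/* ISO test mode SDUs carry a 32-bit SDU counter in their first four bytes
 * (except zero-length SDUs); this is the size of that counter field.
 */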
133 #define ISO_TEST_PACKET_COUNTER_SIZE 4U
134
135 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
136 void ll_iso_link_tx_release(void *link);
137 void ll_iso_tx_mem_release(void *node_tx);
138
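/* Size of one TX node: node_tx_iso header plus ISO PDU header plus the
 * largest payload supported for either BIS or CIS TX, rounded up by MROUND
 * for alignment.
 */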
139 #define NODE_TX_BUFFER_SIZE MROUND(offsetof(struct node_tx_iso, pdu) + \
140 offsetof(struct pdu_iso, payload) + \
141 MAX(LL_BIS_OCTETS_TX_MAX, \
142 LL_CIS_OCTETS_TX_MAX))
143
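/* Scratch buffer size used when generating ISO test mode SDUs; larger SDUs
 * are handed to the ISO-AL as multiple fragments of at most this size.
 */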
144 #define ISO_TEST_TX_BUFFER_SIZE 32U
145
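/* TX node pool and the matching pool of memq links used to enqueue nodes
 * towards the LLL; one link is reserved per TX buffer.
 */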
146 static struct {
147 void *free;
148 uint8_t pool[NODE_TX_BUFFER_SIZE * BT_CTLR_ISO_TX_BUFFERS];
149 } mem_iso_tx;
150
151 static struct {
152 void *free;
153 uint8_t pool[sizeof(memq_link_t) * BT_CTLR_ISO_TX_BUFFERS];
154 } mem_link_iso_tx;
155
156 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
157
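/* Read the TX synchronization info (packet sequence number, timestamp and
 * time offset) of the last SDU from the ISO-AL source bound to the stream's
 * input data path; backs the HCI LE Read ISO TX Sync command.
 */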
158 uint8_t ll_read_iso_tx_sync(uint16_t handle, uint16_t *seq,
159 uint32_t *timestamp, uint32_t *offset)
160 {
161 if (IS_CIS_HANDLE(handle)) {
162 struct ll_iso_datapath *dp = NULL;
163 struct ll_conn_iso_stream *cis;
164
165 cis = ll_conn_iso_stream_get(handle);
166
167 if (cis) {
168 dp = cis->hdr.datapath_in;
169 }
170
171 if (dp &&
172 isoal_tx_get_sync_info(dp->source_hdl, seq,
173 timestamp, offset) == ISOAL_STATUS_OK) {
174 return BT_HCI_ERR_SUCCESS;
175 }
176
177 return BT_HCI_ERR_CMD_DISALLOWED;
178
179 } else if (IS_ADV_ISO_HANDLE(handle)) {
180 const struct lll_adv_iso_stream *adv_stream;
181 uint16_t stream_handle;
182
183 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
184 adv_stream = ull_adv_iso_stream_get(stream_handle);
185 if (!adv_stream || !adv_stream->dp ||
186 isoal_tx_get_sync_info(adv_stream->dp->source_hdl, seq,
187 timestamp, offset) != ISOAL_STATUS_OK) {
188 return BT_HCI_ERR_CMD_DISALLOWED;
189 }
190
191 return BT_HCI_ERR_SUCCESS;
192 }
193
194 return BT_HCI_ERR_UNKNOWN_CONN_ID;
195 }
196
197 static inline bool path_is_vendor_specific(uint8_t path_id)
198 {
199 return (path_id >= BT_HCI_DATAPATH_ID_VS &&
200 path_id <= BT_HCI_DATAPATH_ID_VS_END);
201 }
202
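/* Set up an ISO data path for a CIS, broadcast or synchronized receiver
 * stream: collect the stream's timing parameters, then create an ISO-AL sink
 * (Controller to Host) or source (Host to Controller) for the handle; backs
 * the HCI LE Setup ISO Data Path command.
 */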
203 uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
204 uint8_t coding_format, uint16_t company_id,
205 uint16_t vs_codec_id, uint32_t controller_delay,
206 uint8_t codec_config_len, uint8_t *codec_config)
207 {
208 struct lll_sync_iso_stream *sync_stream = NULL;
209 struct lll_adv_iso_stream *adv_stream = NULL;
210 struct ll_conn_iso_stream *cis = NULL;
211 struct ll_iso_datapath *dp;
212 uint32_t stream_sync_delay;
213 uint32_t group_sync_delay;
214 uint8_t flush_timeout;
215 uint16_t iso_interval;
216 uint32_t sdu_interval;
217 uint8_t burst_number;
218 uint8_t max_octets;
219 uint8_t framed;
220 uint8_t role;
221
222 ARG_UNUSED(controller_delay);
223 ARG_UNUSED(codec_config);
224
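/* Dummy first branch so that each conditionally compiled handle type below
 * can be written as an "else if", independent of which options are enabled.
 */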
225 if (false) {
226
227 #if defined(CONFIG_BT_CTLR_CONN_ISO)
228 } else if (IS_CIS_HANDLE(handle)) {
229 struct ll_conn_iso_group *cig;
230 struct ll_conn *conn;
231
232 /* If the Host attempts to set a data path with a Connection
233 * Handle that does not exist or that is not for a CIS or a BIS,
234 * the Controller shall return the error code Unknown Connection
235 * Identifier (0x02)
236 */
237 cis = ll_conn_iso_stream_get(handle);
238 if (!cis || !cis->group) {
239 /* CIS does not belong to a CIG */
240 return BT_HCI_ERR_UNKNOWN_CONN_ID;
241 }
242
243 conn = ll_connected_get(cis->lll.acl_handle);
244 if (conn) {
245 /* If we're still waiting for accept/response from
246 * host, path setup is premature and we must return
247 * disallowed status.
248 */
249 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
250 const uint8_t cis_waiting = ull_cp_cc_awaiting_reply(conn);
251
252 if (cis_waiting) {
253 return BT_HCI_ERR_CMD_DISALLOWED;
254 }
255 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
256 }
257
258 if ((path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR && cis->hdr.datapath_in) ||
259 (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST && cis->hdr.datapath_out)) {
260 /* Data path has been set up, can only do setup once */
261 return BT_HCI_ERR_CMD_DISALLOWED;
262 }
263
264 cig = cis->group;
265
266 role = cig->lll.role;
267 iso_interval = cig->iso_interval;
268 group_sync_delay = cig->sync_delay;
269 stream_sync_delay = cis->sync_delay;
270 framed = cis->framed;
271
272 if (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) {
273 /* Create sink for RX data path */
274 burst_number = cis->lll.rx.bn;
275 flush_timeout = cis->lll.rx.ft;
276 max_octets = cis->lll.rx.max_pdu;
277
278 if (role) {
279 /* peripheral */
280 sdu_interval = cig->c_sdu_interval;
281 } else {
282 /* central */
283 sdu_interval = cig->p_sdu_interval;
284 }
285 } else {
286 /* path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR */
287 burst_number = cis->lll.tx.bn;
288 flush_timeout = cis->lll.tx.ft;
289 max_octets = cis->lll.tx.max_pdu;
290
291 if (role) {
292 /* peripheral */
293 sdu_interval = cig->p_sdu_interval;
294 } else {
295 /* central */
296 sdu_interval = cig->c_sdu_interval;
297 }
298 }
299 #endif /* CONFIG_BT_CTLR_CONN_ISO */
300
301 #if defined(CONFIG_BT_CTLR_ADV_ISO)
302 } else if (IS_ADV_ISO_HANDLE(handle)) {
303 struct ll_adv_iso_set *adv_iso;
304 struct lll_adv_iso *lll_iso;
305 uint16_t stream_handle;
306
307 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
308 adv_stream = ull_adv_iso_stream_get(stream_handle);
309 if (!adv_stream || adv_stream->dp) {
310 return BT_HCI_ERR_CMD_DISALLOWED;
311 }
312
313 adv_iso = ull_adv_iso_by_stream_get(stream_handle);
314 lll_iso = &adv_iso->lll;
315
316 role = ISOAL_ROLE_BROADCAST_SOURCE;
317 iso_interval = lll_iso->iso_interval;
318 sdu_interval = lll_iso->sdu_interval;
319 burst_number = lll_iso->bn;
320 flush_timeout = 0U; /* Not used for Broadcast ISO */
321 group_sync_delay = 0U; /* FIXME: */
322 stream_sync_delay = 0U; /* FIXME: */
323 framed = 0U; /* FIXME: pick the framing value from context */
324 max_octets = lll_iso->max_pdu;
325 #endif /* CONFIG_BT_CTLR_ADV_ISO */
326
327 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
328 } else if (IS_SYNC_ISO_HANDLE(handle)) {
329 struct ll_sync_iso_set *sync_iso;
330 struct lll_sync_iso *lll_iso;
331 uint16_t stream_handle;
332
333 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
334 sync_stream = ull_sync_iso_stream_get(stream_handle);
335 if (!sync_stream || sync_stream->dp) {
336 return BT_HCI_ERR_CMD_DISALLOWED;
337 }
338
339 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
340 lll_iso = &sync_iso->lll;
341
342 role = ISOAL_ROLE_BROADCAST_SINK;
343 iso_interval = lll_iso->iso_interval;
344 sdu_interval = lll_iso->sdu_interval;
345 burst_number = lll_iso->bn;
346 flush_timeout = 0U; /* Not used for Broadcast ISO */
347 group_sync_delay = 0U; /* FIXME: */
348 stream_sync_delay = 0U; /* FIXME: */
349 framed = 0U; /* FIXME: pick the framing value from context */
350 max_octets = 0U;
351 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
352
353 } else {
354 return BT_HCI_ERR_UNKNOWN_CONN_ID;
355 }
356
357 if (path_is_vendor_specific(path_id) &&
358 (!IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) ||
359 !ll_data_path_configured(path_dir, path_id))) {
360 /* Data path must be configured prior to setup */
361 return BT_HCI_ERR_CMD_DISALLOWED;
362 }
363
364 /* If Codec_Configuration_Length is non-zero and Codec_ID is set to
365 * transparent air mode, the Controller shall return the error code
366 * Invalid HCI Command Parameters (0x12).
367 */
368 if (codec_config_len &&
369 (vs_codec_id == BT_HCI_CODING_FORMAT_TRANSPARENT)) {
370 return BT_HCI_ERR_INVALID_PARAM;
371 }
372
373 /* Allocate and configure datapath */
374 dp = mem_acquire(&datapath_free);
375 if (!dp) {
376 return BT_HCI_ERR_CMD_DISALLOWED;
377 }
378
379 dp->path_dir = path_dir;
380 dp->path_id = path_id;
381 dp->coding_format = coding_format;
382 dp->company_id = company_id;
383
384 /* TODO dp->sync_delay = controller_delay; ?*/
385
386 if (false) {
387
388 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
389 } else if ((path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) &&
390 (cis || sync_stream)) {
391 isoal_sink_handle_t sink_handle;
392 isoal_status_t err;
393
394 if (path_id == BT_HCI_DATAPATH_ID_HCI) {
395 /* Not vendor specific, thus alloc and emit functions
396 * known
397 */
398 err = isoal_sink_create(handle, role, framed,
399 burst_number, flush_timeout,
400 sdu_interval, iso_interval,
401 stream_sync_delay,
402 group_sync_delay,
403 sink_sdu_alloc_hci,
404 sink_sdu_emit_hci,
405 sink_sdu_write_hci,
406 &sink_handle);
407 } else {
408 /* Set up vendor specific data path */
409 isoal_sink_sdu_alloc_cb sdu_alloc;
410 isoal_sink_sdu_emit_cb sdu_emit;
411 isoal_sink_sdu_write_cb sdu_write;
412
413 /* Request vendor sink callbacks for path */
414 if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) &&
415 ll_data_path_sink_create(handle, dp, &sdu_alloc,
416 &sdu_emit, &sdu_write)) {
417 err = isoal_sink_create(handle, role, framed,
418 burst_number,
419 flush_timeout,
420 sdu_interval,
421 iso_interval,
422 stream_sync_delay,
423 group_sync_delay,
424 sdu_alloc, sdu_emit,
425 sdu_write,
426 &sink_handle);
427 } else {
428 ull_iso_datapath_release(dp);
429
430 return BT_HCI_ERR_CMD_DISALLOWED;
431 }
432 }
433
434 if (!err) {
435 if (cis) {
436 cis->hdr.datapath_out = dp;
437 }
438
439 if (sync_stream) {
440 sync_stream->dp = dp;
441 }
442
443 dp->sink_hdl = sink_handle;
444 isoal_sink_enable(sink_handle);
445 } else {
446 ull_iso_datapath_release(dp);
447
448 return BT_HCI_ERR_CMD_DISALLOWED;
449 }
450 #else /* !CONFIG_BT_CTLR_SYNC_ISO && !CONFIG_BT_CTLR_CONN_ISO */
451 ARG_UNUSED(sync_stream);
452 #endif /* !CONFIG_BT_CTLR_SYNC_ISO && !CONFIG_BT_CTLR_CONN_ISO */
453
454 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
455 } else if ((path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR) &&
456 (cis || adv_stream)) {
457 isoal_source_handle_t source_handle;
458 isoal_status_t err;
459
460 /* Create source for TX data path */
461 isoal_source_pdu_alloc_cb pdu_alloc;
462 isoal_source_pdu_write_cb pdu_write;
463 isoal_source_pdu_emit_cb pdu_emit;
464 isoal_source_pdu_release_cb pdu_release;
465
466 if (path_is_vendor_specific(path_id)) {
467 if (!IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) ||
468 !ll_data_path_source_create(handle, dp,
469 &pdu_alloc, &pdu_write,
470 &pdu_emit,
471 &pdu_release)) {
472 ull_iso_datapath_release(dp);
473
474 return BT_HCI_ERR_CMD_DISALLOWED;
475 }
476 } else {
477 /* Set default callbacks when the path is not vendor specific,
478 * or when the vendor specific path uses the same callbacks.
479 */
480 pdu_alloc = ll_iso_pdu_alloc;
481 pdu_write = ll_iso_pdu_write;
482 pdu_emit = ll_iso_pdu_emit;
483 pdu_release = ll_iso_pdu_release;
484 }
485
486 err = isoal_source_create(handle, role, framed, burst_number,
487 flush_timeout, max_octets,
488 sdu_interval, iso_interval,
489 stream_sync_delay, group_sync_delay,
490 pdu_alloc, pdu_write, pdu_emit,
491 pdu_release, &source_handle);
492
493 if (!err) {
494 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && cis != NULL) {
495 cis->hdr.datapath_in = dp;
496 }
497
498 if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && adv_stream != NULL) {
499 adv_stream->dp = dp;
500 }
501
502 dp->source_hdl = source_handle;
503 isoal_source_enable(source_handle);
504 } else {
505 ull_iso_datapath_release(dp);
506
507 return BT_HCI_ERR_CMD_DISALLOWED;
508 }
509
510 #else /* !CONFIG_BT_CTLR_ADV_ISO && !CONFIG_BT_CTLR_CONN_ISO */
511 ARG_UNUSED(adv_stream);
512 #endif /* !CONFIG_BT_CTLR_ADV_ISO && !CONFIG_BT_CTLR_CONN_ISO */
513
514 } else {
515 return BT_HCI_ERR_CMD_DISALLOWED;
516 }
517
518 return BT_HCI_ERR_SUCCESS;
519 }
520
521 uint8_t ll_remove_iso_path(uint16_t handle, uint8_t path_dir)
522 {
523 /* If the Host issues this command with a Connection_Handle that does
524 * not exist or is not for a CIS or a BIS, the Controller shall return
525 * the error code Unknown Connection Identifier (0x02).
526 */
527 if (false) {
528
529 #if defined(CONFIG_BT_CTLR_CONN_ISO)
530 } else if (IS_CIS_HANDLE(handle)) {
531 struct ll_conn_iso_stream *cis;
532 struct ll_iso_stream_hdr *hdr;
533 struct ll_iso_datapath *dp;
534
535 cis = ll_conn_iso_stream_get(handle);
536 hdr = &cis->hdr;
537
538 if (path_dir & BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR)) {
539 dp = hdr->datapath_in;
540 if (dp) {
541 isoal_source_destroy(dp->source_hdl);
542
543 hdr->datapath_in = NULL;
544 ull_iso_datapath_release(dp);
545 } else {
546 /* Datapath was not previously set up */
547 return BT_HCI_ERR_CMD_DISALLOWED;
548 }
549 }
550
551 if (path_dir & BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST)) {
552 dp = hdr->datapath_out;
553 if (dp) {
554 isoal_sink_destroy(dp->sink_hdl);
555
556 hdr->datapath_out = NULL;
557 ull_iso_datapath_release(dp);
558 } else {
559 /* Datapath was not previously set up */
560 return BT_HCI_ERR_CMD_DISALLOWED;
561 }
562 }
563 #endif /* CONFIG_BT_CTLR_CONN_ISO */
564
565 #if defined(CONFIG_BT_CTLR_ADV_ISO)
566 } else if (IS_ADV_ISO_HANDLE(handle)) {
567 struct lll_adv_iso_stream *adv_stream;
568 struct ll_iso_datapath *dp;
569 uint16_t stream_handle;
570
571 if (!(path_dir & BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR))) {
572 return BT_HCI_ERR_CMD_DISALLOWED;
573 }
574
575 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
576 adv_stream = ull_adv_iso_stream_get(stream_handle);
577 if (!adv_stream) {
578 return BT_HCI_ERR_CMD_DISALLOWED;
579 }
580
581 dp = adv_stream->dp;
582 if (dp) {
583 adv_stream->dp = NULL;
584 isoal_source_destroy(dp->source_hdl);
585 ull_iso_datapath_release(dp);
586 } else {
587 /* Datapath was not previously set up */
588 return BT_HCI_ERR_CMD_DISALLOWED;
589 }
590 #endif /* CONFIG_BT_CTLR_ADV_ISO */
591
592 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
593 } else if (IS_SYNC_ISO_HANDLE(handle)) {
594 struct lll_sync_iso_stream *sync_stream;
595 struct ll_iso_datapath *dp;
596 uint16_t stream_handle;
597
598 if (!(path_dir & BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST))) {
599 return BT_HCI_ERR_CMD_DISALLOWED;
600 }
601
602 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
603 sync_stream = ull_sync_iso_stream_get(stream_handle);
604 if (!sync_stream) {
605 return BT_HCI_ERR_CMD_DISALLOWED;
606 }
607
608 dp = sync_stream->dp;
609 if (dp) {
610 sync_stream->dp = NULL;
611 isoal_sink_destroy(dp->sink_hdl);
612 ull_iso_datapath_release(dp);
613 } else {
614 /* Datapath was not previously set up */
615 return BT_HCI_ERR_CMD_DISALLOWED;
616 }
617 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
618
619 } else {
620 return BT_HCI_ERR_CMD_DISALLOWED;
621 }
622
623 return 0;
624 }
625
626 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
627 /* The sdu_alloc function is called before combining PDUs into an SDU. Here we
628 * store the payload number associated with the first PDU, for the unframed use case.
629 */
630 static isoal_status_t ll_iso_test_sdu_alloc(const struct isoal_sink *sink_ctx,
631 const struct isoal_pdu_rx *valid_pdu,
632 struct isoal_sdu_buffer *sdu_buffer)
633 {
634 uint16_t handle;
635
636 handle = sink_ctx->session.handle;
637
638 if (IS_CIS_HANDLE(handle)) {
639 if (!sink_ctx->session.framed) {
640 struct ll_conn_iso_stream *cis;
641
642 cis = ll_iso_stream_connected_get(sink_ctx->session.handle);
643 LL_ASSERT(cis);
644
645 /* For unframed, SDU counter is the payload number */
646 cis->hdr.test_mode.rx_sdu_counter =
647 (uint32_t)valid_pdu->meta->payload_number;
648 }
649 } else if (IS_SYNC_ISO_HANDLE(handle)) {
650 /* FIXME: Implement for sync receiver */
651 LL_ASSERT(false);
652 }
653
654 return sink_sdu_alloc_hci(sink_ctx, valid_pdu, sdu_buffer);
655 }
656
657 /* The sdu_emit function is called whenever an SDU is combined and ready to be sent
658 * further in the data path. This injected implementation performs statistics on
659 * the SDU and then discards it.
660 */
661 static isoal_status_t ll_iso_test_sdu_emit(const struct isoal_sink *sink_ctx,
662 const struct isoal_emitted_sdu_frag *sdu_frag,
663 const struct isoal_emitted_sdu *sdu)
664 {
665 isoal_status_t status;
666 struct net_buf *buf;
667 uint16_t handle;
668
669 handle = sink_ctx->session.handle;
670 buf = (struct net_buf *)sdu_frag->sdu.contents.dbuf;
671
672 if (IS_CIS_HANDLE(handle)) {
673 struct ll_conn_iso_stream *cis;
674 isoal_sdu_len_t length;
675 uint32_t sdu_counter;
676 uint8_t framed;
677
678 cis = ll_iso_stream_connected_get(sink_ctx->session.handle);
679 LL_ASSERT(cis);
680
681 length = sink_ctx->sdu_production.sdu_written;
682 framed = sink_ctx->session.framed;
683
684 /* In BT_HCI_ISO_TEST_ZERO_SIZE_SDU mode, all SDUs must have length 0 and there is
685 * no sdu_counter field. In the other modes, the first 4 bytes must contain a
686 * packet counter, which is used as SDU counter. The sdu_counter is extracted
687 * regardless of mode as a sanity check, unless the length does not allow it.
688 */
689 if (length >= ISO_TEST_PACKET_COUNTER_SIZE) {
690 sdu_counter = sys_get_le32(buf->data);
691 } else {
692 sdu_counter = 0U;
693 }
694
695 switch (sdu_frag->sdu.status) {
696 case ISOAL_SDU_STATUS_VALID:
697 if (framed && cis->hdr.test_mode.rx_sdu_counter == 0U) {
698 /* BT 5.3, Vol 6, Part B, section 7.2:
699 * When using framed PDUs the expected value of the SDU counter
700 * shall be initialized with the value of the SDU counter of the
701 * first valid received SDU.
702 */
703 cis->hdr.test_mode.rx_sdu_counter = sdu_counter;
704 }
705
706 switch (cis->hdr.test_mode.rx_payload_type) {
707 case BT_HCI_ISO_TEST_ZERO_SIZE_SDU:
708 if (length == 0) {
709 cis->hdr.test_mode.received_cnt++;
710 } else {
711 cis->hdr.test_mode.failed_cnt++;
712 }
713 break;
714
715 case BT_HCI_ISO_TEST_VARIABLE_SIZE_SDU:
716 if ((length >= ISO_TEST_PACKET_COUNTER_SIZE) &&
717 (length <= cis->c_max_sdu) &&
718 (sdu_counter == cis->hdr.test_mode.rx_sdu_counter)) {
719 cis->hdr.test_mode.received_cnt++;
720 } else {
721 cis->hdr.test_mode.failed_cnt++;
722 }
723 break;
724
725 case BT_HCI_ISO_TEST_MAX_SIZE_SDU:
726 if ((length == cis->c_max_sdu) &&
727 (sdu_counter == cis->hdr.test_mode.rx_sdu_counter)) {
728 cis->hdr.test_mode.received_cnt++;
729 } else {
730 cis->hdr.test_mode.failed_cnt++;
731 }
732 break;
733
734 default:
735 LL_ASSERT(0);
736 return ISOAL_STATUS_ERR_SDU_EMIT;
737 }
738 break;
739
740 case ISOAL_SDU_STATUS_ERRORS:
741 case ISOAL_SDU_STATUS_LOST_DATA:
742 cis->hdr.test_mode.missed_cnt++;
743 break;
744 }
745
746 /* In framed mode, we may start incrementing the SDU counter when rx_sdu_counter
747 * becomes non-zero (initial state), or, in case of zero-based counting, if zero
748 * is actually the first valid SDU counter received.
749 */
750 if (framed && (cis->hdr.test_mode.rx_sdu_counter ||
751 (sdu_frag->sdu.status == ISOAL_SDU_STATUS_VALID))) {
752 cis->hdr.test_mode.rx_sdu_counter++;
753 }
754
755 status = ISOAL_STATUS_OK;
756
757 } else if (IS_SYNC_ISO_HANDLE(handle)) {
758 /* FIXME: Implement for sync receiver */
759 status = ISOAL_STATUS_ERR_SDU_EMIT;
760 } else {
761 /* Handle is out of range */
762 status = ISOAL_STATUS_ERR_SDU_EMIT;
763 }
764
765 net_buf_unref(buf);
766
767 return status;
768 }
769
770 uint8_t ll_iso_receive_test(uint16_t handle, uint8_t payload_type)
771 {
772 isoal_sink_handle_t sink_handle;
773 struct ll_iso_datapath *dp;
774 uint32_t sdu_interval;
775 isoal_status_t err;
776 uint8_t status;
777
778 status = BT_HCI_ERR_SUCCESS;
779
780 if (IS_CIS_HANDLE(handle)) {
781 struct ll_conn_iso_stream *cis;
782 struct ll_conn_iso_group *cig;
783
784 cis = ll_iso_stream_connected_get(handle);
785 if (!cis) {
786 /* CIS is not connected */
787 return BT_HCI_ERR_UNKNOWN_CONN_ID;
788 }
789
790 if (cis->lll.rx.bn == 0) {
791 /* CIS is not configured for RX */
792 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
793 }
794
795 if (cis->hdr.datapath_out) {
796 /* Data path already set up */
797 return BT_HCI_ERR_CMD_DISALLOWED;
798 }
799
800 if (payload_type > BT_HCI_ISO_TEST_MAX_SIZE_SDU) {
801 return BT_HCI_ERR_INVALID_LL_PARAM;
802 }
803
804 /* Allocate and configure test datapath */
805 dp = mem_acquire(&datapath_free);
806 if (!dp) {
807 return BT_HCI_ERR_CMD_DISALLOWED;
808 }
809
810 dp->path_dir = BT_HCI_DATAPATH_DIR_CTLR_TO_HOST;
811 dp->path_id = BT_HCI_DATAPATH_ID_HCI;
812
813 cis->hdr.datapath_out = dp;
814 cig = cis->group;
815
816 if (cig->lll.role == BT_HCI_ROLE_PERIPHERAL) {
817 /* peripheral */
818 sdu_interval = cig->c_sdu_interval;
819 } else {
820 /* central */
821 sdu_interval = cig->p_sdu_interval;
822 }
823
824 err = isoal_sink_create(handle, cig->lll.role, cis->framed,
825 cis->lll.rx.bn, cis->lll.rx.ft,
826 sdu_interval, cig->iso_interval,
827 cis->sync_delay, cig->sync_delay,
828 ll_iso_test_sdu_alloc,
829 ll_iso_test_sdu_emit,
830 sink_sdu_write_hci, &sink_handle);
831 if (err) {
832 /* Error creating test sink - cleanup sink and
833 * datapath
834 */
835 isoal_sink_destroy(sink_handle);
836 ull_iso_datapath_release(dp);
837 cis->hdr.datapath_out = NULL;
838
839 return BT_HCI_ERR_CMD_DISALLOWED;
840 }
841
842 dp->sink_hdl = sink_handle;
843 isoal_sink_enable(sink_handle);
844
845 /* Enable Receive Test Mode */
846 cis->hdr.test_mode.rx_enabled = 1;
847 cis->hdr.test_mode.rx_payload_type = payload_type;
848
849 } else if (IS_SYNC_ISO_HANDLE(handle)) {
850 /* FIXME: Implement for sync receiver */
851 status = BT_HCI_ERR_CMD_DISALLOWED;
852 } else {
853 /* Handle is out of range */
854 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
855 }
856
857 return status;
858 }
859
860 uint8_t ll_iso_read_test_counters(uint16_t handle, uint32_t *received_cnt,
861 uint32_t *missed_cnt,
862 uint32_t *failed_cnt)
863 {
864 uint8_t status;
865
866 *received_cnt = 0U;
867 *missed_cnt = 0U;
868 *failed_cnt = 0U;
869
870 status = BT_HCI_ERR_SUCCESS;
871
872 if (IS_CIS_HANDLE(handle)) {
873 struct ll_conn_iso_stream *cis;
874
875 cis = ll_iso_stream_connected_get(handle);
876 if (!cis) {
877 /* CIS is not connected */
878 return BT_HCI_ERR_UNKNOWN_CONN_ID;
879 }
880
881 if (!cis->hdr.test_mode.rx_enabled) {
882 /* ISO receive Test is not active */
883 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
884 }
885
886 /* Return SDU statistics */
887 *received_cnt = cis->hdr.test_mode.received_cnt;
888 *missed_cnt = cis->hdr.test_mode.missed_cnt;
889 *failed_cnt = cis->hdr.test_mode.failed_cnt;
890
891 } else if (IS_SYNC_ISO_HANDLE(handle)) {
892 /* FIXME: Implement for sync receiver */
893 status = BT_HCI_ERR_CMD_DISALLOWED;
894 } else {
895 /* Handle is out of range */
896 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
897 }
898
899 return status;
900 }
901
902 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
903 uint8_t ll_read_iso_link_quality(uint16_t handle,
904 uint32_t *tx_unacked_packets,
905 uint32_t *tx_flushed_packets,
906 uint32_t *tx_last_subevent_packets,
907 uint32_t *retransmitted_packets,
908 uint32_t *crc_error_packets,
909 uint32_t *rx_unreceived_packets,
910 uint32_t *duplicate_packets)
911 {
912 uint8_t status;
913
914 *tx_unacked_packets = 0;
915 *tx_flushed_packets = 0;
916 *tx_last_subevent_packets = 0;
917 *retransmitted_packets = 0;
918 *crc_error_packets = 0;
919 *rx_unreceived_packets = 0;
920 *duplicate_packets = 0;
921
922 status = BT_HCI_ERR_SUCCESS;
923
924 if (IS_CIS_HANDLE(handle)) {
925 struct ll_conn_iso_stream *cis;
926
927 cis = ll_iso_stream_connected_get(handle);
928
929 if (!cis) {
930 /* CIS is not connected */
931 return BT_HCI_ERR_UNKNOWN_CONN_ID;
932 }
933
934 *tx_unacked_packets = cis->hdr.link_quality.tx_unacked_packets;
935 *tx_flushed_packets = cis->hdr.link_quality.tx_flushed_packets;
936 *tx_last_subevent_packets = cis->hdr.link_quality.tx_last_subevent_packets;
937 *retransmitted_packets = cis->hdr.link_quality.retransmitted_packets;
938 *crc_error_packets = cis->hdr.link_quality.crc_error_packets;
939 *rx_unreceived_packets = cis->hdr.link_quality.rx_unreceived_packets;
940 *duplicate_packets = cis->hdr.link_quality.duplicate_packets;
941
942 } else if (IS_SYNC_ISO_HANDLE(handle)) {
943 /* FIXME: Implement for sync receiver */
944 status = BT_HCI_ERR_CMD_DISALLOWED;
945 } else {
946 /* Handle is out of range */
947 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
948 }
949
950 return status;
951 }
952 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
953
954 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
955
956 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
957 static isoal_status_t ll_iso_test_pdu_release(struct node_tx_iso *node_tx,
958 const uint16_t handle,
959 const isoal_status_t status)
960 {
961 /* Release back to memory pool */
962 if (node_tx->link) {
963 ll_iso_link_tx_release(node_tx->link);
964 }
965 ll_iso_tx_mem_release(node_tx);
966
967 return ISOAL_STATUS_OK;
968 }
969
970 #if defined(CONFIG_BT_CTLR_CONN_ISO)
971 void ll_iso_transmit_test_send_sdu(uint16_t handle, uint32_t ticks_at_expire)
972 {
973 isoal_source_handle_t source_handle;
974 struct isoal_sdu_tx sdu;
975 isoal_status_t err;
976 uint8_t tx_buffer[ISO_TEST_TX_BUFFER_SIZE];
977 uint64_t next_payload_number;
978 uint16_t remaining_tx;
979 uint32_t sdu_counter;
980
981 if (IS_CIS_HANDLE(handle)) {
982 struct ll_conn_iso_stream *cis;
983 struct ll_conn_iso_group *cig;
984 uint32_t rand_max_sdu;
985 uint8_t event_offset;
986 uint8_t max_sdu;
987 uint8_t rand_8;
988
989 cis = ll_iso_stream_connected_get(handle);
990 LL_ASSERT(cis);
991
992 if (!cis->hdr.test_mode.tx_enabled) {
993 /* Transmit Test Mode not enabled */
994 return;
995 }
996
997 cig = cis->group;
998 source_handle = cis->hdr.datapath_in->source_hdl;
999
1000 max_sdu = IS_PERIPHERAL(cig) ? cis->p_max_sdu : cis->c_max_sdu;
1001
1002 switch (cis->hdr.test_mode.tx_payload_type) {
1003 case BT_HCI_ISO_TEST_ZERO_SIZE_SDU:
1004 remaining_tx = 0;
1005 break;
1006
1007 case BT_HCI_ISO_TEST_VARIABLE_SIZE_SDU:
1008 /* Randomize the length [4..max_sdu] */
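/* rand_8 is an 8-bit value, so (rand_8 * range) >> 8 scales it by
 * rand_8/256 into the payload range above the 4-byte counter.
 */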
1009 lll_rand_get(&rand_8, sizeof(rand_8));
1010 rand_max_sdu = rand_8 * (max_sdu - ISO_TEST_PACKET_COUNTER_SIZE);
1011 remaining_tx = ISO_TEST_PACKET_COUNTER_SIZE + (rand_max_sdu >> 8);
1012 break;
1013
1014 case BT_HCI_ISO_TEST_MAX_SIZE_SDU:
1015 LL_ASSERT(max_sdu > ISO_TEST_PACKET_COUNTER_SIZE);
1016 remaining_tx = max_sdu;
1017 break;
1018
1019 default:
1020 LL_ASSERT(0);
1021 return;
1022 }
1023
1024 if (remaining_tx > ISO_TEST_TX_BUFFER_SIZE) {
1025 sdu.sdu_state = BT_ISO_START;
1026 } else {
1027 sdu.sdu_state = BT_ISO_SINGLE;
1028 }
1029
1030 /* Configure SDU similarly to one delivered via HCI */
1031 sdu.packet_sn = 0;
1032 sdu.dbuf = tx_buffer;
1033
1034 /* We must ensure sufficient time for ISO-AL to fragment SDU and
1035 * deliver PDUs to the TX queue. By checking ull_ref_get, we
1036 * know if we are within the subevents of an ISO event. If so,
1037 * we can assume that we have enough time to deliver in the next
1038 * ISO event. If we're not active within the ISO event, we don't
1039 * know if there is enough time to deliver in the next event,
1040 * and for safety we set the target to current event + 2.
1041 *
1042 * For FT > 1, we have the opportunity to retransmit in later
1043 * event(s), in which case we have the option to target an
1044 * earlier event (this or next) because being late does not
1045 * instantly flush the payload.
1046 */
1047 event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
1048 if (cis->lll.tx.ft > 1) {
1049 /* FT > 1, target an earlier event */
1050 event_offset -= 1;
1051 }
1052
1053 sdu.grp_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
1054 (event_offset * cig->iso_interval *
1055 ISO_INT_UNIT_US));
1056 sdu.target_event = cis->lll.event_count + event_offset;
1057 sdu.iso_sdu_length = remaining_tx;
1058
1059 /* Send all SDU fragments */
1060 do {
1061 sdu.cntr_time_stamp = HAL_TICKER_TICKS_TO_US(ticks_at_expire);
1062 sdu.time_stamp = sdu.cntr_time_stamp;
1063 sdu.size = MIN(remaining_tx, ISO_TEST_TX_BUFFER_SIZE);
1064 memset(tx_buffer, 0, sdu.size);
1065
1066 /* If this is the first fragment of a framed SDU, inject the SDU
1067 * counter.
1068 */
1069 if ((sdu.size >= ISO_TEST_PACKET_COUNTER_SIZE) &&
1070 ((sdu.sdu_state == BT_ISO_START) || (sdu.sdu_state == BT_ISO_SINGLE))) {
1071 if (cis->framed) {
1072 sdu_counter = (uint32_t)cis->hdr.test_mode.tx_sdu_counter;
1073 } else {
1074 /* Unframed. Get the next payload counter.
1075 *
1076 * BT 5.3, Vol 6, Part B, Section 7.1:
1077 * When using unframed PDUs, the SDU counter shall be equal
1078 * to the payload counter.
1079 */
1080 isoal_tx_unframed_get_next_payload_number(source_handle,
1081 &sdu,
1082 &next_payload_number);
1083 sdu_counter = (uint32_t)next_payload_number;
1084 }
1085
1086 sys_put_le32(sdu_counter, tx_buffer);
1087 }
1088
1089 /* Send to ISOAL */
1090 err = isoal_tx_sdu_fragment(source_handle, &sdu);
1091 LL_ASSERT(!err);
1092
1093 remaining_tx -= sdu.size;
1094
1095 if (remaining_tx > ISO_TEST_TX_BUFFER_SIZE) {
1096 sdu.sdu_state = BT_ISO_CONT;
1097 } else {
1098 sdu.sdu_state = BT_ISO_END;
1099 }
1100 } while (remaining_tx);
1101
1102 cis->hdr.test_mode.tx_sdu_counter++;
1103
1104 } else if (IS_ADV_ISO_HANDLE(handle)) {
1105 /* FIXME: Implement for broadcaster */
1106 } else {
1107 LL_ASSERT(0);
1108 }
1109 }
1110 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1111
1112 uint8_t ll_iso_transmit_test(uint16_t handle, uint8_t payload_type)
1113 {
1114 isoal_source_handle_t source_handle;
1115 struct ll_iso_datapath *dp;
1116 uint32_t sdu_interval;
1117 isoal_status_t err;
1118 uint8_t status;
1119
1120 status = BT_HCI_ERR_SUCCESS;
1121
1122 if (IS_CIS_HANDLE(handle)) {
1123 struct ll_conn_iso_stream *cis;
1124 struct ll_conn_iso_group *cig;
1125
1126 cis = ll_iso_stream_connected_get(handle);
1127 if (!cis) {
1128 /* CIS is not connected */
1129 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1130 }
1131
1132 if (cis->lll.tx.bn == 0U) {
1133 /* CIS is not configured for TX */
1134 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1135 }
1136
1137 if (cis->hdr.datapath_in) {
1138 /* Data path already set up */
1139 return BT_HCI_ERR_CMD_DISALLOWED;
1140 }
1141
1142 if (payload_type > BT_HCI_ISO_TEST_MAX_SIZE_SDU) {
1143 return BT_HCI_ERR_INVALID_LL_PARAM;
1144 }
1145
1146 /* Allocate and configure test datapath */
1147 dp = mem_acquire(&datapath_free);
1148 if (!dp) {
1149 return BT_HCI_ERR_CMD_DISALLOWED;
1150 }
1151
1152 dp->path_dir = BT_HCI_DATAPATH_DIR_HOST_TO_CTLR;
1153 dp->path_id = BT_HCI_DATAPATH_ID_HCI;
1154
1155 cis->hdr.datapath_in = dp;
1156 cig = cis->group;
1157
1158 sdu_interval = IS_PERIPHERAL(cig) ? cig->p_sdu_interval : cig->c_sdu_interval;
1159
1160 /* Setup the test source */
1161 err = isoal_source_create(handle, cig->lll.role, cis->framed,
1162 cis->lll.tx.bn, cis->lll.tx.ft,
1163 cis->lll.tx.max_pdu, sdu_interval,
1164 cig->iso_interval, cis->sync_delay,
1165 cig->sync_delay, ll_iso_pdu_alloc,
1166 ll_iso_pdu_write, ll_iso_pdu_emit,
1167 ll_iso_test_pdu_release,
1168 &source_handle);
1169
1170 if (err) {
1171 /* Error creating test source - cleanup source and datapath */
1172 isoal_source_destroy(source_handle);
1173 ull_iso_datapath_release(dp);
1174 cis->hdr.datapath_in = NULL;
1175
1176 return BT_HCI_ERR_CMD_DISALLOWED;
1177 }
1178
1179 dp->source_hdl = source_handle;
1180 isoal_source_enable(source_handle);
1181
1182 /* Enable Transmit Test Mode */
1183 cis->hdr.test_mode.tx_enabled = 1;
1184 cis->hdr.test_mode.tx_payload_type = payload_type;
1185
1186 } else if (IS_ADV_ISO_HANDLE(handle)) {
1187 struct lll_adv_iso_stream *stream;
1188 uint16_t stream_handle;
1189
1190 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1191 stream = ull_adv_iso_stream_get(stream_handle);
1192 if (!stream) {
1193 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1194 }
1195
1196 /* FIXME: Implement use of common header in stream to enable code sharing
1197 * between CIS and BIS for test commands (and other places).
1198 */
1199 status = BT_HCI_ERR_CMD_DISALLOWED;
1200 } else {
1201 /* Handle is out of range */
1202 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
1203 }
1204
1205 return status;
1206 }
1207 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1208
1209 uint8_t ll_iso_test_end(uint16_t handle, uint32_t *received_cnt,
1210 uint32_t *missed_cnt, uint32_t *failed_cnt)
1211 {
1212 uint8_t status;
1213
1214 *received_cnt = 0U;
1215 *missed_cnt = 0U;
1216 *failed_cnt = 0U;
1217
1218 status = BT_HCI_ERR_SUCCESS;
1219
1220 if (IS_CIS_HANDLE(handle)) {
1221 struct ll_conn_iso_stream *cis;
1222
1223 cis = ll_iso_stream_connected_get(handle);
1224 if (!cis) {
1225 /* CIS is not connected */
1226 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1227 }
1228
1229 if (!cis->hdr.test_mode.rx_enabled && !cis->hdr.test_mode.tx_enabled) {
1230 /* Test Mode is not active */
1231 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1232 }
1233
1234 if (cis->hdr.test_mode.rx_enabled) {
1235 isoal_sink_destroy(cis->hdr.datapath_out->sink_hdl);
1236 ull_iso_datapath_release(cis->hdr.datapath_out);
1237 cis->hdr.datapath_out = NULL;
1238
1239 /* Return SDU statistics */
1240 *received_cnt = cis->hdr.test_mode.received_cnt;
1241 *missed_cnt = cis->hdr.test_mode.missed_cnt;
1242 *failed_cnt = cis->hdr.test_mode.failed_cnt;
1243 }
1244
1245 if (cis->hdr.test_mode.tx_enabled) {
1246 /* Tear down source and datapath */
1247 isoal_source_destroy(cis->hdr.datapath_in->source_hdl);
1248 ull_iso_datapath_release(cis->hdr.datapath_in);
1249 cis->hdr.datapath_in = NULL;
1250 }
1251
1252 /* Disable Test Mode */
1253 (void)memset(&cis->hdr.test_mode, 0U, sizeof(cis->hdr.test_mode));
1254
1255 } else if (IS_ADV_ISO_HANDLE(handle)) {
1256 /* FIXME: Implement for broadcaster */
1257 status = BT_HCI_ERR_CMD_DISALLOWED;
1258 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1259 /* FIXME: Implement for sync receiver */
1260 status = BT_HCI_ERR_CMD_DISALLOWED;
1261 } else {
1262 /* Handle is out of range */
1263 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
1264 }
1265
1266 return status;
1267 }
1268
1269 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1270 void *ll_iso_tx_mem_acquire(void)
1271 {
1272 return mem_acquire(&mem_iso_tx.free);
1273 }
1274
1275 void ll_iso_tx_mem_release(void *node_tx)
1276 {
1277 mem_release(node_tx, &mem_iso_tx.free);
1278 }
1279
1280 int ll_iso_tx_mem_enqueue(uint16_t handle, void *node_tx, void *link)
1281 {
1282 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) &&
1283 IS_CIS_HANDLE(handle)) {
1284 struct ll_conn_iso_stream *cis;
1285
1286 cis = ll_conn_iso_stream_get(handle);
1287 memq_enqueue(link, node_tx, &cis->lll.memq_tx.tail);
1288
1289 } else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) &&
1290 IS_ADV_ISO_HANDLE(handle)) {
1291 struct lll_adv_iso_stream *stream;
1292 uint16_t stream_handle;
1293
1294 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1295 stream = ull_adv_iso_stream_get(stream_handle);
1296 memq_enqueue(link, node_tx, &stream->memq_tx.tail);
1297
1298 } else {
1299 return -EINVAL;
1300 }
1301
1302 return 0;
1303 }
1304 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1305
1306 int ull_iso_init(void)
1307 {
1308 int err;
1309
1310 err = init_reset();
1311 if (err) {
1312 return err;
1313 }
1314
1315 return 0;
1316 }
1317
1318 int ull_iso_reset(void)
1319 {
1320 int err;
1321
1322 err = init_reset();
1323 if (err) {
1324 return err;
1325 }
1326
1327 return 0;
1328 }
1329
1330 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1331 void ull_iso_lll_ack_enqueue(uint16_t handle, struct node_tx_iso *node_tx)
1332 {
1333 struct ll_iso_datapath *dp = NULL;
1334
1335 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && IS_CIS_HANDLE(handle)) {
1336 struct ll_conn_iso_stream *cis;
1337
1338 cis = ll_conn_iso_stream_get(handle);
1339 dp = cis->hdr.datapath_in;
1340
1341 if (dp) {
1342 isoal_tx_pdu_release(dp->source_hdl, node_tx);
1343 } else {
1344 /* Race with Data Path remove */
1345 /* FIXME: ll_tx_ack_put is not LLL callable as it is
1346 * used by ACL connections in ULL context to dispatch
1347 * ack.
1348 */
1349 ll_tx_ack_put(handle, (void *)node_tx);
1350 ll_rx_sched();
1351 }
1352 } else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && IS_ADV_ISO_HANDLE(handle)) {
1353 /* Process as TX ack. TODO: Can be unified with CIS and use
1354 * ISOAL.
1355 */
1356 /* FIXME: ll_tx_ack_put is not LLL callable as it is
1357 * used by ACL connections in ULL context to dispatch
1358 * ack.
1359 */
1360 ll_tx_ack_put(handle, (void *)node_tx);
1361 ll_rx_sched();
1362 } else {
1363 LL_ASSERT(0);
1364 }
1365 }
1366
1367 void ull_iso_lll_event_prepare(uint16_t handle, uint64_t event_count)
1368 {
1369 if (IS_CIS_HANDLE(handle)) {
1370 struct ll_iso_datapath *dp = NULL;
1371 struct ll_conn_iso_stream *cis;
1372
1373 cis = ll_iso_stream_connected_get(handle);
1374
1375 if (cis) {
1376 dp = cis->hdr.datapath_in;
1377 }
1378
1379 if (dp) {
1380 isoal_tx_event_prepare(dp->source_hdl, event_count);
1381 }
1382 } else if (IS_ADV_ISO_HANDLE(handle)) {
1383 /* Send event deadline trigger to ISO-AL.
1384 * TODO: Can be unified with CIS implementation.
1385 */
1386 } else {
1387 LL_ASSERT(0);
1388 }
1389 }
1390 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1391
1392 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1393 void *ull_iso_pdu_rx_alloc_peek(uint8_t count)
1394 {
1395 if (count > MFIFO_AVAIL_COUNT_GET(iso_rx)) {
1396 return NULL;
1397 }
1398
1399 return MFIFO_DEQUEUE_PEEK(iso_rx);
1400 }
1401
1402 void *ull_iso_pdu_rx_alloc(void)
1403 {
1404 return MFIFO_DEQUEUE(iso_rx);
1405 }
1406
1407 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
1408 void ull_iso_rx_put(memq_link_t *link, void *rx)
1409 {
1410 /* Enqueue the Rx object */
1411 memq_enqueue(link, rx, &memq_ull_iso_rx.tail);
1412 }
1413
1414 void ull_iso_rx_sched(void)
1415 {
1416 static memq_link_t link;
1417 static struct mayfly mfy = {0, 0, &link, NULL, iso_rx_demux};
1418
1419 /* Kick the ULL (using the mayfly, tailchain it) */
1420 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
1421 }
1422
1423 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1424 static void iso_rx_cig_ref_point_update(struct ll_conn_iso_group *cig,
1425 const struct ll_conn_iso_stream *cis,
1426 const struct node_rx_iso_meta *meta)
1427 {
1428 uint32_t cig_sync_delay;
1429 uint32_t cis_sync_delay;
1430 uint64_t event_count;
1431 uint8_t burst_number;
1432 uint8_t role;
1433
1434 role = cig->lll.role;
1435 cig_sync_delay = cig->sync_delay;
1436 cis_sync_delay = cis->sync_delay;
1437 burst_number = cis->lll.rx.bn;
1438 event_count = cis->lll.event_count;
1439
1440 if (role) {
1441 /* Peripheral */
1442
1443 /* Check if this is the first payload received for this cis in
1444 * this event
1445 */
1446 if (meta->payload_number == (burst_number * event_count)) {
1447 /* Update the CIG reference point based on the CIS
1448 * anchor point
1449 */
1450 cig->cig_ref_point = isoal_get_wrapped_time_us(meta->timestamp,
1451 cis_sync_delay - cig_sync_delay);
1452 }
1453 }
1454 }
1455 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1456
1457 static void iso_rx_demux(void *param)
1458 {
1459 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1460 struct ll_conn_iso_stream *cis;
1461 struct ll_conn_iso_group *cig;
1462 struct ll_iso_datapath *dp;
1463 struct node_rx_pdu *rx_pdu;
1464 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1465 struct node_rx_hdr *rx;
1466 memq_link_t *link;
1467
1468 do {
1469 link = memq_peek(memq_ull_iso_rx.head, memq_ull_iso_rx.tail,
1470 (void **)&rx);
1471 if (link) {
1472 /* Demux Rx objects */
1473 switch (rx->type) {
1474 case NODE_RX_TYPE_RELEASE:
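/* Remove from the receive-queue and forward to the LL thread, which
 * releases the node and link back to their pools.
 */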
1475 (void)memq_dequeue(memq_ull_iso_rx.tail,
1476 &memq_ull_iso_rx.head, NULL);
1477 ll_iso_rx_put(link, rx);
1478 ll_rx_sched();
1479 break;
1480
1481 case NODE_RX_TYPE_ISO_PDU:
1482 /* Remove from receive-queue; ULL has received this now */
1483 (void)memq_dequeue(memq_ull_iso_rx.tail, &memq_ull_iso_rx.head,
1484 NULL);
1485
1486 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1487 rx_pdu = (struct node_rx_pdu *)rx;
1488 cis = ll_conn_iso_stream_get(rx_pdu->hdr.handle);
1489 cig = cis->group;
1490 dp = cis->hdr.datapath_out;
1491
1492 iso_rx_cig_ref_point_update(cig, cis, &rx_pdu->hdr.rx_iso_meta);
1493
1494 if (dp && dp->path_id != BT_HCI_DATAPATH_ID_HCI) {
1495 /* If vendor specific datapath pass to ISO AL here,
1496 * in case of HCI destination it will be passed in
1497 * HCI context.
1498 */
1499 struct isoal_pdu_rx pckt_meta = {
1500 .meta = &rx_pdu->hdr.rx_iso_meta,
1501 .pdu = (struct pdu_iso *)&rx_pdu->pdu[0]
1502 };
1503
1504 /* Pass the ISO PDU through ISO-AL */
1505 const isoal_status_t err =
1506 isoal_rx_pdu_recombine(dp->sink_hdl, &pckt_meta);
1507
1508 LL_ASSERT(err == ISOAL_STATUS_OK); /* TODO handle err */
1509 }
1510 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1511
1512 /* Let ISO PDU start its long journey upwards */
1513 ll_iso_rx_put(link, rx);
1514 ll_rx_sched();
1515 break;
1516
1517 default:
1518 LL_ASSERT(0);
1519 break;
1520 }
1521 }
1522 } while (link);
1523 }
1524 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1525
1526 void ll_iso_rx_put(memq_link_t *link, void *rx)
1527 {
1528 /* Enqueue the Rx object */
1529 memq_enqueue(link, rx, &memq_ll_iso_rx.tail);
1530 }
1531
1532 void *ll_iso_rx_get(void)
1533 {
1534 struct node_rx_hdr *rx;
1535 memq_link_t *link;
1536
1537 link = memq_peek(memq_ll_iso_rx.head, memq_ll_iso_rx.tail, (void **)&rx);
1538 while (link) {
1539 /* Do not send up buffers to Host thread that are
1540 * marked for release
1541 */
1542 if (rx->type == NODE_RX_TYPE_RELEASE) {
1543 (void)memq_dequeue(memq_ll_iso_rx.tail,
1544 &memq_ll_iso_rx.head, NULL);
1545 mem_release(link, &mem_link_iso_rx.free);
1546 mem_release(rx, &mem_iso_rx.free);
1547 RXFIFO_ALLOC(iso_rx, 1);
1548
1549 link = memq_peek(memq_ll_iso_rx.head, memq_ll_iso_rx.tail, (void **)&rx);
1550 continue;
1551 }
1552 return rx;
1553 }
1554
1555 return NULL;
1556 }
1557
1558 void ll_iso_rx_dequeue(void)
1559 {
1560 struct node_rx_hdr *rx = NULL;
1561 memq_link_t *link;
1562
1563 link = memq_dequeue(memq_ll_iso_rx.tail, &memq_ll_iso_rx.head,
1564 (void **)&rx);
1565 LL_ASSERT(link);
1566
1567 mem_release(link, &mem_link_iso_rx.free);
1568
1569 /* Handle object specific clean up */
1570 switch (rx->type) {
1571 case NODE_RX_TYPE_ISO_PDU:
1572 break;
1573 default:
1574 LL_ASSERT(0);
1575 break;
1576 }
1577 }
1578
1579 void ll_iso_rx_mem_release(void **node_rx)
1580 {
1581 struct node_rx_hdr *rx;
1582
1583 rx = *node_rx;
1584 while (rx) {
1585 struct node_rx_hdr *rx_free;
1586
1587 rx_free = rx;
1588 rx = rx->next;
1589
1590 switch (rx_free->type) {
1591 case NODE_RX_TYPE_ISO_PDU:
1592 mem_release(rx_free, &mem_iso_rx.free);
1593 break;
1594 default:
1595 /* Ignore other types as node may have been initialized due to
1596 * race with HCI reset.
1597 */
1598 break;
1599 }
1600 }
1601
1602 *node_rx = rx;
1603
1604 RXFIFO_ALLOC(iso_rx, UINT8_MAX);
1605 }
1606 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1607
1608 struct ll_iso_datapath *ull_iso_datapath_alloc(void)
1609 {
1610 return mem_acquire(&datapath_free);
1611 }
1612
1613 void ull_iso_datapath_release(struct ll_iso_datapath *dp)
1614 {
1615 mem_release(dp, &datapath_free);
1616 }
1617
1618 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1619 void ll_iso_link_tx_release(void *link)
1620 {
1621 mem_release(link, &mem_link_iso_tx.free);
1622 }
1623
1624 /**
1625 * Allocate a PDU from the LL and store the details in the given buffer. Allocation
1626 * is not expected to fail as there must always be sufficient PDU buffers. Any
1627 * failure will trigger the assert.
1628 * @param[in] pdu_buffer Buffer to store PDU details in
1629 * @return Error status of operation
1630 */
1631 static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer)
1632 {
1633 struct node_tx_iso *node_tx;
1634
1635 node_tx = ll_iso_tx_mem_acquire();
1636 if (!node_tx) {
1637 LOG_ERR("Tx Buffer Overflow");
1638 /* TODO: Report overflow to HCI and remove assert
1639 * data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO)
1640 */
1641 LL_ASSERT(0);
1642 return ISOAL_STATUS_ERR_PDU_ALLOC;
1643 }
1644
1645 node_tx->link = NULL;
1646
1647 /* node_tx handle will be required to emit the PDU later */
1648 pdu_buffer->handle = (void *)node_tx;
1649 pdu_buffer->pdu = (void *)node_tx->pdu;
1650
1651 /* Use TX buffer size as the limit here. Actual size will be decided in
1652 * the ISOAL based on the minimum of the buffer size and the respective
1653 * Max_PDU_C_To_P or Max_PDU_P_To_C.
1654 */
1655 pdu_buffer->size = MAX(LL_BIS_OCTETS_TX_MAX, LL_CIS_OCTETS_TX_MAX);
1656
1657 return ISOAL_STATUS_OK;
1658 }
1659
1660 /**
1661 * Write the given SDU payload to the target PDU buffer at the given offset.
1662 * @param[in,out] pdu_buffer Target PDU buffer
1663 * @param[in] pdu_offset Offset / current write position within PDU
1664 * @param[in] sdu_payload Location of source data
1665 * @param[in] consume_len Length of data to copy
1666 * @return Error status of write operation
1667 */
1668 static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
1669 const size_t pdu_offset,
1670 const uint8_t *sdu_payload,
1671 const size_t consume_len)
1672 {
1673 ARG_UNUSED(pdu_offset);
1674 ARG_UNUSED(consume_len);
1675
1676 LL_ASSERT(pdu_buffer);
1677 LL_ASSERT(pdu_buffer->pdu);
1678 LL_ASSERT(sdu_payload);
1679
1680 if ((pdu_offset + consume_len) > pdu_buffer->size) {
1681 /* Exceeded PDU buffer */
1682 return ISOAL_STATUS_ERR_UNSPECIFIED;
1683 }
1684
1685 /* Copy source to destination at given offset */
1686 memcpy(&pdu_buffer->pdu->payload[pdu_offset], sdu_payload, consume_len);
1687
1688 return ISOAL_STATUS_OK;
1689 }
1690
1691 /**
1692 * Emit the encoded node to the transmission queue
1693 * @param node_tx TX node to enqueue
1694 * @param handle CIS/BIS handle
1695 * @return Error status of enqueue operation
1696 */
1697 static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
1698 const uint16_t handle)
1699 {
1700 memq_link_t *link;
1701
1702 link = mem_acquire(&mem_link_iso_tx.free);
1703 LL_ASSERT(link);
1704
1705 if (ll_iso_tx_mem_enqueue(handle, node_tx, link)) {
1706 return ISOAL_STATUS_ERR_PDU_EMIT;
1707 }
1708
1709 return ISOAL_STATUS_OK;
1710 }
1711
1712 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1713 /**
1714 * Release the given payload back to the memory pool.
1715 * @param node_tx TX node to release or forward
1716 * @param handle CIS/BIS handle
1717 * @param status Reason for release
1718 * @return Error status of release operation
1719 */
1720 static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
1721 const uint16_t handle,
1722 const isoal_status_t status)
1723 {
1724 if (status == ISOAL_STATUS_OK) {
1725 /* Process as TX ack, we are in LLL execution context here.
1726 * status == ISOAL_STATUS_OK when an ISO PDU has been acked.
1727 *
1728 * Call Path:
1729 * ull_iso_lll_ack_enqueue() --> isoal_tx_pdu_release() -->
1730 * pdu_release() == ll_iso_pdu_release() (this function).
1731 */
1732 /* FIXME: ll_tx_ack_put is not LLL callable as it is used by
1733 * ACL connections in ULL context to dispatch ack.
1734 */
1735 ll_tx_ack_put(handle, (void *)node_tx);
1736 ll_rx_sched();
1737 } else {
1738 /* Release back to memory pool, we are in Thread context
1739 * Callers:
1740 * isoal_source_deallocate() with ISOAL_STATUS_ERR_PDU_EMIT
1741 * isoal_tx_pdu_emit with status != ISOAL_STATUS_OK
1742 */
1743 if (node_tx->link) {
1744 ll_iso_link_tx_release(node_tx->link);
1745 }
1746 ll_iso_tx_mem_release(node_tx);
1747 }
1748
1749 return ISOAL_STATUS_OK;
1750 }
1751 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1752 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1753
1754 static int init_reset(void)
1755 {
1756 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1757 memq_link_t *link;
1758
1759 RXFIFO_INIT(iso_rx);
1760
1761 /* Acquire a link to initialize ull rx memq */
1762 link = mem_acquire(&mem_link_iso_rx.free);
1763 LL_ASSERT(link);
1764
1765 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
1766 /* Initialize ull rx memq */
1767 MEMQ_INIT(ull_iso_rx, link);
1768 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1769
1770 /* Acquire a link to initialize ll_iso_rx memq */
1771 link = mem_acquire(&mem_link_iso_rx.free);
1772 LL_ASSERT(link);
1773
1774 /* Initialize ll_iso_rx memq */
1775 MEMQ_INIT(ll_iso_rx, link);
1776
1777 RXFIFO_ALLOC(iso_rx, UINT8_MAX);
1778 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1779
1780 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1781 /* Initialize tx pool. */
1782 mem_init(mem_iso_tx.pool, NODE_TX_BUFFER_SIZE, BT_CTLR_ISO_TX_BUFFERS,
1783 &mem_iso_tx.free);
1784
1785 /* Initialize tx link pool. */
1786 mem_init(mem_link_iso_tx.pool, sizeof(memq_link_t),
1787 BT_CTLR_ISO_TX_BUFFERS, &mem_link_iso_tx.free);
1788 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1789
1790 #if BT_CTLR_ISO_STREAMS
1791 /* Initialize ISO Datapath pool */
1792 mem_init(datapath_pool, sizeof(struct ll_iso_datapath),
1793 sizeof(datapath_pool) / sizeof(struct ll_iso_datapath), &datapath_free);
1794 #endif /* BT_CTLR_ISO_STREAMS */
1795
1796 /* Initialize ISO Adaptation Layer */
1797 isoal_init();
1798
1799 return 0;
1800 }
1801