1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <soc.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 #include <zephyr/bluetooth/buf.h>
12
13 #include "hal/cpu.h"
14 #include "hal/ccm.h"
15 #include "hal/ticker.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/mfifo.h"
21 #include "util/mayfly.h"
22 #include "util/dbuf.h"
23
24 #include "ticker/ticker.h"
25
26 #include "pdu_df.h"
27 #include "lll/pdu_vendor.h"
28 #include "pdu.h"
29
30 #include "lll.h"
31 #include "lll/lll_adv_types.h"
32 #include "lll_adv.h"
33 #include "lll/lll_adv_pdu.h"
34 #include "lll_adv_iso.h"
35 #include "lll/lll_df_types.h"
36 #include "lll_sync.h"
37 #include "lll_sync_iso.h"
38 #include "lll_conn.h"
39 #include "lll_conn_iso.h"
40 #include "lll_iso_tx.h"
41 #include "lll/lll_vendor.h"
42
43 #include "ll_sw/ull_tx_queue.h"
44
45 #include "isoal.h"
46
47 #include "ull_adv_types.h"
48 #include "ull_sync_types.h"
49 #include "ull_conn_types.h"
50 #include "ull_iso_types.h"
51 #include "ull_conn_iso_types.h"
52 #include "ull_llcp.h"
53
54 #include "ull_internal.h"
55 #include "ull_adv_internal.h"
56 #include "ull_conn_internal.h"
57 #include "ull_iso_internal.h"
58 #include "ull_sync_iso_internal.h"
59 #include "ull_conn_iso_internal.h"
60
61 #include "ll_feat.h"
62
63 #include "hal/debug.h"
64
65 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
66 #include <zephyr/logging/log.h>
67 LOG_MODULE_REGISTER(bt_ctlr_ull_iso);
68
69 #if defined(CONFIG_BT_CTLR_CONN_ISO_STREAMS)
70 #define BT_CTLR_CONN_ISO_STREAMS CONFIG_BT_CTLR_CONN_ISO_STREAMS
71 #else /* !CONFIG_BT_CTLR_CONN_ISO_STREAMS */
72 #define BT_CTLR_CONN_ISO_STREAMS 0
73 #endif /* !CONFIG_BT_CTLR_CONN_ISO_STREAMS */
74
75 #if defined(CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT)
76 #define BT_CTLR_ADV_ISO_STREAMS (CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT)
77 #else /* !CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT */
78 #define BT_CTLR_ADV_ISO_STREAMS 0
79 #endif /* CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT */
80
81 #if defined(CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT)
82 #define BT_CTLR_SYNC_ISO_STREAMS (CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT)
83 #else /* !CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT */
84 #define BT_CTLR_SYNC_ISO_STREAMS 0
85 #endif /* CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT */
86
87 static int init_reset(void);
88
89 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
90 static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer);
91 static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
92 const size_t offset,
93 const uint8_t *sdu_payload,
94 const size_t consume_len);
95 static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
96 const uint16_t handle);
97 static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
98 const uint16_t handle,
99 const isoal_status_t status);
100 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
101
102 /* Allocate data path pools for RX/TX directions for each stream */
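/* A CIS needs one datapath entry per direction (input and output), hence two
 * entries per connected stream; broadcast (ADV/SYNC ISO) streams are
 * unidirectional and need one entry each.
 */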
103 #define BT_CTLR_ISO_STREAMS ((2 * (BT_CTLR_CONN_ISO_STREAMS)) + \
104 BT_CTLR_ADV_ISO_STREAMS + \
105 BT_CTLR_SYNC_ISO_STREAMS)
106 #if BT_CTLR_ISO_STREAMS
107 static struct ll_iso_datapath datapath_pool[BT_CTLR_ISO_STREAMS];
108 #endif /* BT_CTLR_ISO_STREAMS */
109
110 static void *datapath_free;
111
112 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
113 static void ticker_resume_op_cb(uint32_t status, void *param);
114 static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
115 uint32_t remainder, uint16_t lazy, uint8_t force,
116 void *param);
117
118 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
119 /* ISO LL conformance tests require a PDU size of maximum 251 bytes + header */
120 #define ISO_RX_BUFFER_SIZE (2 + 251)
121
122 /* Declare the ISO rx node RXFIFO. This is a composite pool-backed MFIFO for
123 * rx_nodes. The declaration constructs the following data structures:
124 * - mfifo_iso_rx: FIFO with pointers to PDU buffers
125 * - mem_iso_rx: Backing data pool for PDU buffer elements
126 * - mem_link_iso_rx: Pool of memq_link_t elements
127 *
128 * One extra rx buffer is reserved for empty ISO PDU reception.
129 * Two extra links are reserved for use by the ll_iso_rx and ull_iso_rx memq.
130 */
131 static RXFIFO_DEFINE(iso_rx, ((NODE_RX_HEADER_SIZE) + (ISO_RX_BUFFER_SIZE)),
132 (CONFIG_BT_CTLR_ISO_RX_BUFFERS + 1U), 2U);
133
134 static MEMQ_DECLARE(ll_iso_rx);
135 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
136 static MEMQ_DECLARE(ull_iso_rx);
137 static void iso_rx_demux(void *param);
138 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
139 #endif /* CONFIG_BT_CTLR_SYNC_ISO) || CONFIG_BT_CTLR_CONN_ISO */
140
141 #define ISO_TEST_PACKET_COUNTER_SIZE 4U
142
143 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
144 void ll_iso_link_tx_release(void *link);
145 void ll_iso_tx_mem_release(void *node_tx);
146
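/* TX node size covers the node_tx_iso and pdu_iso headers plus the larger of
 * the BIS and CIS maximum TX payloads, rounded up by MROUND() for alignment.
 */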
147 #define NODE_TX_BUFFER_SIZE MROUND(offsetof(struct node_tx_iso, pdu) + \
148 offsetof(struct pdu_iso, payload) + \
149 MAX(LL_BIS_OCTETS_TX_MAX, \
150 LL_CIS_OCTETS_TX_MAX))
151
152 #define ISO_TEST_TX_BUFFER_SIZE 32U
153
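/* Pools of ISO TX nodes and matching memq links, shared by connected ISO (CIS)
 * and broadcast ISO (BIS) transmitters.
 */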
154 static struct {
155 void *free;
156 uint8_t pool[NODE_TX_BUFFER_SIZE * BT_CTLR_ISO_TX_BUFFERS];
157 } mem_iso_tx;
158
159 static struct {
160 void *free;
161 uint8_t pool[sizeof(memq_link_t) * BT_CTLR_ISO_TX_BUFFERS];
162 } mem_link_iso_tx;
163
164 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
165
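/* Read the TX synchronization information (packet sequence number, timestamp
 * and time offset) of the ISO data path for a CIS or BIS handle; used by the
 * HCI LE Read ISO TX Sync command handling.
 */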
uint8_t ll_read_iso_tx_sync(uint16_t handle, uint16_t *seq,
			    uint32_t *timestamp, uint32_t *offset)
168 {
169 if (IS_CIS_HANDLE(handle)) {
170 struct ll_iso_datapath *dp = NULL;
171 struct ll_conn_iso_stream *cis;
172
173 cis = ll_conn_iso_stream_get(handle);
174
175 if (cis) {
176 dp = cis->hdr.datapath_in;
177 }
178
179 if (dp &&
180 isoal_tx_get_sync_info(dp->source_hdl, seq,
181 timestamp, offset) == ISOAL_STATUS_OK) {
182 return BT_HCI_ERR_SUCCESS;
183 }
184
185 return BT_HCI_ERR_CMD_DISALLOWED;
186
187 } else if (IS_ADV_ISO_HANDLE(handle)) {
188 const struct lll_adv_iso_stream *adv_stream;
189 uint16_t stream_handle;
190
191 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
192 adv_stream = ull_adv_iso_stream_get(stream_handle);
193 if (!adv_stream || !adv_stream->dp ||
194 isoal_tx_get_sync_info(adv_stream->dp->source_hdl, seq,
195 timestamp, offset) != ISOAL_STATUS_OK) {
196 return BT_HCI_ERR_CMD_DISALLOWED;
197 }
198
199 return BT_HCI_ERR_SUCCESS;
200
201 } else if (IS_SYNC_ISO_HANDLE(handle)) {
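		/* A Synchronized Receiver has no ISO TX data path, so there is
		 * no TX sync information to report.
		 */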
202 return BT_HCI_ERR_CMD_DISALLOWED;
203 }
204
205 return BT_HCI_ERR_UNKNOWN_CONN_ID;
206 }
207
static inline bool path_is_vendor_specific(uint8_t path_id)
209 {
210 return (path_id >= BT_HCI_DATAPATH_ID_VS &&
211 path_id <= BT_HCI_DATAPATH_ID_VS_END);
212 }
213
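/* Set up an ISO data path (HCI or vendor specific) for a CIS or BIS handle,
 * creating an ISO-AL sink for the controller-to-host direction or an ISO-AL
 * source for the host-to-controller direction.
 */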
uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
			  uint8_t coding_format, uint16_t company_id,
			  uint16_t vs_codec_id, uint32_t controller_delay,
			  uint8_t codec_config_len, uint8_t *codec_config)
218 {
219 struct lll_sync_iso_stream *sync_stream = NULL;
220 struct lll_adv_iso_stream *adv_stream = NULL;
221 struct ll_conn_iso_stream *cis = NULL;
222 struct ll_iso_datapath *dp;
223 uint32_t stream_sync_delay;
224 uint32_t group_sync_delay;
225 uint8_t flush_timeout;
226 uint16_t iso_interval;
227 uint32_t sdu_interval;
228 uint8_t burst_number;
229 uint8_t max_octets;
230 uint8_t framed;
231 uint8_t role;
232
233 ARG_UNUSED(controller_delay);
234 ARG_UNUSED(codec_config);
235
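	/* The if (false) below lets the conditionally compiled handle types
	 * chain as else-if branches without leaving an empty statement when a
	 * given role is not enabled.
	 */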
236 if (false) {
237
238 #if defined(CONFIG_BT_CTLR_CONN_ISO)
239 } else if (IS_CIS_HANDLE(handle)) {
240 struct ll_conn_iso_group *cig;
241 struct ll_conn *conn;
242
243 /* If the Host attempts to set a data path with a Connection
244 * Handle that does not exist or that is not for a CIS or a BIS,
245 * the Controller shall return the error code Unknown Connection
246 * Identifier (0x02)
247 */
248 cis = ll_conn_iso_stream_get(handle);
249 if (!cis || !cis->group) {
250 /* CIS does not belong to a CIG */
251 return BT_HCI_ERR_UNKNOWN_CONN_ID;
252 }
253
254 conn = ll_connected_get(cis->lll.acl_handle);
255 if (conn) {
256 /* If we're still waiting for accept/response from
257 * host, path setup is premature and we must return
258 * disallowed status.
259 */
260 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
261 const uint8_t cis_waiting = ull_cp_cc_awaiting_reply(conn);
262
263 if (cis_waiting) {
264 return BT_HCI_ERR_CMD_DISALLOWED;
265 }
266 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
267 }
268
269 if ((path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR && cis->hdr.datapath_in) ||
270 (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST && cis->hdr.datapath_out)) {
271 /* Data path has been set up, can only do setup once */
272 return BT_HCI_ERR_CMD_DISALLOWED;
273 }
274
275 cig = cis->group;
276
277 role = cig->lll.role;
278 iso_interval = cig->iso_interval;
279 group_sync_delay = cig->sync_delay;
280 stream_sync_delay = cis->sync_delay;
281 framed = cis->framed;
282
283 if (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) {
284 /* Create sink for RX data path */
285 burst_number = cis->lll.rx.bn;
286 flush_timeout = cis->lll.rx.ft;
287 max_octets = cis->lll.rx.max_pdu;
288
289 if (role) {
290 /* peripheral */
291 sdu_interval = cig->c_sdu_interval;
292 } else {
293 /* central */
294 sdu_interval = cig->p_sdu_interval;
295 }
296 } else {
297 /* path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR */
298 burst_number = cis->lll.tx.bn;
299 flush_timeout = cis->lll.tx.ft;
300 max_octets = cis->lll.tx.max_pdu;
301
302 if (role) {
303 /* peripheral */
304 sdu_interval = cig->p_sdu_interval;
305 } else {
306 /* central */
307 sdu_interval = cig->c_sdu_interval;
308 }
309 }
310 #endif /* CONFIG_BT_CTLR_CONN_ISO */
311
312 #if defined(CONFIG_BT_CTLR_ADV_ISO)
313 } else if (IS_ADV_ISO_HANDLE(handle)) {
314 struct ll_adv_iso_set *adv_iso;
315 struct lll_adv_iso *lll_iso;
316 uint16_t stream_handle;
317
318 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
319 adv_stream = ull_adv_iso_stream_get(stream_handle);
320 if (!adv_stream || adv_stream->dp) {
321 return BT_HCI_ERR_CMD_DISALLOWED;
322 }
323
324 adv_iso = ull_adv_iso_by_stream_get(stream_handle);
325 lll_iso = &adv_iso->lll;
326
327 role = ISOAL_ROLE_BROADCAST_SOURCE;
328 iso_interval = lll_iso->iso_interval;
329 sdu_interval = lll_iso->sdu_interval;
330 burst_number = lll_iso->bn;
331 flush_timeout = 0U; /* Not used for Broadcast ISO */
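		/* Each subsequent BIS starts bis_spacing later than the
		 * previous one, so the per-stream sync delay is the BIG sync
		 * delay reduced by the stream's offset within the BIG event.
		 */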
332 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
333 lll_iso->nse, lll_iso->sub_interval,
334 lll_iso->phy, lll_iso->max_pdu,
335 lll_iso->enc);
336 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
337 framed = lll_iso->framing;
338 max_octets = lll_iso->max_pdu;
339 #endif /* CONFIG_BT_CTLR_ADV_ISO */
340
341 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
342 } else if (IS_SYNC_ISO_HANDLE(handle)) {
343 struct ll_sync_iso_set *sync_iso;
344 struct lll_sync_iso *lll_iso;
345 uint16_t stream_handle;
346
347 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
348 sync_stream = ull_sync_iso_stream_get(stream_handle);
349 if (!sync_stream || sync_stream->dp) {
350 return BT_HCI_ERR_CMD_DISALLOWED;
351 }
352
353 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
354 lll_iso = &sync_iso->lll;
355
356 role = ISOAL_ROLE_BROADCAST_SINK;
357 iso_interval = lll_iso->iso_interval;
358 sdu_interval = lll_iso->sdu_interval;
359 burst_number = lll_iso->bn;
360
361 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
362 lll_iso->nse, lll_iso->sub_interval,
363 lll_iso->phy, lll_iso->max_pdu,
364 lll_iso->enc);
365 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
366 framed = lll_iso->framing;
367 max_octets = lll_iso->max_pdu;
368 flush_timeout = 0U; /* Not used for Broadcast ISO */
369 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
370
371 } else {
372 return BT_HCI_ERR_UNKNOWN_CONN_ID;
373 }
374
375 if (path_is_vendor_specific(path_id) &&
376 (!IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) ||
377 !ll_data_path_configured(path_dir, path_id))) {
378 /* Data path must be configured prior to setup */
379 return BT_HCI_ERR_CMD_DISALLOWED;
380 }
381
382 /* If Codec_Configuration_Length non-zero and Codec_ID set to
383 * transparent air mode, the Controller shall return the error code
384 * Invalid HCI Command Parameters (0x12).
385 */
386 if (codec_config_len &&
387 (vs_codec_id == BT_HCI_CODING_FORMAT_TRANSPARENT)) {
388 return BT_HCI_ERR_INVALID_PARAM;
389 }
390
391 /* Allocate and configure datapath */
392 dp = mem_acquire(&datapath_free);
393 if (!dp) {
394 return BT_HCI_ERR_CMD_DISALLOWED;
395 }
396
397 dp->path_dir = path_dir;
398 dp->path_id = path_id;
399 dp->coding_format = coding_format;
400 dp->company_id = company_id;
401
402 /* TODO dp->sync_delay = controller_delay; ?*/
403
404 if (false) {
405
406 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
407 } else if ((path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) &&
408 (cis || sync_stream)) {
409 isoal_sink_handle_t sink_handle;
410 isoal_status_t err;
411
412 if (path_id == BT_HCI_DATAPATH_ID_HCI) {
413 /* Not vendor specific, thus alloc and emit functions
414 * known
415 */
416 err = isoal_sink_create(handle, role, framed,
417 burst_number, flush_timeout,
418 sdu_interval, iso_interval,
419 stream_sync_delay,
420 group_sync_delay,
421 sink_sdu_alloc_hci,
422 sink_sdu_emit_hci,
423 sink_sdu_write_hci,
424 &sink_handle);
425 } else {
426 /* Set up vendor specific data path */
427 isoal_sink_sdu_alloc_cb sdu_alloc;
428 isoal_sink_sdu_emit_cb sdu_emit;
429 isoal_sink_sdu_write_cb sdu_write;
430
431 /* Request vendor sink callbacks for path */
432 if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) &&
433 ll_data_path_sink_create(handle, dp, &sdu_alloc,
434 &sdu_emit, &sdu_write)) {
435 err = isoal_sink_create(handle, role, framed,
436 burst_number,
437 flush_timeout,
438 sdu_interval,
439 iso_interval,
440 stream_sync_delay,
441 group_sync_delay,
442 sdu_alloc, sdu_emit,
443 sdu_write,
444 &sink_handle);
445 } else {
446 ull_iso_datapath_release(dp);
447
448 return BT_HCI_ERR_CMD_DISALLOWED;
449 }
450 }
451
452 if (!err) {
453 if (cis) {
454 cis->hdr.datapath_out = dp;
455 }
456
457 if (sync_stream) {
458 sync_stream->dp = dp;
459 }
460
461 dp->sink_hdl = sink_handle;
462 isoal_sink_enable(sink_handle);
463 } else {
464 ull_iso_datapath_release(dp);
465
466 return BT_HCI_ERR_CMD_DISALLOWED;
467 }
468 #else /* !CONFIG_BT_CTLR_SYNC_ISO && !CONFIG_BT_CTLR_CONN_ISO */
469 ARG_UNUSED(sync_stream);
470 #endif /* !CONFIG_BT_CTLR_SYNC_ISO && !CONFIG_BT_CTLR_CONN_ISO */
471
472 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
473 } else if ((path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR) &&
474 (cis || adv_stream)) {
475 isoal_source_handle_t source_handle;
476 isoal_status_t err;
477
478 /* Create source for TX data path */
479 isoal_source_pdu_alloc_cb pdu_alloc;
480 isoal_source_pdu_write_cb pdu_write;
481 isoal_source_pdu_emit_cb pdu_emit;
482 isoal_source_pdu_release_cb pdu_release;
483
484 if (path_is_vendor_specific(path_id)) {
485 if (!IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) ||
486 !ll_data_path_source_create(handle, dp,
487 &pdu_alloc, &pdu_write,
488 &pdu_emit,
489 &pdu_release)) {
490 ull_iso_datapath_release(dp);
491
492 return BT_HCI_ERR_CMD_DISALLOWED;
493 }
494 } else {
			/* Use the default callbacks when the path is not
			 * vendor specific, or when the vendor-specific path
			 * uses the same callbacks.
			 */
498 pdu_alloc = ll_iso_pdu_alloc;
499 pdu_write = ll_iso_pdu_write;
500 pdu_emit = ll_iso_pdu_emit;
501 pdu_release = ll_iso_pdu_release;
502 }
503
504 err = isoal_source_create(handle, role, framed, burst_number,
505 flush_timeout, max_octets,
506 sdu_interval, iso_interval,
507 stream_sync_delay, group_sync_delay,
508 pdu_alloc, pdu_write, pdu_emit,
509 pdu_release, &source_handle);
510
511 if (!err) {
512 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && cis != NULL) {
513 cis->hdr.datapath_in = dp;
514 }
515
516 if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && adv_stream != NULL) {
517 adv_stream->dp = dp;
518 }
519
520 dp->source_hdl = source_handle;
521 isoal_source_enable(source_handle);
522 } else {
523 ull_iso_datapath_release(dp);
524
525 return BT_HCI_ERR_CMD_DISALLOWED;
526 }
527
528 #else /* !CONFIG_BT_CTLR_ADV_ISO && !CONFIG_BT_CTLR_CONN_ISO */
529 ARG_UNUSED(adv_stream);
530 #endif /* !CONFIG_BT_CTLR_ADV_ISO && !CONFIG_BT_CTLR_CONN_ISO */
531
532 } else {
533 return BT_HCI_ERR_CMD_DISALLOWED;
534 }
535
536 return BT_HCI_ERR_SUCCESS;
537 }
538
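/* Remove the ISO data path(s) indicated by the path direction bit mask for a
 * CIS or BIS handle, destroying the associated ISO-AL sink/source.
 */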
uint8_t ll_remove_iso_path(uint16_t handle, uint8_t path_dir)
540 {
541 /* If the Host issues this command with a Connection_Handle that does
542 * not exist or is not for a CIS or a BIS, the Controller shall return
543 * the error code Unknown Connection Identifier (0x02).
544 */
545 if (false) {
546
547 #if defined(CONFIG_BT_CTLR_CONN_ISO)
548 } else if (IS_CIS_HANDLE(handle)) {
549 struct ll_conn_iso_stream *cis;
550 struct ll_iso_stream_hdr *hdr;
551 struct ll_iso_datapath *dp;
552
553 cis = ll_conn_iso_stream_get(handle);
554 hdr = &cis->hdr;
555
556 if (path_dir & BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR)) {
557 dp = hdr->datapath_in;
558 if (dp) {
559 isoal_source_destroy(dp->source_hdl);
560
561 hdr->datapath_in = NULL;
562 ull_iso_datapath_release(dp);
563 } else {
564 /* Datapath was not previously set up */
565 return BT_HCI_ERR_CMD_DISALLOWED;
566 }
567 }
568
569 if (path_dir & BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST)) {
570 dp = hdr->datapath_out;
571 if (dp) {
572 isoal_sink_destroy(dp->sink_hdl);
573
574 hdr->datapath_out = NULL;
575 ull_iso_datapath_release(dp);
576 } else {
577 /* Datapath was not previously set up */
578 return BT_HCI_ERR_CMD_DISALLOWED;
579 }
580 }
581 #endif /* CONFIG_BT_CTLR_CONN_ISO */
582
583 #if defined(CONFIG_BT_CTLR_ADV_ISO)
584 } else if (IS_ADV_ISO_HANDLE(handle)) {
585 struct lll_adv_iso_stream *adv_stream;
586 struct ll_iso_datapath *dp;
587 uint16_t stream_handle;
588
589 if (!(path_dir & BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR))) {
590 return BT_HCI_ERR_CMD_DISALLOWED;
591 }
592
593 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
594 adv_stream = ull_adv_iso_stream_get(stream_handle);
595 if (!adv_stream) {
596 return BT_HCI_ERR_CMD_DISALLOWED;
597 }
598
599 dp = adv_stream->dp;
600 if (dp) {
601 adv_stream->dp = NULL;
602 isoal_source_destroy(dp->source_hdl);
603 ull_iso_datapath_release(dp);
604 } else {
605 /* Datapath was not previously set up */
606 return BT_HCI_ERR_CMD_DISALLOWED;
607 }
608 #endif /* CONFIG_BT_CTLR_ADV_ISO */
609
610 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
611 } else if (IS_SYNC_ISO_HANDLE(handle)) {
612 struct lll_sync_iso_stream *sync_stream;
613 struct ll_iso_datapath *dp;
614 uint16_t stream_handle;
615
616 if (!(path_dir & BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST))) {
617 return BT_HCI_ERR_CMD_DISALLOWED;
618 }
619
620 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
621 sync_stream = ull_sync_iso_stream_get(stream_handle);
622 if (!sync_stream) {
623 return BT_HCI_ERR_CMD_DISALLOWED;
624 }
625
626 dp = sync_stream->dp;
627 if (dp) {
628 sync_stream->dp = NULL;
629 isoal_sink_destroy(dp->sink_hdl);
630 ull_iso_datapath_release(dp);
631 } else {
632 /* Datapath was not previously set up */
633 return BT_HCI_ERR_CMD_DISALLOWED;
634 }
635 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
636
637 } else {
638 return BT_HCI_ERR_CMD_DISALLOWED;
639 }
640
641 return 0;
642 }
643
644 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
/* The sdu_alloc function is called before combining PDUs into an SDU. Here we
 * store the payload number associated with the first PDU, for the unframed
 * use case.
 */
static isoal_status_t ll_iso_test_sdu_alloc(const struct isoal_sink *sink_ctx,
					    const struct isoal_pdu_rx *valid_pdu,
					    struct isoal_sdu_buffer *sdu_buffer)
651 {
652 uint16_t handle;
653
654 handle = sink_ctx->session.handle;
655
656 if (IS_CIS_HANDLE(handle)) {
657 if (!sink_ctx->session.framed) {
658 struct ll_conn_iso_stream *cis;
659
660 cis = ll_iso_stream_connected_get(sink_ctx->session.handle);
661 LL_ASSERT(cis);
662
663 /* For unframed, SDU counter is the payload number */
664 cis->hdr.test_mode.rx.sdu_counter =
665 (uint32_t)valid_pdu->meta->payload_number;
666 }
667 } else if (IS_SYNC_ISO_HANDLE(handle)) {
668 if (!sink_ctx->session.framed) {
669 struct lll_sync_iso_stream *sync_stream;
670 uint16_t stream_handle;
671
672 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
673 sync_stream = ull_sync_iso_stream_get(stream_handle);
674 LL_ASSERT(sync_stream);
675
676 sync_stream->test_mode->sdu_counter =
677 (uint32_t)valid_pdu->meta->payload_number;
678 }
679 }
680
681 return sink_sdu_alloc_hci(sink_ctx, valid_pdu, sdu_buffer);
682 }
683
684 /* The sdu_emit function is called whenever an SDU is combined and ready to be sent
685 * further in the data path. This injected implementation performs statistics on
686 * the SDU and then discards it.
687 */
static isoal_status_t ll_iso_test_sdu_emit(const struct isoal_sink *sink_ctx,
					   const struct isoal_emitted_sdu_frag *sdu_frag,
					   const struct isoal_emitted_sdu *sdu)
691 {
692 struct ll_iso_rx_test_mode *test_mode_rx;
693 isoal_sdu_len_t length;
694 isoal_status_t status;
695 struct net_buf *buf;
696 uint32_t sdu_counter;
697 uint16_t max_sdu;
698 uint16_t handle;
699 uint8_t framed;
700
701 handle = sink_ctx->session.handle;
702 buf = (struct net_buf *)sdu_frag->sdu.contents.dbuf;
703
704 if (IS_CIS_HANDLE(handle)) {
705 struct ll_conn_iso_stream *cis;
706
707 cis = ll_iso_stream_connected_get(sink_ctx->session.handle);
708 LL_ASSERT(cis);
709
710 test_mode_rx = &cis->hdr.test_mode.rx;
711 max_sdu = cis->c_max_sdu;
712 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
713 } else if (IS_SYNC_ISO_HANDLE(handle)) {
714 struct lll_sync_iso_stream *sync_stream;
715 struct ll_sync_iso_set *sync_iso;
716 uint16_t stream_handle;
717
718 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
719 sync_stream = ull_sync_iso_stream_get(stream_handle);
720 LL_ASSERT(sync_stream);
721
722 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
723
724 test_mode_rx = sync_stream->test_mode;
725 max_sdu = sync_iso->lll.max_sdu;
726 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
727 } else {
728 /* Handle is out of range */
729 status = ISOAL_STATUS_ERR_SDU_EMIT;
730 net_buf_unref(buf);
731
732 return status;
733 }
734
735 length = sink_ctx->sdu_production.sdu_written;
736 framed = sink_ctx->session.framed;
737
738 /* In BT_HCI_ISO_TEST_ZERO_SIZE_SDU mode, all SDUs must have length 0 and there is
739 * no sdu_counter field. In the other modes, the first 4 bytes must contain a
740 * packet counter, which is used as SDU counter. The sdu_counter is extracted
741 * regardless of mode as a sanity check, unless the length does not allow it.
742 */
743 if (length >= ISO_TEST_PACKET_COUNTER_SIZE) {
744 sdu_counter = sys_get_le32(buf->data);
745 } else {
746 sdu_counter = 0U;
747 }
748
749 switch (sdu_frag->sdu.status) {
750 case ISOAL_SDU_STATUS_VALID:
751 if (framed && test_mode_rx->sdu_counter == 0U) {
752 /* BT 5.3, Vol 6, Part B, section 7.2:
753 * When using framed PDUs the expected value of the SDU counter
754 * shall be initialized with the value of the SDU counter of the
755 * first valid received SDU.
756 */
757 test_mode_rx->sdu_counter = sdu_counter;
758 }
759
760 switch (test_mode_rx->payload_type) {
761 case BT_HCI_ISO_TEST_ZERO_SIZE_SDU:
762 if (length == 0) {
763 test_mode_rx->received_cnt++;
764 } else {
765 test_mode_rx->failed_cnt++;
766 }
767 break;
768
769 case BT_HCI_ISO_TEST_VARIABLE_SIZE_SDU:
770 if ((length >= ISO_TEST_PACKET_COUNTER_SIZE) &&
771 (length <= max_sdu) &&
772 (sdu_counter == test_mode_rx->sdu_counter)) {
773 test_mode_rx->received_cnt++;
774 } else {
775 test_mode_rx->failed_cnt++;
776 }
777 break;
778
779 case BT_HCI_ISO_TEST_MAX_SIZE_SDU:
780 if ((length == max_sdu) &&
781 (sdu_counter == test_mode_rx->sdu_counter)) {
782 test_mode_rx->received_cnt++;
783 } else {
784 test_mode_rx->failed_cnt++;
785 }
786 break;
787
788 default:
789 LL_ASSERT(0);
790 return ISOAL_STATUS_ERR_SDU_EMIT;
791 }
792 break;
793
794 case ISOAL_SDU_STATUS_ERRORS:
795 case ISOAL_SDU_STATUS_LOST_DATA:
796 test_mode_rx->missed_cnt++;
797 break;
798 }
799
800 /* In framed mode, we may start incrementing the SDU counter when rx_sdu_counter
801 * becomes non zero (initial state), or in case of zero-based counting, if zero
802 * is actually the first valid SDU counter received.
803 */
804 if (framed && (test_mode_rx->sdu_counter ||
805 (sdu_frag->sdu.status == ISOAL_SDU_STATUS_VALID))) {
806 test_mode_rx->sdu_counter++;
807 }
808
809 status = ISOAL_STATUS_OK;
810 net_buf_unref(buf);
811
812 return status;
813 }
814
uint8_t ll_iso_receive_test(uint16_t handle, uint8_t payload_type)
816 {
817 struct ll_iso_rx_test_mode *test_mode_rx;
818 isoal_sink_handle_t sink_handle;
819 struct ll_iso_datapath *dp;
820 uint32_t sdu_interval;
821 isoal_status_t err;
822
823 struct ll_iso_datapath **stream_dp;
824
825 uint32_t stream_sync_delay;
826 uint32_t group_sync_delay;
827 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
828 uint16_t stream_handle;
829 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
830 uint16_t iso_interval;
831 uint8_t framed;
832 uint8_t role;
833 uint8_t ft;
834 uint8_t bn;
835
836 if (IS_CIS_HANDLE(handle)) {
837 struct ll_conn_iso_stream *cis;
838 struct ll_conn_iso_group *cig;
839
840 cis = ll_iso_stream_connected_get(handle);
841 if (!cis) {
842 /* CIS is not connected */
843 return BT_HCI_ERR_UNKNOWN_CONN_ID;
844 }
845
846 if (cis->lll.rx.bn == 0) {
847 /* CIS is not configured for RX */
848 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
849 }
850
851 test_mode_rx = &cis->hdr.test_mode.rx;
852 stream_dp = &cis->hdr.datapath_out;
853 cig = cis->group;
854
855 if (cig->lll.role == BT_HCI_ROLE_PERIPHERAL) {
856 /* peripheral */
857 sdu_interval = cig->c_sdu_interval;
858 } else {
859 /* central */
860 sdu_interval = cig->p_sdu_interval;
861 }
862
863 role = cig->lll.role;
864 framed = cis->framed;
865 bn = cis->lll.rx.bn;
866 ft = cis->lll.rx.ft;
867 iso_interval = cig->iso_interval;
868 stream_sync_delay = cis->sync_delay;
869 group_sync_delay = cig->sync_delay;
870 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
871 } else if (IS_SYNC_ISO_HANDLE(handle)) {
872 /* Get the sync stream from the handle */
873 struct lll_sync_iso_stream *sync_stream;
874 struct ll_sync_iso_set *sync_iso;
875 struct lll_sync_iso *lll_iso;
876
877 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
878 sync_stream = ull_sync_iso_stream_get(stream_handle);
879 if (!sync_stream) {
880 return BT_HCI_ERR_UNKNOWN_CONN_ID;
881 }
882
883 if (sync_stream->dp) {
884 /* Data path already set up */
885 return BT_HCI_ERR_CMD_DISALLOWED;
886 }
887
888 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
889 lll_iso = &sync_iso->lll;
890
891 test_mode_rx = sync_stream->test_mode;
892 stream_dp = &sync_stream->dp;
893
894 /* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
895 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing
896 * + (NSE – 1) × Sub_Interval + MPT.
897 */
898 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
899 lll_iso->nse, lll_iso->sub_interval,
900 lll_iso->phy, lll_iso->max_pdu,
901 lll_iso->enc);
902 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
903
904 role = ISOAL_ROLE_BROADCAST_SINK;
905 framed = lll_iso->framing;
906 bn = lll_iso->bn;
907 ft = 0;
908 sdu_interval = lll_iso->sdu_interval;
909 iso_interval = lll_iso->iso_interval;
910 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
911 } else {
912 /* Handle is out of range */
913 return BT_HCI_ERR_UNKNOWN_CONN_ID;
914 }
915
916 if (*stream_dp) {
917 /* Data path already set up */
918 return BT_HCI_ERR_CMD_DISALLOWED;
919 }
920
921 if (payload_type > BT_HCI_ISO_TEST_MAX_SIZE_SDU) {
922 return BT_HCI_ERR_INVALID_LL_PARAM;
923 }
924
925 /* Allocate and configure test datapath */
926 dp = mem_acquire(&datapath_free);
927 if (!dp) {
928 return BT_HCI_ERR_CMD_DISALLOWED;
929 }
930
931 dp->path_dir = BT_HCI_DATAPATH_DIR_CTLR_TO_HOST;
932 dp->path_id = BT_HCI_DATAPATH_ID_HCI;
933
934 *stream_dp = dp;
935 memset(test_mode_rx, 0, sizeof(struct ll_iso_rx_test_mode));
936
937 err = isoal_sink_create(handle, role, framed, bn, ft,
938 sdu_interval, iso_interval,
939 stream_sync_delay, group_sync_delay,
940 ll_iso_test_sdu_alloc,
941 ll_iso_test_sdu_emit,
942 sink_sdu_write_hci, &sink_handle);
943 if (err) {
		/* Error creating test sink - clean up sink and
		 * datapath
		 */
947 isoal_sink_destroy(sink_handle);
948 ull_iso_datapath_release(dp);
949 *stream_dp = NULL;
950
951 return BT_HCI_ERR_CMD_DISALLOWED;
952 }
953
954 dp->sink_hdl = sink_handle;
955 isoal_sink_enable(sink_handle);
956
957 /* Enable Receive Test Mode */
958 test_mode_rx->enabled = 1;
959 test_mode_rx->payload_type = payload_type;
960
961 return BT_HCI_ERR_SUCCESS;
962 }
963
uint8_t ll_iso_read_test_counters(uint16_t handle, uint32_t *received_cnt,
				  uint32_t *missed_cnt,
				  uint32_t *failed_cnt)
967 {
968 struct ll_iso_rx_test_mode *test_mode_rx;
969
970 *received_cnt = 0U;
971 *missed_cnt = 0U;
972 *failed_cnt = 0U;
973
974 if (IS_CIS_HANDLE(handle)) {
975 struct ll_conn_iso_stream *cis;
976
977 cis = ll_iso_stream_connected_get(handle);
978 if (!cis) {
979 /* CIS is not connected */
980 return BT_HCI_ERR_UNKNOWN_CONN_ID;
981 }
982
983 test_mode_rx = &cis->hdr.test_mode.rx;
984
985 } else if (IS_SYNC_ISO_HANDLE(handle)) {
986 /* Get the sync stream from the handle */
987 struct lll_sync_iso_stream *sync_stream;
988 uint16_t stream_handle;
989
990 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
991 sync_stream = ull_sync_iso_stream_get(stream_handle);
992 if (!sync_stream) {
993 return BT_HCI_ERR_UNKNOWN_CONN_ID;
994 }
995
996 test_mode_rx = sync_stream->test_mode;
997
998 } else {
999 /* Handle is out of range */
1000 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1001 }
1002
1003 if (!test_mode_rx->enabled) {
1004 /* ISO receive Test is not active */
1005 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1006 }
1007
1008 /* Return SDU statistics */
1009 *received_cnt = test_mode_rx->received_cnt;
1010 *missed_cnt = test_mode_rx->missed_cnt;
1011 *failed_cnt = test_mode_rx->failed_cnt;
1012
1013 return BT_HCI_ERR_SUCCESS;
1014 }
1015
1016 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
uint8_t ll_read_iso_link_quality(uint16_t handle,
				 uint32_t *tx_unacked_packets,
				 uint32_t *tx_flushed_packets,
				 uint32_t *tx_last_subevent_packets,
				 uint32_t *retransmitted_packets,
				 uint32_t *crc_error_packets,
				 uint32_t *rx_unreceived_packets,
				 uint32_t *duplicate_packets)
1025 {
1026 uint8_t status;
1027
1028 *tx_unacked_packets = 0;
1029 *tx_flushed_packets = 0;
1030 *tx_last_subevent_packets = 0;
1031 *retransmitted_packets = 0;
1032 *crc_error_packets = 0;
1033 *rx_unreceived_packets = 0;
1034 *duplicate_packets = 0;
1035
1036 status = BT_HCI_ERR_SUCCESS;
1037
1038 if (IS_CIS_HANDLE(handle)) {
1039 struct ll_conn_iso_stream *cis;
1040
1041 cis = ll_iso_stream_connected_get(handle);
1042
1043 if (!cis) {
1044 /* CIS is not connected */
1045 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1046 }
1047
1048 *tx_unacked_packets = cis->hdr.link_quality.tx_unacked_packets;
1049 *tx_flushed_packets = cis->hdr.link_quality.tx_flushed_packets;
1050 *tx_last_subevent_packets = cis->hdr.link_quality.tx_last_subevent_packets;
1051 *retransmitted_packets = cis->hdr.link_quality.retransmitted_packets;
1052 *crc_error_packets = cis->hdr.link_quality.crc_error_packets;
1053 *rx_unreceived_packets = cis->hdr.link_quality.rx_unreceived_packets;
1054 *duplicate_packets = cis->hdr.link_quality.duplicate_packets;
1055
1056 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1057 /* FIXME: Implement for sync receiver */
1058 status = BT_HCI_ERR_CMD_DISALLOWED;
1059 } else {
1060 /* Handle is out of range */
1061 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
1062 }
1063
1064 return status;
1065 }
1066 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
1067
1068 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1069
1070 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
static isoal_status_t ll_iso_test_pdu_release(struct node_tx_iso *node_tx,
					      const uint16_t handle,
					      const isoal_status_t status)
1074 {
1075 /* Release back to memory pool */
1076 if (node_tx->link) {
1077 ll_iso_link_tx_release(node_tx->link);
1078 }
1079 ll_iso_tx_mem_release(node_tx);
1080
1081 return ISOAL_STATUS_OK;
1082 }
1083
1084 #if defined(CONFIG_BT_CTLR_CONN_ISO)
void ll_iso_transmit_test_send_sdu(uint16_t handle, uint32_t ticks_at_expire)
1086 {
1087 isoal_source_handle_t source_handle;
1088 struct isoal_sdu_tx sdu;
1089 isoal_status_t err;
1090 uint8_t tx_buffer[ISO_TEST_TX_BUFFER_SIZE];
1091 uint64_t next_payload_number;
1092 uint16_t remaining_tx;
1093 uint32_t sdu_counter;
1094
1095 if (IS_CIS_HANDLE(handle)) {
1096 struct ll_conn_iso_stream *cis;
1097 struct ll_conn_iso_group *cig;
1098 uint32_t rand_max_sdu;
1099 uint8_t event_offset;
1100 uint8_t max_sdu;
1101 uint8_t rand_8;
1102
1103 cis = ll_iso_stream_connected_get(handle);
1104 LL_ASSERT(cis);
1105
1106 if (!cis->hdr.test_mode.tx.enabled) {
1107 /* Transmit Test Mode not enabled */
1108 return;
1109 }
1110
1111 cig = cis->group;
1112 source_handle = cis->hdr.datapath_in->source_hdl;
1113
1114 max_sdu = IS_PERIPHERAL(cig) ? cis->p_max_sdu : cis->c_max_sdu;
1115
1116 switch (cis->hdr.test_mode.tx.payload_type) {
1117 case BT_HCI_ISO_TEST_ZERO_SIZE_SDU:
1118 remaining_tx = 0;
1119 break;
1120
1121 case BT_HCI_ISO_TEST_VARIABLE_SIZE_SDU:
1122 /* Randomize the length [4..max_sdu] */
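			/* rand_8 is used as an 8-bit fraction (rand_8 / 256)
			 * of the payload span above the 4 byte packet counter.
			 */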
1123 lll_rand_get(&rand_8, sizeof(rand_8));
1124 rand_max_sdu = rand_8 * (max_sdu - ISO_TEST_PACKET_COUNTER_SIZE);
1125 remaining_tx = ISO_TEST_PACKET_COUNTER_SIZE + (rand_max_sdu >> 8);
1126 break;
1127
1128 case BT_HCI_ISO_TEST_MAX_SIZE_SDU:
1129 LL_ASSERT(max_sdu > ISO_TEST_PACKET_COUNTER_SIZE);
1130 remaining_tx = max_sdu;
1131 break;
1132
1133 default:
1134 LL_ASSERT(0);
1135 return;
1136 }
1137
1138 if (remaining_tx > ISO_TEST_TX_BUFFER_SIZE) {
1139 sdu.sdu_state = BT_ISO_START;
1140 } else {
1141 sdu.sdu_state = BT_ISO_SINGLE;
1142 }
1143
1144 /* Configure SDU similarly to one delivered via HCI */
1145 sdu.packet_sn = 0;
1146 sdu.dbuf = tx_buffer;
1147
1148 /* We must ensure sufficient time for ISO-AL to fragment SDU and
1149 * deliver PDUs to the TX queue. By checking ull_ref_get, we
1150 * know if we are within the subevents of an ISO event. If so,
1151 * we can assume that we have enough time to deliver in the next
1152 * ISO event. If we're not active within the ISO event, we don't
1153 * know if there is enough time to deliver in the next event,
1154 * and for safety we set the target to current event + 2.
1155 *
1156 * For FT > 1, we have the opportunity to retransmit in later
1157 * event(s), in which case we have the option to target an
1158 * earlier event (this or next) because being late does not
1159 * instantly flush the payload.
1160 */
1161 event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
1162 if (cis->lll.tx.ft > 1) {
1163 /* FT > 1, target an earlier event */
1164 event_offset -= 1;
1165 }
1166
1167 sdu.grp_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
1168 (event_offset * cig->iso_interval *
1169 ISO_INT_UNIT_US));
1170 sdu.target_event = cis->lll.event_count + event_offset;
1171 sdu.iso_sdu_length = remaining_tx;
1172
1173 /* Send all SDU fragments */
1174 do {
1175 sdu.cntr_time_stamp = HAL_TICKER_TICKS_TO_US(ticks_at_expire);
1176 sdu.time_stamp = sdu.cntr_time_stamp;
1177 sdu.size = MIN(remaining_tx, ISO_TEST_TX_BUFFER_SIZE);
1178 memset(tx_buffer, 0, sdu.size);
1179
1180 /* If this is the first fragment of a framed SDU, inject the SDU
1181 * counter.
1182 */
1183 if ((sdu.size >= ISO_TEST_PACKET_COUNTER_SIZE) &&
1184 ((sdu.sdu_state == BT_ISO_START) || (sdu.sdu_state == BT_ISO_SINGLE))) {
1185 if (cis->framed) {
1186 sdu_counter = (uint32_t)cis->hdr.test_mode.tx.sdu_counter;
1187 } else {
1188 /* Unframed. Get the next payload counter.
1189 *
1190 * BT 5.3, Vol 6, Part B, Section 7.1:
1191 * When using unframed PDUs, the SDU counter shall be equal
1192 * to the payload counter.
1193 */
1194 isoal_tx_unframed_get_next_payload_number(source_handle,
1195 &sdu,
1196 &next_payload_number);
1197 sdu_counter = (uint32_t)next_payload_number;
1198 }
1199
1200 sys_put_le32(sdu_counter, tx_buffer);
1201 }
1202
1203 /* Send to ISOAL */
1204 err = isoal_tx_sdu_fragment(source_handle, &sdu);
1205 LL_ASSERT(!err);
1206
1207 remaining_tx -= sdu.size;
1208
1209 if (remaining_tx > ISO_TEST_TX_BUFFER_SIZE) {
1210 sdu.sdu_state = BT_ISO_CONT;
1211 } else {
1212 sdu.sdu_state = BT_ISO_END;
1213 }
1214 } while (remaining_tx);
1215
1216 cis->hdr.test_mode.tx.sdu_counter++;
1217
1218 } else if (IS_ADV_ISO_HANDLE(handle)) {
1219 /* FIXME: Implement for broadcaster */
1220 } else {
1221 LL_ASSERT(0);
1222 }
1223 }
1224 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1225
uint8_t ll_iso_transmit_test(uint16_t handle, uint8_t payload_type)
1227 {
1228 isoal_source_handle_t source_handle;
1229 struct ll_iso_datapath *dp;
1230 uint32_t sdu_interval;
1231 isoal_status_t err;
1232 uint8_t status;
1233
1234 status = BT_HCI_ERR_SUCCESS;
1235
1236 if (IS_CIS_HANDLE(handle)) {
1237 struct ll_conn_iso_stream *cis;
1238 struct ll_conn_iso_group *cig;
1239
1240 cis = ll_iso_stream_connected_get(handle);
1241 if (!cis) {
1242 /* CIS is not connected */
1243 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1244 }
1245
1246 if (cis->lll.tx.bn == 0U) {
1247 /* CIS is not configured for TX */
1248 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1249 }
1250
1251 if (cis->hdr.datapath_in) {
1252 /* Data path already set up */
1253 return BT_HCI_ERR_CMD_DISALLOWED;
1254 }
1255
1256 if (payload_type > BT_HCI_ISO_TEST_MAX_SIZE_SDU) {
1257 return BT_HCI_ERR_INVALID_LL_PARAM;
1258 }
1259
1260 /* Allocate and configure test datapath */
1261 dp = mem_acquire(&datapath_free);
1262 if (!dp) {
1263 return BT_HCI_ERR_CMD_DISALLOWED;
1264 }
1265
1266 dp->path_dir = BT_HCI_DATAPATH_DIR_HOST_TO_CTLR;
1267 dp->path_id = BT_HCI_DATAPATH_ID_HCI;
1268
1269 cis->hdr.datapath_in = dp;
1270 cig = cis->group;
1271
1272 sdu_interval = IS_PERIPHERAL(cig) ? cig->p_sdu_interval : cig->c_sdu_interval;
1273
1274 /* Setup the test source */
1275 err = isoal_source_create(handle, cig->lll.role, cis->framed,
1276 cis->lll.tx.bn, cis->lll.tx.ft,
1277 cis->lll.tx.max_pdu, sdu_interval,
1278 cig->iso_interval, cis->sync_delay,
1279 cig->sync_delay, ll_iso_pdu_alloc,
1280 ll_iso_pdu_write, ll_iso_pdu_emit,
1281 ll_iso_test_pdu_release,
1282 &source_handle);
1283
1284 if (err) {
1285 /* Error creating test source - cleanup source and datapath */
1286 isoal_source_destroy(source_handle);
1287 ull_iso_datapath_release(dp);
1288 cis->hdr.datapath_in = NULL;
1289
1290 return BT_HCI_ERR_CMD_DISALLOWED;
1291 }
1292
1293 dp->source_hdl = source_handle;
1294 isoal_source_enable(source_handle);
1295
1296 /* Enable Transmit Test Mode */
1297 cis->hdr.test_mode.tx.enabled = 1;
1298 cis->hdr.test_mode.tx.payload_type = payload_type;
1299
1300 } else if (IS_ADV_ISO_HANDLE(handle)) {
1301 struct lll_adv_iso_stream *stream;
1302 uint16_t stream_handle;
1303
1304 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1305 stream = ull_adv_iso_stream_get(stream_handle);
1306 if (!stream) {
1307 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1308 }
1309
1310 /* FIXME: Implement use of common header in stream to enable code sharing
1311 * between CIS and BIS for test commands (and other places).
1312 */
1313 status = BT_HCI_ERR_CMD_DISALLOWED;
1314 } else {
1315 /* Handle is out of range */
1316 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
1317 }
1318
1319 return status;
1320 }
1321 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1322
uint8_t ll_iso_test_end(uint16_t handle, uint32_t *received_cnt,
			uint32_t *missed_cnt, uint32_t *failed_cnt)
1325 {
1326 *received_cnt = 0U;
1327 *missed_cnt = 0U;
1328 *failed_cnt = 0U;
1329
1330 if (IS_CIS_HANDLE(handle)) {
1331 struct ll_conn_iso_stream *cis;
1332
1333 cis = ll_iso_stream_connected_get(handle);
1334 if (!cis) {
1335 /* CIS is not connected */
1336 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1337 }
1338
1339 if (!cis->hdr.test_mode.rx.enabled && !cis->hdr.test_mode.tx.enabled) {
1340 /* Test Mode is not active */
1341 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1342 }
1343
1344 if (cis->hdr.test_mode.rx.enabled) {
1345 isoal_sink_destroy(cis->hdr.datapath_out->sink_hdl);
1346 ull_iso_datapath_release(cis->hdr.datapath_out);
1347 cis->hdr.datapath_out = NULL;
1348
1349 /* Return SDU statistics */
1350 *received_cnt = cis->hdr.test_mode.rx.received_cnt;
1351 *missed_cnt = cis->hdr.test_mode.rx.missed_cnt;
1352 *failed_cnt = cis->hdr.test_mode.rx.failed_cnt;
1353 }
1354
1355 if (cis->hdr.test_mode.tx.enabled) {
1356 /* Tear down source and datapath */
1357 isoal_source_destroy(cis->hdr.datapath_in->source_hdl);
1358 ull_iso_datapath_release(cis->hdr.datapath_in);
1359 cis->hdr.datapath_in = NULL;
1360 }
1361
1362 /* Disable Test Mode */
1363 (void)memset(&cis->hdr.test_mode, 0U, sizeof(cis->hdr.test_mode));
1364
1365 } else if (IS_ADV_ISO_HANDLE(handle)) {
1366 /* FIXME: Implement for broadcaster */
1367 return BT_HCI_ERR_CMD_DISALLOWED;
1368
1369 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1370 struct lll_sync_iso_stream *sync_stream;
1371 uint16_t stream_handle;
1372
1373 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
1374 sync_stream = ull_sync_iso_stream_get(stream_handle);
1375 if (!sync_stream) {
1376 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1377 }
1378
1379 if (!sync_stream->test_mode->enabled || !sync_stream->dp) {
1380 /* Test Mode is not active */
1381 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1382 }
1383
1384 isoal_sink_destroy(sync_stream->dp->sink_hdl);
1385 ull_iso_datapath_release(sync_stream->dp);
1386 sync_stream->dp = NULL;
1387
1388 /* Return SDU statistics */
1389 *received_cnt = sync_stream->test_mode->received_cnt;
1390 *missed_cnt = sync_stream->test_mode->missed_cnt;
1391 *failed_cnt = sync_stream->test_mode->failed_cnt;
1392
		(void)memset(sync_stream->test_mode, 0U, sizeof(*sync_stream->test_mode));
1394
1395 } else {
1396 /* Handle is out of range */
1397 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1398 }
1399
1400 return BT_HCI_ERR_SUCCESS;
1401 }
1402
1403 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
void *ll_iso_tx_mem_acquire(void)
1405 {
1406 return mem_acquire(&mem_iso_tx.free);
1407 }
1408
void ll_iso_tx_mem_release(void *node_tx)
1410 {
1411 mem_release(node_tx, &mem_iso_tx.free);
1412 }
1413
int ll_iso_tx_mem_enqueue(uint16_t handle, void *node_tx, void *link)
1415 {
1416 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) &&
1417 IS_CIS_HANDLE(handle)) {
1418 struct ll_conn_iso_stream *cis;
1419
1420 cis = ll_conn_iso_stream_get(handle);
1421 memq_enqueue(link, node_tx, &cis->lll.memq_tx.tail);
1422
1423 } else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) &&
1424 IS_ADV_ISO_HANDLE(handle)) {
1425 struct lll_adv_iso_stream *stream;
1426 uint16_t stream_handle;
1427
1428 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1429 stream = ull_adv_iso_stream_get(stream_handle);
1430 memq_enqueue(link, node_tx, &stream->memq_tx.tail);
1431
1432 } else {
1433 return -EINVAL;
1434 }
1435
1436 return 0;
1437 }
1438 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1439
int ull_iso_init(void)
1441 {
1442 int err;
1443
1444 err = init_reset();
1445 if (err) {
1446 return err;
1447 }
1448
1449 return 0;
1450 }
1451
int ull_iso_reset(void)
1453 {
1454 int err;
1455
1456 err = init_reset();
1457 if (err) {
1458 return err;
1459 }
1460
1461 return 0;
1462 }
1463
1464 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
void ull_iso_lll_ack_enqueue(uint16_t handle, struct node_tx_iso *node_tx)
1466 {
1467 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && IS_CIS_HANDLE(handle)) {
1468 struct ll_conn_iso_stream *cis;
1469 struct ll_iso_datapath *dp;
1470
1471 cis = ll_conn_iso_stream_get(handle);
1472 dp = cis->hdr.datapath_in;
1473
1474 if (dp) {
1475 isoal_tx_pdu_release(dp->source_hdl, node_tx);
1476 } else {
1477 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
1478 /* Possible race with Data Path remove - handle release in vendor
1479 * function.
1480 */
1481 ll_data_path_tx_pdu_release(handle, node_tx);
1482 #else
1483 /* FIXME: ll_tx_ack_put is not LLL callable as it is
1484 * used by ACL connections in ULL context to dispatch
1485 * ack.
1486 */
1487 ll_tx_ack_put(handle, (void *)node_tx);
1488 ll_rx_sched();
1489 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1490 }
1491 } else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && IS_ADV_ISO_HANDLE(handle)) {
1492 /* Process as TX ack. TODO: Can be unified with CIS and use
1493 * ISOAL.
1494 */
1495 /* FIXME: ll_tx_ack_put is not LLL callable as it is
1496 * used by ACL connections in ULL context to dispatch
1497 * ack.
1498 */
1499 ll_tx_ack_put(handle, (void *)node_tx);
1500 ll_rx_sched();
1501 } else {
1502 LL_ASSERT(0);
1503 }
1504 }
1505
void ull_iso_lll_event_prepare(uint16_t handle, uint64_t event_count)
1507 {
1508 if (IS_CIS_HANDLE(handle)) {
1509 struct ll_iso_datapath *dp = NULL;
1510 struct ll_conn_iso_stream *cis;
1511
1512 cis = ll_iso_stream_connected_get(handle);
1513
1514 if (cis) {
1515 dp = cis->hdr.datapath_in;
1516 }
1517
1518 if (dp) {
1519 isoal_tx_event_prepare(dp->source_hdl, event_count);
1520 }
1521 } else if (IS_ADV_ISO_HANDLE(handle)) {
1522 struct ll_iso_datapath *dp = NULL;
1523 struct lll_adv_iso_stream *stream;
1524 uint16_t stream_handle;
1525
1526 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1527 stream = ull_adv_iso_stream_get(stream_handle);
1528
1529 if (stream) {
1530 dp = stream->dp;
1531 }
1532
1533 if (dp) {
1534 isoal_tx_event_prepare(dp->source_hdl, event_count);
1535 }
1536 } else {
1537 LL_ASSERT(0);
1538 }
1539 }
1540 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1541
1542 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
uint32_t ull_iso_big_sync_delay(uint8_t num_bis, uint32_t bis_spacing, uint8_t nse,
				uint32_t sub_interval, uint8_t phy, uint8_t max_pdu, bool enc)
1545 {
1546 /* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
1547 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing + (NSE – 1) × Sub_Interval + MPT.
1548 */
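	/* The BYTES2US() term is the Maximum PDU Transmission time (MPT),
	 * covering the per-PHY overhead and, when encrypted, the 4 byte MIC.
	 *
	 * Worked example with hypothetical values: Num_BIS = 2,
	 * BIS_Spacing = 2900 us, NSE = 3, Sub_Interval = 900 us and
	 * MPT = 1064 us gives 2900 + 1800 + 1064 = 5764 us.
	 */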
1549 return (num_bis - 1) * bis_spacing + (nse - 1) * sub_interval +
1550 BYTES2US(PDU_OVERHEAD_SIZE(phy) + max_pdu + (enc ? 4 : 0), phy);
1551 }
1552 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1553
1554 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
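/* Peek whether at least 'count' ISO rx PDU buffers are available in the
 * RXFIFO without dequeuing; returns the first buffer or NULL.
 */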
void *ull_iso_pdu_rx_alloc_peek(uint8_t count)
1556 {
1557 if (count > MFIFO_AVAIL_COUNT_GET(iso_rx)) {
1558 return NULL;
1559 }
1560
1561 return MFIFO_DEQUEUE_PEEK(iso_rx);
1562 }
1563
void *ull_iso_pdu_rx_alloc(void)
1565 {
1566 return MFIFO_DEQUEUE(iso_rx);
1567 }
1568
1569 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
void ull_iso_rx_put(memq_link_t *link, void *rx)
1571 {
1572 /* Enqueue the Rx object */
1573 memq_enqueue(link, rx, &memq_ull_iso_rx.tail);
1574 }
1575
void ull_iso_rx_sched(void)
1577 {
1578 static memq_link_t link;
1579 static struct mayfly mfy = {0, 0, &link, NULL, iso_rx_demux};
1580
1581 /* Kick the ULL (using the mayfly, tailchain it) */
1582 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
1583 }
1584
1585 #if defined(CONFIG_BT_CTLR_CONN_ISO)
static void iso_rx_cig_ref_point_update(struct ll_conn_iso_group *cig,
					const struct ll_conn_iso_stream *cis,
					const struct node_rx_iso_meta *meta)
1589 {
1590 uint32_t cig_sync_delay;
1591 uint32_t cis_sync_delay;
1592 uint64_t event_count;
1593 uint8_t burst_number;
1594 uint8_t role;
1595
1596 role = cig->lll.role;
1597 cig_sync_delay = cig->sync_delay;
1598 cis_sync_delay = cis->sync_delay;
1599 burst_number = cis->lll.rx.bn;
1600 event_count = cis->lll.event_count;
1601
1602 if (role) {
1603 /* Peripheral */
1604
1605 /* Check if this is the first payload received for this cis in
1606 * this event
1607 */
1608 if (meta->payload_number == (burst_number * event_count)) {
1609 /* Update the CIG reference point based on the CIS
1610 * anchor point
1611 */
1612 cig->cig_ref_point = isoal_get_wrapped_time_us(meta->timestamp,
1613 cis_sync_delay - cig_sync_delay);
1614 }
1615 }
1616 }
1617 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1618
static void iso_rx_demux(void *param)
1620 {
1621 #if defined(CONFIG_BT_CTLR_CONN_ISO) || \
1622 defined(CONFIG_BT_CTLR_SYNC_ISO)
1623 struct ll_iso_datapath *dp;
1624 #endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1625 struct node_rx_pdu *rx_pdu;
1626 struct node_rx_hdr *rx;
1627 memq_link_t *link;
1628 uint16_t handle;
1629
1630 do {
1631 link = memq_peek(memq_ull_iso_rx.head, memq_ull_iso_rx.tail,
1632 (void **)&rx);
1633 if (link) {
1634 /* Demux Rx objects */
1635 switch (rx->type) {
1636 case NODE_RX_TYPE_RELEASE:
1637 (void)memq_dequeue(memq_ull_iso_rx.tail,
1638 &memq_ull_iso_rx.head, NULL);
1639 ll_iso_rx_put(link, rx);
1640 ll_rx_sched();
1641 break;
1642
1643 case NODE_RX_TYPE_ISO_PDU:
1644 /* Remove from receive-queue; ULL has received this now */
1645 (void)memq_dequeue(memq_ull_iso_rx.tail, &memq_ull_iso_rx.head,
1646 NULL);
1647
1648 rx_pdu = (struct node_rx_pdu *)rx;
1649 handle = rx_pdu->hdr.handle;
1650 dp = NULL;
1651
1652 if (false) {
1653 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1654 } else if (IS_CIS_HANDLE(handle)) {
1655 struct ll_conn_iso_stream *cis;
1656 struct ll_conn_iso_group *cig;
1657
1658 cis = ll_conn_iso_stream_get(handle);
1659 cig = cis->group;
1660 dp = cis->hdr.datapath_out;
1661
1662 iso_rx_cig_ref_point_update(cig, cis,
1663 &rx_pdu->hdr.rx_iso_meta);
1664 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1665 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1666 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1667 struct lll_sync_iso_stream *sync_stream;
1668 uint16_t stream_handle;
1669
1670 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
1671 sync_stream = ull_sync_iso_stream_get(stream_handle);
1672 dp = sync_stream ? sync_stream->dp : NULL;
1673 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1674 }
1675
1676 #if defined(CONFIG_BT_CTLR_CONN_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
1677 if (dp && dp->path_id != BT_HCI_DATAPATH_ID_HCI) {
1678 /* If vendor specific datapath pass to ISO AL here,
1679 * in case of HCI destination it will be passed in
1680 * HCI context.
1681 */
1682 struct isoal_pdu_rx pckt_meta = {
1683 .meta = &rx_pdu->rx_iso_meta,
1684 .pdu = (struct pdu_iso *)&rx_pdu->pdu[0]
1685 };
1686
1687 /* Pass the ISO PDU through ISO-AL */
1688 const isoal_status_t err =
1689 isoal_rx_pdu_recombine(dp->sink_hdl, &pckt_meta);
1690
1691 LL_ASSERT(err == ISOAL_STATUS_OK); /* TODO handle err */
1692 }
1693 #endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1694
1695 /* Let ISO PDU start its long journey upwards */
1696 ll_iso_rx_put(link, rx);
1697 ll_rx_sched();
1698 break;
1699
1700 default:
1701 LL_ASSERT(0);
1702 break;
1703 }
1704 }
1705 } while (link);
1706 }
1707 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1708
void ll_iso_rx_put(memq_link_t *link, void *rx)
1710 {
1711 /* Enqueue the Rx object */
1712 memq_enqueue(link, rx, &memq_ll_iso_rx.tail);
1713 }
1714
void *ll_iso_rx_get(void)
1716 {
1717 struct node_rx_hdr *rx;
1718 memq_link_t *link;
1719
1720 link = memq_peek(memq_ll_iso_rx.head, memq_ll_iso_rx.tail, (void **)&rx);
1721 while (link) {
1722 /* Do not send up buffers to Host thread that are
1723 * marked for release
1724 */
1725 if (rx->type == NODE_RX_TYPE_RELEASE) {
1726 (void)memq_dequeue(memq_ll_iso_rx.tail,
1727 &memq_ll_iso_rx.head, NULL);
1728 mem_release(link, &mem_link_iso_rx.free);
1729 mem_release(rx, &mem_pool_iso_rx.free);
1730 RXFIFO_ALLOC(iso_rx, 1);
1731
1732 link = memq_peek(memq_ll_iso_rx.head, memq_ll_iso_rx.tail, (void **)&rx);
1733 continue;
1734 }
1735 return rx;
1736 }
1737
1738 return NULL;
1739 }
1740
void ll_iso_rx_dequeue(void)
1742 {
1743 struct node_rx_hdr *rx = NULL;
1744 memq_link_t *link;
1745
1746 link = memq_dequeue(memq_ll_iso_rx.tail, &memq_ll_iso_rx.head,
1747 (void **)&rx);
1748 LL_ASSERT(link);
1749
1750 mem_release(link, &mem_link_iso_rx.free);
1751
1752 /* Handle object specific clean up */
1753 switch (rx->type) {
1754 case NODE_RX_TYPE_ISO_PDU:
1755 break;
1756 default:
1757 LL_ASSERT(0);
1758 break;
1759 }
1760 }
1761
void ll_iso_rx_mem_release(void **node_rx)
1763 {
1764 struct node_rx_hdr *rx;
1765
1766 rx = *node_rx;
1767 while (rx) {
1768 struct node_rx_hdr *rx_free;
1769
1770 rx_free = rx;
1771 rx = rx->next;
1772
1773 switch (rx_free->type) {
1774 case NODE_RX_TYPE_ISO_PDU:
1775 mem_release(rx_free, &mem_pool_iso_rx.free);
1776 break;
1777 default:
1778 /* Ignore other types as node may have been initialized due to
1779 * race with HCI reset.
1780 */
1781 break;
1782 }
1783 }
1784
1785 *node_rx = rx;
1786
1787 RXFIFO_ALLOC(iso_rx, UINT8_MAX);
1788 }
1789 #endif /* CONFIG_BT_CTLR_SYNC_ISO) || CONFIG_BT_CTLR_CONN_ISO */
1790
struct ll_iso_datapath *ull_iso_datapath_alloc(void)
1792 {
1793 return mem_acquire(&datapath_free);
1794 }
1795
void ull_iso_datapath_release(struct ll_iso_datapath *dp)
1797 {
1798 mem_release(dp, &datapath_free);
1799 }
1800
1801 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
void ll_iso_link_tx_release(void *link)
1803 {
1804 mem_release(link, &mem_link_iso_tx.free);
1805 }
1806
1807 /**
1808 * Allocate a PDU from the LL and store the details in the given buffer. Allocation
1809 * is not expected to fail as there must always be sufficient PDU buffers. Any
1810 * failure will trigger the assert.
1811 * @param[in] pdu_buffer Buffer to store PDU details in
1812 * @return Error status of operation
1813 */
static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer)
1815 {
1816 struct node_tx_iso *node_tx;
1817
1818 node_tx = ll_iso_tx_mem_acquire();
1819 if (!node_tx) {
1820 LOG_ERR("Tx Buffer Overflow");
1821 /* TODO: Report overflow to HCI and remove assert
1822 * data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO)
1823 */
1824 LL_ASSERT(0);
1825 return ISOAL_STATUS_ERR_PDU_ALLOC;
1826 }
1827
1828 node_tx->link = NULL;
1829
1830 /* node_tx handle will be required to emit the PDU later */
1831 pdu_buffer->handle = (void *)node_tx;
1832 pdu_buffer->pdu = (void *)node_tx->pdu;
1833
1834 /* Use TX buffer size as the limit here. Actual size will be decided in
1835 * the ISOAL based on the minimum of the buffer size and the respective
1836 * Max_PDU_C_To_P or Max_PDU_P_To_C.
1837 */
1838 pdu_buffer->size = MAX(LL_BIS_OCTETS_TX_MAX, LL_CIS_OCTETS_TX_MAX);
1839
1840 return ISOAL_STATUS_OK;
1841 }
1842
1843 /**
1844 * Write the given SDU payload to the target PDU buffer at the given offset.
1845 * @param[in,out] pdu_buffer Target PDU buffer
1846 * @param[in] pdu_offset Offset / current write position within PDU
1847 * @param[in] sdu_payload Location of source data
1848 * @param[in] consume_len Length of data to copy
1849 * @return Error status of write operation
1850 */
1851 static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
1852 const size_t pdu_offset,
1853 const uint8_t *sdu_payload,
1854 const size_t consume_len)
1855 {
1858
1859 LL_ASSERT(pdu_buffer);
1860 LL_ASSERT(pdu_buffer->pdu);
1861 LL_ASSERT(sdu_payload);
1862
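	/* Reject writes that would run past the end of the allocated PDU buffer */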
1863 if ((pdu_offset + consume_len) > pdu_buffer->size) {
1864 /* Exceeded PDU buffer */
1865 return ISOAL_STATUS_ERR_UNSPECIFIED;
1866 }
1867
1868 /* Copy source to destination at given offset */
1869 memcpy(&pdu_buffer->pdu->payload[pdu_offset], sdu_payload, consume_len);
1870
1871 return ISOAL_STATUS_OK;
1872 }
1873
1874 /**
1875 * Emit the encoded node to the transmission queue
1876 * @param node_tx TX node to enqueue
1877 * @param handle CIS/BIS handle
1878 * @return Error status of enqueue operation
1879 */
1880 static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
1881 const uint16_t handle)
1882 {
1883 memq_link_t *link;
1884
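	/* Acquire a memq link to pass along with the node to the ISO TX queue */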
1885 link = mem_acquire(&mem_link_iso_tx.free);
1886 LL_ASSERT(link);
1887
1888 if (ll_iso_tx_mem_enqueue(handle, node_tx, link)) {
1889 return ISOAL_STATUS_ERR_PDU_EMIT;
1890 }
1891
1892 return ISOAL_STATUS_OK;
1893 }
1894
1895 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1896 /**
1897 * Release the given payload back to the memory pool.
1898 * @param node_tx TX node to release or forward
1899 * @param handle CIS/BIS handle
1900 * @param status Reason for release
1901 * @return Error status of release operation
1902 */
1903 static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
1904 const uint16_t handle,
1905 const isoal_status_t status)
1906 {
1907 if (status == ISOAL_STATUS_OK) {
1908 		/* Process as a TX ack; we are in the LLL execution context here.
1909 * status == ISOAL_STATUS_OK when an ISO PDU has been acked.
1910 *
1911 * Call Path:
1912 * ull_iso_lll_ack_enqueue() --> isoal_tx_pdu_release() -->
1913 * pdu_release() == ll_iso_pdu_release() (this function).
1914 */
1915 /* FIXME: ll_tx_ack_put is not LLL callable as it is used by
1916 * ACL connections in ULL context to dispatch ack.
1917 */
1918 ll_tx_ack_put(handle, (void *)node_tx);
1919 ll_rx_sched();
1920 } else {
1921 		/* Release back to the memory pool; we are in Thread context.
1922 		 * Callers:
1923 		 * isoal_source_deallocate() with ISOAL_STATUS_ERR_PDU_EMIT
1924 		 * isoal_tx_pdu_emit() with status != ISOAL_STATUS_OK
1925 */
1926 if (node_tx->link) {
1927 ll_iso_link_tx_release(node_tx->link);
1928 }
1929 ll_iso_tx_mem_release(node_tx);
1930 }
1931
1932 return ISOAL_STATUS_OK;
1933 }
1934 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1935 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1936
1937 static int init_reset(void)
1938 {
1939 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1940 memq_link_t *link;
1941
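	/* Initialize the FIFO of free RX nodes and links used for ISO RX */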
1942 RXFIFO_INIT(iso_rx);
1943
1944 /* Acquire a link to initialize ull rx memq */
1945 link = mem_acquire(&mem_link_iso_rx.free);
1946 LL_ASSERT(link);
1947
1948 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
1949 /* Initialize ull rx memq */
1950 MEMQ_INIT(ull_iso_rx, link);
1951 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1952
1953 /* Acquire a link to initialize ll_iso_rx memq */
1954 link = mem_acquire(&mem_link_iso_rx.free);
1955 LL_ASSERT(link);
1956
1957 /* Initialize ll_iso_rx memq */
1958 MEMQ_INIT(ll_iso_rx, link);
1959
1960 RXFIFO_ALLOC(iso_rx, UINT8_MAX);
1961 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1962
1963 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1964 /* Initialize tx pool. */
1965 mem_init(mem_iso_tx.pool, NODE_TX_BUFFER_SIZE, BT_CTLR_ISO_TX_BUFFERS,
1966 &mem_iso_tx.free);
1967
1968 /* Initialize tx link pool. */
1969 mem_init(mem_link_iso_tx.pool, sizeof(memq_link_t),
1970 BT_CTLR_ISO_TX_BUFFERS, &mem_link_iso_tx.free);
1971 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1972
1973 #if BT_CTLR_ISO_STREAMS
1974 /* Initialize ISO Datapath pool */
1975 mem_init(datapath_pool, sizeof(struct ll_iso_datapath),
1976 sizeof(datapath_pool) / sizeof(struct ll_iso_datapath), &datapath_free);
1977 #endif /* BT_CTLR_ISO_STREAMS */
1978
1979 /* Initialize ISO Adaptation Layer */
1980 isoal_init();
1981
1982 return 0;
1983 }
1984
1985 #if defined(CONFIG_BT_CTLR_CONN_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
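/**
 * Start a single-shot ticker that resumes a CIS or BIS event resume_timeout
 * microseconds after ticks_anchor, compensating for the event start overhead
 * and, for the peripheral role, the radio RX ready delay. On expiry the ticker
 * enqueues an lll_resume mayfly (see ticker_resume_cb()).
 */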
1986 void ull_iso_resume_ticker_start(struct lll_event *resume_event,
1987 uint16_t group_handle,
1988 uint16_t stream_handle,
1989 uint8_t role,
1990 uint32_t ticks_anchor,
1991 uint32_t resume_timeout)
1992 {
1993 uint32_t resume_delay_us;
1994 int32_t resume_offset_us;
1995 uint8_t ticker_id = 0;
1996 uint32_t ret;
1997
1998 resume_delay_us = EVENT_OVERHEAD_START_US;
1999 resume_delay_us += EVENT_TICKER_RES_MARGIN_US;
2000
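	/* The if (0) chain below keeps each conditionally compiled branch as an
	 * `} else if`, so the statement stays well-formed for any combination of
	 * CONN_ISO / SYNC_ISO support.
	 */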
2001 if (0) {
2002 #if defined(CONFIG_BT_CTLR_CONN_ISO)
2003 } else if (IS_CIS_HANDLE(stream_handle)) {
2004 ticker_id = TICKER_ID_CONN_ISO_RESUME_BASE + group_handle;
2005 #endif /* CONFIG_BT_CTLR_CONN_ISO */
2006 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
2007 } else if (IS_SYNC_ISO_HANDLE(stream_handle)) {
2008 ticker_id = TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE + group_handle;
2009 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
2010 } else {
2011 LL_ASSERT(0);
2012 }
2013
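	/* For the peripheral role, the PHY in use by the stream is looked up so
	 * that the radio RX ready delay can be added to the resume delay.
	 */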
2014 if (role == BT_HCI_ROLE_PERIPHERAL) {
2015 /* Add peripheral specific delay */
2016 if (0) {
2017 #if defined(CONFIG_BT_CTLR_PHY)
2018 } else {
2019 uint8_t phy = 0;
2020
2021 if (0) {
2022 #if defined(CONFIG_BT_CTLR_CONN_ISO)
2023 } else if (IS_CIS_HANDLE(stream_handle)) {
2024 struct ll_conn_iso_stream *cis;
2025 struct ll_conn *conn;
2026
2027 cis = ll_conn_iso_stream_get(stream_handle);
2028 conn = ll_conn_get(cis->lll.acl_handle);
2029 phy = conn->lll.phy_rx;
2030 #endif /* CONFIG_BT_CTLR_CONN_ISO */
2031 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
2032 } else if (IS_SYNC_ISO_HANDLE(stream_handle)) {
2033 struct ll_sync_iso_set *sync_iso;
2034 uint16_t stream_idx;
2035
2036 stream_idx = LL_BIS_SYNC_IDX_FROM_HANDLE(stream_handle);
2037 sync_iso = ull_sync_iso_by_stream_get(stream_idx);
2038 phy = sync_iso->lll.phy;
2039 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
2040 } else {
2041 LL_ASSERT(0);
2042 }
2043
2044 resume_delay_us +=
2045 lll_radio_rx_ready_delay_get(phy, PHY_FLAGS_S8);
2046 #else
2047 } else {
2048 resume_delay_us += lll_radio_rx_ready_delay_get(0, 0);
2049 #endif /* CONFIG_BT_CTLR_PHY */
2050 }
2051 }
2052
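	/* Expire resume_delay_us ahead of the nominal resume point so that the
	 * start overhead (and any RX ready delay) is absorbed; resume_timeout
	 * must therefore be at least resume_delay_us.
	 */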
2053 resume_offset_us = (int32_t)(resume_timeout - resume_delay_us);
2054 LL_ASSERT(resume_offset_us >= 0);
2055
2056 /* Setup resume timeout as single-shot */
2057 ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
2058 TICKER_USER_ID_LLL,
2059 ticker_id,
2060 ticks_anchor,
2061 HAL_TICKER_US_TO_TICKS(resume_offset_us),
2062 TICKER_NULL_PERIOD,
2063 TICKER_NULL_REMAINDER,
2064 TICKER_NULL_LAZY,
2065 TICKER_NULL_SLOT,
2066 ticker_resume_cb, resume_event,
2067 ticker_resume_op_cb, NULL);
2068
2069 LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2070 (ret == TICKER_STATUS_BUSY));
2071 }
2072
2073 static void ticker_resume_op_cb(uint32_t status, void *param)
2074 {
2075 ARG_UNUSED(param);
2076
2077 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
2078 }
2079
2080 static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
2081 uint32_t remainder, uint16_t lazy, uint8_t force,
2082 void *param)
2083 {
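	/* A single statically allocated mayfly/link pair is reused for all ISO
	 * resume requests; this assumes at most one resume is pending at a time.
	 */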
2084 static memq_link_t link;
2085 static struct mayfly mfy = {0, 0, &link, NULL, lll_resume};
2086 struct lll_event *resume_event;
2087 uint32_t ret;
2088
2089 ARG_UNUSED(ticks_drift);
2090 LL_ASSERT(lazy == 0);
2091
2092 resume_event = param;
2093
2094 /* Append timing parameters */
2095 resume_event->prepare_param.ticks_at_expire = ticks_at_expire;
2096 resume_event->prepare_param.remainder = remainder;
2097 resume_event->prepare_param.lazy = 0;
2098 resume_event->prepare_param.force = force;
2099 mfy.param = resume_event;
2100
2101 /* Kick LLL resume */
2102 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
2103 0, &mfy);
2104
2105 LL_ASSERT(!ret);
2106 }
2107 #endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
2108