Lines Matching full:cis

70 static void cis_lazy_fill(struct ll_conn_iso_stream *cis);
135 struct ll_conn_iso_stream *cis = mem_acquire(&cis_free); in ll_conn_iso_stream_acquire() local
137 if (cis) { in ll_conn_iso_stream_acquire()
138 (void)memset(&cis->hdr, 0U, sizeof(cis->hdr)); in ll_conn_iso_stream_acquire()
141 return cis; in ll_conn_iso_stream_acquire()
144 void ll_conn_iso_stream_release(struct ll_conn_iso_stream *cis) in ll_conn_iso_stream_release() argument
146 cis->cis_id = 0; in ll_conn_iso_stream_release()
147 cis->group = NULL; in ll_conn_iso_stream_release()
149 mem_release(cis, &cis_free); in ll_conn_iso_stream_release()
152 uint16_t ll_conn_iso_stream_handle_get(struct ll_conn_iso_stream *cis) in ll_conn_iso_stream_handle_get() argument
154 return mem_index_get(cis, cis_pool, in ll_conn_iso_stream_handle_get()
167 struct ll_conn_iso_stream *cis; in ull_conn_iso_lll_stream_get() local
169 cis = ll_conn_iso_stream_get(handle); in ull_conn_iso_lll_stream_get()
170 if (!cis) { in ull_conn_iso_lll_stream_get()
174 return &cis->lll; in ull_conn_iso_lll_stream_get()
179 struct ll_conn_iso_stream *cis; in ll_iso_stream_connected_get() local
186 cis = ll_conn_iso_stream_get(handle); in ll_iso_stream_connected_get()
187 if ((cis->group == NULL) || (cis->lll.handle != handle) || !cis->established) { in ll_iso_stream_connected_get()
188 /* CIS does not belong to a group, has an inconsistent handle or is in ll_iso_stream_connected_get()
194 return cis; in ll_iso_stream_connected_get()
202 /* Find CIS associated with ACL conn */ in ll_conn_iso_stream_get_by_acl()
204 struct ll_conn_iso_stream *cis; in ll_conn_iso_stream_get_by_acl() local
216 /* Find next connected CIS in the group */ in ll_conn_iso_stream_get_by_acl()
218 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter); in ll_conn_iso_stream_get_by_acl()
219 if (cis) { in ll_conn_iso_stream_get_by_acl()
220 uint16_t cis_handle = cis->lll.handle; in ll_conn_iso_stream_get_by_acl()
222 cis = ll_iso_stream_connected_get(cis_handle); in ll_conn_iso_stream_get_by_acl()
223 if (!cis) { in ll_conn_iso_stream_get_by_acl()
224 /* CIS is not connected */ in ll_conn_iso_stream_get_by_acl()
231 } else if (cis->lll.acl_handle == conn->lll.handle) { in ll_conn_iso_stream_get_by_acl()
236 return cis; in ll_conn_iso_stream_get_by_acl()
248 struct ll_conn_iso_stream *cis; in ll_conn_iso_stream_get_by_group() local
256 cis = ll_conn_iso_stream_get(handle); in ll_conn_iso_stream_get_by_group()
257 if (cis->group == cig) { in ll_conn_iso_stream_get_by_group()
261 return cis; in ll_conn_iso_stream_get_by_group()
270 struct ll_conn_iso_stream *cis; in ll_conn_iso_stream_get_by_id() local
274 cis = ll_conn_iso_stream_get(handle); in ll_conn_iso_stream_get_by_id()
275 if (cis->group && (cis->cis_id == cis_id)) { in ll_conn_iso_stream_get_by_id()
276 return cis; in ll_conn_iso_stream_get_by_id()
287 struct ll_conn_iso_stream *cis; in ull_conn_iso_lll_stream_get_by_group() local
291 cis = ll_conn_iso_stream_get_by_group(cig, handle_iter); in ull_conn_iso_lll_stream_get_by_group()
292 if (!cis) { in ull_conn_iso_lll_stream_get_by_group()
296 return &cis->lll; in ull_conn_iso_lll_stream_get_by_group()
300 * Helper function to iterate and return CIS LLL context sorted based on
301 * ascending order of the CIS offset from associated ACL and the CIG.
304 * CIS offsets used when creating CISes to peripheral.
323 * find the first CIS offset of the active CIS. in ull_conn_iso_lll_stream_sorted_get_by_group()
327 /* Subsequent iteration, get reference to current CIS and use in ull_conn_iso_lll_stream_sorted_get_by_group()
328 * its CIS offset to find the next active CIS with offset in ull_conn_iso_lll_stream_sorted_get_by_group()
329 * greater than the current CIS. in ull_conn_iso_lll_stream_sorted_get_by_group()
339 /* Loop through all CIS contexts */ in ull_conn_iso_lll_stream_sorted_get_by_group()
342 struct ll_conn_iso_stream *cis; in ull_conn_iso_lll_stream_sorted_get_by_group() local
344 /* Get CIS reference corresponding to loop handle */ in ull_conn_iso_lll_stream_sorted_get_by_group()
345 cis = ll_conn_iso_stream_get(handle); in ull_conn_iso_lll_stream_sorted_get_by_group()
347 /* Match CIS contexts associated with the CIG */ in ull_conn_iso_lll_stream_sorted_get_by_group()
348 if (cis->group == cig) { in ull_conn_iso_lll_stream_sorted_get_by_group()
349 if (cis->offset <= cis_offset_curr) { in ull_conn_iso_lll_stream_sorted_get_by_group()
351 * than the current CIS. in ull_conn_iso_lll_stream_sorted_get_by_group()
356 /* Remember a CIS with offset greater than the current one, but in ull_conn_iso_lll_stream_sorted_get_by_group()
357 * lower than the candidate remembered so far as the next CIS in ull_conn_iso_lll_stream_sorted_get_by_group()
360 if (cis->offset < cis_offset_next) { in ull_conn_iso_lll_stream_sorted_get_by_group()
361 cis_next = cis; in ull_conn_iso_lll_stream_sorted_get_by_group()
372 /* Found the next CIS with offset in ascending order. */ in ull_conn_iso_lll_stream_sorted_get_by_group()
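The sorted-get helper above is effectively a per-call selection pass: it scans every CIS context of the CIG and keeps the smallest offset that is still strictly greater than the current one. A minimal, self-contained sketch of that selection idea follows; the plain offset array, NUM_CIS and next_cis_by_offset() are illustrative stand-ins, not the Zephyr pool/handle iteration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model: offsets (us) of the CISes in one CIG, unordered. */
static const uint32_t cis_offset[] = { 900U, 300U, 600U };
#define NUM_CIS (sizeof(cis_offset) / sizeof(cis_offset[0]))

/* Return the index of the CIS with the smallest offset strictly greater
 * than curr, or -1 when none is left; UINT32_MAX plays the role of the
 * "no candidate yet" sentinel while scanning.
 */
static int next_cis_by_offset(uint32_t curr)
{
	uint32_t best = UINT32_MAX;
	int idx = -1;

	for (int i = 0; i < (int)NUM_CIS; i++) {
		if ((cis_offset[i] > curr) && (cis_offset[i] < best)) {
			best = cis_offset[i];
			idx = i;
		}
	}

	return idx;
}

int main(void)
{
	uint32_t curr = 0U;
	int idx;

	/* The first call with offset zero finds the lowest offset, and each
	 * later call advances to the next greater one: prints 300, 600, 900.
	 */
	while ((idx = next_cis_by_offset(curr)) >= 0) {
		curr = cis_offset[idx];
		printf("CIS offset %" PRIu32 "\n", curr);
	}

	return 0;
}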
382 struct ll_conn_iso_stream *cis; in ull_conn_iso_lll_group_get_by_stream() local
385 cis = ll_conn_iso_stream_get(cis_lll->handle); in ull_conn_iso_lll_group_get_by_stream()
386 cig = cis->group; in ull_conn_iso_lll_group_get_by_stream()
412 struct ll_conn_iso_stream *cis = in ull_conn_iso_lll_cis_established() local
416 if (cis->established) { in ull_conn_iso_lll_cis_established()
429 node_rx->hdr.handle = cis->lll.acl_handle; in ull_conn_iso_lll_cis_established()
433 cis->established = 1; in ull_conn_iso_lll_cis_established()
440 struct ll_conn_iso_stream *cis; in ull_conn_iso_done() local
458 cis = NULL; in ull_conn_iso_done()
462 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter); in ull_conn_iso_done()
463 LL_ASSERT(cis); in ull_conn_iso_done()
465 if (cis->lll.active && cis->lll.handle != LLL_HANDLE_INVALID) { in ull_conn_iso_done()
466 /* CIS was set up and is now expected to be going */ in ull_conn_iso_done()
468 (1U << LL_CIS_IDX_FROM_HANDLE(cis->lll.handle))) { in ull_conn_iso_done()
472 /* MIC failure - stop CIS and defer cleanup to after in ull_conn_iso_done()
475 ull_conn_iso_cis_stop(cis, NULL, in ull_conn_iso_done()
479 cis->event_expire = 0U; in ull_conn_iso_done()
482 /* We did NOT have a successful transaction on an established CIS, in ull_conn_iso_done()
483 * or the CIS was not yet established, so handle the timeout in ull_conn_iso_done()
485 if (!cis->event_expire) { in ull_conn_iso_done()
486 struct ll_conn *conn = ll_conn_get(cis->lll.acl_handle); in ull_conn_iso_done()
488 cis->event_expire = RADIO_CONN_EVENTS( in ull_conn_iso_done()
497 if (cis->event_expire > event_elapsed) { in ull_conn_iso_done()
498 cis->event_expire -= event_elapsed; in ull_conn_iso_done()
500 cis->event_expire = 0U; in ull_conn_iso_done()
502 /* Stop CIS and defer cleanup to after teardown. in ull_conn_iso_done()
504 * host if CIS has been established. If CIS was not in ull_conn_iso_done()
508 ull_conn_iso_cis_stop(cis, NULL, cis->established ? in ull_conn_iso_done()
524 * any CIS found in the above for-loop will do to dereference the ACL in ull_conn_iso_done()
526 if (cis && (ticks_drift_plus || ticks_drift_minus)) { in ull_conn_iso_done()
529 struct ll_conn *conn = ll_connected_get(cis->lll.acl_handle); in ull_conn_iso_done()
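The event_expire handling above arms a countdown (via RADIO_CONN_EVENTS) the first time a CIG event passes without a successful transaction, decrements it by the number of elapsed events, and stops the CIS when it reaches zero. A hypothetical, simplified model of that countdown; struct cis_model, cis_timeout_elapsed() and the timing values are assumptions for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Remaining ISO events before the CIS is considered timed out. */
struct cis_model {
	uint16_t event_expire;
};

static bool cis_timeout_elapsed(struct cis_model *cis, uint16_t event_elapsed,
				uint32_t supervision_to_us, uint32_t iso_interval_us)
{
	if (cis->event_expire == 0U) {
		/* No successful transaction yet in this window: arm the
		 * countdown from the supervision timeout, expressed in ISO
		 * events (standing in for RADIO_CONN_EVENTS()).
		 */
		cis->event_expire = (uint16_t)(supervision_to_us / iso_interval_us);
	}

	if (cis->event_expire > event_elapsed) {
		cis->event_expire -= event_elapsed;
		return false;
	}

	cis->event_expire = 0U;
	return true;	/* caller stops the CIS with a timeout reason */
}

int main(void)
{
	struct cis_model cis = { 0 };

	/* 100 ms supervision timeout, 10 ms ISO interval, one event per call */
	for (int i = 1; i <= 12; i++) {
		if (cis_timeout_elapsed(&cis, 1U, 100000U, 10000U)) {
			printf("timeout after event %d\n", i);	/* event 10 */
			break;
		}
	}

	return 0;
}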
549 * This function may be called to tear down a CIS. When the CIS teardown
553 * @param cis Pointer to connected ISO stream to stop
554 * @param cis_released_cb Callback to invoke when the CIS has been released.
558 void ull_conn_iso_cis_stop(struct ll_conn_iso_stream *cis, in ull_conn_iso_cis_stop() argument
565 if (cis->teardown) { in ull_conn_iso_cis_stop()
567 LL_ASSERT(!cis->released_cb || !cis_released_cb || in ull_conn_iso_cis_stop()
568 (cis->released_cb == cis_released_cb)); in ull_conn_iso_cis_stop()
571 cis->released_cb = cis_released_cb; in ull_conn_iso_cis_stop()
577 cis->teardown = 1; in ull_conn_iso_cis_stop()
578 cis->released_cb = cis_released_cb; in ull_conn_iso_cis_stop()
579 cis->terminate_reason = reason; in ull_conn_iso_cis_stop()
582 cig = cis->group; in ull_conn_iso_cis_stop()
595 * continue CIS teardown from there. The disabled_cb cannot be in ull_conn_iso_cis_stop()
610 /* Tear down CIS now in ULL_HIGH context. Ignore enqueue in ull_conn_iso_cis_stop()
631 struct ll_conn_iso_stream *cis; in init_reset() local
641 /* Initialize CIS pool */ in init_reset()
660 cis = ll_conn_iso_stream_get(handle); in init_reset()
661 cis->cis_id = 0; in init_reset()
662 cis->group = NULL; in init_reset()
663 cis->lll.link_tx_free = NULL; in init_reset()
685 struct ll_conn_iso_stream *cis; in ull_conn_iso_ticker_cb() local
702 /* Increment CIS event counters */ in ull_conn_iso_ticker_cb()
704 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter); in ull_conn_iso_ticker_cb()
705 LL_ASSERT(cis); in ull_conn_iso_ticker_cb()
707 /* New CIS may become available by creation prior to the CIG in ull_conn_iso_ticker_cb()
713 if (cis->lll.handle != 0xFFFF && cis->lll.active) { in ull_conn_iso_ticker_cb()
714 cis->lll.event_count += (lazy + 1U); in ull_conn_iso_ticker_cb()
717 cis->lll.event_count -= cis->lll.lazy_active; in ull_conn_iso_ticker_cb()
718 cis->lll.lazy_active = 0U; in ull_conn_iso_ticker_cb()
722 cis->lll.event_count); in ull_conn_iso_ticker_cb()
724 ull_iso_lll_event_prepare(cis->lll.handle, cis->lll.event_count); in ull_conn_iso_ticker_cb()
728 cis->lll.datapath_ready_rx = cis->hdr.datapath_out != NULL; in ull_conn_iso_ticker_cb()
732 * leading CIS in the CIG would have had its reference point set in in ull_conn_iso_ticker_cb()
736 * point for the leading CIS is available for this event. in ull_conn_iso_ticker_cb()
795 static uint32_t cig_offset_calc(struct ll_conn_iso_group *cig, struct ll_conn_iso_stream *cis, in cig_offset_calc() argument
805 cis_offs_to_cig_ref = cig->sync_delay - cis->sync_delay; in cig_offset_calc()
807 /* Establish the CIG reference point by adjusting ACL-to-CIS offset in cig_offset_calc()
808 * (cis->offset) by the difference between the CIG and CIS sync delays. in cig_offset_calc()
814 * until the first anchor point for the leading CIS is available. in cig_offset_calc()
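As a small worked example of the reference-point adjustment in cig_offset_calc(): the CIS anchor sits (cig->sync_delay - cis->sync_delay) microseconds after the CIG reference point, so subtracting that difference from the ACL-to-CIS offset yields the ACL-to-CIG-reference offset. The numbers below are assumed values, not taken from the source.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cig_sync_delay = 5000U;	/* us, assumed */
	uint32_t cis_sync_delay = 3000U;	/* us, assumed */
	uint32_t acl_to_cis_offset = 7000U;	/* us, plays the role of cis->offset */

	/* The CIS anchor is 2000 us after the CIG reference point ... */
	uint32_t cis_offs_to_cig_ref = cig_sync_delay - cis_sync_delay;

	/* ... so the CIG reference point is 5000 us after the ACL anchor. */
	uint32_t acl_to_cig_ref = acl_to_cis_offset - cis_offs_to_cig_ref;

	printf("CIG reference point is %u us after the ACL anchor\n",
	       (unsigned)acl_to_cig_ref);

	return 0;
}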
829 struct ll_conn_iso_stream *cis; in ull_conn_iso_start() local
837 cis = ll_conn_iso_stream_get(cis_handle); in ull_conn_iso_start()
838 cig = cis->group; in ull_conn_iso_start()
840 cis->lll.offset = cig->sync_delay - cis->sync_delay; in ull_conn_iso_start()
841 cis->lll.handle = cis_handle; in ull_conn_iso_start()
846 memcpy(cis->lll.tx.ccm.key, conn->lll.ccm_tx.key, in ull_conn_iso_start()
847 sizeof(cis->lll.tx.ccm.key)); in ull_conn_iso_start()
850 memcpy(&cis->lll.tx.ccm.iv[4], &conn->lll.ccm_tx.iv[4], 4); in ull_conn_iso_start()
852 /* XOR the CIS access address to get IV */ in ull_conn_iso_start()
853 mem_xor_32(cis->lll.tx.ccm.iv, conn->lll.ccm_tx.iv, in ull_conn_iso_start()
854 cis->lll.access_addr); in ull_conn_iso_start()
857 cis->lll.tx.ccm.counter = 0U; in ull_conn_iso_start()
862 cis->lll.tx.ccm.direction = !conn->lll.role; in ull_conn_iso_start()
867 memcpy(cis->lll.rx.ccm.key, conn->lll.ccm_rx.key, in ull_conn_iso_start()
868 sizeof(cis->lll.rx.ccm.key)); in ull_conn_iso_start()
871 memcpy(&cis->lll.rx.ccm.iv[4], &conn->lll.ccm_rx.iv[4], 4); in ull_conn_iso_start()
873 /* XOR the CIS access address to get IV */ in ull_conn_iso_start()
874 mem_xor_32(cis->lll.rx.ccm.iv, conn->lll.ccm_rx.iv, in ull_conn_iso_start()
875 cis->lll.access_addr); in ull_conn_iso_start()
878 cis->lll.rx.ccm.counter = 0U; in ull_conn_iso_start()
883 cis->lll.rx.ccm.direction = conn->lll.role; in ull_conn_iso_start()
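The CCM setup above derives the CIS IV from the ACL IV in two steps: the upper four bytes are copied verbatim, and the lower four bytes are XORed with the 32-bit CIS access address (mem_xor_32). A self-contained sketch of that pattern, assuming an 8-byte IV as in the controller's CCM context; cis_iv_derive() and the sample values are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void cis_iv_derive(uint8_t cis_iv[8], const uint8_t acl_iv[8],
			  const uint8_t access_addr[4])
{
	/* Upper half copied verbatim from the ACL IV */
	memcpy(&cis_iv[4], &acl_iv[4], 4);

	/* Lower half: ACL IV XOR CIS access address (stand-in for mem_xor_32()) */
	for (int i = 0; i < 4; i++) {
		cis_iv[i] = acl_iv[i] ^ access_addr[i];
	}
}

int main(void)
{
	const uint8_t acl_iv[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	const uint8_t aa[4] = { 0xde, 0xad, 0xbe, 0xef };	/* example access address */
	uint8_t cis_iv[8];

	cis_iv_derive(cis_iv, acl_iv, aa);

	for (int i = 0; i < 8; i++) {
		printf("%02x", cis_iv[i]);
	}
	printf("\n");

	return 0;
}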
888 cis->event_expire = CONN_ESTAB_COUNTDOWN; in ull_conn_iso_start()
890 /* Check if another CIS was already started and CIG ticker is in ull_conn_iso_start()
896 /* Initialize CIS event lazy at CIS create */ in ull_conn_iso_start()
897 cis->lll.lazy_active = 0U; in ull_conn_iso_start()
899 /* Deferred fill CIS event lazy value at CIS create */ in ull_conn_iso_start()
900 cis_lazy_fill(cis); in ull_conn_iso_start()
902 /* Set CIS active in already active CIG */ in ull_conn_iso_start()
903 cis->lll.active = 1U; in ull_conn_iso_start()
912 cig_offset_us = cig_offset_calc(cig, cis, cis->offset, &ticks_at_expire, remainder); in ull_conn_iso_start()
928 bool early_start = (cis->offset < EVENT_OVERHEAD_START_US); in ull_conn_iso_start()
966 lost_cig_events = DIV_ROUND_UP(acl_latency_us - cis->offset, in ull_conn_iso_start()
968 cis_offset = cis->offset + (lost_cig_events * iso_interval_us) - in ull_conn_iso_start()
975 cis_offset = cis->offset + iso_interval_us - acl_latency_us; in ull_conn_iso_start()
978 cis->lll.event_count += lost_cig_events; in ull_conn_iso_start()
980 lost_payloads = (lost_cig_events - (cis->lll.rx.ft - 1)) * cis->lll.rx.bn; in ull_conn_iso_start()
981 cis->lll.rx.payload_count += lost_payloads; in ull_conn_iso_start()
983 lost_payloads = (lost_cig_events - (cis->lll.tx.ft - 1)) * cis->lll.tx.bn; in ull_conn_iso_start()
984 cis->lll.tx.payload_count += lost_payloads; in ull_conn_iso_start()
992 cig_offset_us = cig_offset_calc(cig, cis, cis_offset, &ticks_at_expire, in ull_conn_iso_start()
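The late-start compensation above advances the event and payload counters when the ACL latency already exceeds the requested CIS offset: whole ISO intervals are counted as lost CIG events, the offset is rebased into the next interval, and lost payloads are the skipped events beyond what the flush timeout (FT) can absorb, times the burst number (BN). A worked arithmetic sketch with assumed values (latency, offset, interval, FT, BN are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t acl_latency_us = 30000U;	/* assumed */
	uint32_t cis_offset_us = 5000U;		/* assumed cis->offset */
	uint32_t iso_interval_us = 10000U;	/* assumed */
	uint8_t ft = 2U, bn = 1U;		/* assumed flush timeout / burst number */

	/* ceil(25000 / 10000) = 3 lost CIG events */
	uint32_t lost_cig_events = DIV_ROUND_UP(acl_latency_us - cis_offset_us,
						iso_interval_us);

	/* 5000 + 30000 - 30000 = 5000 us offset into the next interval */
	uint32_t new_cis_offset = cis_offset_us +
				  (lost_cig_events * iso_interval_us) - acl_latency_us;

	/* (3 - (2 - 1)) * 1 = 2 payloads that can no longer be flushed */
	uint32_t lost_payloads = (lost_cig_events - (ft - 1U)) * bn;

	printf("lost events %u, new offset %u us, lost payloads %u\n",
	       (unsigned)lost_cig_events, (unsigned)new_cis_offset,
	       (unsigned)lost_payloads);

	return 0;
}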
1040 slot_us = cis->lll.sub_interval * cis->lll.nse; in ull_conn_iso_start()
1042 slot_us = cis->lll.sub_interval * MAX(cis->lll.tx.bn, cis->lll.rx.bn); in ull_conn_iso_start()
1076 /* Initialize CIS event lazy at CIS create */ in ull_conn_iso_start()
1077 cis->lll.lazy_active = 0U; in ull_conn_iso_start()
1080 /* Start CIS peripheral CIG ticker */ in ull_conn_iso_start()
1093 /* Set CIG and the first CIS state as active */ in ull_conn_iso_start()
1095 cis->lll.active = 1U; in ull_conn_iso_start()
1099 static void cis_lazy_fill(struct ll_conn_iso_stream *cis) in cis_lazy_fill() argument
1105 mfy.param = cis; in cis_lazy_fill()
1112 struct ll_conn_iso_stream *cis; in mfy_cis_lazy_fill() local
1122 cis = param; in mfy_cis_lazy_fill()
1123 cig = cis->group; in mfy_cis_lazy_fill()
1177 /* Set CIS active in an already active CIG, and record any previous laziness in in mfy_cis_lazy_fill()
1178 * the CIG before the CIS gets active, to be decremented when event_count in mfy_cis_lazy_fill()
1181 cis->lll.active = 1U; in mfy_cis_lazy_fill()
1182 cis->lll.lazy_active = lazy; in mfy_cis_lazy_fill()
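The lazy_active value stored here pairs with the ticker callback earlier in the listing: the ticker adds (lazy + 1) to event_count for every active CIS, then subtracts the pre-activation laziness exactly once. A sketch of that one-shot correction under an assumed simplification (struct cis_model and both *_model() helpers are hypothetical, not the Zephyr API):

#include <stdint.h>
#include <stdio.h>

struct cis_model {
	uint64_t event_count;
	uint16_t lazy_active;
	uint8_t active;
};

static void cis_lazy_fill_model(struct cis_model *cis, uint16_t cig_lazy)
{
	cis->active = 1U;
	cis->lazy_active = cig_lazy;	/* laziness accrued before the CIS was active */
}

static void cig_ticker_cb_model(struct cis_model *cis, uint16_t lazy)
{
	if (!cis->active) {
		return;
	}

	cis->event_count += (lazy + 1U);

	/* One-shot correction for the pre-activation laziness */
	cis->event_count -= cis->lazy_active;
	cis->lazy_active = 0U;
}

int main(void)
{
	struct cis_model cis = { 0 };

	cis_lazy_fill_model(&cis, 2U);	/* CIS created while the CIG ticker was 2 events lazy */
	cig_ticker_cb_model(&cis, 2U);	/* first CIG event that counts this CIS */

	printf("event_count = %llu\n", (unsigned long long)cis.event_count);	/* 1 */

	return 0;
}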
1212 struct ll_conn_iso_stream *cis; in cis_disabled_cb() local
1227 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter); in cis_disabled_cb()
1228 LL_ASSERT(cis); in cis_disabled_cb()
1230 if (!cis->lll.active && (cis->lll.flush != LLL_CIS_FLUSH_COMPLETE)) { in cis_disabled_cb()
1231 /* CIS is not active and did not just complete LLL flush - skip it */ in cis_disabled_cb()
1237 if (cis->lll.flush == LLL_CIS_FLUSH_PENDING) { in cis_disabled_cb()
1238 /* CIS has LLL flush pending - wait for completion */ in cis_disabled_cb()
1240 } else if (cis->lll.flush == LLL_CIS_FLUSH_COMPLETE) { in cis_disabled_cb()
1243 conn = ll_conn_get(cis->lll.acl_handle); in cis_disabled_cb()
1244 cis_released_cb = cis->released_cb; in cis_disabled_cb()
1245 cis->released_cb = NULL; in cis_disabled_cb()
1249 * CIS for both directions. Disable them one at a time to make sure in cis_disabled_cb()
1252 ll_remove_iso_path(cis->lll.handle, in cis_disabled_cb()
1254 ll_remove_iso_path(cis->lll.handle, in cis_disabled_cb()
1257 ll_conn_iso_stream_release(cis); in cis_disabled_cb()
1262 cis->established = 0U; in cis_disabled_cb()
1263 cis->teardown = 0U; in cis_disabled_cb()
1265 /* Prevent referencing inactive CIS */ in cis_disabled_cb()
1266 cis->lll.flush = LLL_CIS_FLUSH_NONE; in cis_disabled_cb()
1267 cis->lll.acl_handle = LLL_HANDLE_INVALID; in cis_disabled_cb()
1273 /* CIS is no longer active */ in cis_disabled_cb()
1276 /* CIS terminated, triggers completion of CIS_TERMINATE_IND procedure */ in cis_disabled_cb()
1278 conn->llcp.cis.terminate_ack = 1U; in cis_disabled_cb()
1280 /* Check if removed CIS has an ACL disassociation callback. Invoke in cis_disabled_cb()
1284 /* CIS removed - notify caller */ in cis_disabled_cb()
1287 } else if (cis->teardown) { in cis_disabled_cb()
1292 if (cis->established) { in cis_disabled_cb()
1296 * further enqueuing of TX nodes for terminating CIS. in cis_disabled_cb()
1300 node_terminate->hdr.handle = cis->lll.handle; in cis_disabled_cb()
1302 *((uint8_t *)node_terminate->pdu) = cis->terminate_reason; in cis_disabled_cb()
1306 conn = ll_conn_get(cis->lll.acl_handle); in cis_disabled_cb()
1308 /* CIS was not established - complete the procedure with error */ in cis_disabled_cb()
1310 ull_cp_cc_established(conn, cis->terminate_reason); in cis_disabled_cb()
1314 if (cig->lll.resume_cis == cis->lll.handle) { in cis_disabled_cb()
1315 /* Resume pending for terminating CIS - stop ticker */ in cis_disabled_cb()
1329 cis->lll.flush = LLL_CIS_FLUSH_PENDING; in cis_disabled_cb()
1331 mfys[cig->lll.handle].param = &cis->lll; in cis_disabled_cb()
1341 /* This was the last active CIS of the CIG. Initiate CIG teardown by in cis_disabled_cb()
1363 struct ll_conn_iso_stream *cis; in cis_tx_lll_flush() local
1371 cis = ll_conn_iso_stream_get(lll->handle); in cis_tx_lll_flush()
1372 cig = cis->group; in cis_tx_lll_flush()
1394 /* Resume CIS teardown in ULL_HIGH context */ in cis_tx_lll_flush()
1481 struct ll_conn_iso_stream *cis; in ull_conn_iso_transmit_test_cig_interval() local
1512 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter); in ull_conn_iso_transmit_test_cig_interval()
1513 LL_ASSERT(cis); in ull_conn_iso_transmit_test_cig_interval()
1515 if (!cis->hdr.test_mode.tx.enabled || cis->lll.handle == LLL_HANDLE_INVALID) { in ull_conn_iso_transmit_test_cig_interval()
1523 sdu_counter = DIV_ROUND_UP((cis->lll.event_count + 1U) * iso_interval, in ull_conn_iso_transmit_test_cig_interval()
1526 if (cis->hdr.test_mode.tx.sdu_counter == 0U) { in ull_conn_iso_transmit_test_cig_interval()
1528 cis->hdr.test_mode.tx.sdu_counter = sdu_counter; in ull_conn_iso_transmit_test_cig_interval()
1532 tx_sdu_count = sdu_counter - cis->hdr.test_mode.tx.sdu_counter; in ull_conn_iso_transmit_test_cig_interval()
1537 ll_iso_transmit_test_send_sdu(cis->lll.handle, ticks_at_expire); in ull_conn_iso_transmit_test_cig_interval()
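The transmit-test path above derives the expected SDU counter for the next event as ceil((event_count + 1) * ISO interval / SDU interval), then sends the difference against the stored counter. A worked example with assumed intervals and event count (the values below are illustrative, not taken from the source):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t event_count = 4U;		/* plays the role of cis->lll.event_count */
	uint32_t iso_interval_us = 20000U;	/* assumed */
	uint32_t sdu_interval_us = 10000U;	/* assumed */

	/* ceil((4 + 1) * 20000 / 10000) = 10 SDUs expected by the next event,
	 * i.e. two SDUs per ISO event at these intervals.
	 */
	uint64_t sdu_counter = DIV_ROUND_UP((event_count + 1U) * iso_interval_us,
					    sdu_interval_us);

	printf("sdu_counter = %llu\n", (unsigned long long)sdu_counter);

	return 0;
}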