Lines Matching full:tx

Each entry below gives the source line number, the matched line, and the enclosing function; "argument" and "local" mark matches that are function parameters or local variables.

85 	uint8_t               blocked:1,     /* Blocked by ongoing tx */
119 static int send_unseg(struct bt_mesh_net_tx *tx, struct net_buf_simple *sdu, in send_unseg() argument
126 tx->xmit, BUF_TIMEOUT); in send_unseg()
136 } else if (BT_MESH_IS_DEV_KEY(tx->ctx->app_idx)) { in send_unseg()
139 net_buf_simple_add_u8(&adv->b, UNSEG_HDR(1, tx->aid)); in send_unseg()
145 if (!bt_mesh_friend_queue_has_space(tx->sub->net_idx, in send_unseg()
146 tx->src, tx->ctx->addr, in send_unseg()
148 if (BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) { in send_unseg()
158 if (bt_mesh_friend_enqueue_tx(tx, BT_MESH_FRIEND_PDU_SINGLE, in send_unseg()
160 BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) { in send_unseg()
171 return bt_mesh_net_send(tx, adv, cb, cb_data); in send_unseg()
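The UNSEG_HDR() macro used in send_unseg() above packs the single-octet unsegmented transport header. A minimal sketch of that layout, assuming the SEG | AKF | AID[5:0] bit positions from the Mesh Profile specification (the helper name and main() are illustrative, not part of the listing):

#include <stdint.h>
#include <stdio.h>

#define AID_MASK_SKETCH 0x3f

/* Bit 7: SEG = 0 (unsegmented), bit 6: AKF, bits 5..0: AID. */
static uint8_t unseg_hdr_sketch(uint8_t akf, uint8_t aid)
{
	return (uint8_t)((akf << 6) | (aid & AID_MASK_SKETCH));
}

int main(void)
{
	/* Application-key message (AKF = 1) with AID 0x15 -> header 0x55. */
	printf("hdr 0x%02x\n", unseg_hdr_sketch(1, 0x15));
	return 0;
}
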
196 static void seg_tx_done(struct seg_tx *tx, uint8_t seg_idx) in seg_tx_done() argument
198 k_mem_slab_free(&segs, (void *)tx->seg[seg_idx]); in seg_tx_done()
199 tx->seg[seg_idx] = NULL; in seg_tx_done()
200 tx->nack_count--; in seg_tx_done()
203 static bool seg_tx_blocks(struct seg_tx *tx, uint16_t src, uint16_t dst) in seg_tx_blocks() argument
205 return (tx->src == src) && (tx->dst == dst); in seg_tx_blocks()
208 static void seg_tx_unblock_check(struct seg_tx *tx) in seg_tx_unblock_check() argument
213 /* Unblock the first blocked tx with the same params. */ in seg_tx_unblock_check()
215 if (&seg_tx[i] != tx && in seg_tx_unblock_check()
217 seg_tx_blocks(tx, seg_tx[i].src, seg_tx[i].dst) && in seg_tx_unblock_check()
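seg_tx_blocks() and seg_tx_unblock_check() above implement a per-(src, dst) exclusion rule: only one segmented transfer from a given source to a given destination runs at a time, and completing one releases the first blocked transfer with the same address pair. A minimal model of that rule, with an illustrative pool standing in for the real seg_tx array:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct seg_tx_sketch {
	uint16_t src, dst;
	bool in_use;
	bool blocked;
};

static struct seg_tx_sketch pool_sketch[4];

static bool blocks_sketch(const struct seg_tx_sketch *tx, uint16_t src, uint16_t dst)
{
	return tx->src == src && tx->dst == dst;
}

void unblock_check_sketch(const struct seg_tx_sketch *done)
{
	for (size_t i = 0; i < 4; i++) {
		struct seg_tx_sketch *other = &pool_sketch[i];

		if (other != done && other->in_use && other->blocked &&
		    blocks_sketch(done, other->src, other->dst)) {
			other->blocked = false; /* the real code resumes sending here */
			break;                  /* only the first match is released */
		}
	}
}
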
230 static void seg_tx_reset(struct seg_tx *tx) in seg_tx_reset() argument
235 (void)k_work_cancel_delayable(&tx->retransmit); in seg_tx_reset()
237 tx->cb = NULL; in seg_tx_reset()
238 tx->cb_data = NULL; in seg_tx_reset()
239 tx->seq_auth = 0U; in seg_tx_reset()
240 tx->sub = NULL; in seg_tx_reset()
241 tx->src = BT_MESH_ADDR_UNASSIGNED; in seg_tx_reset()
242 tx->dst = BT_MESH_ADDR_UNASSIGNED; in seg_tx_reset()
243 tx->ack_src = BT_MESH_ADDR_UNASSIGNED; in seg_tx_reset()
244 tx->blocked = false; in seg_tx_reset()
246 for (i = 0; i <= tx->seg_n && tx->nack_count; i++) { in seg_tx_reset()
247 if (!tx->seg[i]) { in seg_tx_reset()
251 seg_tx_done(tx, i); in seg_tx_reset()
254 tx->nack_count = 0; in seg_tx_reset()
255 tx->seg_send_started = 0; in seg_tx_reset()
256 tx->ack_received = 0; in seg_tx_reset()
267 static inline void seg_tx_complete(struct seg_tx *tx, int err) in seg_tx_complete() argument
269 const struct bt_mesh_send_cb *cb = tx->cb; in seg_tx_complete()
270 void *cb_data = tx->cb_data; in seg_tx_complete()
272 seg_tx_unblock_check(tx); in seg_tx_complete()
274 seg_tx_reset(tx); in seg_tx_complete()
281 static void schedule_transmit_continue(struct seg_tx *tx, uint32_t delta) in schedule_transmit_continue() argument
285 if (!tx->nack_count) { in schedule_transmit_continue()
299 k_work_reschedule(&tx->retransmit, in schedule_transmit_continue()
300 (tx->seg_o <= tx->seg_n) ? in schedule_transmit_continue()
307 struct seg_tx *tx = user_data; in seg_send_start() local
309 if (!tx->started && tx->cb && tx->cb->start) { in seg_send_start()
310 tx->cb->start(duration, err, tx->cb_data); in seg_send_start()
311 tx->started = 1U; in seg_send_start()
314 tx->seg_send_started = 1U; in seg_send_start()
315 tx->adv_start_timestamp = k_uptime_get(); in seg_send_start()
322 schedule_transmit_continue(tx, 0); in seg_send_start()
328 struct seg_tx *tx = user_data; in seg_sent() local
329 uint32_t delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp); in seg_sent()
331 if (!tx->seg_send_started) { in seg_sent()
335 schedule_transmit_continue(tx, delta_ms); in seg_sent()
343 static void seg_tx_buf_build(struct seg_tx *tx, uint8_t seg_o, in seg_tx_buf_build() argument
346 uint16_t seq_zero = tx->seq_auth & TRANS_SEQ_ZERO_MASK; in seg_tx_buf_build()
347 uint8_t len = MIN(seg_len(tx->ctl), tx->len - (seg_len(tx->ctl) * seg_o)); in seg_tx_buf_build()
349 net_buf_simple_add_u8(buf, tx->hdr); in seg_tx_buf_build()
350 net_buf_simple_add_u8(buf, (tx->aszmic << 7) | seq_zero >> 6); in seg_tx_buf_build()
352 net_buf_simple_add_u8(buf, ((seg_o & 0x07) << 5) | tx->seg_n); in seg_tx_buf_build()
353 net_buf_simple_add_mem(buf, tx->seg[seg_o], len); in seg_tx_buf_build()
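seg_tx_buf_build() above writes the 4-octet segmented transport header. The listing only contains the lines matching "tx", so the octet carrying SeqZero[5:0] and SegO[4:3] does not appear here; the sketch below reconstructs the full layout under the assumption that it follows the standard Mesh segmented PDU format (SZMIC | SeqZero(13) | SegO(5) | SegN(5)):

#include <stdint.h>

#define SEQ_ZERO_MASK_SKETCH 0x1fff /* SeqZero is 13 bits */

static void seg_hdr_sketch(uint8_t hdr, uint8_t szmic, uint16_t seq_zero,
			   uint8_t seg_o, uint8_t seg_n, uint8_t out[4])
{
	seq_zero &= SEQ_ZERO_MASK_SKETCH;

	out[0] = hdr;                                        /* SEG | AKF | AID */
	out[1] = (uint8_t)((szmic << 7) | (seq_zero >> 6));  /* SZMIC | SeqZero[12:6] */
	out[2] = (uint8_t)(((seq_zero & 0x3f) << 2) | (seg_o >> 3)); /* SeqZero[5:0] | SegO[4:3] */
	out[3] = (uint8_t)(((seg_o & 0x07) << 5) | seg_n);   /* SegO[2:0] | SegN */
}
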
356 static void seg_tx_send_unacked(struct seg_tx *tx) in seg_tx_send_unacked() argument
358 if (!tx->nack_count) { in seg_tx_send_unacked()
365 .net_idx = tx->sub->net_idx, in seg_tx_send_unacked()
367 .app_idx = (tx->ctl ? BT_MESH_KEY_UNUSED : 0), in seg_tx_send_unacked()
368 .addr = tx->dst, in seg_tx_send_unacked()
370 .send_ttl = tx->ttl, in seg_tx_send_unacked()
373 .sub = tx->sub, in seg_tx_send_unacked()
375 .src = tx->src, in seg_tx_send_unacked()
376 .xmit = tx->xmit, in seg_tx_send_unacked()
377 .friend_cred = tx->friend_cred, in seg_tx_send_unacked()
378 .aid = tx->hdr & AID_MASK, in seg_tx_send_unacked()
381 if (BT_MESH_ADDR_IS_UNICAST(tx->dst) && in seg_tx_send_unacked()
382 !tx->attempts_left_without_progress) { in seg_tx_send_unacked()
384 seg_tx_complete(tx, -ETIMEDOUT); in seg_tx_send_unacked()
388 if (!tx->attempts_left) { in seg_tx_send_unacked()
389 if (BT_MESH_ADDR_IS_UNICAST(tx->dst)) { in seg_tx_send_unacked()
391 seg_tx_complete(tx, -ETIMEDOUT); in seg_tx_send_unacked()
396 seg_tx_complete(tx, 0); in seg_tx_send_unacked()
403 (uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK), tx->attempts_left); in seg_tx_send_unacked()
405 while (tx->seg_o <= tx->seg_n) { in seg_tx_send_unacked()
409 if (!tx->seg[tx->seg_o]) { in seg_tx_send_unacked()
411 tx->seg_o++; in seg_tx_send_unacked()
416 tx->xmit, BUF_TIMEOUT); in seg_tx_send_unacked()
423 seg_tx_buf_build(tx, tx->seg_o, &seg->b); in seg_tx_send_unacked()
425 LOG_DBG("Sending %u/%u", tx->seg_o, tx->seg_n); in seg_tx_send_unacked()
427 err = bt_mesh_net_send(&net_tx, seg, &seg_sent_cb, tx); in seg_tx_send_unacked()
434 tx->seg_o++; in seg_tx_send_unacked()
436 tx->ack_received = 0U; in seg_tx_send_unacked()
446 tx->seg_o = 0U; in seg_tx_send_unacked()
447 tx->attempts_left--; in seg_tx_send_unacked()
448 if (BT_MESH_ADDR_IS_UNICAST(tx->dst) && !tx->ack_received) { in seg_tx_send_unacked()
449 tx->attempts_left_without_progress--; in seg_tx_send_unacked()
458 delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp); in seg_tx_send_unacked()
459 if (tx->ack_received) { in seg_tx_send_unacked()
464 tx->ack_received = 0U; in seg_tx_send_unacked()
466 timeout = BT_MESH_SAR_TX_RETRANS_TIMEOUT_MS(tx->dst, tx->ttl); in seg_tx_send_unacked()
474 k_work_reschedule(&tx->retransmit, K_MSEC(timeout)); in seg_tx_send_unacked()
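seg_tx_send_unacked() above retransmits whatever segments remain unacknowledged and keeps two budgets: a total retransmission count, and, for unicast destinations, a count of passes allowed without any new acknowledgement. A minimal model of that bookkeeping, with field names mirroring the listing and all values illustrative:

#include <stdbool.h>
#include <stdint.h>

struct retrans_sketch {
	uint8_t attempts_left;
	uint8_t attempts_left_without_progress;
	bool ack_received;
	bool unicast_dst;
};

/* Returns false once either budget is exhausted; the real code then
 * completes the transfer with -ETIMEDOUT (or 0 for group destinations).
 */
bool retrans_pass_sketch(struct retrans_sketch *s)
{
	if (s->unicast_dst && !s->attempts_left_without_progress) {
		return false;
	}
	if (!s->attempts_left) {
		return false;
	}

	s->attempts_left--;
	if (s->unicast_dst && !s->ack_received) {
		s->attempts_left_without_progress--;
	}
	s->ack_received = false; /* an incoming ack sets this again */

	return true;
}
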
480 struct seg_tx *tx = CONTAINER_OF(dwork, struct seg_tx, retransmit); in seg_retransmit() local
482 seg_tx_send_unacked(tx); in seg_retransmit()
490 struct seg_tx *tx; in send_seg() local
497 for (tx = NULL, i = 0; i < ARRAY_SIZE(seg_tx); i++) { in send_seg()
501 } else if (!tx) { in send_seg()
502 tx = &seg_tx[i]; in send_seg()
506 if (!tx) { in send_seg()
512 tx->hdr = TRANS_CTL_HDR(*ctl_op, 1); in send_seg()
514 tx->hdr = SEG_HDR(0, 0); in send_seg()
516 tx->hdr = SEG_HDR(1, net_tx->aid); in send_seg()
519 tx->src = net_tx->src; in send_seg()
520 tx->dst = net_tx->ctx->addr; in send_seg()
521 tx->seg_n = (sdu->len - 1) / seg_len(!!ctl_op); in send_seg()
522 tx->seg_o = 0; in send_seg()
523 tx->len = sdu->len; in send_seg()
524 tx->nack_count = tx->seg_n + 1; in send_seg()
525 tx->seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_TX, bt_mesh.seq); in send_seg()
526 tx->sub = net_tx->sub; in send_seg()
527 tx->cb = cb; in send_seg()
528 tx->cb_data = cb_data; in send_seg()
529 tx->attempts_left = BT_MESH_SAR_TX_RETRANS_COUNT(tx->dst); in send_seg()
530 tx->attempts_left_without_progress = BT_MESH_SAR_TX_RETRANS_NO_PROGRESS; in send_seg()
531 tx->xmit = net_tx->xmit; in send_seg()
532 tx->aszmic = net_tx->aszmic; in send_seg()
533 tx->friend_cred = net_tx->friend_cred; in send_seg()
534 tx->blocked = blocked; in send_seg()
535 tx->started = 0; in send_seg()
536 tx->seg_send_started = 0; in send_seg()
537 tx->ctl = !!ctl_op; in send_seg()
538 tx->ttl = net_tx->ctx->send_ttl; in send_seg()
540 LOG_DBG("SeqZero 0x%04x (segs: %u)", (uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK), in send_seg()
541 tx->nack_count); in send_seg()
544 !bt_mesh_friend_queue_has_space(tx->sub->net_idx, net_tx->src, in send_seg()
545 tx->dst, &tx->seq_auth, in send_seg()
546 tx->seg_n + 1) && in send_seg()
547 BT_MESH_ADDR_IS_UNICAST(tx->dst)) { in send_seg()
548 LOG_ERR("Not enough space in Friend Queue for %u segments", tx->seg_n + 1); in send_seg()
549 seg_tx_reset(tx); in send_seg()
561 seg_tx_reset(tx); in send_seg()
570 tx->seg[seg_o] = buf; in send_seg()
576 seg_tx_buf_build(tx, seg_o, &seg); in send_seg()
578 if (seg_o == tx->seg_n) { in send_seg()
585 net_tx, type, ctl_op ? NULL : &tx->seq_auth, in send_seg()
586 tx->seg_n + 1, &seg) && in send_seg()
592 tx->seg[seg_o] = NULL; in send_seg()
600 if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !tx->seg[0]) { in send_seg()
601 seg_tx_reset(tx); in send_seg()
621 seg_tx_send_unacked(tx); in send_seg()
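send_seg() above derives the segment count from the encrypted SDU length: SegN is the zero-based index of the last segment, so tx->seg_n = (len - 1) / seg_len and nack_count = seg_n + 1. A small worked example, assuming the usual per-segment payloads of 12 octets for access PDUs and 8 for control PDUs (the listing's seg_len() helper is assumed to return these values):

#include <stdint.h>
#include <stdio.h>

static uint8_t seg_len_sketch(int ctl)
{
	return ctl ? 8 : 12; /* octets of payload per segment */
}

int main(void)
{
	uint16_t sdu_len = 45; /* encrypted access SDU length, illustrative */
	uint8_t seg_n = (sdu_len - 1) / seg_len_sketch(0); /* 44 / 12 = 3 */
	uint8_t nack_count = seg_n + 1;                    /* 4 segments to send */

	printf("seg_n %u, segments %u\n", seg_n, nack_count);
	return 0;
}
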
626 static int trans_encrypt(const struct bt_mesh_net_tx *tx, const struct bt_mesh_key *key, in trans_encrypt() argument
630 .dev_key = BT_MESH_IS_DEV_KEY(tx->ctx->app_idx), in trans_encrypt()
631 .aszmic = tx->aszmic, in trans_encrypt()
632 .src = tx->src, in trans_encrypt()
633 .dst = tx->ctx->addr, in trans_encrypt()
638 if (BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) { in trans_encrypt()
639 crypto.ad = tx->ctx->uuid; in trans_encrypt()
645 int bt_mesh_trans_send(struct bt_mesh_net_tx *tx, struct net_buf_simple *msg, in bt_mesh_trans_send() argument
667 if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) { in bt_mesh_trans_send()
668 tx->ctx->send_ttl = bt_mesh_default_ttl_get(); in bt_mesh_trans_send()
669 } else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) { in bt_mesh_trans_send()
675 tx->ctx->send_rel = true; in bt_mesh_trans_send()
678 if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED || in bt_mesh_trans_send()
679 (!BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr) && in bt_mesh_trans_send()
680 BT_MESH_IS_DEV_KEY(tx->ctx->app_idx))) { in bt_mesh_trans_send()
685 err = bt_mesh_keys_resolve(tx->ctx, &tx->sub, &key, &aid); in bt_mesh_trans_send()
690 LOG_DBG("net_idx 0x%04x app_idx 0x%04x dst 0x%04x", tx->sub->net_idx, tx->ctx->app_idx, in bt_mesh_trans_send()
691 tx->ctx->addr); in bt_mesh_trans_send()
694 tx->xmit = bt_mesh_net_transmit_get(); in bt_mesh_trans_send()
695 tx->aid = aid; in bt_mesh_trans_send()
697 if (!tx->ctx->send_rel || net_buf_simple_tailroom(msg) < 8) { in bt_mesh_trans_send()
698 tx->aszmic = 0U; in bt_mesh_trans_send()
700 tx->aszmic = 1U; in bt_mesh_trans_send()
703 err = trans_encrypt(tx, key, msg); in bt_mesh_trans_send()
708 if (tx->ctx->send_rel) { in bt_mesh_trans_send()
709 err = send_seg(tx, msg, cb, cb_data, NULL); in bt_mesh_trans_send()
711 err = send_unseg(tx, msg, cb, cb_data, NULL); in bt_mesh_trans_send()
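bt_mesh_trans_send() above also picks the TransMIC size: unsegmented (non-reliable) sends, or buffers without 8 octets of tailroom, fall back to the 4-octet MIC (aszmic = 0); otherwise the 8-octet MIC is used. A sketch of that decision, with a plain size parameter standing in for net_buf_simple_tailroom():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static uint8_t pick_aszmic_sketch(bool send_rel, size_t tailroom)
{
	/* 0 -> 32-bit TransMIC, 1 -> 64-bit TransMIC */
	return (!send_rel || tailroom < 8) ? 0 : 1;
}
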
818 struct seg_tx *tx; in seg_tx_lookup() local
822 tx = &seg_tx[i]; in seg_tx_lookup()
824 if ((tx->seq_auth & TRANS_SEQ_ZERO_MASK) != seq_zero) { in seg_tx_lookup()
828 if (tx->dst == addr) { in seg_tx_lookup()
829 return tx; in seg_tx_lookup()
837 if (obo && (tx->nack_count == tx->seg_n + 1 || tx->ack_src == addr)) { in seg_tx_lookup()
838 tx->ack_src = addr; in seg_tx_lookup()
839 return tx; in seg_tx_lookup()
850 struct seg_tx *tx; in trans_ack() local
878 tx = seg_tx_lookup(seq_zero, obo, rx->ctx.addr); in trans_ack()
879 if (!tx) { in trans_ack()
880 LOG_DBG("No matching TX context for ack"); in trans_ack()
884 if (!BT_MESH_ADDR_IS_UNICAST(tx->dst)) { in trans_ack()
889 *seq_auth = tx->seq_auth; in trans_ack()
893 seg_tx_complete(tx, -ECANCELED); in trans_ack()
897 if (find_msb_set(ack) - 1 > tx->seg_n) { in trans_ack()
903 if (tx->seg[bit - 1]) { in trans_ack()
904 LOG_DBG("seg %u/%u acked", bit - 1, tx->seg_n); in trans_ack()
905 seg_tx_done(tx, bit - 1); in trans_ack()
913 tx->attempts_left_without_progress = in trans_ack()
917 if (tx->nack_count) { in trans_ack()
921 if (tx->seg_o == 0) { in trans_ack()
928 if ((BT_MESH_ADDR_IS_UNICAST(tx->dst) && in trans_ack()
929 !tx->attempts_left_without_progress) || in trans_ack()
930 !tx->attempts_left) { in trans_ack()
934 uint32_t delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp); in trans_ack()
946 k_work_reschedule(&tx->retransmit, timeout); in trans_ack()
948 tx->ack_received = 1U; in trans_ack()
951 LOG_DBG("SDU TX complete"); in trans_ack()
952 seg_tx_complete(tx, 0); in trans_ack()
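trans_ack() above walks the BlockAck bitfield of a Segment Acknowledgment: a set bit i means segment i was received, and an ack naming a segment beyond SegN is rejected. The sketch below mirrors that walk with portable stand-ins for the bit helpers used in the listing:

#include <stdbool.h>
#include <stdint.h>

static int msb_set_sketch(uint32_t v)
{
	int msb = 0;

	while (v) {
		msb++;
		v >>= 1;
	}
	return msb; /* 1-based position of the highest set bit, 0 if none */
}

/* Returns the number of newly acknowledged segments, or -1 if the
 * BlockAck field references a segment index greater than seg_n
 * (the real code resets the transfer in that case).
 */
int apply_block_ack_sketch(uint32_t ack, uint8_t seg_n, bool acked[32])
{
	int done = 0;

	if (msb_set_sketch(ack) - 1 > seg_n) {
		return -1;
	}

	for (uint8_t bit = 0; bit <= seg_n; bit++) {
		if ((ack & (1u << bit)) && !acked[bit]) {
			acked[bit] = true; /* seg_tx_done() in the real code */
			done++;
		}
	}

	return done;
}
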
1065 int bt_mesh_ctl_send(struct bt_mesh_net_tx *tx, uint8_t ctl_op, void *data, in bt_mesh_ctl_send() argument
1071 if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) { in bt_mesh_ctl_send()
1072 tx->ctx->send_ttl = bt_mesh_default_ttl_get(); in bt_mesh_ctl_send()
1073 } else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) { in bt_mesh_ctl_send()
1081 tx->ctx->send_rel = true; in bt_mesh_ctl_send()
1084 tx->ctx->app_idx = BT_MESH_KEY_UNUSED; in bt_mesh_ctl_send()
1086 if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED || in bt_mesh_ctl_send()
1087 BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) { in bt_mesh_ctl_send()
1092 LOG_DBG("src 0x%04x dst 0x%04x ttl 0x%02x ctl 0x%02x", tx->src, tx->ctx->addr, in bt_mesh_ctl_send()
1093 tx->ctx->send_ttl, ctl_op); in bt_mesh_ctl_send()
1096 if (tx->ctx->send_rel) { in bt_mesh_ctl_send()
1097 return send_seg(tx, &buf, cb, cb_data, &ctl_op); in bt_mesh_ctl_send()
1099 return send_unseg(tx, &buf, cb, cb_data, &ctl_op); in bt_mesh_ctl_send()
1112 struct bt_mesh_net_tx tx = { in send_ack() local
1139 return bt_mesh_ctl_send(&tx, TRANS_CTL_OP_ACK, buf, sizeof(buf), in send_ack()