1 /*
2 * Copyright (c) 2017 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <errno.h>
9 #include <string.h>
10 #include <stdlib.h>
11 #include <sys/types.h>
12 #include <zephyr/sys/util.h>
13 #include <zephyr/sys/byteorder.h>
14
15 #include <zephyr/net_buf.h>
16
17 #include <zephyr/bluetooth/hci.h>
18 #include <zephyr/bluetooth/mesh.h>
19
20 #include "common/bt_str.h"
21
22 #include "crypto.h"
23 #include "mesh.h"
24 #include "net.h"
25 #include "app_keys.h"
26 #include "lpn.h"
27 #include "rpl.h"
28 #include "friend.h"
29 #include "access.h"
30 #include "foundation.h"
31 #include "sar_cfg_internal.h"
32 #include "settings.h"
33 #include "heartbeat.h"
34 #include "testing.h"
35 #include "transport.h"
36 #include "va.h"
37
38 #define LOG_LEVEL CONFIG_BT_MESH_TRANS_LOG_LEVEL
39 #include <zephyr/logging/log.h>
40 LOG_MODULE_REGISTER(bt_mesh_transport);
41
42 #define AID_MASK ((uint8_t)(BIT_MASK(6)))
43
44 #define SEG(data) ((data)[0] >> 7)
45 #define AKF(data) (((data)[0] >> 6) & 0x01)
46 #define AID(data) ((data)[0] & AID_MASK)
47 #define ASZMIC(data) (((data)[1] >> 7) & 1)
48
49 #define APP_MIC_LEN(aszmic) ((aszmic) ? BT_MESH_MIC_LONG : BT_MESH_MIC_SHORT)
50
51 #define UNSEG_HDR(akf, aid) ((akf << 6) | (aid & AID_MASK))
52 #define SEG_HDR(akf, aid) (UNSEG_HDR(akf, aid) | 0x80)
53
54 #define BLOCK_COMPLETE(seg_n) (uint32_t)(((uint64_t)1 << (seg_n + 1)) - 1)
55
56 #define SEQ_AUTH(iv_index, seq) (((uint64_t)iv_index) << 24 | (uint64_t)seq)
57
58 /* How long to wait for available buffers before giving up */
59 #define BUF_TIMEOUT K_NO_WAIT
60
61 #define ACK_DELAY(seg_n) \
62 (MIN(2 * seg_n + 1, BT_MESH_SAR_RX_ACK_DELAY_INC_X2) * \
63 BT_MESH_SAR_RX_SEG_INT_MS / 2)
64
65 #define SEQAUTH_ALREADY_PROCESSED_TIMEOUT \
66 (BT_MESH_SAR_RX_ACK_DELAY_INC_X2 * BT_MESH_SAR_RX_SEG_INT_MS / 2)
67
/* Context for one outgoing segmented message. One entry per concurrent
 * segmented transmission, allocated from the seg_tx[] pool below.
 */
static struct seg_tx {
	struct bt_mesh_subnet *sub;	/* Subnet the message is sent on */
	void *seg[BT_MESH_TX_SEG_MAX];	/* Segment buffers; NULL once acked/freed */
	uint64_t seq_auth;		/* SeqAuth (IV Index and first SeqNum) */
	int64_t adv_start_timestamp; /* Calculate adv duration and adjust intervals*/
	uint16_t src;			/* Source address */
	uint16_t dst;			/* Destination address */
	uint16_t ack_src;		/* Friend acking on behalf of dst (OBO) */
	uint16_t len;			/* Total SDU length */
	uint8_t hdr;			/* First transport header octet (SEG/AKF/AID) */
	uint8_t xmit;			/* Network transmit parameters */
	uint8_t seg_n; /* Last segment index */
	uint8_t seg_o; /* Segment being sent */
	uint8_t nack_count; /* Number of unacked segs */
	uint8_t attempts_left;	/* Remaining retransmission rounds */
	uint8_t attempts_left_without_progress;	/* Rounds left with no new ack */
	uint8_t ttl; /* Transmitted TTL value */
	uint8_t blocked:1,  /* Blocked by ongoing tx */
		ctl:1,      /* Control packet */
		aszmic:1,   /* MIC size */
		started:1,  /* Start cb called */
		friend_cred:1, /* Using Friend credentials */
		seg_send_started:1, /* Used to check if seg_send_start cb is called */
		ack_received:1; /* Ack received during seg message transmission. */
	const struct bt_mesh_send_cb *cb;	/* User send callbacks */
	void *cb_data;				/* User data for cb */
	struct k_work_delayable retransmit; /* Retransmit timer */
} seg_tx[CONFIG_BT_MESH_TX_SEG_MSG_COUNT];
96
/* Context for one incoming segmented message reassembly. */
static struct seg_rx {
	struct bt_mesh_subnet *sub;	/* Subnet the segments arrive on */
	void *seg[BT_MESH_RX_SEG_MAX];	/* Received segment buffers */
	uint64_t seq_auth;		/* SeqAuth of the transaction */
	uint16_t src;			/* Source address */
	uint16_t dst;			/* Destination address */
	uint16_t len;			/* Total length of the reassembled SDU */
	uint8_t hdr;			/* First transport header octet */
	uint8_t seg_n:5,		/* Last segment index */
		ctl:1,			/* Control message */
		in_use:1,		/* Context is allocated */
		obo:1;			/* Receiving on behalf of an LPN (Friend) */
	uint8_t ttl;
	uint8_t attempts_left;
	uint32_t block;			/* Bitfield of received segments */
	uint32_t last_ack;		/* NOTE(review): presumably timestamp of last sent ack - usage not visible in this chunk */
	struct k_work_delayable ack;	/* Segment ack timer */
	struct k_work_delayable discard;	/* Incomplete-message discard timer */
} seg_rx[CONFIG_BT_MESH_RX_SEG_MSG_COUNT];
116
117 K_MEM_SLAB_DEFINE(segs, BT_MESH_APP_SEG_SDU_MAX, CONFIG_BT_MESH_SEG_BUFS, 4);
118
/* Send an unsegmented transport PDU.
 *
 * Builds the single transport header octet (control opcode, or AKF/AID
 * for access messages), appends the SDU and passes the PDU to the
 * network layer. When friendship is enabled the PDU is also offered to
 * the Friend Queue; PDUs for a unicast address of a friend LPN go out
 * only through the Friend Queue.
 *
 * @param tx      Network TX context (subnet, ctx, src, xmit params).
 * @param sdu     Encrypted SDU, or raw Control PDU payload.
 * @param cb      User send callbacks (may be NULL).
 * @param cb_data User data passed to @p cb.
 * @param ctl_op  Control opcode, or NULL for an access message.
 *
 * @return 0 on success, or a negative error code.
 */
static int send_unseg(struct bt_mesh_net_tx *tx, struct net_buf_simple *sdu,
		      const struct bt_mesh_send_cb *cb, void *cb_data,
		      const uint8_t *ctl_op)
{
	struct bt_mesh_adv *adv;

	adv = bt_mesh_adv_create(BT_MESH_ADV_DATA, BT_MESH_ADV_TAG_LOCAL,
				 tx->xmit, BUF_TIMEOUT);
	if (!adv) {
		LOG_ERR("Out of network advs");
		return -ENOBUFS;
	}

	net_buf_simple_reserve(&adv->b, BT_MESH_NET_HDR_LEN);

	if (ctl_op) {
		net_buf_simple_add_u8(&adv->b, TRANS_CTL_HDR(*ctl_op, 0));
	} else if (BT_MESH_IS_DEV_KEY(tx->ctx->app_idx)) {
		/* Device key: AKF = 0, AID = 0 */
		net_buf_simple_add_u8(&adv->b, UNSEG_HDR(0, 0));
	} else {
		/* Application key: AKF = 1 with the key's AID */
		net_buf_simple_add_u8(&adv->b, UNSEG_HDR(1, tx->aid));
	}

	net_buf_simple_add_mem(&adv->b, sdu->data, sdu->len);

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
		if (!bt_mesh_friend_queue_has_space(tx->sub->net_idx,
						    tx->src, tx->ctx->addr,
						    NULL, 1)) {
			if (BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) {
				/* A unicast LPN must get the PDU via its
				 * Friend Queue, so failure here is fatal.
				 */
				LOG_ERR("Not enough space in Friend Queue");
				bt_mesh_adv_unref(adv);
				return -ENOBUFS;
			} else {
				/* Group/virtual: LPNs may miss it, but the
				 * PDU can still go out over the air.
				 */
				LOG_WRN("No space in Friend Queue");
				goto send;
			}
		}

		if (bt_mesh_friend_enqueue_tx(tx, BT_MESH_FRIEND_PDU_SINGLE,
					      NULL, 1, &adv->b) &&
		    BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) {
			/* PDUs for a specific Friend should only go
			 * out through the Friend Queue.
			 */
			bt_mesh_adv_unref(adv);
			send_cb_finalize(cb, cb_data);
			return 0;
		}
	}

send:
	return bt_mesh_net_send(tx, adv, cb, cb_data);
}
173
seg_len(bool ctl)174 static inline uint8_t seg_len(bool ctl)
175 {
176 if (ctl) {
177 return BT_MESH_CTL_SEG_SDU_MAX;
178 } else {
179 return BT_MESH_APP_SEG_SDU_MAX;
180 }
181 }
182
bt_mesh_tx_in_progress(void)183 bool bt_mesh_tx_in_progress(void)
184 {
185 int i;
186
187 for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
188 if (seg_tx[i].nack_count) {
189 return true;
190 }
191 }
192
193 return false;
194 }
195
/* Release a single segment buffer of @p tx back to the slab and
 * decrement the count of outstanding (unacked) segments.
 */
static void seg_tx_done(struct seg_tx *tx, uint8_t seg_idx)
{
	k_mem_slab_free(&segs, (void *)tx->seg[seg_idx]);
	tx->seg[seg_idx] = NULL;
	tx->nack_count--;
}
202
seg_tx_blocks(struct seg_tx * tx,uint16_t src,uint16_t dst)203 static bool seg_tx_blocks(struct seg_tx *tx, uint16_t src, uint16_t dst)
204 {
205 return (tx->src == src) && (tx->dst == dst);
206 }
207
/* When @p tx finishes, find the oldest (lowest SeqAuth) blocked
 * transmission with the same src/dst pair and schedule it to start.
 */
static void seg_tx_unblock_check(struct seg_tx *tx)
{
	struct seg_tx *blocked = NULL;
	int i;

	/* Unblock the first blocked tx with the same params. */
	for (i = 0; i < ARRAY_SIZE(seg_tx); ++i) {
		if (&seg_tx[i] != tx &&
		    seg_tx[i].blocked &&
		    seg_tx_blocks(tx, seg_tx[i].src, seg_tx[i].dst) &&
		    (!blocked || seg_tx[i].seq_auth < blocked->seq_auth)) {
			blocked = &seg_tx[i];
		}
	}

	if (blocked) {
		LOG_DBG("Unblocked 0x%04x", (uint16_t)(blocked->seq_auth & TRANS_SEQ_ZERO_MASK));
		blocked->blocked = false;
		/* Kick off sending immediately via the retransmit handler. */
		k_work_reschedule(&blocked->retransmit, K_NO_WAIT);
	}
}
229
/* Return a TX context to its idle state: cancel the retransmit timer,
 * clear addressing state and free any remaining segment buffers. If an
 * IV Update was deferred while transfers were in flight, let it proceed.
 */
static void seg_tx_reset(struct seg_tx *tx)
{
	int i;

	/* If this call fails, the handler will exit early, as nack_count is 0. */
	(void)k_work_cancel_delayable(&tx->retransmit);

	tx->cb = NULL;
	tx->cb_data = NULL;
	tx->seq_auth = 0U;
	tx->sub = NULL;
	tx->src = BT_MESH_ADDR_UNASSIGNED;
	tx->dst = BT_MESH_ADDR_UNASSIGNED;
	tx->ack_src = BT_MESH_ADDR_UNASSIGNED;
	tx->blocked = false;

	/* Stop once every still-unacked segment has been freed. */
	for (i = 0; i <= tx->seg_n && tx->nack_count; i++) {
		if (!tx->seg[i]) {
			continue;
		}

		seg_tx_done(tx, i);
	}

	tx->nack_count = 0;
	tx->seg_send_started = 0;
	tx->ack_received = 0;

	if (atomic_test_and_clear_bit(bt_mesh.flags, BT_MESH_IVU_PENDING)) {
		LOG_DBG("Proceeding with pending IV Update");
		/* bt_mesh_net_iv_update() will re-enable the flag if this
		 * wasn't the only transfer.
		 */
		bt_mesh_net_iv_update(bt_mesh.iv_index, false);
	}
}
266
seg_tx_complete(struct seg_tx * tx,int err)267 static inline void seg_tx_complete(struct seg_tx *tx, int err)
268 {
269 const struct bt_mesh_send_cb *cb = tx->cb;
270 void *cb_data = tx->cb_data;
271
272 seg_tx_unblock_check(tx);
273
274 seg_tx_reset(tx);
275
276 if (cb && cb->end) {
277 cb->end(err, cb_data);
278 }
279 }
280
/* Schedule the next transmission step for @p tx via the retransmit work.
 *
 * @param tx    TX context.
 * @param delta Milliseconds already spent transmitting the previous
 *              segment; deducted from the Segment Interval.
 */
static void schedule_transmit_continue(struct seg_tx *tx, uint32_t delta)
{
	uint32_t timeout = 0;

	/* Fully acked in the meantime - nothing left to send. */
	if (!tx->nack_count) {
		return;
	}

	LOG_DBG("");

	if (delta < BT_MESH_SAR_TX_SEG_INT_MS) {
		timeout = BT_MESH_SAR_TX_SEG_INT_MS - delta;
	}

	/* If it is not the last segment then continue transmission after Segment Interval,
	 * otherwise continue immediately as the callback will finish this transmission and
	 * progress into retransmission.
	 */
	k_work_reschedule(&tx->retransmit,
			  (tx->seg_o <= tx->seg_n) ?
			  K_MSEC(timeout) :
			  K_NO_WAIT);
}
304
/* Advertising 'start' callback for an individual segment.
 *
 * Invokes the user's start callback exactly once per message, records
 * the timestamp used to compensate segment intervals, and on error
 * reschedules the retransmit work itself, since no 'sent' callback
 * will follow.
 */
static void seg_send_start(uint16_t duration, int err, void *user_data)
{
	struct seg_tx *tx = user_data;

	if (!tx->started && tx->cb && tx->cb->start) {
		tx->cb->start(duration, err, tx->cb_data);
		tx->started = 1U;
	}

	tx->seg_send_started = 1U;
	tx->adv_start_timestamp = k_uptime_get();

	/* If there's an error in transmitting the 'sent' callback will never
	 * be called. Make sure that we kick the retransmit timer also in this
	 * case since otherwise we risk the transmission of becoming stale.
	 */
	if (err) {
		schedule_transmit_continue(tx, 0);
	}
}
325
seg_sent(int err,void * user_data)326 static void seg_sent(int err, void *user_data)
327 {
328 struct seg_tx *tx = user_data;
329 uint32_t delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);
330
331 if (!tx->seg_send_started) {
332 return;
333 }
334
335 schedule_transmit_continue(tx, delta_ms);
336 }
337
/* Per-segment advertising callbacks driving the segmented TX state
 * machine (adv start -> seg_send_start, adv end -> seg_sent).
 */
static const struct bt_mesh_send_cb seg_sent_cb = {
	.start = seg_send_start,
	.end = seg_sent,
};
342
/* Serialize segment @p seg_o of @p tx into @p buf.
 *
 * Four-octet segmented transport header followed by the payload:
 *   octet 0: SEG/AKF/AID header (tx->hdr)
 *   octet 1: SZMIC bit | SeqZero[12:6]
 *   octet 2: SeqZero[5:0] | SegO[4:3]
 *   octet 3: SegO[2:0] | SegN[4:0]
 */
static void seg_tx_buf_build(struct seg_tx *tx, uint8_t seg_o,
			     struct net_buf_simple *buf)
{
	uint16_t seq_zero = tx->seq_auth & TRANS_SEQ_ZERO_MASK;
	/* The last segment may carry less than a full segment's payload. */
	uint8_t len = MIN(seg_len(tx->ctl), tx->len - (seg_len(tx->ctl) * seg_o));

	net_buf_simple_add_u8(buf, tx->hdr);
	net_buf_simple_add_u8(buf, (tx->aszmic << 7) | seq_zero >> 6);
	net_buf_simple_add_u8(buf, (((seq_zero & 0x3f) << 2) | (seg_o >> 3)));
	net_buf_simple_add_u8(buf, ((seg_o & 0x07) << 5) | tx->seg_n);
	net_buf_simple_add_mem(buf, tx->seg[seg_o], len);
}
355
/* Transmit the next unacked segment of @p tx, or finish/schedule the
 * retransmission round.
 *
 * Runs repeatedly from the retransmit work item: each invocation sends
 * at most one segment (then returns to let the advertising layer run)
 * and reschedules itself. When seg_o passes seg_n, a full round is
 * complete: the attempt counters are decremented and the retransmission
 * timeout is armed. Completes the transmission with -ETIMEDOUT when the
 * unicast attempt budgets are exhausted, or with 0 for group
 * destinations (which have no acks).
 */
static void seg_tx_send_unacked(struct seg_tx *tx)
{
	if (!tx->nack_count) {
		return;
	}

	uint32_t delta_ms;
	uint32_t timeout;
	struct bt_mesh_msg_ctx ctx = {
		.net_idx = tx->sub->net_idx,
		/* App idx only used by network to detect control messages: */
		.app_idx = (tx->ctl ? BT_MESH_KEY_UNUSED : 0),
		.addr = tx->dst,
		.send_rel = true,
		.send_ttl = tx->ttl,
	};
	struct bt_mesh_net_tx net_tx = {
		.sub = tx->sub,
		.ctx = &ctx,
		.src = tx->src,
		.xmit = tx->xmit,
		.friend_cred = tx->friend_cred,
		.aid = tx->hdr & AID_MASK,
	};

	if (BT_MESH_ADDR_IS_UNICAST(tx->dst) &&
	    !tx->attempts_left_without_progress) {
		LOG_ERR("Ran out of retransmit without progress attempts");
		seg_tx_complete(tx, -ETIMEDOUT);
		return;
	}

	if (!tx->attempts_left) {
		if (BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
			LOG_ERR("Ran out of retransmit attempts");
			seg_tx_complete(tx, -ETIMEDOUT);
		} else {
			/* Segmented sending to groups doesn't have acks, so
			 * running out of attempts is the expected behavior.
			 */
			seg_tx_complete(tx, 0);
		}

		return;
	}

	LOG_DBG("SeqZero: 0x%04x Attempts: %u",
		(uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK), tx->attempts_left);

	while (tx->seg_o <= tx->seg_n) {
		struct bt_mesh_adv *seg;
		int err;

		/* Already-acked segments have been freed (NULL). */
		if (!tx->seg[tx->seg_o]) {
			/* Move on to the next segment */
			tx->seg_o++;
			continue;
		}

		seg = bt_mesh_adv_create(BT_MESH_ADV_DATA, BT_MESH_ADV_TAG_LOCAL,
					 tx->xmit, BUF_TIMEOUT);
		if (!seg) {
			LOG_DBG("Allocating segment failed");
			goto end;
		}

		net_buf_simple_reserve(&seg->b, BT_MESH_NET_HDR_LEN);
		seg_tx_buf_build(tx, tx->seg_o, &seg->b);

		LOG_DBG("Sending %u/%u", tx->seg_o, tx->seg_n);

		err = bt_mesh_net_send(&net_tx, seg, &seg_sent_cb, tx);
		if (err) {
			LOG_DBG("Sending segment failed");
			goto end;
		}

		/* Move on to the next segment */
		tx->seg_o++;

		tx->ack_received = 0U;

		/* Return here to let the advertising layer process the message.
		 * This function will be called again after Segment Interval.
		 */
		return;
	}

	/* All segments have been sent */
	tx->seg_o = 0U;
	tx->attempts_left--;
	if (BT_MESH_ADDR_IS_UNICAST(tx->dst) && !tx->ack_received) {
		tx->attempts_left_without_progress--;
	}

end:
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
	    bt_mesh_lpn_established() && !bt_mesh_has_addr(ctx.addr)) {
		bt_mesh_lpn_poll();
	}

	delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);
	if (tx->ack_received) {
		/* Schedule retransmission immediately but keep SAR segment interval time if
		 * SegAck was received while sending last segment.
		 */
		timeout = BT_MESH_SAR_TX_SEG_INT_MS;
		tx->ack_received = 0U;
	} else {
		timeout = BT_MESH_SAR_TX_RETRANS_TIMEOUT_MS(tx->dst, tx->ttl);
	}

	/* Compensate for the time already spent on the last segment. */
	if (delta_ms < timeout) {
		timeout -= delta_ms;
	}

	/* Schedule a retransmission */
	k_work_reschedule(&tx->retransmit, K_MSEC(timeout));
}
476
seg_retransmit(struct k_work * work)477 static void seg_retransmit(struct k_work *work)
478 {
479 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
480 struct seg_tx *tx = CONTAINER_OF(dwork, struct seg_tx, retransmit);
481
482 seg_tx_send_unacked(tx);
483 }
484
/* Start a segmented transmission of @p sdu.
 *
 * Allocates a free seg_tx context (blocking behind any ongoing
 * transmission with the same src/dst pair), splits the SDU into
 * slab-allocated segments, mirrors the segments into the Friend Queue
 * when friendship is enabled, and either starts sending or leaves the
 * context blocked until the conflicting transmission completes.
 *
 * @param net_tx  Network TX context.
 * @param sdu     Encrypted SDU or Control PDU payload; consumed (pulled).
 * @param cb      User send callbacks (may be NULL).
 * @param cb_data User data passed to @p cb.
 * @param ctl_op  Control opcode, or NULL for an access message.
 *
 * @return 0 on success, or a negative error code.
 */
static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu,
		    const struct bt_mesh_send_cb *cb, void *cb_data,
		    uint8_t *ctl_op)
{
	bool blocked = false;
	struct seg_tx *tx;
	uint8_t seg_o;
	int i;

	LOG_DBG("src 0x%04x dst 0x%04x app_idx 0x%04x aszmic %u sdu_len %u", net_tx->src,
		net_tx->ctx->addr, net_tx->ctx->app_idx, net_tx->aszmic, sdu->len);

	/* Find a free context, noting whether an active transmission to the
	 * same destination must block this one.
	 */
	for (tx = NULL, i = 0; i < ARRAY_SIZE(seg_tx); i++) {
		if (seg_tx[i].nack_count) {
			blocked |= seg_tx_blocks(&seg_tx[i], net_tx->src,
						 net_tx->ctx->addr);
		} else if (!tx) {
			tx = &seg_tx[i];
		}
	}

	if (!tx) {
		LOG_ERR("No multi-segment message contexts available");
		return -EBUSY;
	}

	if (ctl_op) {
		tx->hdr = TRANS_CTL_HDR(*ctl_op, 1);
	} else if (BT_MESH_IS_DEV_KEY(net_tx->ctx->app_idx)) {
		/* Device key: AKF = 0, AID = 0 */
		tx->hdr = SEG_HDR(0, 0);
	} else {
		tx->hdr = SEG_HDR(1, net_tx->aid);
	}

	tx->src = net_tx->src;
	tx->dst = net_tx->ctx->addr;
	tx->seg_n = (sdu->len - 1) / seg_len(!!ctl_op);
	tx->seg_o = 0;
	tx->len = sdu->len;
	tx->nack_count = tx->seg_n + 1;
	tx->seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_TX, bt_mesh.seq);
	tx->sub = net_tx->sub;
	tx->cb = cb;
	tx->cb_data = cb_data;
	tx->attempts_left = BT_MESH_SAR_TX_RETRANS_COUNT(tx->dst);
	tx->attempts_left_without_progress = BT_MESH_SAR_TX_RETRANS_NO_PROGRESS;
	tx->xmit = net_tx->xmit;
	tx->aszmic = net_tx->aszmic;
	tx->friend_cred = net_tx->friend_cred;
	tx->blocked = blocked;
	tx->started = 0;
	tx->seg_send_started = 0;
	tx->ctl = !!ctl_op;
	tx->ttl = net_tx->ctx->send_ttl;

	LOG_DBG("SeqZero 0x%04x (segs: %u)", (uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK),
		tx->nack_count);

	/* A friend LPN addressed by unicast needs room for the whole message
	 * in its queue up front.
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
	    !bt_mesh_friend_queue_has_space(tx->sub->net_idx, net_tx->src,
					    tx->dst, &tx->seq_auth,
					    tx->seg_n + 1) &&
	    BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
		LOG_ERR("Not enough space in Friend Queue for %u segments", tx->seg_n + 1);
		seg_tx_reset(tx);
		return -ENOBUFS;
	}

	for (seg_o = 0U; sdu->len; seg_o++) {
		void *buf;
		uint16_t len;
		int err;

		err = k_mem_slab_alloc(&segs, &buf, BUF_TIMEOUT);
		if (err) {
			LOG_ERR("Out of segment buffers");
			seg_tx_reset(tx);
			return -ENOBUFS;
		}

		len = MIN(sdu->len, seg_len(!!ctl_op));
		memcpy(buf, net_buf_simple_pull_mem(sdu, len), len);

		LOG_DBG("seg %u: %s", seg_o, bt_hex(buf, len));

		tx->seg[seg_o] = buf;

		if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
			enum bt_mesh_friend_pdu_type type;

			/* 16 octets: 4-octet segmented transport header plus
			 * the maximum segment payload - assumed to cover
			 * both CTL and access segment sizes; verify against
			 * BT_MESH_*_SEG_SDU_MAX.
			 */
			NET_BUF_SIMPLE_DEFINE(seg, 16);
			seg_tx_buf_build(tx, seg_o, &seg);

			if (seg_o == tx->seg_n) {
				type = BT_MESH_FRIEND_PDU_COMPLETE;
			} else {
				type = BT_MESH_FRIEND_PDU_PARTIAL;
			}

			if (bt_mesh_friend_enqueue_tx(
				    net_tx, type, ctl_op ? NULL : &tx->seq_auth,
				    tx->seg_n + 1, &seg) &&
			    BT_MESH_ADDR_IS_UNICAST(net_tx->ctx->addr)) {
				/* PDUs for a specific Friend should only go
				 * out through the Friend Queue.
				 */
				k_mem_slab_free(&segs, buf);
				tx->seg[seg_o] = NULL;
			}

		}

	}

	/* This can happen if segments only went into the Friend Queue */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !tx->seg[0]) {
		seg_tx_reset(tx);

		/* If there was a callback notify sending immediately since
		 * there's no other way to track this (at least currently)
		 * with the Friend Queue.
		 */
		send_cb_finalize(cb, cb_data);
		return 0;
	}

	if (blocked) {
		/* Move the sequence number, so we don't end up creating
		 * another segmented transmission with the same SeqZero while
		 * this one is blocked.
		 */
		bt_mesh_next_seq();
		LOG_DBG("Blocked.");
		return 0;
	}

	seg_tx_send_unacked(tx);

	return 0;
}
625
/* Encrypt an access SDU in place with the resolved application or
 * device key. For virtual destinations the Label UUID is supplied as
 * additional data for the MIC computation.
 */
static int trans_encrypt(const struct bt_mesh_net_tx *tx, const struct bt_mesh_key *key,
			 struct net_buf_simple *msg)
{
	struct bt_mesh_app_crypto_ctx crypto = {
		.dev_key = BT_MESH_IS_DEV_KEY(tx->ctx->app_idx),
		.aszmic = tx->aszmic,
		.src = tx->src,
		.dst = tx->ctx->addr,
		.seq_num = bt_mesh.seq,
		.iv_index = BT_MESH_NET_IVI_TX,
	};

	if (BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) {
		crypto.ad = tx->ctx->uuid;
	}

	return bt_mesh_app_encrypt(key, &crypto, msg);
}
644
/* Send an access message through the transport layer.
 *
 * Validates length, tailroom, TTL and destination, resolves the
 * encryption key, encrypts the SDU (appending the TransMIC in the
 * buffer's tailroom) and dispatches it either as a single unsegmented
 * PDU or as a segmented transmission.
 *
 * @param tx      Network TX context; sub, xmit, aid and aszmic are
 *                filled in here.
 * @param msg     Plaintext SDU; encrypted in place.
 * @param cb      User send callbacks (may be NULL).
 * @param cb_data User data passed to @p cb.
 *
 * @return 0 on success, or a negative error code.
 */
int bt_mesh_trans_send(struct bt_mesh_net_tx *tx, struct net_buf_simple *msg,
		       const struct bt_mesh_send_cb *cb, void *cb_data)
{
	const struct bt_mesh_key *key;
	uint8_t aid;
	int err;

	if (msg->len < 1) {
		LOG_ERR("Zero-length SDU not allowed");
		return -EINVAL;
	}

	if (msg->len > BT_MESH_TX_SDU_MAX - BT_MESH_MIC_SHORT) {
		LOG_ERR("Message too big: %u", msg->len);
		return -EMSGSIZE;
	}

	if (net_buf_simple_tailroom(msg) < BT_MESH_MIC_SHORT) {
		LOG_ERR("Insufficient tailroom for Transport MIC");
		return -EINVAL;
	}

	if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) {
		tx->ctx->send_ttl = bt_mesh_default_ttl_get();
	} else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) {
		LOG_ERR("TTL too large (max 127)");
		return -EINVAL;
	}

	/* SDUs that don't fit in one PDU must be sent segmented. */
	if (msg->len > BT_MESH_SDU_UNSEG_MAX) {
		tx->ctx->send_rel = true;
	}

	/* Device-key messages must be addressed to a unicast destination. */
	if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED ||
	    (!BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr) &&
	     BT_MESH_IS_DEV_KEY(tx->ctx->app_idx))) {
		LOG_ERR("Invalid destination address");
		return -EINVAL;
	}

	err = bt_mesh_keys_resolve(tx->ctx, &tx->sub, &key, &aid);
	if (err) {
		return err;
	}

	LOG_DBG("net_idx 0x%04x app_idx 0x%04x dst 0x%04x", tx->sub->net_idx, tx->ctx->app_idx,
		tx->ctx->addr);
	LOG_DBG("len %u: %s", msg->len, bt_hex(msg->data, msg->len));

	tx->xmit = bt_mesh_net_transmit_get();
	tx->aid = aid;

	/* Use the long TransMIC only for segmented messages with enough
	 * tailroom for it (8 octets - assumed to equal BT_MESH_MIC_LONG).
	 */
	if (!tx->ctx->send_rel || net_buf_simple_tailroom(msg) < 8) {
		tx->aszmic = 0U;
	} else {
		tx->aszmic = 1U;
	}

	err = trans_encrypt(tx, key, msg);
	if (err) {
		return err;
	}

	if (tx->ctx->send_rel) {
		err = send_seg(tx, msg, cb, cb_data, NULL);
	} else {
		err = send_unseg(tx, msg, cb, cb_data, NULL);
	}

	return err;
}
716
/* Reassemble the complete SDU from the received segments of @p rx into
 * @p buf. For access messages the transport MIC (length depends on
 * @p aszmic) is stripped from the end.
 */
static void seg_rx_assemble(struct seg_rx *rx, struct net_buf_simple *buf,
			    uint8_t aszmic)
{
	int i;

	net_buf_simple_reset(buf);

	for (i = 0; i <= rx->seg_n; i++) {
		/* The final segment may be shorter than a full segment. */
		net_buf_simple_add_mem(buf, rx->seg[i],
				       MIN(seg_len(rx->ctl),
					   rx->len - (i * seg_len(rx->ctl))));
	}

	/* Adjust the length to not contain the MIC at the end */
	if (!rx->ctl) {
		buf->len -= APP_MIC_LEN(aszmic);
	}
}
735
/* State shared between sdu_recv() and the per-key decryption attempt
 * callback sdu_try_decrypt().
 */
struct decrypt_ctx {
	struct bt_mesh_app_crypto_ctx crypto;	/* Nonce/MIC parameters */
	struct net_buf_simple *buf;	/* Encrypted SDU (reassembled if segmented) */
	struct net_buf_simple *sdu;	/* Output buffer for the decrypted SDU */
	struct seg_rx *seg;	/* Segmented RX context, or NULL if unsegmented */
};
742
/* Attempt to decrypt the SDU with one candidate key.
 *
 * Called by bt_mesh_app_key_find() for each key matching the AID. For
 * virtual destinations every Label UUID bound to the address is tried
 * in turn (bt_mesh_va_uuid_get() iterates from the previous candidate)
 * until decryption succeeds or the candidates are exhausted.
 *
 * @return 0 on successful decryption, or a negative error code.
 */
static int sdu_try_decrypt(struct bt_mesh_net_rx *rx, const struct bt_mesh_key *key,
			   void *cb_data)
{
	struct decrypt_ctx *ctx = cb_data;
	int err;

	ctx->crypto.ad = NULL;

	do {
		/* Re-assemble for every attempt: the buffer length depends
		 * on the assumed MIC size.
		 */
		if (ctx->seg) {
			seg_rx_assemble(ctx->seg, ctx->buf, ctx->crypto.aszmic);
		}

		if (BT_MESH_ADDR_IS_VIRTUAL(rx->ctx.recv_dst)) {
			ctx->crypto.ad = bt_mesh_va_uuid_get(rx->ctx.recv_dst, ctx->crypto.ad,
							     NULL);

			if (!ctx->crypto.ad) {
				return -ENOENT;
			}
		}

		net_buf_simple_reset(ctx->sdu);

		err = bt_mesh_app_decrypt(key, &ctx->crypto, ctx->buf, ctx->sdu);
	} while (err && ctx->crypto.ad != NULL);

	if (!err && BT_MESH_ADDR_IS_VIRTUAL(rx->ctx.recv_dst)) {
		/* Remember which Label UUID matched. */
		rx->ctx.uuid = ctx->crypto.ad;
	}

	return err;
}
776
/* Decrypt a received access SDU and pass it up to the access layer.
 *
 * @param rx     Network RX context.
 * @param hdr    First transport header octet (AKF/AID).
 * @param aszmic SZMIC bit: selects 4- or 8-octet TransMIC.
 * @param buf    Encrypted SDU (segmented payloads are assembled by the
 *               decrypt callback via @p seg).
 * @param sdu    Scratch buffer receiving the decrypted SDU.
 * @param seg    Segmented RX context, or NULL for unsegmented PDUs.
 *
 * @return 0 on success, or a negative error code.
 */
static int sdu_recv(struct bt_mesh_net_rx *rx, uint8_t hdr, uint8_t aszmic,
		    struct net_buf_simple *buf, struct net_buf_simple *sdu,
		    struct seg_rx *seg)
{
	struct decrypt_ctx ctx = {
		.crypto = {
			.dev_key = !AKF(&hdr),
			.aszmic = aszmic,
			.src = rx->ctx.addr,
			.dst = rx->ctx.recv_dst,
			/* Segmented messages authenticate with the SeqNum of
			 * the first segment (SeqAuth), not the last one.
			 */
			.seq_num = seg ? (seg->seq_auth & 0xffffff) : rx->seq,
			.iv_index = BT_MESH_NET_IVI_RX(rx),
		},
		.buf = buf,
		.sdu = sdu,
		.seg = seg,
	};

	LOG_DBG("AKF %u AID 0x%02x", !ctx.crypto.dev_key, AID(&hdr));

	if (!rx->local_match) {
		/* If friend_match was set, the frame is for an LPN we are a
		 * Friend of; accept it without local processing.
		 */
		return rx->friend_match ? 0 : -ENXIO;
	}

	rx->ctx.app_idx = bt_mesh_app_key_find(ctx.crypto.dev_key, AID(&hdr),
					       rx, sdu_try_decrypt, &ctx);
	if (rx->ctx.app_idx == BT_MESH_KEY_UNUSED) {
		LOG_DBG("No matching AppKey");
		return -EACCES;
	}

	rx->ctx.uuid = ctx.crypto.ad;

	LOG_DBG("Decrypted (AppIdx: 0x%03x)", rx->ctx.app_idx);

	return bt_mesh_access_recv(&rx->ctx, sdu);
}
815
/* Find the TX context a Segment Acknowledgment refers to.
 *
 * Matches on SeqZero and the destination address. When the OBO flag is
 * set, a Friend may ack on behalf of the LPN from its own address; the
 * first such ack locks ack_src so later acks must come from the same
 * Friend.
 *
 * @return The matching TX context, or NULL if none matches.
 */
static struct seg_tx *seg_tx_lookup(uint16_t seq_zero, uint8_t obo, uint16_t addr)
{
	struct seg_tx *tx;
	int i;

	for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
		tx = &seg_tx[i];

		if ((tx->seq_auth & TRANS_SEQ_ZERO_MASK) != seq_zero) {
			continue;
		}

		if (tx->dst == addr) {
			return tx;
		}

		/* If the expected remote address doesn't match,
		 * but the OBO flag is set and this is the first
		 * acknowledgement, assume it's a Friend that's
		 * responding and therefore accept the message.
		 */
		if (obo && (tx->nack_count == tx->seg_n + 1 || tx->ack_src == addr)) {
			tx->ack_src = addr;
			return tx;
		}
	}

	return NULL;
}
845
/* Handle an incoming Segment Acknowledgment control message.
 *
 * Frees acked segments from the matching TX context, refreshes the
 * no-progress counter when new segments were acked, and either
 * completes the transmission or reschedules the retransmit timer.
 *
 * @param rx       Network RX context.
 * @param hdr      Transport header octet (unused beyond dispatch).
 * @param buf      Ack payload: 2-octet OBO/SeqZero field + 4-octet block.
 * @param seq_auth Out: SeqAuth of the acked transmission (for dedup).
 *
 * @return 0 on success, or a negative error code.
 */
static int trans_ack(struct bt_mesh_net_rx *rx, uint8_t hdr,
		     struct net_buf_simple *buf, uint64_t *seq_auth)
{
	bool new_seg_ack = false;
	struct seg_tx *tx;
	unsigned int bit;
	uint32_t ack;
	uint16_t seq_zero;
	uint8_t obo;

	if (buf->len < 6) {
		LOG_ERR("Too short ack message");
		return -EBADMSG;
	}

	/* First 2 octets: OBO flag (bit 15) and SeqZero (bits 14..2). */
	seq_zero = net_buf_simple_pull_be16(buf);
	obo = seq_zero >> 15;
	seq_zero = (seq_zero >> 2) & TRANS_SEQ_ZERO_MASK;

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->friend_match) {
		LOG_DBG("Ack for LPN 0x%04x of this Friend", rx->ctx.recv_dst);
		/* Best effort - we don't have enough info for true SeqAuth */
		*seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_RX(rx), seq_zero);
		return 0;
	} else if (!rx->local_match) {
		return 0;
	}

	ack = net_buf_simple_pull_be32(buf);

	LOG_DBG("OBO %u seq_zero 0x%04x ack 0x%08x", obo, seq_zero, ack);

	tx = seg_tx_lookup(seq_zero, obo, rx->ctx.addr);
	if (!tx) {
		LOG_DBG("No matching TX context for ack");
		return -ENOENT;
	}

	if (!BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
		LOG_ERR("Received ack for group seg");
		return -EINVAL;
	}

	*seq_auth = tx->seq_auth;

	/* An all-zero block means the receiver canceled the SDU. */
	if (!ack) {
		LOG_WRN("SDU canceled");
		seg_tx_complete(tx, -ECANCELED);
		return 0;
	}

	if (find_msb_set(ack) - 1 > tx->seg_n) {
		LOG_ERR("Too large segment number in ack");
		return -EINVAL;
	}

	/* Free each segment whose bit is set in the ack block. */
	while ((bit = find_lsb_set(ack))) {
		if (tx->seg[bit - 1]) {
			LOG_DBG("seg %u/%u acked", bit - 1, tx->seg_n);
			seg_tx_done(tx, bit - 1);
			new_seg_ack = true;
		}

		ack &= ~BIT(bit - 1);
	}

	if (new_seg_ack) {
		tx->attempts_left_without_progress =
			BT_MESH_SAR_TX_RETRANS_NO_PROGRESS;
	}

	if (tx->nack_count) {
		/* If transmission is not in progress it means
		 * that Retransmission Timer is running
		 */
		if (tx->seg_o == 0) {
			k_timeout_t timeout = K_NO_WAIT;

			/* If there are no retransmission attempts left we
			 * immediately trigger the retransmit call that will
			 * end the transmission.
			 */
			if ((BT_MESH_ADDR_IS_UNICAST(tx->dst) &&
			     !tx->attempts_left_without_progress) ||
			    !tx->attempts_left) {
				goto reschedule;
			}

			uint32_t delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);

			/* According to MshPRTv1.1: 3.5.3.3.2, we should reset the retransmit timer
			 * and retransmit immediately when receiving a valid ack message while
			 * Retransmisison timer is running. However, transport should still keep
			 * segment transmission interval time between transmission of each segment.
			 */
			if (delta_ms < BT_MESH_SAR_TX_SEG_INT_MS) {
				timeout = K_MSEC(BT_MESH_SAR_TX_SEG_INT_MS - delta_ms);
			}

reschedule:
			k_work_reschedule(&tx->retransmit, timeout);
		} else {
			/* A round is in progress; note the ack so the round
			 * end schedules an immediate retransmission.
			 */
			tx->ack_received = 1U;
		}
	} else {
		LOG_DBG("SDU TX complete");
		seg_tx_complete(tx, 0);
	}

	return 0;
}
957
/* Dispatch a received transport Control PDU to its handler.
 *
 * Segment acks and heartbeats are processed even without a local
 * address match; friendship opcodes are gated on the Friend/LPN roles.
 *
 * @return 0 on success, or a negative error code (-EBADMSG for an
 *         unknown opcode).
 */
static int ctl_recv(struct bt_mesh_net_rx *rx, uint8_t hdr,
		    struct net_buf_simple *buf, uint64_t *seq_auth)
{
	uint8_t ctl_op = TRANS_CTL_OP(&hdr);

	LOG_DBG("OpCode 0x%02x len %u", ctl_op, buf->len);

	switch (ctl_op) {
	case TRANS_CTL_OP_ACK:
		return trans_ack(rx, hdr, buf, seq_auth);
	case TRANS_CTL_OP_HEARTBEAT:
		return bt_mesh_hb_recv(rx, buf);
	}

	/* Only acks for friendship and heartbeats may need processing without local_match */
	if (!rx->local_match) {
		return 0;
	}

	/* Friend-role opcodes: ignored while we are an LPN ourselves. */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !bt_mesh_lpn_established()) {
		switch (ctl_op) {
		case TRANS_CTL_OP_FRIEND_POLL:
			return bt_mesh_friend_poll(rx, buf);
		case TRANS_CTL_OP_FRIEND_REQ:
			return bt_mesh_friend_req(rx, buf);
		case TRANS_CTL_OP_FRIEND_CLEAR:
			return bt_mesh_friend_clear(rx, buf);
		case TRANS_CTL_OP_FRIEND_CLEAR_CFM:
			return bt_mesh_friend_clear_cfm(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_ADD:
			return bt_mesh_friend_sub_add(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_REM:
			return bt_mesh_friend_sub_rem(rx, buf);
		}
	}

#if defined(CONFIG_BT_MESH_LOW_POWER)
	if (ctl_op == TRANS_CTL_OP_FRIEND_OFFER) {
		return bt_mesh_lpn_friend_offer(rx, buf);
	}

	/* LPN-role opcodes: only accepted from our established Friend. */
	if (rx->ctx.addr == bt_mesh.lpn.frnd) {
		if (ctl_op == TRANS_CTL_OP_FRIEND_CLEAR_CFM) {
			return bt_mesh_lpn_friend_clear_cfm(rx, buf);
		}

		if (!rx->friend_cred) {
			LOG_WRN("Message from friend with wrong credentials");
			return -EINVAL;
		}

		switch (ctl_op) {
		case TRANS_CTL_OP_FRIEND_UPDATE:
			return bt_mesh_lpn_friend_update(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_CFM:
			return bt_mesh_lpn_friend_sub_cfm(rx, buf);
		}
	}
#endif /* CONFIG_BT_MESH_LOW_POWER */

	LOG_WRN("Unhandled TransOpCode 0x%02x", ctl_op);

	return -EBADMSG;
}
1022
/* Handle a received unsegmented transport PDU.
 *
 * Checks the Replay Protection List, pulls the one-octet transport
 * header and dispatches the payload either as a Control PDU or as an
 * access SDU (with the 4-octet short MIC stripped). The RPL entry is
 * only committed once the upper layer accepted the message.
 *
 * @param buf      PDU payload (transport header + SDU/parameters).
 * @param rx       Network RX context.
 * @param seq_auth Out: SeqAuth, filled by ack handling for Friends.
 *
 * @return 0 on success, or a negative error code.
 */
static int trans_unseg(struct net_buf_simple *buf, struct bt_mesh_net_rx *rx,
		       uint64_t *seq_auth)
{
	NET_BUF_SIMPLE_DEFINE_STATIC(sdu, BT_MESH_SDU_UNSEG_MAX);
	uint8_t hdr;
	struct bt_mesh_rpl *rpl = NULL;
	int err;

	if (buf->len < 1) {
		LOG_ERR("Too small unsegmented PDU");
		return -EBADMSG;
	}

	/* Log only after the length check so we never read a header octet
	 * that isn't part of the PDU. (Also fixes the old "AFK" typo.)
	 */
	LOG_DBG("AKF %u AID 0x%02x", AKF(buf->data), AID(buf->data));

	if (bt_mesh_rpl_check(rx, &rpl, false)) {
		LOG_WRN("Replay: src 0x%04x dst 0x%04x seq 0x%06x", rx->ctx.addr, rx->ctx.recv_dst,
			rx->seq);
		return -EINVAL;
	}

	hdr = net_buf_simple_pull_u8(buf);

	if (rx->ctl) {
		err = ctl_recv(rx, hdr, buf, seq_auth);
	} else if (buf->len < 1 + APP_MIC_LEN(0)) {
		LOG_ERR("Too short SDU + MIC");
		err = -EINVAL;
	} else {
		/* Adjust the length to not contain the MIC at the end */
		buf->len -= APP_MIC_LEN(0);
		err = sdu_recv(rx, hdr, 0, buf, &sdu, NULL);
	}

	/* Update rpl only if there is place and upper logic accepted incoming data. */
	if (err == 0 && rpl != NULL) {
		bt_mesh_rpl_update(rpl, rx);
	}

	return err;
}
1064
/* Send a transport Control PDU.
 *
 * Control messages are unencrypted at the transport layer (app_idx is
 * forced to BT_MESH_KEY_UNUSED) and are sent segmented when the
 * parameters don't fit in a single PDU.
 *
 * @param tx       Network TX context.
 * @param ctl_op   Transport Control opcode.
 * @param data     Opcode parameters; used in place (not copied).
 * @param data_len Length of @p data.
 * @param cb       User send callbacks (may be NULL).
 * @param cb_data  User data passed to @p cb.
 *
 * @return 0 on success, or a negative error code.
 */
int bt_mesh_ctl_send(struct bt_mesh_net_tx *tx, uint8_t ctl_op, void *data,
		     size_t data_len,
		     const struct bt_mesh_send_cb *cb, void *cb_data)
{
	struct net_buf_simple buf;

	if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) {
		tx->ctx->send_ttl = bt_mesh_default_ttl_get();
	} else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) {
		LOG_ERR("TTL too large (max 127)");
		return -EINVAL;
	}

	net_buf_simple_init_with_data(&buf, data, data_len);

	/* Parameters that don't fit in one PDU force segmented sending. */
	if (data_len > BT_MESH_SDU_UNSEG_MAX) {
		tx->ctx->send_rel = true;
	}

	tx->ctx->app_idx = BT_MESH_KEY_UNUSED;

	if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED ||
	    BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) {
		LOG_ERR("Invalid destination address");
		return -EINVAL;
	}

	LOG_DBG("src 0x%04x dst 0x%04x ttl 0x%02x ctl 0x%02x", tx->src, tx->ctx->addr,
		tx->ctx->send_ttl, ctl_op);
	LOG_DBG("len %zu: %s", data_len, bt_hex(data, data_len));

	if (tx->ctx->send_rel) {
		return send_seg(tx, &buf, cb, cb_data, &ctl_op);
	} else {
		return send_unseg(tx, &buf, cb, cb_data, &ctl_op);
	}
}
1102
/* Send a Segment Acknowledgment for a (partially) received segmented
 * message.
 *
 * @param sub      Subnet to send on.
 * @param src      Our receiving address (skipped if not unicast).
 * @param dst      Originator of the segmented message.
 * @param ttl      TTL to send with.
 * @param seq_auth SeqAuth of the acked message (SeqZero is derived).
 * @param block    Bitfield of received segments.
 * @param obo      "On behalf of" flag: acking for an LPN as its Friend.
 *
 * @return 0 on success, or a negative error code.
 */
static int send_ack(struct bt_mesh_subnet *sub, uint16_t src, uint16_t dst,
		    uint8_t ttl, uint64_t *seq_auth, uint32_t block, uint8_t obo)
{
	struct bt_mesh_msg_ctx ctx = {
		.net_idx = sub->net_idx,
		.app_idx = BT_MESH_KEY_UNUSED,
		.addr = dst,
		.send_ttl = ttl,
	};
	struct bt_mesh_net_tx tx = {
		.sub = sub,
		.ctx = &ctx,
		/* OBO acks are sent from the Friend's primary address. */
		.src = obo ? bt_mesh_primary_addr() : src,
		.xmit = bt_mesh_net_transmit_get(),
	};
	uint16_t seq_zero = *seq_auth & TRANS_SEQ_ZERO_MASK;
	uint8_t buf[6];

	LOG_DBG("SeqZero 0x%04x Block 0x%08x OBO %u", seq_zero, block, obo);

	if (bt_mesh_lpn_established() && !bt_mesh_has_addr(ctx.addr)) {
		LOG_WRN("Not sending ack when LPN is enabled");
		return 0;
	}

	/* This can happen if the segmented message was destined for a group
	 * or virtual address.
	 */
	if (!BT_MESH_ADDR_IS_UNICAST(src)) {
		LOG_DBG("Not sending ack for non-unicast address");
		return 0;
	}

	/* 2-octet OBO/SeqZero field followed by the 4-octet block. */
	sys_put_be16(((seq_zero << 2) & 0x7ffc) | (obo << 15), buf);
	sys_put_be32(block, &buf[2]);

	return bt_mesh_ctl_send(&tx, TRANS_CTL_OP_ACK, buf, sizeof(buf),
				NULL, NULL);
}
1142
seg_rx_reset(struct seg_rx * rx,bool full_reset)1143 static void seg_rx_reset(struct seg_rx *rx, bool full_reset)
1144 {
1145 int i;
1146
1147 LOG_DBG("rx %p", rx);
1148
1149 /* If this fails, the handler will exit early on the next execution, as
1150 * it checks rx->in_use.
1151 */
1152 (void)k_work_cancel_delayable(&rx->ack);
1153 (void)k_work_cancel_delayable(&rx->discard);
1154
1155 if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->obo &&
1156 rx->block != BLOCK_COMPLETE(rx->seg_n)) {
1157 LOG_WRN("Clearing incomplete buffers from Friend queue");
1158 bt_mesh_friend_clear_incomplete(rx->sub, rx->src, rx->dst,
1159 &rx->seq_auth);
1160 }
1161
1162 for (i = 0; i <= rx->seg_n; i++) {
1163 if (!rx->seg[i]) {
1164 continue;
1165 }
1166
1167 k_mem_slab_free(&segs, rx->seg[i]);
1168 rx->seg[i] = NULL;
1169 }
1170
1171 rx->in_use = 0U;
1172
1173 /* We don't always reset these values since we need to be able to
1174 * send an ack if we receive a segment after we've already received
1175 * the full SDU.
1176 */
1177 if (full_reset) {
1178 rx->seq_auth = 0U;
1179 rx->sub = NULL;
1180 rx->src = BT_MESH_ADDR_UNASSIGNED;
1181 rx->dst = BT_MESH_ADDR_UNASSIGNED;
1182 }
1183 }
1184
seg_discard(struct k_work * work)1185 static void seg_discard(struct k_work *work)
1186 {
1187 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1188 struct seg_rx *rx = CONTAINER_OF(dwork, struct seg_rx, discard);
1189
1190 LOG_WRN("SAR Discard timeout expired");
1191 seg_rx_reset(rx, false);
1192 rx->block = 0U;
1193
1194 if (IS_ENABLED(CONFIG_BT_TESTING)) {
1195 bt_mesh_test_trans_incomp_timer_exp();
1196 }
1197 }
1198
seg_ack(struct k_work * work)1199 static void seg_ack(struct k_work *work)
1200 {
1201 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1202 struct seg_rx *rx = CONTAINER_OF(dwork, struct seg_rx, ack);
1203
1204 if (!rx->in_use || rx->block == BLOCK_COMPLETE(rx->seg_n)) {
1205 /* Cancellation of this timer may have failed. If it fails as
1206 * part of seg_reset, in_use will be false.
1207 * If it fails as part of the processing of a fully received
1208 * SDU, the ack is already being sent from the receive handler,
1209 * and the timer based ack sending can be ignored.
1210 */
1211 return;
1212 }
1213
1214 LOG_DBG("rx %p", rx);
1215
1216 send_ack(rx->sub, rx->dst, rx->src, rx->ttl, &rx->seq_auth,
1217 rx->block, rx->obo);
1218
1219 rx->last_ack = k_uptime_get_32();
1220
1221 if (rx->attempts_left == 0) {
1222 LOG_DBG("Ran out of ack retransmit attempts");
1223 return;
1224 }
1225
1226 if (rx->seg_n > BT_MESH_SAR_RX_SEG_THRESHOLD) {
1227 --rx->attempts_left;
1228 k_work_schedule(&rx->ack, K_MSEC(BT_MESH_SAR_RX_SEG_INT_MS));
1229 }
1230 }
1231
sdu_len_is_ok(bool ctl,uint8_t seg_n)1232 static inline bool sdu_len_is_ok(bool ctl, uint8_t seg_n)
1233 {
1234 return (seg_n < BT_MESH_RX_SEG_MAX);
1235 }
1236
/* Find the RX context tracking segments from net_rx's source to its
 * destination.
 *
 * Return semantics (deliberately asymmetric):
 * - Returns the matching context when its stored SeqAuth is equal to *or
 *   newer than* @p seq_auth, so the caller can detect and discard segments
 *   belonging to an outdated SeqAuth.
 * - When an in-use context with an older SeqAuth matches, the sender has
 *   apparently abandoned that SDU and started a new one: the old context is
 *   fully reset and NULL is returned so the caller allocates a fresh one.
 * - Returns NULL when no context matches at all.
 */
static struct seg_rx *seg_rx_find(struct bt_mesh_net_rx *net_rx,
				  const uint64_t *seq_auth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
		struct seg_rx *rx = &seg_rx[i];

		/* Only source/destination identify a session here; SeqAuth is
		 * compared below.
		 */
		if (rx->src != net_rx->ctx.addr ||
		    rx->dst != net_rx->ctx.recv_dst) {
			continue;
		}

		/* Return newer RX context in addition to an exact match, so
		 * the calling function can properly discard an old SeqAuth.
		 */
		if (rx->seq_auth >= *seq_auth) {
			return rx;
		}

		if (rx->in_use) {
			LOG_WRN("Duplicate SDU from src 0x%04x", net_rx->ctx.addr);

			/* Clear out the old context since the sender
			 * has apparently started sending a new SDU.
			 */
			seg_rx_reset(rx, true);

			/* Return non-match so caller can re-allocate */
			return NULL;
		}
	}

	return NULL;
}
1272
seg_rx_is_valid(struct seg_rx * rx,struct bt_mesh_net_rx * net_rx,const uint8_t * hdr,uint8_t seg_n)1273 static bool seg_rx_is_valid(struct seg_rx *rx, struct bt_mesh_net_rx *net_rx,
1274 const uint8_t *hdr, uint8_t seg_n)
1275 {
1276 if (rx->hdr != *hdr || rx->seg_n != seg_n) {
1277 LOG_ERR("Invalid segment for ongoing session");
1278 return false;
1279 }
1280
1281 if (rx->src != net_rx->ctx.addr || rx->dst != net_rx->ctx.recv_dst) {
1282 LOG_ERR("Invalid source or destination for segment");
1283 return false;
1284 }
1285
1286 if (rx->ctl != net_rx->ctl) {
1287 LOG_ERR("Inconsistent CTL in segment");
1288 return false;
1289 }
1290
1291 return true;
1292 }
1293
seg_rx_alloc(struct bt_mesh_net_rx * net_rx,const uint8_t * hdr,const uint64_t * seq_auth,uint8_t seg_n)1294 static struct seg_rx *seg_rx_alloc(struct bt_mesh_net_rx *net_rx,
1295 const uint8_t *hdr, const uint64_t *seq_auth,
1296 uint8_t seg_n)
1297 {
1298 int i;
1299
1300 /* No race condition on this check, as this function only executes in
1301 * the collaborative Bluetooth rx thread:
1302 */
1303 if (k_mem_slab_num_free_get(&segs) < 1) {
1304 LOG_WRN("Not enough segments for incoming message");
1305 return NULL;
1306 }
1307
1308 for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1309 struct seg_rx *rx = &seg_rx[i];
1310
1311 if (rx->in_use) {
1312 continue;
1313 }
1314
1315 rx->in_use = 1U;
1316 rx->sub = net_rx->sub;
1317 rx->ctl = net_rx->ctl;
1318 rx->seq_auth = *seq_auth;
1319 rx->seg_n = seg_n;
1320 rx->hdr = *hdr;
1321 rx->ttl = net_rx->ctx.send_ttl;
1322 rx->src = net_rx->ctx.addr;
1323 rx->dst = net_rx->ctx.recv_dst;
1324 rx->block = 0U;
1325
1326 LOG_DBG("New RX context. Block Complete 0x%08x", BLOCK_COMPLETE(seg_n));
1327
1328 return rx;
1329 }
1330
1331 return NULL;
1332 }
1333
/* Process a received segment of a segmented transport PDU.
 *
 * Reconstructs the SeqAuth from the packed SeqZero, manages the per-source
 * RX context (find / allocate / validate), buffers the segment, drives the
 * SAR ack and discard timers, and — once every segment has arrived —
 * reassembles and dispatches the SDU to the control or access layer.
 *
 * @param buf       Transport PDU with the network header already removed.
 * @param net_rx    Network-layer RX metadata for this PDU.
 * @param pdu_type  Out: PARTIAL while segments are missing, COMPLETE when
 *                  the full SDU was received (used by the Friend queue).
 * @param seq_auth  Out: SeqAuth of the segmented message.
 * @param seg_count Out: total number of segments (SegN + 1).
 *
 * Returns 0 on success, -EALREADY for duplicates/old SeqAuths, or another
 * negative error code.
 */
static int trans_seg(struct net_buf_simple *buf, struct bt_mesh_net_rx *net_rx,
		     enum bt_mesh_friend_pdu_type *pdu_type, uint64_t *seq_auth,
		     uint8_t *seg_count)
{
	struct bt_mesh_rpl *rpl = NULL;
	struct seg_rx *rx;
	uint8_t *hdr = buf->data;
	uint16_t seq_zero;
	uint32_t auth_seqnum;
	uint8_t seg_n;
	uint8_t seg_o;
	int err;

	/* Minimum: 1 octet header + 3 octets SegO/SegN/SeqZero + payload. */
	if (buf->len < 5) {
		LOG_ERR("Too short segmented message (len %u)", buf->len);
		return -EBADMSG;
	}

	if (bt_mesh_rpl_check(net_rx, &rpl, false)) {
		LOG_WRN("Replay: src 0x%04x dst 0x%04x seq 0x%06x", net_rx->ctx.addr,
			net_rx->ctx.recv_dst, net_rx->seq);
		return -EINVAL;
	}

	LOG_DBG("ASZMIC %u AKF %u AID 0x%02x", ASZMIC(hdr), AKF(hdr), AID(hdr));

	net_buf_simple_pull(buf, 1);

	/* SegO (5 bits) straddles the SeqZero field and the following octet;
	 * SegN is the low 5 bits of that octet.
	 */
	seq_zero = net_buf_simple_pull_be16(buf);
	seg_o = (seq_zero & 0x03) << 3;
	seq_zero = (seq_zero >> 2) & TRANS_SEQ_ZERO_MASK;
	seg_n = net_buf_simple_pull_u8(buf);
	seg_o |= seg_n >> 5;
	seg_n &= 0x1f;

	LOG_DBG("SeqZero 0x%04x SegO %u SegN %u", seq_zero, seg_o, seg_n);

	if (seg_o > seg_n) {
		LOG_ERR("SegO greater than SegN (%u > %u)", seg_o, seg_n);
		return -EBADMSG;
	}

	/* According to MshPRTv1.1:
	 * "The SeqAuth is composed of the IV Index and the sequence number
	 *  (SEQ) of the first segment"
	 *
	 * Therefore we need to calculate very first SEQ in order to find
	 * seqAuth. We can calculate as below:
	 *
	 * SEQ(0) = SEQ(n) - (delta between seqZero and SEQ(n) by looking into
	 * 14 least significant bits of SEQ(n))
	 *
	 * Mentioned delta shall be >= 0, if it is not then seq_auth will
	 * be broken and it will be verified by the code below.
	 */
	*seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_RX(net_rx),
			     (net_rx->seq -
			      ((((net_rx->seq & BIT_MASK(14)) - seq_zero)) &
			       BIT_MASK(13))));
	auth_seqnum = *seq_auth & BIT_MASK(24);
	*seg_count = seg_n + 1;

	/* Look for old RX sessions */
	rx = seg_rx_find(net_rx, seq_auth);
	if (rx) {
		/* Discard old SeqAuth packet */
		if (rx->seq_auth > *seq_auth) {
			LOG_WRN("Ignoring old SeqAuth");
			return -EINVAL;
		}

		if (!seg_rx_is_valid(rx, net_rx, hdr, seg_n)) {
			return -EINVAL;
		}

		if (rx->in_use) {
			LOG_DBG("Existing RX context. Block 0x%08x", rx->block);
			goto found_rx;
		}

		if (rx->block == BLOCK_COMPLETE(rx->seg_n)) {
			LOG_DBG("Got segment for already complete SDU");

			/* We should not send more than one Segment Acknowledgment message
			 * for the same SeqAuth in a period of:
			 * [acknowledgment delay increment * segment transmission interval]
			 *  milliseconds
			 */
			if (k_uptime_get_32() - rx->last_ack >
			    SEQAUTH_ALREADY_PROCESSED_TIMEOUT) {
				send_ack(net_rx->sub, net_rx->ctx.recv_dst,
					 net_rx->ctx.addr, net_rx->ctx.send_ttl,
					 seq_auth, rx->block, rx->obo);
				rx->last_ack = k_uptime_get_32();
			}

			if (rpl) {
				bt_mesh_rpl_update(rpl, net_rx);
			}

			return -EALREADY;
		}

		/* We ignore instead of sending block ack 0 since the
		 * ack timer is always smaller than the incomplete
		 * timer, i.e. the sender is misbehaving.
		 */
		LOG_WRN("Got segment for canceled SDU");
		return -EINVAL;
	}

	/* Bail out early if we're not ready to receive such a large SDU */
	if (!sdu_len_is_ok(net_rx->ctl, seg_n)) {
		LOG_ERR("Too big incoming SDU length");
		send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
			 net_rx->ctx.send_ttl, seq_auth, 0,
			 net_rx->friend_match);
		return -EMSGSIZE;
	}

	/* Verify early that there will be space in the Friend Queue(s) in
	 * case this message is destined to an LPN of ours.
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
	    net_rx->friend_match && !net_rx->local_match &&
	    !bt_mesh_friend_queue_has_space(net_rx->sub->net_idx,
					    net_rx->ctx.addr,
					    net_rx->ctx.recv_dst, seq_auth,
					    *seg_count)) {
		LOG_ERR("No space in Friend Queue for %u segments", *seg_count);
		send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
			 net_rx->ctx.send_ttl, seq_auth, 0,
			 net_rx->friend_match);
		return -ENOBUFS;
	}

	/* Keep track of the received SeqAuth values received from this address
	 * and discard segmented messages that are not newer, as described in
	 * MshPRTv1.1: 3.5.3.4.
	 *
	 * The logic on the first segmented receive is a bit special, since the
	 * initial value of rpl->seg is 0, which would normally fail the
	 * comparison check with auth_seqnum:
	 * - If this is the first time we receive from this source, rpl->src
	 *   will be 0, and we can skip this check.
	 * - If this is the first time we receive from this source on the new IV
	 *   index, rpl->old_iv will be set, and the check is also skipped.
	 * - If this is the first segmented message on the new IV index, but we
	 *   have received an unsegmented message already, the unsegmented
	 *   message will have reset rpl->seg to 0, and this message's SeqAuth
	 *   cannot be zero.
	 */
	if (rpl && rpl->src && auth_seqnum <= rpl->seg &&
	    (!rpl->old_iv || net_rx->old_iv)) {
		LOG_WRN("Ignoring old SeqAuth 0x%06x", auth_seqnum);
		return -EALREADY;
	}

	/* Look for free slot for a new RX session */
	rx = seg_rx_alloc(net_rx, hdr, seq_auth, seg_n);
	if (!rx) {
		/* Warn but don't cancel since the existing slots will
		 * eventually be freed up and we'll be able to process
		 * this one.
		 */
		LOG_WRN("No free slots for new incoming segmented messages");
		return -ENOMEM;
	}

	rx->obo = net_rx->friend_match;

found_rx:
	if (BIT(seg_o) & rx->block) {
		LOG_DBG("Received already received fragment");
		return -EALREADY;
	}

	/* All segments, except the last one, must either have 8 bytes of
	 * payload (for 64bit Net MIC) or 12 bytes of payload (for 32bit
	 * Net MIC).
	 */
	if (seg_o == seg_n) {
		/* Set the expected final buffer length */
		rx->len = seg_n * seg_len(rx->ctl) + buf->len;
		LOG_DBG("Target len %u * %u + %u = %u", seg_n, seg_len(rx->ctl), buf->len, rx->len);

		if (rx->len > BT_MESH_RX_SDU_MAX) {
			LOG_ERR("Too large SDU len");
			send_ack(net_rx->sub, net_rx->ctx.recv_dst,
				 net_rx->ctx.addr, net_rx->ctx.send_ttl,
				 seq_auth, 0, rx->obo);
			seg_rx_reset(rx, true);
			return -EMSGSIZE;
		}
	} else {
		if (buf->len != seg_len(rx->ctl)) {
			LOG_ERR("Incorrect segment size for message type");
			return -EINVAL;
		}
	}

	/* Each accepted segment restarts the SAR Discard timer. */
	LOG_DBG("discard timeout %u", BT_MESH_SAR_RX_DISCARD_TIMEOUT_MS);
	k_work_schedule(&rx->discard,
			K_MSEC(BT_MESH_SAR_RX_DISCARD_TIMEOUT_MS));
	rx->attempts_left = BT_MESH_SAR_RX_ACK_RETRANS_COUNT;

	/* Acks are only sent to unicast destinations, and never while in
	 * Low Power mode.
	 */
	if (!bt_mesh_lpn_established() && BT_MESH_ADDR_IS_UNICAST(rx->dst)) {
		LOG_DBG("ack delay %u", ACK_DELAY(rx->seg_n));
		k_work_reschedule(&rx->ack, K_MSEC(ACK_DELAY(rx->seg_n)));
	}

	/* Allocated segment here */
	err = k_mem_slab_alloc(&segs, &rx->seg[seg_o], K_NO_WAIT);
	if (err) {
		LOG_WRN("Unable allocate buffer for Seg %u", seg_o);
		return -ENOBUFS;
	}

	memcpy(rx->seg[seg_o], buf->data, buf->len);

	LOG_DBG("Received %u/%u", seg_o, seg_n);

	/* Mark segment as received */
	rx->block |= BIT(seg_o);

	if (rx->block != BLOCK_COMPLETE(seg_n)) {
		*pdu_type = BT_MESH_FRIEND_PDU_PARTIAL;
		return 0;
	}

	LOG_DBG("Complete SDU");
	*pdu_type = BT_MESH_FRIEND_PDU_COMPLETE;

	/* If this fails, the work handler will either exit early because the
	 * block is fully received, or rx->in_use is false.
	 */
	(void)k_work_cancel_delayable(&rx->ack);

	send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
		 net_rx->ctx.send_ttl, seq_auth, rx->block, rx->obo);
	rx->last_ack = k_uptime_get_32();

	if (net_rx->ctl) {
		NET_BUF_SIMPLE_DEFINE(sdu, BT_MESH_RX_CTL_MAX);
		seg_rx_assemble(rx, &sdu, 0U);
		err = ctl_recv(net_rx, *hdr, &sdu, seq_auth);
	} else if (rx->len < 1 + APP_MIC_LEN(ASZMIC(hdr))) {
		LOG_ERR("Too short SDU + MIC");
		err = -EINVAL;
	} else {
		NET_BUF_SIMPLE_DEFINE_STATIC(seg_buf, BT_MESH_RX_SDU_MAX);
		struct net_buf_simple sdu;

		/* Decrypting in place to avoid creating two assembly buffers.
		 * We'll reassemble the buffer from the segments before each
		 * decryption attempt.
		 */
		net_buf_simple_init(&seg_buf, 0);
		net_buf_simple_init_with_data(
			&sdu, seg_buf.data, rx->len - APP_MIC_LEN(ASZMIC(hdr)));

		err = sdu_recv(net_rx, *hdr, ASZMIC(hdr), &seg_buf, &sdu, rx);
	}

	/* Update rpl only if there is place and upper logic accepted incoming data. */
	if (err == 0 && rpl != NULL) {
		bt_mesh_rpl_update(rpl, net_rx);
		/* Update the seg, unless it has already been surpassed:
		 * This needs to happen after rpl_update to ensure that the IV
		 * update reset logic inside rpl_update doesn't overwrite the
		 * change.
		 */
		rpl->seg = MAX(rpl->seg, auth_seqnum);
	}

	seg_rx_reset(rx, false);

	return err;
}
1613
/* Entry point for transport-layer reception.
 *
 * Strips the network header, filters messages based on LPN/Friend state,
 * dispatches to the segmented or unsegmented receive path, and finally
 * enqueues the PDU to the Friend queue when it matches one of our LPNs.
 *
 * @param buf Network PDU (network header still present; removed here).
 * @param rx  Network-layer RX metadata.
 *
 * Returns 0 on success or a negative error code from the receive path.
 */
int bt_mesh_trans_recv(struct net_buf_simple *buf, struct bt_mesh_net_rx *rx)
{
	uint64_t seq_auth = TRANS_SEQ_AUTH_NVAL;
	enum bt_mesh_friend_pdu_type pdu_type = BT_MESH_FRIEND_PDU_SINGLE;
	struct net_buf_simple_state state;
	uint8_t seg_count = 0;
	int err;

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
		rx->friend_match = bt_mesh_friend_match(rx->sub->net_idx,
							rx->ctx.recv_dst);
	} else {
		rx->friend_match = false;
	}

	LOG_DBG("src 0x%04x dst 0x%04x seq 0x%08x friend_match %u", rx->ctx.addr, rx->ctx.recv_dst,
		rx->seq, rx->friend_match);

	/* Remove network headers */
	net_buf_simple_pull(buf, BT_MESH_NET_HDR_LEN);

	LOG_DBG("Payload %s", bt_hex(buf->data, buf->len));

	if (IS_ENABLED(CONFIG_BT_TESTING)) {
		bt_mesh_test_net_recv(rx->ctx.recv_ttl, rx->ctl, rx->ctx.addr, rx->ctx.recv_dst,
				      buf->data, buf->len);
	}

	/* If LPN mode is enabled messages are only accepted when we've
	 * requested the Friend to send them. The messages must also
	 * be encrypted using the Friend Credentials.
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
	    bt_mesh_lpn_established() && rx->net_if == BT_MESH_NET_IF_ADV &&
	    (!bt_mesh_lpn_waiting_update() || !rx->friend_cred)) {
		LOG_WRN("Ignoring unexpected message in Low Power mode");
		return -EAGAIN;
	}

	/* Save the app-level state so the buffer can later be placed in
	 * the Friend Queue.
	 */
	net_buf_simple_save(buf, &state);

	if (SEG(buf->data)) {
		/* Segmented messages must match a local element or an
		 * LPN of this Friend.
		 */
		if (!rx->local_match && !rx->friend_match) {
			return 0;
		}

		err = trans_seg(buf, rx, &pdu_type, &seq_auth, &seg_count);
	} else {
		seg_count = 1;

		/* Avoid further processing of unsegmented messages that are not a
		 * local match nor a Friend match, with the exception of ctl messages.
		 */
		if (!rx->ctl && !rx->local_match && !rx->friend_match) {
			return 0;
		}

		err = trans_unseg(buf, rx, &seq_auth);
	}

	/* Notify LPN state machine so a Friend Poll will be sent. */
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER)) {
		bt_mesh_lpn_msg_received(rx);
	}

	/* Rewind the buffer so the Friend queue receives the full transport PDU. */
	net_buf_simple_restore(buf, &state);

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->friend_match && !err) {
		/* seq_auth stays at the NVAL sentinel for unsegmented PDUs. */
		if (seq_auth == TRANS_SEQ_AUTH_NVAL) {
			bt_mesh_friend_enqueue_rx(rx, pdu_type, NULL,
						  seg_count, buf);
		} else {
			bt_mesh_friend_enqueue_rx(rx, pdu_type, &seq_auth,
						  seg_count, buf);
		}
	}

	return err;
}
1699
bt_mesh_rx_reset(void)1700 void bt_mesh_rx_reset(void)
1701 {
1702 int i;
1703
1704 LOG_DBG("");
1705
1706 for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1707 seg_rx_reset(&seg_rx[i], true);
1708 }
1709 }
1710
bt_mesh_trans_reset(void)1711 void bt_mesh_trans_reset(void)
1712 {
1713 int i;
1714
1715 bt_mesh_rx_reset();
1716
1717 LOG_DBG("");
1718
1719 for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
1720 seg_tx_reset(&seg_tx[i]);
1721 }
1722
1723 bt_mesh_rpl_clear();
1724 bt_mesh_va_clear();
1725 }
1726
bt_mesh_trans_init(void)1727 void bt_mesh_trans_init(void)
1728 {
1729 int i;
1730
1731 for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
1732 k_work_init_delayable(&seg_tx[i].retransmit, seg_retransmit);
1733 }
1734
1735 for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1736 k_work_init_delayable(&seg_rx[i].ack, seg_ack);
1737 k_work_init_delayable(&seg_rx[i].discard, seg_discard);
1738 }
1739 }
1740