1 /*
2 * Copyright (c) 2017 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <errno.h>
9 #include <string.h>
10 #include <stdlib.h>
11 #include <sys/types.h>
12 #include <zephyr/sys/util.h>
13 #include <zephyr/sys/byteorder.h>
14
15 #include <zephyr/net/buf.h>
16
17 #include <zephyr/bluetooth/hci.h>
18 #include <zephyr/bluetooth/mesh.h>
19
20 #include "common/bt_str.h"
21
22 #include "host/testing.h"
23
24 #include "crypto.h"
25 #include "mesh.h"
26 #include "net.h"
27 #include "app_keys.h"
28 #include "lpn.h"
29 #include "rpl.h"
30 #include "friend.h"
31 #include "access.h"
32 #include "foundation.h"
33 #include "sar_cfg_internal.h"
34 #include "settings.h"
35 #include "heartbeat.h"
36 #include "transport.h"
37 #include "va.h"
38
39 #define LOG_LEVEL CONFIG_BT_MESH_TRANS_LOG_LEVEL
40 #include <zephyr/logging/log.h>
41 LOG_MODULE_REGISTER(bt_mesh_transport);
42
/* Lower 6 bits of the first transport PDU octet hold the AppKey ID (AID) */
#define AID_MASK                    ((uint8_t)(BIT_MASK(6)))

/* First/second octet field accessors of a (un)segmented transport PDU */
#define SEG(data)                   ((data)[0] >> 7)
#define AKF(data)                   (((data)[0] >> 6) & 0x01)
#define AID(data)                   ((data)[0] & AID_MASK)
#define ASZMIC(data)                (((data)[1] >> 7) & 1)

/* TransMIC length in bytes for the given SZMIC flag value */
#define APP_MIC_LEN(aszmic) ((aszmic) ? BT_MESH_MIC_LONG : BT_MESH_MIC_SHORT)

/* First octet of an (un)segmented Access PDU. Arguments are fully
 * parenthesized so expression arguments expand safely.
 */
#define UNSEG_HDR(akf, aid)         (((akf) << 6) | ((aid) & AID_MASK))
#define SEG_HDR(akf, aid)           (UNSEG_HDR(akf, aid) | 0x80)

/* Bitmask with bits 0..seg_n set, i.e. "all segments received/acked" */
#define BLOCK_COMPLETE(seg_n)       (uint32_t)(((uint64_t)1 << ((seg_n) + 1)) - 1)

/* 56-bit SeqAuth: IV Index in the high bits, 24-bit sequence number below */
#define SEQ_AUTH(iv_index, seq)     (((uint64_t)(iv_index)) << 24 | (uint64_t)(seq))

/* How long to wait for available buffers before giving up */
#define BUF_TIMEOUT                 K_NO_WAIT

/* Segment ack delay per MshPRTv1.1, bounded by the configured increment */
#define ACK_DELAY(seg_n)                                                       \
	(MIN(2 * (seg_n) + 1, BT_MESH_SAR_RX_ACK_DELAY_INC_X2) *               \
	 BT_MESH_SAR_RX_SEG_INT_MS / 2)

#define SEQAUTH_ALREADY_PROCESSED_TIMEOUT                                      \
	(BT_MESH_SAR_RX_ACK_DELAY_INC_X2 * BT_MESH_SAR_RX_SEG_INT_MS / 2)
68
/* Context for one outgoing segmented message (SAR TX). Allocated from
 * the seg_tx[] pool; a context is in use while nack_count is non-zero.
 */
static struct seg_tx {
	struct bt_mesh_subnet *sub;    /* Subnet the message is sent on */
	void *seg[BT_MESH_TX_SEG_MAX]; /* Segment payloads (from segs slab) */
	uint64_t seq_auth;             /* SeqAuth: IV Index | first SeqNum */
	int64_t adv_start_timestamp; /* Calculate adv duration and adjust intervals*/
	uint16_t src;                  /* Source element address */
	uint16_t dst;                  /* Destination address */
	uint16_t ack_src;    /* Address acks arrive from (Friend OBO case) */
	uint16_t len;                  /* Total SDU length in bytes */
	uint8_t hdr;                   /* First transport PDU octet */
	uint8_t xmit;                  /* Network transmit parameter */
	uint8_t seg_n; /* Last segment index */
	uint8_t seg_o; /* Segment being sent */
	uint8_t nack_count; /* Number of unacked segs */
	uint8_t attempts_left;         /* Retransmission rounds remaining */
	uint8_t attempts_left_without_progress; /* Rounds left w/o new acks */
	uint8_t ttl; /* Transmitted TTL value */
	uint8_t blocked:1, /* Blocked by ongoing tx */
		ctl:1, /* Control packet */
		aszmic:1, /* MIC size */
		started:1, /* Start cb called */
		friend_cred:1, /* Using Friend credentials */
		seg_send_started:1, /* Used to check if seg_send_start cb is called */
		ack_received:1; /* Ack received during seg message transmission. */
	const struct bt_mesh_send_cb *cb; /* User callbacks (may be NULL) */
	void *cb_data;                    /* User data passed to cb */
	struct k_work_delayable retransmit; /* Retransmit timer */
} seg_tx[CONFIG_BT_MESH_TX_SEG_MSG_COUNT];
97
/* Context for one incoming segmented message (SAR RX). */
static struct seg_rx {
	struct bt_mesh_subnet *sub;    /* Subnet the message arrived on */
	void *seg[BT_MESH_RX_SEG_MAX]; /* Received segment payloads */
	uint64_t seq_auth;             /* SeqAuth of the transaction */
	uint16_t src;                  /* Source address */
	uint16_t dst;                  /* Destination address */
	uint16_t len;                  /* Bytes received so far */
	uint8_t hdr;                   /* First transport PDU octet */
	uint8_t seg_n:5,               /* Last segment index */
		ctl:1,                 /* Control message */
		in_use:1,              /* Context holds an active transaction */
		obo:1;                 /* Acking On Behalf Of an LPN (Friend) */
	uint8_t ttl;                   /* TTL to use when acking */
	uint8_t attempts_left;         /* NOTE(review): presumably remaining
					* ack (re)transmissions — handler not
					* visible in this chunk, confirm.
					*/
	uint32_t block;                /* Bitfield of received segments */
	uint32_t last_ack;             /* NOTE(review): looks like a timestamp
					* of the last sent ack — confirm with
					* the ack handler.
					*/
	struct k_work_delayable ack;     /* Segment ack timer */
	struct k_work_delayable discard; /* Incomplete-transaction discard timer */
} seg_rx[CONFIG_BT_MESH_RX_SEG_MSG_COUNT];

/* Segment buffer pool shared by the TX and RX contexts above. */
K_MEM_SLAB_DEFINE(segs, BT_MESH_APP_SEG_SDU_MAX, CONFIG_BT_MESH_SEG_BUFS, 4);
119
send_unseg(struct bt_mesh_net_tx * tx,struct net_buf_simple * sdu,const struct bt_mesh_send_cb * cb,void * cb_data,const uint8_t * ctl_op)120 static int send_unseg(struct bt_mesh_net_tx *tx, struct net_buf_simple *sdu,
121 const struct bt_mesh_send_cb *cb, void *cb_data,
122 const uint8_t *ctl_op)
123 {
124 struct bt_mesh_adv *adv;
125
126 adv = bt_mesh_adv_create(BT_MESH_ADV_DATA, BT_MESH_ADV_TAG_LOCAL,
127 tx->xmit, BUF_TIMEOUT);
128 if (!adv) {
129 LOG_ERR("Out of network advs");
130 return -ENOBUFS;
131 }
132
133 net_buf_simple_reserve(&adv->b, BT_MESH_NET_HDR_LEN);
134
135 if (ctl_op) {
136 net_buf_simple_add_u8(&adv->b, TRANS_CTL_HDR(*ctl_op, 0));
137 } else if (BT_MESH_IS_DEV_KEY(tx->ctx->app_idx)) {
138 net_buf_simple_add_u8(&adv->b, UNSEG_HDR(0, 0));
139 } else {
140 net_buf_simple_add_u8(&adv->b, UNSEG_HDR(1, tx->aid));
141 }
142
143 net_buf_simple_add_mem(&adv->b, sdu->data, sdu->len);
144
145 if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
146 if (!bt_mesh_friend_queue_has_space(tx->sub->net_idx,
147 tx->src, tx->ctx->addr,
148 NULL, 1)) {
149 if (BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) {
150 LOG_ERR("Not enough space in Friend Queue");
151 bt_mesh_adv_unref(adv);
152 return -ENOBUFS;
153 } else {
154 LOG_WRN("No space in Friend Queue");
155 goto send;
156 }
157 }
158
159 if (bt_mesh_friend_enqueue_tx(tx, BT_MESH_FRIEND_PDU_SINGLE,
160 NULL, 1, &adv->b) &&
161 BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) {
162 /* PDUs for a specific Friend should only go
163 * out through the Friend Queue.
164 */
165 bt_mesh_adv_unref(adv);
166 send_cb_finalize(cb, cb_data);
167 return 0;
168 }
169 }
170
171 send:
172 return bt_mesh_net_send(tx, adv, cb, cb_data);
173 }
174
seg_len(bool ctl)175 static inline uint8_t seg_len(bool ctl)
176 {
177 if (ctl) {
178 return BT_MESH_CTL_SEG_SDU_MAX;
179 } else {
180 return BT_MESH_APP_SEG_SDU_MAX;
181 }
182 }
183
bt_mesh_tx_in_progress(void)184 bool bt_mesh_tx_in_progress(void)
185 {
186 int i;
187
188 for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
189 if (seg_tx[i].nack_count) {
190 return true;
191 }
192 }
193
194 return false;
195 }
196
/* Mark segment @seg_idx of @tx as delivered: free its buffer back to the
 * slab and drop the outstanding-segment count.
 */
static void seg_tx_done(struct seg_tx *tx, uint8_t seg_idx)
{
	k_mem_slab_free(&segs, (void *)tx->seg[seg_idx]);
	tx->seg[seg_idx] = NULL;
	tx->nack_count--;
}
203
seg_tx_blocks(struct seg_tx * tx,uint16_t src,uint16_t dst)204 static bool seg_tx_blocks(struct seg_tx *tx, uint16_t src, uint16_t dst)
205 {
206 return (tx->src == src) && (tx->dst == dst);
207 }
208
seg_tx_unblock_check(struct seg_tx * tx)209 static void seg_tx_unblock_check(struct seg_tx *tx)
210 {
211 struct seg_tx *blocked = NULL;
212 int i;
213
214 /* Unblock the first blocked tx with the same params. */
215 for (i = 0; i < ARRAY_SIZE(seg_tx); ++i) {
216 if (&seg_tx[i] != tx &&
217 seg_tx[i].blocked &&
218 seg_tx_blocks(tx, seg_tx[i].src, seg_tx[i].dst) &&
219 (!blocked || seg_tx[i].seq_auth < blocked->seq_auth)) {
220 blocked = &seg_tx[i];
221 }
222 }
223
224 if (blocked) {
225 LOG_DBG("Unblocked 0x%04x", (uint16_t)(blocked->seq_auth & TRANS_SEQ_ZERO_MASK));
226 blocked->blocked = false;
227 k_work_reschedule(&blocked->retransmit, K_NO_WAIT);
228 }
229 }
230
/* Release all resources held by @tx and return it to the idle state:
 * cancel the retransmit timer, clear addressing/callback state, free any
 * remaining segment buffers, and let a deferred IV Update proceed.
 */
static void seg_tx_reset(struct seg_tx *tx)
{
	int i;

	/* If this call fails, the handler will exit early, as nack_count is 0. */
	(void)k_work_cancel_delayable(&tx->retransmit);

	tx->cb = NULL;
	tx->cb_data = NULL;
	tx->seq_auth = 0U;
	tx->sub = NULL;
	tx->src = BT_MESH_ADDR_UNASSIGNED;
	tx->dst = BT_MESH_ADDR_UNASSIGNED;
	tx->ack_src = BT_MESH_ADDR_UNASSIGNED;
	tx->blocked = false;

	/* nack_count doubles as a remaining-buffer count here: once it hits
	 * zero there is nothing left to free.
	 */
	for (i = 0; i <= tx->seg_n && tx->nack_count; i++) {
		if (!tx->seg[i]) {
			continue;
		}

		seg_tx_done(tx, i);
	}

	tx->nack_count = 0;
	tx->seg_send_started = 0;
	tx->ack_received = 0;

	if (atomic_test_and_clear_bit(bt_mesh.flags, BT_MESH_IVU_PENDING)) {
		LOG_DBG("Proceeding with pending IV Update");
		/* bt_mesh_net_iv_update() will re-enable the flag if this
		 * wasn't the only transfer.
		 */
		bt_mesh_net_iv_update(bt_mesh.iv_index, false);
	}
}
267
seg_tx_complete(struct seg_tx * tx,int err)268 static inline void seg_tx_complete(struct seg_tx *tx, int err)
269 {
270 const struct bt_mesh_send_cb *cb = tx->cb;
271 void *cb_data = tx->cb_data;
272
273 seg_tx_unblock_check(tx);
274
275 seg_tx_reset(tx);
276
277 if (cb && cb->end) {
278 cb->end(err, cb_data);
279 }
280 }
281
schedule_transmit_continue(struct seg_tx * tx,uint32_t delta)282 static void schedule_transmit_continue(struct seg_tx *tx, uint32_t delta)
283 {
284 uint32_t timeout = 0;
285
286 if (!tx->nack_count) {
287 return;
288 }
289
290 LOG_DBG("");
291
292 if (delta < BT_MESH_SAR_TX_SEG_INT_MS) {
293 timeout = BT_MESH_SAR_TX_SEG_INT_MS - delta;
294 }
295
296 /* If it is not the last segment then continue transmission after Segment Interval,
297 * otherwise continue immediately as the callback will finish this transmission and
298 * progress into retransmission.
299 */
300 k_work_reschedule(&tx->retransmit,
301 (tx->seg_o <= tx->seg_n) ?
302 K_MSEC(timeout) :
303 K_NO_WAIT);
304 }
305
/* Advertising 'start' callback for one segment.
 *
 * Forwards the first start event to the user callback, records the
 * timestamp used to measure how long the advertiser held the segment,
 * and keeps the state machine alive if the send failed immediately.
 */
static void seg_send_start(uint16_t duration, int err, void *user_data)
{
	struct seg_tx *tx = user_data;

	/* The user's start callback fires once per transfer, not per segment. */
	if (!tx->started && tx->cb && tx->cb->start) {
		tx->cb->start(duration, err, tx->cb_data);
		tx->started = 1U;
	}

	tx->seg_send_started = 1U;
	tx->adv_start_timestamp = k_uptime_get();

	/* If there's an error in transmitting the 'sent' callback will never
	 * be called. Make sure that we kick the retransmit timer also in this
	 * case since otherwise we risk the transmission of becoming stale.
	 */
	if (err) {
		schedule_transmit_continue(tx, 0);
	}
}
326
seg_sent(int err,void * user_data)327 static void seg_sent(int err, void *user_data)
328 {
329 struct seg_tx *tx = user_data;
330 uint32_t delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);
331
332 if (!tx->seg_send_started) {
333 return;
334 }
335
336 schedule_transmit_continue(tx, delta_ms);
337 }
338
/* Per-segment advertising callbacks that drive the SAR TX state machine. */
static const struct bt_mesh_send_cb seg_sent_cb = {
	.start = seg_send_start,
	.end = seg_sent,
};
343
/* Serialize segment @seg_o of @tx into @buf: the 4-octet segmented
 * transport header followed by the segment payload.
 */
static void seg_tx_buf_build(struct seg_tx *tx, uint8_t seg_o,
			     struct net_buf_simple *buf)
{
	uint16_t seq_zero = tx->seq_auth & TRANS_SEQ_ZERO_MASK;
	/* The final segment may be shorter than a full segment. */
	uint8_t len = MIN(seg_len(tx->ctl), tx->len - (seg_len(tx->ctl) * seg_o));

	net_buf_simple_add_u8(buf, tx->hdr);
	/* SZMIC flag | SeqZero[12:6] */
	net_buf_simple_add_u8(buf, (tx->aszmic << 7) | seq_zero >> 6);
	/* SeqZero[5:0] | SegO[4:3] */
	net_buf_simple_add_u8(buf, (((seq_zero & 0x3f) << 2) | (seg_o >> 3)));
	/* SegO[2:0] | SegN */
	net_buf_simple_add_u8(buf, ((seg_o & 0x07) << 5) | tx->seg_n);
	net_buf_simple_add_mem(buf, tx->seg[seg_o], len);
}
356
/* Send (or resend) the unacknowledged segments of @tx.
 *
 * Only one segment is sent per invocation; the seg_sent_cb callbacks
 * reschedule this function after the Segment Interval. When all segments
 * of a round have gone out, a retransmission round is scheduled, or the
 * transfer is completed once the attempt counters are exhausted.
 */
static void seg_tx_send_unacked(struct seg_tx *tx)
{
	if (!tx->nack_count) {
		return;
	}

	uint32_t delta_ms;
	uint32_t timeout;
	struct bt_mesh_msg_ctx ctx = {
		.net_idx = tx->sub->net_idx,
		/* App idx only used by network to detect control messages: */
		.app_idx = (tx->ctl ? BT_MESH_KEY_UNUSED : 0),
		.addr = tx->dst,
		.send_rel = true,
		.send_ttl = tx->ttl,
	};
	struct bt_mesh_net_tx net_tx = {
		.sub = tx->sub,
		.ctx = &ctx,
		.src = tx->src,
		.xmit = tx->xmit,
		.friend_cred = tx->friend_cred,
		.aid = tx->hdr & AID_MASK,
	};

	/* Unicast transfers abort early when no new ack has arrived for the
	 * configured number of rounds.
	 */
	if (BT_MESH_ADDR_IS_UNICAST(tx->dst) &&
	    !tx->attempts_left_without_progress) {
		LOG_ERR("Ran out of retransmit without progress attempts");
		seg_tx_complete(tx, -ETIMEDOUT);
		return;
	}

	if (!tx->attempts_left) {
		if (BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
			LOG_ERR("Ran out of retransmit attempts");
			seg_tx_complete(tx, -ETIMEDOUT);
		} else {
			/* Segmented sending to groups doesn't have acks, so
			 * running out of attempts is the expected behavior.
			 */
			seg_tx_complete(tx, 0);
		}

		return;
	}

	LOG_DBG("SeqZero: 0x%04x Attempts: %u",
		(uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK), tx->attempts_left);

	while (tx->seg_o <= tx->seg_n) {
		struct bt_mesh_adv *seg;
		int err;

		/* Already acknowledged segments have a NULL buffer. */
		if (!tx->seg[tx->seg_o]) {
			/* Move on to the next segment */
			tx->seg_o++;
			continue;
		}

		seg = bt_mesh_adv_create(BT_MESH_ADV_DATA, BT_MESH_ADV_TAG_LOCAL,
					 tx->xmit, BUF_TIMEOUT);
		if (!seg) {
			LOG_DBG("Allocating segment failed");
			goto end;
		}

		net_buf_simple_reserve(&seg->b, BT_MESH_NET_HDR_LEN);
		seg_tx_buf_build(tx, tx->seg_o, &seg->b);

		LOG_DBG("Sending %u/%u", tx->seg_o, tx->seg_n);

		err = bt_mesh_net_send(&net_tx, seg, &seg_sent_cb, tx);
		if (err) {
			LOG_DBG("Sending segment failed");
			goto end;
		}

		/* Move on to the next segment */
		tx->seg_o++;

		tx->ack_received = 0U;

		/* Return here to let the advertising layer process the message.
		 * This function will be called again after Segment Interval.
		 */
		return;
	}


	/* All segments have been sent */
	tx->seg_o = 0U;
	tx->attempts_left--;
	if (BT_MESH_ADDR_IS_UNICAST(tx->dst) && !tx->ack_received) {
		tx->attempts_left_without_progress--;
	}

end:
	/* As an LPN, poll the Friend so queued acks can reach us. */
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
	    bt_mesh_lpn_established() && !bt_mesh_has_addr(ctx.addr)) {
		bt_mesh_lpn_poll();
	}

	delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);
	if (tx->ack_received) {
		/* Schedule retransmission immediately but keep SAR segment interval time if
		 * SegAck was received while sending last segment.
		 */
		timeout = BT_MESH_SAR_TX_SEG_INT_MS;
		tx->ack_received = 0U;
	} else {
		timeout = BT_MESH_SAR_TX_RETRANS_TIMEOUT_MS(tx->dst, tx->ttl);
	}

	if (delta_ms < timeout) {
		timeout -= delta_ms;
	}

	/* Schedule a retransmission */
	k_work_reschedule(&tx->retransmit, K_MSEC(timeout));
}
477
seg_retransmit(struct k_work * work)478 static void seg_retransmit(struct k_work *work)
479 {
480 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
481 struct seg_tx *tx = CONTAINER_OF(dwork, struct seg_tx, retransmit);
482
483 seg_tx_send_unacked(tx);
484 }
485
/* Start a new segmented transmission of @sdu.
 *
 * Allocates a free seg_tx context (blocking behind any ongoing transfer
 * with the same source/destination pair), splits the SDU into segment
 * buffers, offers the segments to Friend Queues, and kicks off the first
 * transmission round.
 *
 * @return 0 on success, -EBUSY when no context is free, -ENOBUFS when
 * segment buffers or Friend Queue space run out.
 */
static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu,
		    const struct bt_mesh_send_cb *cb, void *cb_data,
		    uint8_t *ctl_op)
{
	bool blocked = false;
	struct seg_tx *tx;
	uint8_t seg_o;
	int i;

	LOG_DBG("src 0x%04x dst 0x%04x app_idx 0x%04x aszmic %u sdu_len %u", net_tx->src,
		net_tx->ctx->addr, net_tx->ctx->app_idx, net_tx->aszmic, sdu->len);

	/* Find a free context while checking whether any active transfer
	 * with the same endpoints must block this one.
	 */
	for (tx = NULL, i = 0; i < ARRAY_SIZE(seg_tx); i++) {
		if (seg_tx[i].nack_count) {
			blocked |= seg_tx_blocks(&seg_tx[i], net_tx->src,
						 net_tx->ctx->addr);
		} else if (!tx) {
			tx = &seg_tx[i];
		}
	}

	if (!tx) {
		LOG_ERR("No multi-segment message contexts available");
		return -EBUSY;
	}

	/* First transport octet: CTL opcode, or SEG|AKF|AID for access */
	if (ctl_op) {
		tx->hdr = TRANS_CTL_HDR(*ctl_op, 1);
	} else if (BT_MESH_IS_DEV_KEY(net_tx->ctx->app_idx)) {
		tx->hdr = SEG_HDR(0, 0);
	} else {
		tx->hdr = SEG_HDR(1, net_tx->aid);
	}

	tx->src = net_tx->src;
	tx->dst = net_tx->ctx->addr;
	tx->seg_n = (sdu->len - 1) / seg_len(!!ctl_op);
	tx->seg_o = 0;
	tx->len = sdu->len;
	tx->nack_count = tx->seg_n + 1;
	tx->seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_TX, bt_mesh.seq);
	tx->sub = net_tx->sub;
	tx->cb = cb;
	tx->cb_data = cb_data;
	tx->attempts_left = BT_MESH_SAR_TX_RETRANS_COUNT(tx->dst);
	tx->attempts_left_without_progress = BT_MESH_SAR_TX_RETRANS_NO_PROGRESS;
	tx->xmit = net_tx->xmit;
	tx->aszmic = net_tx->aszmic;
	tx->friend_cred = net_tx->friend_cred;
	tx->blocked = blocked;
	tx->started = 0;
	tx->seg_send_started = 0;
	tx->ctl = !!ctl_op;
	tx->ttl = net_tx->ctx->send_ttl;

	LOG_DBG("SeqZero 0x%04x (segs: %u)", (uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK),
		tx->nack_count);

	/* A unicast Friend destination must be able to queue the whole
	 * transfer, or the LPN would never receive it.
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
	    !bt_mesh_friend_queue_has_space(tx->sub->net_idx, net_tx->src,
					    tx->dst, &tx->seq_auth,
					    tx->seg_n + 1) &&
	    BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
		LOG_ERR("Not enough space in Friend Queue for %u segments", tx->seg_n + 1);
		seg_tx_reset(tx);
		return -ENOBUFS;
	}

	/* Split the SDU into segment-sized chunks, consuming @sdu. */
	for (seg_o = 0U; sdu->len; seg_o++) {
		void *buf;
		uint16_t len;
		int err;

		err = k_mem_slab_alloc(&segs, &buf, BUF_TIMEOUT);
		if (err) {
			LOG_ERR("Out of segment buffers");
			seg_tx_reset(tx);
			return -ENOBUFS;
		}

		len = MIN(sdu->len, seg_len(!!ctl_op));
		memcpy(buf, net_buf_simple_pull_mem(sdu, len), len);

		LOG_DBG("seg %u: %s", seg_o, bt_hex(buf, len));

		tx->seg[seg_o] = buf;

		if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
			enum bt_mesh_friend_pdu_type type;

			NET_BUF_SIMPLE_DEFINE(seg, 16);
			seg_tx_buf_build(tx, seg_o, &seg);

			if (seg_o == tx->seg_n) {
				type = BT_MESH_FRIEND_PDU_COMPLETE;
			} else {
				type = BT_MESH_FRIEND_PDU_PARTIAL;
			}

			if (bt_mesh_friend_enqueue_tx(
				    net_tx, type, ctl_op ? NULL : &tx->seq_auth,
				    tx->seg_n + 1, &seg) &&
			    BT_MESH_ADDR_IS_UNICAST(net_tx->ctx->addr)) {
				/* PDUs for a specific Friend should only go
				 * out through the Friend Queue.
				 */
				k_mem_slab_free(&segs, buf);
				tx->seg[seg_o] = NULL;
			}

		}

	}

	/* This can happen if segments only went into the Friend Queue */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !tx->seg[0]) {
		seg_tx_reset(tx);

		/* If there was a callback notify sending immediately since
		 * there's no other way to track this (at least currently)
		 * with the Friend Queue.
		 */
		send_cb_finalize(cb, cb_data);
		return 0;
	}

	if (blocked) {
		/* Move the sequence number, so we don't end up creating
		 * another segmented transmission with the same SeqZero while
		 * this one is blocked.
		 */
		bt_mesh_next_seq();
		LOG_DBG("Blocked.");
		return 0;
	}

	seg_tx_send_unacked(tx);

	return 0;
}
626
trans_encrypt(const struct bt_mesh_net_tx * tx,const struct bt_mesh_key * key,struct net_buf_simple * msg)627 static int trans_encrypt(const struct bt_mesh_net_tx *tx, const struct bt_mesh_key *key,
628 struct net_buf_simple *msg)
629 {
630 struct bt_mesh_app_crypto_ctx crypto = {
631 .dev_key = BT_MESH_IS_DEV_KEY(tx->ctx->app_idx),
632 .aszmic = tx->aszmic,
633 .src = tx->src,
634 .dst = tx->ctx->addr,
635 .seq_num = bt_mesh.seq,
636 .iv_index = BT_MESH_NET_IVI_TX,
637 };
638
639 if (BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) {
640 crypto.ad = tx->ctx->uuid;
641 }
642
643 return bt_mesh_app_encrypt(key, &crypto, msg);
644 }
645
bt_mesh_trans_send(struct bt_mesh_net_tx * tx,struct net_buf_simple * msg,const struct bt_mesh_send_cb * cb,void * cb_data)646 int bt_mesh_trans_send(struct bt_mesh_net_tx *tx, struct net_buf_simple *msg,
647 const struct bt_mesh_send_cb *cb, void *cb_data)
648 {
649 const struct bt_mesh_key *key;
650 uint8_t aid;
651 int err;
652
653 if (msg->len < 1) {
654 LOG_ERR("Zero-length SDU not allowed");
655 return -EINVAL;
656 }
657
658 if (msg->len > BT_MESH_TX_SDU_MAX - BT_MESH_MIC_SHORT) {
659 LOG_ERR("Message too big: %u", msg->len);
660 return -EMSGSIZE;
661 }
662
663 if (net_buf_simple_tailroom(msg) < BT_MESH_MIC_SHORT) {
664 LOG_ERR("Insufficient tailroom for Transport MIC");
665 return -EINVAL;
666 }
667
668 if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) {
669 tx->ctx->send_ttl = bt_mesh_default_ttl_get();
670 } else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) {
671 LOG_ERR("TTL too large (max 127)");
672 return -EINVAL;
673 }
674
675 if (msg->len > BT_MESH_SDU_UNSEG_MAX) {
676 tx->ctx->send_rel = true;
677 }
678
679 if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED ||
680 (!BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr) &&
681 BT_MESH_IS_DEV_KEY(tx->ctx->app_idx))) {
682 LOG_ERR("Invalid destination address");
683 return -EINVAL;
684 }
685
686 err = bt_mesh_keys_resolve(tx->ctx, &tx->sub, &key, &aid);
687 if (err) {
688 return err;
689 }
690
691 LOG_DBG("net_idx 0x%04x app_idx 0x%04x dst 0x%04x", tx->sub->net_idx, tx->ctx->app_idx,
692 tx->ctx->addr);
693 LOG_DBG("len %u: %s", msg->len, bt_hex(msg->data, msg->len));
694
695 tx->xmit = bt_mesh_net_transmit_get();
696 tx->aid = aid;
697
698 if (!tx->ctx->send_rel || net_buf_simple_tailroom(msg) < 8) {
699 tx->aszmic = 0U;
700 } else {
701 tx->aszmic = 1U;
702 }
703
704 err = trans_encrypt(tx, key, msg);
705 if (err) {
706 return err;
707 }
708
709 if (tx->ctx->send_rel) {
710 err = send_seg(tx, msg, cb, cb_data, NULL);
711 } else {
712 err = send_unseg(tx, msg, cb, cb_data, NULL);
713 }
714
715 return err;
716 }
717
seg_rx_assemble(struct seg_rx * rx,struct net_buf_simple * buf,uint8_t aszmic)718 static void seg_rx_assemble(struct seg_rx *rx, struct net_buf_simple *buf,
719 uint8_t aszmic)
720 {
721 int i;
722
723 net_buf_simple_reset(buf);
724
725 for (i = 0; i <= rx->seg_n; i++) {
726 net_buf_simple_add_mem(buf, rx->seg[i],
727 MIN(seg_len(rx->ctl),
728 rx->len - (i * seg_len(rx->ctl))));
729 }
730
731 /* Adjust the length to not contain the MIC at the end */
732 if (!rx->ctl) {
733 buf->len -= APP_MIC_LEN(aszmic);
734 }
735 }
736
/* State shared between sdu_recv() and its per-key decryption attempts. */
struct decrypt_ctx {
	struct bt_mesh_app_crypto_ctx crypto; /* Nonce material for decryption */
	struct net_buf_simple *buf; /* Encrypted payload (re-assembled per try) */
	struct net_buf_simple *sdu; /* Output buffer for the decrypted SDU */
	struct seg_rx *seg;         /* Non-NULL when decrypting a segmented RX */
};
743
/* Candidate-key callback for bt_mesh_app_key_find(): try decrypting the
 * SDU with @key. For virtual destinations, every Label UUID matching the
 * address hash is tried in turn until one authenticates. Segmented
 * messages are re-assembled before each attempt.
 *
 * @return 0 on successful decryption, negative errno otherwise.
 */
static int sdu_try_decrypt(struct bt_mesh_net_rx *rx, const struct bt_mesh_key *key,
			   void *cb_data)
{
	struct decrypt_ctx *ctx = cb_data;
	int err;

	ctx->crypto.ad = NULL;

	do {
		if (ctx->seg) {
			seg_rx_assemble(ctx->seg, ctx->buf, ctx->crypto.aszmic);
		}

		if (BT_MESH_ADDR_IS_VIRTUAL(rx->ctx.recv_dst)) {
			/* Advance to the next Label UUID candidate. */
			ctx->crypto.ad = bt_mesh_va_uuid_get(rx->ctx.recv_dst, ctx->crypto.ad,
							     NULL);

			if (!ctx->crypto.ad) {
				return -ENOENT;
			}
		}

		net_buf_simple_reset(ctx->sdu);

		err = bt_mesh_app_decrypt(key, &ctx->crypto, ctx->buf, ctx->sdu);
	} while (err && ctx->crypto.ad != NULL);

	if (!err && BT_MESH_ADDR_IS_VIRTUAL(rx->ctx.recv_dst)) {
		rx->ctx.uuid = ctx->crypto.ad;
	}

	return err;
}
777
/* Decrypt a complete (unsegmented or re-assembled) Access PDU and pass
 * it to the access layer. @seg is NULL for unsegmented messages.
 *
 * Returns 0 even when no key matches — the message may simply not be for
 * this node.
 */
static int sdu_recv(struct bt_mesh_net_rx *rx, uint8_t hdr, uint8_t aszmic,
		    struct net_buf_simple *buf, struct net_buf_simple *sdu,
		    struct seg_rx *seg)
{
	struct decrypt_ctx ctx = {
		.crypto = {
			.dev_key = !AKF(&hdr),
			.aszmic = aszmic,
			.src = rx->ctx.addr,
			.dst = rx->ctx.recv_dst,
			/* Segmented messages authenticate with the sequence
			 * number of the first segment (from SeqAuth).
			 */
			.seq_num = seg ? (seg->seq_auth & 0xffffff) : rx->seq,
			.iv_index = BT_MESH_NET_IVI_RX(rx),
		},
		.buf = buf,
		.sdu = sdu,
		.seg = seg,
	};

	LOG_DBG("AKF %u AID 0x%02x", !ctx.crypto.dev_key, AID(&hdr));

	if (!rx->local_match) {
		return 0;
	}

	/* Try every key with a matching AID until one decrypts the SDU. */
	rx->ctx.app_idx = bt_mesh_app_key_find(ctx.crypto.dev_key, AID(&hdr),
					       rx, sdu_try_decrypt, &ctx);
	if (rx->ctx.app_idx == BT_MESH_KEY_UNUSED) {
		LOG_DBG("No matching AppKey");
		return 0;
	}

	rx->ctx.uuid = ctx.crypto.ad;

	LOG_DBG("Decrypted (AppIdx: 0x%03x)", rx->ctx.app_idx);

	(void)bt_mesh_model_recv(&rx->ctx, sdu);

	return 0;
}
817
seg_tx_lookup(uint16_t seq_zero,uint8_t obo,uint16_t addr)818 static struct seg_tx *seg_tx_lookup(uint16_t seq_zero, uint8_t obo, uint16_t addr)
819 {
820 struct seg_tx *tx;
821 int i;
822
823 for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
824 tx = &seg_tx[i];
825
826 if ((tx->seq_auth & TRANS_SEQ_ZERO_MASK) != seq_zero) {
827 continue;
828 }
829
830 if (tx->dst == addr) {
831 return tx;
832 }
833
834 /* If the expected remote address doesn't match,
835 * but the OBO flag is set and this is the first
836 * acknowledgement, assume it's a Friend that's
837 * responding and therefore accept the message.
838 */
839 if (obo && (tx->nack_count == tx->seg_n + 1 || tx->ack_src == addr)) {
840 tx->ack_src = addr;
841 return tx;
842 }
843 }
844
845 return NULL;
846 }
847
/* Process an incoming Segment Acknowledgment message.
 *
 * Frees the segments confirmed by the BlockAck bitfield and either
 * completes the transfer, reschedules retransmission, or cancels it when
 * the receiver reported an empty BlockAck.
 */
static int trans_ack(struct bt_mesh_net_rx *rx, uint8_t hdr,
		     struct net_buf_simple *buf, uint64_t *seq_auth)
{
	bool new_seg_ack = false;
	struct seg_tx *tx;
	unsigned int bit;
	uint32_t ack;
	uint16_t seq_zero;
	uint8_t obo;

	if (buf->len < 6) {
		LOG_ERR("Too short ack message");
		return -EBADMSG;
	}

	/* First 16 bits: OBO flag (bit 15) | SeqZero (bits 14..2) */
	seq_zero = net_buf_simple_pull_be16(buf);
	obo = seq_zero >> 15;
	seq_zero = (seq_zero >> 2) & TRANS_SEQ_ZERO_MASK;

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->friend_match) {
		LOG_DBG("Ack for LPN 0x%04x of this Friend", rx->ctx.recv_dst);
		/* Best effort - we don't have enough info for true SeqAuth */
		*seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_RX(rx), seq_zero);
		return 0;
	} else if (!rx->local_match) {
		return 0;
	}

	ack = net_buf_simple_pull_be32(buf);

	LOG_DBG("OBO %u seq_zero 0x%04x ack 0x%08x", obo, seq_zero, ack);

	tx = seg_tx_lookup(seq_zero, obo, rx->ctx.addr);
	if (!tx) {
		LOG_DBG("No matching TX context for ack");
		return -ENOENT;
	}

	if (!BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
		LOG_ERR("Received ack for group seg");
		return -EINVAL;
	}

	*seq_auth = tx->seq_auth;

	/* An all-zero BlockAck means the receiver canceled the transfer. */
	if (!ack) {
		LOG_WRN("SDU canceled");
		seg_tx_complete(tx, -ECANCELED);
		return 0;
	}

	if (find_msb_set(ack) - 1 > tx->seg_n) {
		LOG_ERR("Too large segment number in ack");
		return -EINVAL;
	}

	/* Free every segment whose bit is set in the BlockAck. */
	while ((bit = find_lsb_set(ack))) {
		if (tx->seg[bit - 1]) {
			LOG_DBG("seg %u/%u acked", bit - 1, tx->seg_n);
			seg_tx_done(tx, bit - 1);
			new_seg_ack = true;
		}

		ack &= ~BIT(bit - 1);
	}

	if (new_seg_ack) {
		tx->attempts_left_without_progress =
			BT_MESH_SAR_TX_RETRANS_NO_PROGRESS;
	}

	if (tx->nack_count) {
		/* If transmission is not in progress it means
		 * that Retransmission Timer is running
		 */
		if (tx->seg_o == 0) {
			k_timeout_t timeout = K_NO_WAIT;

			/* If there are no retransmission attempts left we
			 * immediately trigger the retransmit call that will
			 * end the transmission.
			 */
			if ((BT_MESH_ADDR_IS_UNICAST(tx->dst) &&
			     !tx->attempts_left_without_progress) ||
			    !tx->attempts_left) {
				goto reschedule;
			}

			uint32_t delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);

			/* According to MshPRTv1.1: 3.5.3.3.2, we should reset the retransmit timer
			 * and retransmit immediately when receiving a valid ack message while
			 * Retransmisison timer is running. However, transport should still keep
			 * segment transmission interval time between transmission of each segment.
			 */
			if (delta_ms < BT_MESH_SAR_TX_SEG_INT_MS) {
				timeout = K_MSEC(BT_MESH_SAR_TX_SEG_INT_MS - delta_ms);
			}

reschedule:
			k_work_reschedule(&tx->retransmit, timeout);
		} else {
			tx->ack_received = 1U;
		}
	} else {
		LOG_DBG("SDU TX complete");
		seg_tx_complete(tx, 0);
	}

	return 0;
}
959
/* Dispatch an incoming Transport Control PDU to its opcode handler.
 *
 * Segment Acks and Heartbeats are processed even without a local element
 * match (they may concern a Friendship); all other opcodes require
 * local_match.
 */
static int ctl_recv(struct bt_mesh_net_rx *rx, uint8_t hdr,
		    struct net_buf_simple *buf, uint64_t *seq_auth)
{
	uint8_t ctl_op = TRANS_CTL_OP(&hdr);

	LOG_DBG("OpCode 0x%02x len %u", ctl_op, buf->len);

	switch (ctl_op) {
	case TRANS_CTL_OP_ACK:
		return trans_ack(rx, hdr, buf, seq_auth);
	case TRANS_CTL_OP_HEARTBEAT:
		return bt_mesh_hb_recv(rx, buf);
	}

	/* Only acks for friendship and heartbeats may need processing without local_match */
	if (!rx->local_match) {
		return 0;
	}

	/* Friend-role opcodes (not processed while acting as an LPN). */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !bt_mesh_lpn_established()) {
		switch (ctl_op) {
		case TRANS_CTL_OP_FRIEND_POLL:
			return bt_mesh_friend_poll(rx, buf);
		case TRANS_CTL_OP_FRIEND_REQ:
			return bt_mesh_friend_req(rx, buf);
		case TRANS_CTL_OP_FRIEND_CLEAR:
			return bt_mesh_friend_clear(rx, buf);
		case TRANS_CTL_OP_FRIEND_CLEAR_CFM:
			return bt_mesh_friend_clear_cfm(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_ADD:
			return bt_mesh_friend_sub_add(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_REM:
			return bt_mesh_friend_sub_rem(rx, buf);
		}
	}

#if defined(CONFIG_BT_MESH_LOW_POWER)
	if (ctl_op == TRANS_CTL_OP_FRIEND_OFFER) {
		return bt_mesh_lpn_friend_offer(rx, buf);
	}

	/* LPN-role opcodes, only accepted from our own Friend. */
	if (rx->ctx.addr == bt_mesh.lpn.frnd) {
		if (ctl_op == TRANS_CTL_OP_FRIEND_CLEAR_CFM) {
			return bt_mesh_lpn_friend_clear_cfm(rx, buf);
		}

		if (!rx->friend_cred) {
			LOG_WRN("Message from friend with wrong credentials");
			return -EINVAL;
		}

		switch (ctl_op) {
		case TRANS_CTL_OP_FRIEND_UPDATE:
			return bt_mesh_lpn_friend_update(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_CFM:
			return bt_mesh_lpn_friend_sub_cfm(rx, buf);
		}
	}
#endif /* CONFIG_BT_MESH_LOW_POWER */

	LOG_WRN("Unhandled TransOpCode 0x%02x", ctl_op);

	return -EBADMSG;
}
1024
trans_unseg(struct net_buf_simple * buf,struct bt_mesh_net_rx * rx,uint64_t * seq_auth)1025 static int trans_unseg(struct net_buf_simple *buf, struct bt_mesh_net_rx *rx,
1026 uint64_t *seq_auth)
1027 {
1028 NET_BUF_SIMPLE_DEFINE_STATIC(sdu, BT_MESH_SDU_UNSEG_MAX);
1029 uint8_t hdr;
1030
1031 LOG_DBG("AFK %u AID 0x%02x", AKF(buf->data), AID(buf->data));
1032
1033 if (buf->len < 1) {
1034 LOG_ERR("Too small unsegmented PDU");
1035 return -EBADMSG;
1036 }
1037
1038 if (bt_mesh_rpl_check(rx, NULL)) {
1039 LOG_WRN("Replay: src 0x%04x dst 0x%04x seq 0x%06x", rx->ctx.addr, rx->ctx.recv_dst,
1040 rx->seq);
1041 return -EINVAL;
1042 }
1043
1044 hdr = net_buf_simple_pull_u8(buf);
1045
1046 if (rx->ctl) {
1047 return ctl_recv(rx, hdr, buf, seq_auth);
1048 }
1049
1050 if (buf->len < 1 + APP_MIC_LEN(0)) {
1051 LOG_ERR("Too short SDU + MIC");
1052 return -EINVAL;
1053 }
1054
1055 /* Adjust the length to not contain the MIC at the end */
1056 buf->len -= APP_MIC_LEN(0);
1057
1058 return sdu_recv(rx, hdr, 0, buf, &sdu, NULL);
1059 }
1060
/* Send a Transport Control PDU with opcode @ctl_op.
 *
 * @data is wrapped in place (not copied) and must remain valid for the
 * duration of the call. Segmented vs unsegmented transport is chosen
 * from @data_len and ctx->send_rel.
 */
int bt_mesh_ctl_send(struct bt_mesh_net_tx *tx, uint8_t ctl_op, void *data,
		     size_t data_len,
		     const struct bt_mesh_send_cb *cb, void *cb_data)
{
	struct net_buf_simple buf;

	if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) {
		tx->ctx->send_ttl = bt_mesh_default_ttl_get();
	} else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) {
		LOG_ERR("TTL too large (max 127)");
		return -EINVAL;
	}

	net_buf_simple_init_with_data(&buf, data, data_len);

	/* Payloads too big for one Unsegmented PDU must go segmented. */
	if (data_len > BT_MESH_SDU_UNSEG_MAX) {
		tx->ctx->send_rel = true;
	}

	/* Control messages carry no application key. */
	tx->ctx->app_idx = BT_MESH_KEY_UNUSED;

	if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED ||
	    BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) {
		LOG_ERR("Invalid destination address");
		return -EINVAL;
	}

	LOG_DBG("src 0x%04x dst 0x%04x ttl 0x%02x ctl 0x%02x", tx->src, tx->ctx->addr,
		tx->ctx->send_ttl, ctl_op);
	LOG_DBG("len %zu: %s", data_len, bt_hex(data, data_len));

	if (tx->ctx->send_rel) {
		return send_seg(tx, &buf, cb, cb_data, &ctl_op);
	} else {
		return send_unseg(tx, &buf, cb, cb_data, &ctl_op);
	}
}
1098
/* Send a Segment Acknowledgment for the transaction identified by
 * @seq_auth, with @block as the bitfield of received segments. @obo is
 * set when acknowledging on behalf of an LPN (Friend role), in which
 * case the ack is sourced from the primary element address.
 */
static int send_ack(struct bt_mesh_subnet *sub, uint16_t src, uint16_t dst,
		    uint8_t ttl, uint64_t *seq_auth, uint32_t block, uint8_t obo)
{
	struct bt_mesh_msg_ctx ctx = {
		.net_idx = sub->net_idx,
		.app_idx = BT_MESH_KEY_UNUSED,
		.addr = dst,
		.send_ttl = ttl,
	};
	struct bt_mesh_net_tx tx = {
		.sub = sub,
		.ctx = &ctx,
		.src = obo ? bt_mesh_primary_addr() : src,
		.xmit = bt_mesh_net_transmit_get(),
	};
	uint16_t seq_zero = *seq_auth & TRANS_SEQ_ZERO_MASK;
	uint8_t buf[6];

	LOG_DBG("SeqZero 0x%04x Block 0x%08x OBO %u", seq_zero, block, obo);

	/* As an LPN we only transmit during Friend polls. */
	if (bt_mesh_lpn_established() && !bt_mesh_has_addr(ctx.addr)) {
		LOG_WRN("Not sending ack when LPN is enabled");
		return 0;
	}

	/* This can happen if the segmented message was destined for a group
	 * or virtual address.
	 */
	if (!BT_MESH_ADDR_IS_UNICAST(src)) {
		LOG_DBG("Not sending ack for non-unicast address");
		return 0;
	}

	/* OBO flag | SeqZero, followed by the 32-bit BlockAck field. */
	sys_put_be16(((seq_zero << 2) & 0x7ffc) | (obo << 15), buf);
	sys_put_be32(block, &buf[2]);

	return bt_mesh_ctl_send(&tx, TRANS_CTL_OP_ACK, buf, sizeof(buf),
				NULL, NULL);
}
1138
/* Release all resources held by an RX reassembly context.
 *
 * With full_reset false, the SeqAuth/addressing fields are retained so
 * that late duplicate segments can still be matched and acknowledged.
 */
static void seg_rx_reset(struct seg_rx *rx, bool full_reset)
{
	int idx;

	LOG_DBG("rx %p", rx);

	/* If this fails, the handler will exit early on the next execution, as
	 * it checks rx->in_use.
	 */
	(void)k_work_cancel_delayable(&rx->ack);
	(void)k_work_cancel_delayable(&rx->discard);

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->obo &&
	    rx->block != BLOCK_COMPLETE(rx->seg_n)) {
		LOG_WRN("Clearing incomplete buffers from Friend queue");
		bt_mesh_friend_clear_incomplete(rx->sub, rx->src, rx->dst,
						&rx->seq_auth);
	}

	/* Return every allocated segment buffer to the slab. */
	for (idx = 0; idx <= rx->seg_n; idx++) {
		if (rx->seg[idx]) {
			k_mem_slab_free(&segs, rx->seg[idx]);
			rx->seg[idx] = NULL;
		}
	}

	rx->in_use = 0U;

	/* We don't always reset these values since we need to be able to
	 * send an ack if we receive a segment after we've already received
	 * the full SDU.
	 */
	if (!full_reset) {
		return;
	}

	rx->seq_auth = 0U;
	rx->sub = NULL;
	rx->src = BT_MESH_ADDR_UNASSIGNED;
	rx->dst = BT_MESH_ADDR_UNASSIGNED;
}
1180
seg_discard(struct k_work * work)1181 static void seg_discard(struct k_work *work)
1182 {
1183 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1184 struct seg_rx *rx = CONTAINER_OF(dwork, struct seg_rx, discard);
1185
1186 LOG_WRN("SAR Discard timeout expired");
1187 seg_rx_reset(rx, false);
1188 rx->block = 0U;
1189
1190 if (IS_ENABLED(CONFIG_BT_TESTING)) {
1191 bt_test_mesh_trans_incomp_timer_exp();
1192 }
1193 }
1194
seg_ack(struct k_work * work)1195 static void seg_ack(struct k_work *work)
1196 {
1197 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1198 struct seg_rx *rx = CONTAINER_OF(dwork, struct seg_rx, ack);
1199
1200 if (!rx->in_use || rx->block == BLOCK_COMPLETE(rx->seg_n)) {
1201 /* Cancellation of this timer may have failed. If it fails as
1202 * part of seg_reset, in_use will be false.
1203 * If it fails as part of the processing of a fully received
1204 * SDU, the ack is already being sent from the receive handler,
1205 * and the timer based ack sending can be ignored.
1206 */
1207 return;
1208 }
1209
1210 LOG_DBG("rx %p", rx);
1211
1212 send_ack(rx->sub, rx->dst, rx->src, rx->ttl, &rx->seq_auth,
1213 rx->block, rx->obo);
1214
1215 rx->last_ack = k_uptime_get_32();
1216
1217 if (rx->attempts_left == 0) {
1218 LOG_DBG("Ran out of retransmit attempts");
1219 return;
1220 }
1221
1222 if (rx->seg_n > BT_MESH_SAR_RX_SEG_THRESHOLD) {
1223 --rx->attempts_left;
1224 k_work_schedule(&rx->ack, K_MSEC(BT_MESH_SAR_RX_SEG_INT_MS));
1225 }
1226 }
1227
/* Check whether an incoming segmented SDU fits our RX capacity.
 * Only the segment count limits us; ctl is kept for API symmetry.
 */
static inline bool sdu_len_is_ok(bool ctl, uint8_t seg_n)
{
	return seg_n < BT_MESH_RX_SEG_MAX;
}
1232
seg_rx_find(struct bt_mesh_net_rx * net_rx,const uint64_t * seq_auth)1233 static struct seg_rx *seg_rx_find(struct bt_mesh_net_rx *net_rx,
1234 const uint64_t *seq_auth)
1235 {
1236 int i;
1237
1238 for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1239 struct seg_rx *rx = &seg_rx[i];
1240
1241 if (rx->src != net_rx->ctx.addr ||
1242 rx->dst != net_rx->ctx.recv_dst) {
1243 continue;
1244 }
1245
1246 /* Return newer RX context in addition to an exact match, so
1247 * the calling function can properly discard an old SeqAuth.
1248 */
1249 if (rx->seq_auth >= *seq_auth) {
1250 return rx;
1251 }
1252
1253 if (rx->in_use) {
1254 LOG_WRN("Duplicate SDU from src 0x%04x", net_rx->ctx.addr);
1255
1256 /* Clear out the old context since the sender
1257 * has apparently started sending a new SDU.
1258 */
1259 seg_rx_reset(rx, true);
1260
1261 /* Return non-match so caller can re-allocate */
1262 return NULL;
1263 }
1264 }
1265
1266 return NULL;
1267 }
1268
seg_rx_is_valid(struct seg_rx * rx,struct bt_mesh_net_rx * net_rx,const uint8_t * hdr,uint8_t seg_n)1269 static bool seg_rx_is_valid(struct seg_rx *rx, struct bt_mesh_net_rx *net_rx,
1270 const uint8_t *hdr, uint8_t seg_n)
1271 {
1272 if (rx->hdr != *hdr || rx->seg_n != seg_n) {
1273 LOG_ERR("Invalid segment for ongoing session");
1274 return false;
1275 }
1276
1277 if (rx->src != net_rx->ctx.addr || rx->dst != net_rx->ctx.recv_dst) {
1278 LOG_ERR("Invalid source or destination for segment");
1279 return false;
1280 }
1281
1282 if (rx->ctl != net_rx->ctl) {
1283 LOG_ERR("Inconsistent CTL in segment");
1284 return false;
1285 }
1286
1287 return true;
1288 }
1289
/* Allocate and initialize a free RX reassembly context for a new
 * segmented SDU. Returns NULL when no context (or no segment buffer)
 * is available.
 */
static struct seg_rx *seg_rx_alloc(struct bt_mesh_net_rx *net_rx,
				   const uint8_t *hdr, const uint64_t *seq_auth,
				   uint8_t seg_n)
{
	struct seg_rx *rx = NULL;
	int i;

	/* No race condition on this check, as this function only executes in
	 * the collaborative Bluetooth rx thread:
	 */
	if (k_mem_slab_num_free_get(&segs) < 1) {
		LOG_WRN("Not enough segments for incoming message");
		return NULL;
	}

	/* Find the first context not currently in use. */
	for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
		if (!seg_rx[i].in_use) {
			rx = &seg_rx[i];
			break;
		}
	}

	if (!rx) {
		return NULL;
	}

	rx->in_use = 1U;
	rx->sub = net_rx->sub;
	rx->ctl = net_rx->ctl;
	rx->seq_auth = *seq_auth;
	rx->seg_n = seg_n;
	rx->hdr = *hdr;
	rx->ttl = net_rx->ctx.send_ttl;
	rx->src = net_rx->ctx.addr;
	rx->dst = net_rx->ctx.recv_dst;
	rx->block = 0U;

	LOG_DBG("New RX context. Block Complete 0x%08x", BLOCK_COMPLETE(seg_n));

	return rx;
}
1329
/* Handle a received segment of a segmented Transport PDU.
 *
 * Parses the segmentation header, finds or allocates an RX reassembly
 * context, stores the segment, manages the ack/discard timers, and - once
 * all segments have arrived - reassembles and dispatches the complete SDU
 * (control message or access SDU).
 *
 * @param buf       Transport PDU with the network header already removed.
 * @param net_rx    Network-layer RX metadata for this PDU.
 * @param pdu_type  Out: PARTIAL or COMPLETE, used for Friend queueing.
 * @param seq_auth  Out: SeqAuth derived from the first segment's SEQ.
 * @param seg_count Out: total segment count of the SDU (SegN + 1).
 *
 * @return 0 on success, or a negative error code on invalid, stale or
 *         unprocessable segments.
 */
static int trans_seg(struct net_buf_simple *buf, struct bt_mesh_net_rx *net_rx,
		     enum bt_mesh_friend_pdu_type *pdu_type, uint64_t *seq_auth,
		     uint8_t *seg_count)
{
	struct bt_mesh_rpl *rpl = NULL;
	struct seg_rx *rx;
	uint8_t *hdr = buf->data;
	uint16_t seq_zero;
	uint32_t auth_seqnum;
	uint8_t seg_n;
	uint8_t seg_o;
	int err;

	/* 1 header octet + 3 segmentation octets + at least 1 payload octet */
	if (buf->len < 5) {
		LOG_ERR("Too short segmented message (len %u)", buf->len);
		return -EBADMSG;
	}

	if (bt_mesh_rpl_check(net_rx, &rpl)) {
		LOG_WRN("Replay: src 0x%04x dst 0x%04x seq 0x%06x", net_rx->ctx.addr,
			net_rx->ctx.recv_dst, net_rx->seq);
		return -EINVAL;
	}

	LOG_DBG("ASZMIC %u AKF %u AID 0x%02x", ASZMIC(hdr), AKF(hdr), AID(hdr));

	net_buf_simple_pull(buf, 1);

	/* Unpack SeqZero (13 bits), SegO (5 bits, split across the 16-bit
	 * field and the following octet) and SegN (low 5 bits).
	 */
	seq_zero = net_buf_simple_pull_be16(buf);
	seg_o = (seq_zero & 0x03) << 3;
	seq_zero = (seq_zero >> 2) & TRANS_SEQ_ZERO_MASK;
	seg_n = net_buf_simple_pull_u8(buf);
	seg_o |= seg_n >> 5;
	seg_n &= 0x1f;

	LOG_DBG("SeqZero 0x%04x SegO %u SegN %u", seq_zero, seg_o, seg_n);

	if (seg_o > seg_n) {
		LOG_ERR("SegO greater than SegN (%u > %u)", seg_o, seg_n);
		return -EBADMSG;
	}

	/* According to MshPRTv1.1:
	 * "The SeqAuth is composed of the IV Index and the sequence number
	 *  (SEQ) of the first segment"
	 *
	 * Therefore we need to calculate very first SEQ in order to find
	 * seqAuth. We can calculate as below:
	 *
	 * SEQ(0) = SEQ(n) - (delta between seqZero and SEQ(n) by looking into
	 * 14 least significant bits of SEQ(n))
	 *
	 * Mentioned delta shall be >= 0, if it is not then seq_auth will
	 * be broken and it will be verified by the code below.
	 */
	*seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_RX(net_rx),
			     (net_rx->seq -
			      ((((net_rx->seq & BIT_MASK(14)) - seq_zero)) &
			       BIT_MASK(13))));
	auth_seqnum = *seq_auth & BIT_MASK(24);
	*seg_count = seg_n + 1;

	/* Look for old RX sessions */
	rx = seg_rx_find(net_rx, seq_auth);
	if (rx) {
		/* Discard old SeqAuth packet */
		if (rx->seq_auth > *seq_auth) {
			LOG_WRN("Ignoring old SeqAuth");
			return -EINVAL;
		}

		if (!seg_rx_is_valid(rx, net_rx, hdr, seg_n)) {
			return -EINVAL;
		}

		if (rx->in_use) {
			LOG_DBG("Existing RX context. Block 0x%08x", rx->block);
			goto found_rx;
		}

		if (rx->block == BLOCK_COMPLETE(rx->seg_n)) {
			LOG_DBG("Got segment for already complete SDU");

			/* We should not send more than one Segment Acknowledgment message
			 * for the same SeqAuth in a period of:
			 * [acknowledgment delay increment * segment transmission interval]
			 * milliseconds
			 */
			if (k_uptime_get_32() - rx->last_ack >
			    SEQAUTH_ALREADY_PROCESSED_TIMEOUT) {
				send_ack(net_rx->sub, net_rx->ctx.recv_dst,
					 net_rx->ctx.addr, net_rx->ctx.send_ttl,
					 seq_auth, rx->block, rx->obo);
				rx->last_ack = k_uptime_get_32();
			}

			if (rpl) {
				bt_mesh_rpl_update(rpl, net_rx);
			}

			return -EALREADY;
		}

		/* We ignore instead of sending block ack 0 since the
		 * ack timer is always smaller than the incomplete
		 * timer, i.e. the sender is misbehaving.
		 */
		LOG_WRN("Got segment for canceled SDU");
		return -EINVAL;
	}

	/* Bail out early if we're not ready to receive such a large SDU */
	if (!sdu_len_is_ok(net_rx->ctl, seg_n)) {
		LOG_ERR("Too big incoming SDU length");
		send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
			 net_rx->ctx.send_ttl, seq_auth, 0,
			 net_rx->friend_match);
		return -EMSGSIZE;
	}

	/* Verify early that there will be space in the Friend Queue(s) in
	 * case this message is destined to an LPN of ours.
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
	    net_rx->friend_match && !net_rx->local_match &&
	    !bt_mesh_friend_queue_has_space(net_rx->sub->net_idx,
					    net_rx->ctx.addr,
					    net_rx->ctx.recv_dst, seq_auth,
					    *seg_count)) {
		LOG_ERR("No space in Friend Queue for %u segments", *seg_count);
		send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
			 net_rx->ctx.send_ttl, seq_auth, 0,
			 net_rx->friend_match);
		return -ENOBUFS;
	}

	/* Keep track of the received SeqAuth values received from this address
	 * and discard segmented messages that are not newer, as described in
	 * MshPRTv1.1: 3.5.3.4.
	 *
	 * The logic on the first segmented receive is a bit special, since the
	 * initial value of rpl->seg is 0, which would normally fail the
	 * comparison check with auth_seqnum:
	 * - If this is the first time we receive from this source, rpl->src
	 *   will be 0, and we can skip this check.
	 * - If this is the first time we receive from this source on the new IV
	 *   index, rpl->old_iv will be set, and the check is also skipped.
	 * - If this is the first segmented message on the new IV index, but we
	 *   have received an unsegmented message already, the unsegmented
	 *   message will have reset rpl->seg to 0, and this message's SeqAuth
	 *   cannot be zero.
	 */
	if (rpl && rpl->src && auth_seqnum <= rpl->seg &&
	    (!rpl->old_iv || net_rx->old_iv)) {
		LOG_WRN("Ignoring old SeqAuth 0x%06x", auth_seqnum);
		return -EALREADY;
	}

	/* Look for free slot for a new RX session */
	rx = seg_rx_alloc(net_rx, hdr, seq_auth, seg_n);
	if (!rx) {
		/* Warn but don't cancel since the existing slots will
		 * eventually be freed up and we'll be able to process
		 * this one.
		 */
		LOG_WRN("No free slots for new incoming segmented messages");
		return -ENOMEM;
	}

	rx->obo = net_rx->friend_match;

found_rx:
	if (BIT(seg_o) & rx->block) {
		LOG_DBG("Received already received fragment");
		return -EALREADY;
	}

	/* All segments, except the last one, must either have 8 bytes of
	 * payload (for 64bit Net MIC) or 12 bytes of payload (for 32bit
	 * Net MIC).
	 */
	if (seg_o == seg_n) {
		/* Set the expected final buffer length */
		rx->len = seg_n * seg_len(rx->ctl) + buf->len;
		LOG_DBG("Target len %u * %u + %u = %u", seg_n, seg_len(rx->ctl), buf->len, rx->len);

		if (rx->len > BT_MESH_RX_SDU_MAX) {
			LOG_ERR("Too large SDU len");
			send_ack(net_rx->sub, net_rx->ctx.recv_dst,
				 net_rx->ctx.addr, net_rx->ctx.send_ttl,
				 seq_auth, 0, rx->obo);
			seg_rx_reset(rx, true);
			return -EMSGSIZE;
		}
	} else {
		if (buf->len != seg_len(rx->ctl)) {
			LOG_ERR("Incorrect segment size for message type");
			return -EINVAL;
		}
	}

	/* Restart the discard timer on every received segment. */
	LOG_DBG("discard timeout %u", BT_MESH_SAR_RX_DISCARD_TIMEOUT_MS);
	k_work_schedule(&rx->discard,
			K_MSEC(BT_MESH_SAR_RX_DISCARD_TIMEOUT_MS));
	rx->attempts_left = BT_MESH_SAR_RX_ACK_RETRANS_COUNT;

	/* Acks are only sent for unicast destinations, and not while acting
	 * as an LPN.
	 */
	if (!bt_mesh_lpn_established() && BT_MESH_ADDR_IS_UNICAST(rx->dst)) {
		LOG_DBG("ack delay %u", ACK_DELAY(rx->seg_n));
		k_work_reschedule(&rx->ack, K_MSEC(ACK_DELAY(rx->seg_n)));
	}

	/* Allocated segment here */
	err = k_mem_slab_alloc(&segs, &rx->seg[seg_o], K_NO_WAIT);
	if (err) {
		LOG_WRN("Unable allocate buffer for Seg %u", seg_o);
		return -ENOBUFS;
	}

	memcpy(rx->seg[seg_o], buf->data, buf->len);

	LOG_DBG("Received %u/%u", seg_o, seg_n);

	/* Mark segment as received */
	rx->block |= BIT(seg_o);

	if (rx->block != BLOCK_COMPLETE(seg_n)) {
		*pdu_type = BT_MESH_FRIEND_PDU_PARTIAL;
		return 0;
	}

	LOG_DBG("Complete SDU");

	if (rpl) {
		bt_mesh_rpl_update(rpl, net_rx);
		/* Update the seg, unless it has already been surpassed:
		 * This needs to happen after rpl_update to ensure that the IV
		 * update reset logic inside rpl_update doesn't overwrite the
		 * change.
		 */
		rpl->seg = MAX(rpl->seg, auth_seqnum);
	}

	*pdu_type = BT_MESH_FRIEND_PDU_COMPLETE;

	/* If this fails, the work handler will either exit early because the
	 * block is fully received, or rx->in_use is false.
	 */
	(void)k_work_cancel_delayable(&rx->ack);

	send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
		 net_rx->ctx.send_ttl, seq_auth, rx->block, rx->obo);
	rx->last_ack = k_uptime_get_32();

	if (net_rx->ctl) {
		NET_BUF_SIMPLE_DEFINE(sdu, BT_MESH_RX_CTL_MAX);
		seg_rx_assemble(rx, &sdu, 0U);
		err = ctl_recv(net_rx, *hdr, &sdu, seq_auth);
	} else if (rx->len < 1 + APP_MIC_LEN(ASZMIC(hdr))) {
		LOG_ERR("Too short SDU + MIC");
		err = -EINVAL;
	} else {
		NET_BUF_SIMPLE_DEFINE_STATIC(seg_buf, BT_MESH_RX_SDU_MAX);
		struct net_buf_simple sdu;

		/* Decrypting in place to avoid creating two assembly buffers.
		 * We'll reassemble the buffer from the segments before each
		 * decryption attempt.
		 */
		net_buf_simple_init(&seg_buf, 0);
		net_buf_simple_init_with_data(
			&sdu, seg_buf.data, rx->len - APP_MIC_LEN(ASZMIC(hdr)));

		err = sdu_recv(net_rx, *hdr, ASZMIC(hdr), &seg_buf, &sdu, rx);
	}

	/* Partial reset: keep SeqAuth so late segments can still be acked. */
	seg_rx_reset(rx, false);

	return err;
}
1609
/* Transport-layer entry point for a received, decrypted Network PDU.
 *
 * Strips the network header, dispatches the payload to the segmented or
 * unsegmented receive path, and - when acting as a Friend - enqueues the
 * PDU for any matching LPNs.
 *
 * @param buf Network PDU (still including the network header, which is
 *            pulled here). Its state is saved/restored around transport
 *            processing so it can be placed in the Friend Queue intact.
 * @param rx  Network-layer RX metadata; friend_match is filled in here.
 *
 * @return 0 on success, or a negative error code from the receive path.
 */
int bt_mesh_trans_recv(struct net_buf_simple *buf, struct bt_mesh_net_rx *rx)
{
	uint64_t seq_auth = TRANS_SEQ_AUTH_NVAL;
	enum bt_mesh_friend_pdu_type pdu_type = BT_MESH_FRIEND_PDU_SINGLE;
	struct net_buf_simple_state state;
	uint8_t seg_count = 0;
	int err;

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
		rx->friend_match = bt_mesh_friend_match(rx->sub->net_idx,
							rx->ctx.recv_dst);
	} else {
		rx->friend_match = false;
	}

	LOG_DBG("src 0x%04x dst 0x%04x seq 0x%08x friend_match %u", rx->ctx.addr, rx->ctx.recv_dst,
		rx->seq, rx->friend_match);

	/* Remove network headers */
	net_buf_simple_pull(buf, BT_MESH_NET_HDR_LEN);

	LOG_DBG("Payload %s", bt_hex(buf->data, buf->len));

	if (IS_ENABLED(CONFIG_BT_TESTING)) {
		bt_test_mesh_net_recv(rx->ctx.recv_ttl, rx->ctl, rx->ctx.addr,
				      rx->ctx.recv_dst, buf->data, buf->len);
	}

	/* If LPN mode is enabled messages are only accepted when we've
	 * requested the Friend to send them. The messages must also
	 * be encrypted using the Friend Credentials.
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
	    bt_mesh_lpn_established() && rx->net_if == BT_MESH_NET_IF_ADV &&
	    (!bt_mesh_lpn_waiting_update() || !rx->friend_cred)) {
		LOG_WRN("Ignoring unexpected message in Low Power mode");
		return -EAGAIN;
	}

	/* Save the app-level state so the buffer can later be placed in
	 * the Friend Queue.
	 */
	net_buf_simple_save(buf, &state);

	if (SEG(buf->data)) {
		/* Segmented messages must match a local element or an
		 * LPN of this Friend.
		 */
		if (!rx->local_match && !rx->friend_match) {
			return 0;
		}

		err = trans_seg(buf, rx, &pdu_type, &seq_auth, &seg_count);
	} else {
		seg_count = 1;
		err = trans_unseg(buf, rx, &seq_auth);
	}

	/* Notify LPN state machine so a Friend Poll will be sent. */
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER)) {
		bt_mesh_lpn_msg_received(rx);
	}

	net_buf_simple_restore(buf, &state);

	/* Enqueue for LPNs; seq_auth is only passed on for segmented PDUs
	 * (unsegmented receive leaves it at TRANS_SEQ_AUTH_NVAL).
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->friend_match && !err) {
		if (seq_auth == TRANS_SEQ_AUTH_NVAL) {
			bt_mesh_friend_enqueue_rx(rx, pdu_type, NULL,
						  seg_count, buf);
		} else {
			bt_mesh_friend_enqueue_rx(rx, pdu_type, &seq_auth,
						  seg_count, buf);
		}
	}

	return err;
}
1687
bt_mesh_rx_reset(void)1688 void bt_mesh_rx_reset(void)
1689 {
1690 int i;
1691
1692 LOG_DBG("");
1693
1694 for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1695 seg_rx_reset(&seg_rx[i], true);
1696 }
1697 }
1698
bt_mesh_trans_reset(void)1699 void bt_mesh_trans_reset(void)
1700 {
1701 int i;
1702
1703 bt_mesh_rx_reset();
1704
1705 LOG_DBG("");
1706
1707 for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
1708 seg_tx_reset(&seg_tx[i]);
1709 }
1710
1711 bt_mesh_rpl_clear();
1712 bt_mesh_va_clear();
1713 }
1714
bt_mesh_trans_init(void)1715 void bt_mesh_trans_init(void)
1716 {
1717 int i;
1718
1719 for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
1720 k_work_init_delayable(&seg_tx[i].retransmit, seg_retransmit);
1721 }
1722
1723 for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1724 k_work_init_delayable(&seg_rx[i].ack, seg_ack);
1725 k_work_init_delayable(&seg_rx[i].discard, seg_discard);
1726 }
1727 }
1728