1 /*
2  * Copyright (c) 2017 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 #include <errno.h>
9 #include <string.h>
10 #include <stdlib.h>
11 #include <sys/types.h>
12 #include <zephyr/sys/util.h>
13 #include <zephyr/sys/byteorder.h>
14 
15 #include <zephyr/net/buf.h>
16 
17 #include <zephyr/bluetooth/hci.h>
18 #include <zephyr/bluetooth/mesh.h>
19 
20 #include "common/bt_str.h"
21 
22 #include "host/testing.h"
23 
24 #include "crypto.h"
25 #include "mesh.h"
26 #include "net.h"
27 #include "app_keys.h"
28 #include "lpn.h"
29 #include "rpl.h"
30 #include "friend.h"
31 #include "access.h"
32 #include "foundation.h"
33 #include "sar_cfg_internal.h"
34 #include "settings.h"
35 #include "heartbeat.h"
36 #include "transport.h"
37 #include "va.h"
38 
39 #define LOG_LEVEL CONFIG_BT_MESH_TRANS_LOG_LEVEL
40 #include <zephyr/logging/log.h>
41 LOG_MODULE_REGISTER(bt_mesh_transport);
42 
43 #define AID_MASK                    ((uint8_t)(BIT_MASK(6)))
44 
45 #define SEG(data)                   ((data)[0] >> 7)
46 #define AKF(data)                   (((data)[0] >> 6) & 0x01)
47 #define AID(data)                   ((data)[0] & AID_MASK)
48 #define ASZMIC(data)                (((data)[1] >> 7) & 1)
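/* For example, a first octet of 0xC5 decodes as SEG = 1 (segmented message),
 * AKF = 1 (an application key is used) and AID = 0x05. ASZMIC is taken from
 * the top bit of the following octet of a segmented access PDU.
 */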
49 
50 #define APP_MIC_LEN(aszmic) ((aszmic) ? BT_MESH_MIC_LONG : BT_MESH_MIC_SHORT)
51 
52 #define UNSEG_HDR(akf, aid)         ((akf << 6) | (aid & AID_MASK))
53 #define SEG_HDR(akf, aid)           (UNSEG_HDR(akf, aid) | 0x80)
54 
55 #define BLOCK_COMPLETE(seg_n)       (uint32_t)(((uint64_t)1 << (seg_n + 1)) - 1)
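/* For example, a transfer with seg_n = 2 (three segments) is complete when
 * the block reaches BLOCK_COMPLETE(2) = 0x00000007.
 */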
56 
57 #define SEQ_AUTH(iv_index, seq)     (((uint64_t)iv_index) << 24 | (uint64_t)seq)
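/* For example, IV Index 0x00000002 and SEQ 0x000001 compose the 56-bit
 * SeqAuth value 0x02000001.
 */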
58 
59 /* How long to wait for available buffers before giving up */
60 #define BUF_TIMEOUT                 K_NO_WAIT
61 
62 #define ACK_DELAY(seg_n)                                                       \
63 	(MIN(2 * seg_n + 1, BT_MESH_SAR_RX_ACK_DELAY_INC_X2) *                 \
64 	 BT_MESH_SAR_RX_SEG_INT_MS / 2)
65 
66 #define SEQAUTH_ALREADY_PROCESSED_TIMEOUT                                      \
67 	(BT_MESH_SAR_RX_ACK_DELAY_INC_X2 * BT_MESH_SAR_RX_SEG_INT_MS / 2)
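/* ACK_DELAY() and the timeout above are derived from the SAR Receiver state:
 * the acknowledgment delay increment (the _X2 suffix indicates the value is
 * kept multiplied by two, hence the trailing division by two) and the segment
 * reception interval.
 */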
68 
69 static struct seg_tx {
70 	struct bt_mesh_subnet *sub;
71 	void                  *seg[BT_MESH_TX_SEG_MAX];
72 	uint64_t              seq_auth;
73 	int64_t               adv_start_timestamp; /* Calculate adv duration and adjust intervals*/
74 	uint16_t              src;
75 	uint16_t              dst;
76 	uint16_t              ack_src;
77 	uint16_t              len;
78 	uint8_t               hdr;
79 	uint8_t               xmit;
80 	uint8_t               seg_n;         /* Last segment index */
81 	uint8_t               seg_o;         /* Segment being sent */
82 	uint8_t               nack_count;    /* Number of unacked segs */
83 	uint8_t               attempts_left;
84 	uint8_t               attempts_left_without_progress;
85 	uint8_t               ttl;           /* Transmitted TTL value */
86 	uint8_t               blocked:1,     /* Blocked by ongoing tx */
87 			      ctl:1,         /* Control packet */
88 			      aszmic:1,      /* MIC size */
89 			      started:1,     /* Start cb called */
90 			      friend_cred:1, /* Using Friend credentials */
91 			      seg_send_started:1, /* Used to check if seg_send_start cb is called */
92 			      ack_received:1; /* Ack received during seg message transmission. */
93 	const struct bt_mesh_send_cb *cb;
94 	void                  *cb_data;
95 	struct k_work_delayable retransmit;    /* Retransmit timer */
96 } seg_tx[CONFIG_BT_MESH_TX_SEG_MSG_COUNT];
97 
98 static struct seg_rx {
99 	struct bt_mesh_subnet   *sub;
100 	void                    *seg[BT_MESH_RX_SEG_MAX];
101 	uint64_t                    seq_auth;
102 	uint16_t                    src;
103 	uint16_t                    dst;
104 	uint16_t                    len;
105 	uint8_t                     hdr;
106 	uint8_t                     seg_n:5,
107 				 ctl:1,
108 				 in_use:1,
109 				 obo:1;
110 	uint8_t                     ttl;
111 	uint8_t                     attempts_left;
112 	uint32_t                    block;
113 	uint32_t                    last_ack;
114 	struct k_work_delayable    ack;
115 	struct k_work_delayable    discard;
116 } seg_rx[CONFIG_BT_MESH_RX_SEG_MSG_COUNT];
117 
118 K_MEM_SLAB_DEFINE(segs, BT_MESH_APP_SEG_SDU_MAX, CONFIG_BT_MESH_SEG_BUFS, 4);
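/* Shared pool of segment buffers: both the TX contexts and the RX contexts
 * above store their segment payloads in blocks carved from this slab, one
 * BT_MESH_APP_SEG_SDU_MAX-sized block per segment.
 */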
119 
120 static int send_unseg(struct bt_mesh_net_tx *tx, struct net_buf_simple *sdu,
121 		      const struct bt_mesh_send_cb *cb, void *cb_data,
122 		      const uint8_t *ctl_op)
123 {
124 	struct bt_mesh_adv *adv;
125 
126 	adv = bt_mesh_adv_create(BT_MESH_ADV_DATA, BT_MESH_ADV_TAG_LOCAL,
127 				 tx->xmit, BUF_TIMEOUT);
128 	if (!adv) {
129 		LOG_ERR("Out of network advs");
130 		return -ENOBUFS;
131 	}
132 
133 	net_buf_simple_reserve(&adv->b, BT_MESH_NET_HDR_LEN);
134 
135 	if (ctl_op) {
136 		net_buf_simple_add_u8(&adv->b, TRANS_CTL_HDR(*ctl_op, 0));
137 	} else if (BT_MESH_IS_DEV_KEY(tx->ctx->app_idx)) {
138 		net_buf_simple_add_u8(&adv->b, UNSEG_HDR(0, 0));
139 	} else {
140 		net_buf_simple_add_u8(&adv->b, UNSEG_HDR(1, tx->aid));
141 	}
142 
143 	net_buf_simple_add_mem(&adv->b, sdu->data, sdu->len);
144 
145 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
146 		if (!bt_mesh_friend_queue_has_space(tx->sub->net_idx,
147 						    tx->src, tx->ctx->addr,
148 						    NULL, 1)) {
149 			if (BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) {
150 				LOG_ERR("Not enough space in Friend Queue");
151 				bt_mesh_adv_unref(adv);
152 				return -ENOBUFS;
153 			} else {
154 				LOG_WRN("No space in Friend Queue");
155 				goto send;
156 			}
157 		}
158 
159 		if (bt_mesh_friend_enqueue_tx(tx, BT_MESH_FRIEND_PDU_SINGLE,
160 					      NULL, 1, &adv->b) &&
161 		    BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) {
162 			/* PDUs for a specific Friend should only go
163 			 * out through the Friend Queue.
164 			 */
165 			bt_mesh_adv_unref(adv);
166 			send_cb_finalize(cb, cb_data);
167 			return 0;
168 		}
169 	}
170 
171 send:
172 	return bt_mesh_net_send(tx, adv, cb, cb_data);
173 }
174 
175 static inline uint8_t seg_len(bool ctl)
176 {
177 	if (ctl) {
178 		return BT_MESH_CTL_SEG_SDU_MAX;
179 	} else {
180 		return BT_MESH_APP_SEG_SDU_MAX;
181 	}
182 }
183 
184 bool bt_mesh_tx_in_progress(void)
185 {
186 	int i;
187 
188 	for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
189 		if (seg_tx[i].nack_count) {
190 			return true;
191 		}
192 	}
193 
194 	return false;
195 }
196 
197 static void seg_tx_done(struct seg_tx *tx, uint8_t seg_idx)
198 {
199 	k_mem_slab_free(&segs, (void *)tx->seg[seg_idx]);
200 	tx->seg[seg_idx] = NULL;
201 	tx->nack_count--;
202 }
203 
204 static bool seg_tx_blocks(struct seg_tx *tx, uint16_t src, uint16_t dst)
205 {
206 	return (tx->src == src) && (tx->dst == dst);
207 }
208 
209 static void seg_tx_unblock_check(struct seg_tx *tx)
210 {
211 	struct seg_tx *blocked = NULL;
212 	int i;
213 
214 	/* Unblock the first blocked tx with the same params. */
215 	for (i = 0; i < ARRAY_SIZE(seg_tx); ++i) {
216 		if (&seg_tx[i] != tx &&
217 		    seg_tx[i].blocked &&
218 		    seg_tx_blocks(tx, seg_tx[i].src, seg_tx[i].dst) &&
219 		    (!blocked || seg_tx[i].seq_auth < blocked->seq_auth)) {
220 			blocked = &seg_tx[i];
221 		}
222 	}
223 
224 	if (blocked) {
225 		LOG_DBG("Unblocked 0x%04x", (uint16_t)(blocked->seq_auth & TRANS_SEQ_ZERO_MASK));
226 		blocked->blocked = false;
227 		k_work_reschedule(&blocked->retransmit, K_NO_WAIT);
228 	}
229 }
230 
231 static void seg_tx_reset(struct seg_tx *tx)
232 {
233 	int i;
234 
235 	/* If this call fails, the handler will exit early, as nack_count is 0. */
236 	(void)k_work_cancel_delayable(&tx->retransmit);
237 
238 	tx->cb = NULL;
239 	tx->cb_data = NULL;
240 	tx->seq_auth = 0U;
241 	tx->sub = NULL;
242 	tx->src = BT_MESH_ADDR_UNASSIGNED;
243 	tx->dst = BT_MESH_ADDR_UNASSIGNED;
244 	tx->ack_src = BT_MESH_ADDR_UNASSIGNED;
245 	tx->blocked = false;
246 
247 	for (i = 0; i <= tx->seg_n && tx->nack_count; i++) {
248 		if (!tx->seg[i]) {
249 			continue;
250 		}
251 
252 		seg_tx_done(tx, i);
253 	}
254 
255 	tx->nack_count = 0;
256 	tx->seg_send_started = 0;
257 	tx->ack_received = 0;
258 
259 	if (atomic_test_and_clear_bit(bt_mesh.flags, BT_MESH_IVU_PENDING)) {
260 		LOG_DBG("Proceeding with pending IV Update");
261 		/* bt_mesh_net_iv_update() will re-enable the flag if this
262 		 * wasn't the only transfer.
263 		 */
264 		bt_mesh_net_iv_update(bt_mesh.iv_index, false);
265 	}
266 }
267 
268 static inline void seg_tx_complete(struct seg_tx *tx, int err)
269 {
270 	const struct bt_mesh_send_cb *cb = tx->cb;
271 	void *cb_data = tx->cb_data;
272 
273 	seg_tx_unblock_check(tx);
274 
275 	seg_tx_reset(tx);
276 
277 	if (cb && cb->end) {
278 		cb->end(err, cb_data);
279 	}
280 }
281 
282 static void schedule_transmit_continue(struct seg_tx *tx, uint32_t delta)
283 {
284 	uint32_t timeout = 0;
285 
286 	if (!tx->nack_count) {
287 		return;
288 	}
289 
290 	LOG_DBG("");
291 
292 	if (delta < BT_MESH_SAR_TX_SEG_INT_MS) {
293 		timeout = BT_MESH_SAR_TX_SEG_INT_MS - delta;
294 	}
295 
296 	/* If it is not the last segment then continue transmission after Segment Interval,
297 	 * otherwise continue immediately as the callback will finish this transmission and
298 	 * progress into retransmission.
299 	 */
300 	k_work_reschedule(&tx->retransmit,
301 			  (tx->seg_o <= tx->seg_n) ?
302 					K_MSEC(timeout) :
303 					K_NO_WAIT);
304 }
305 
306 static void seg_send_start(uint16_t duration, int err, void *user_data)
307 {
308 	struct seg_tx *tx = user_data;
309 
310 	if (!tx->started && tx->cb && tx->cb->start) {
311 		tx->cb->start(duration, err, tx->cb_data);
312 		tx->started = 1U;
313 	}
314 
315 	tx->seg_send_started = 1U;
316 	tx->adv_start_timestamp = k_uptime_get();
317 
318 	/* If there's an error in transmitting, the 'sent' callback will never
319 	 * be called. Make sure that we kick the retransmit timer in this case
320 	 * as well, since otherwise we risk the transmission becoming stale.
321 	 */
322 	if (err) {
323 		schedule_transmit_continue(tx, 0);
324 	}
325 }
326 
327 static void seg_sent(int err, void *user_data)
328 {
329 	struct seg_tx *tx = user_data;
330 	uint32_t delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);
331 
332 	if (!tx->seg_send_started) {
333 		return;
334 	}
335 
336 	schedule_transmit_continue(tx, delta_ms);
337 }
338 
339 static const struct bt_mesh_send_cb seg_sent_cb = {
340 	.start = seg_send_start,
341 	.end = seg_sent,
342 };
343 
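/* Build the Lower Transport PDU for segment seg_o: the 1-octet segmentation
 * header, three octets packing SZMIC (1 bit), SeqZero (13 bits), SegO (5 bits)
 * and SegN (5 bits), followed by the segment payload itself.
 */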
344 static void seg_tx_buf_build(struct seg_tx *tx, uint8_t seg_o,
345 			     struct net_buf_simple *buf)
346 {
347 	uint16_t seq_zero = tx->seq_auth & TRANS_SEQ_ZERO_MASK;
348 	uint8_t len = MIN(seg_len(tx->ctl), tx->len - (seg_len(tx->ctl) * seg_o));
349 
350 	net_buf_simple_add_u8(buf, tx->hdr);
351 	net_buf_simple_add_u8(buf, (tx->aszmic << 7) | seq_zero >> 6);
352 	net_buf_simple_add_u8(buf, (((seq_zero & 0x3f) << 2) | (seg_o >> 3)));
353 	net_buf_simple_add_u8(buf, ((seg_o & 0x07) << 5) | tx->seg_n);
354 	net_buf_simple_add_mem(buf, tx->seg[seg_o], len);
355 }
356 
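/* Send the next unacknowledged segment. Only one segment is handed to the
 * network layer per invocation; the retransmit work item is then rescheduled
 * (from the send callbacks or at the end of this function) until every
 * segment has been sent, after which the retransmission counters are updated.
 */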
357 static void seg_tx_send_unacked(struct seg_tx *tx)
358 {
359 	if (!tx->nack_count) {
360 		return;
361 	}
362 
363 	uint32_t delta_ms;
364 	uint32_t timeout;
365 	struct bt_mesh_msg_ctx ctx = {
366 		.net_idx = tx->sub->net_idx,
367 		/* App idx only used by network to detect control messages: */
368 		.app_idx = (tx->ctl ? BT_MESH_KEY_UNUSED : 0),
369 		.addr = tx->dst,
370 		.send_rel = true,
371 		.send_ttl = tx->ttl,
372 	};
373 	struct bt_mesh_net_tx net_tx = {
374 		.sub = tx->sub,
375 		.ctx = &ctx,
376 		.src = tx->src,
377 		.xmit = tx->xmit,
378 		.friend_cred = tx->friend_cred,
379 		.aid = tx->hdr & AID_MASK,
380 	};
381 
382 	if (BT_MESH_ADDR_IS_UNICAST(tx->dst) &&
383 	    !tx->attempts_left_without_progress) {
384 		LOG_ERR("Ran out of retransmit attempts without progress");
385 		seg_tx_complete(tx, -ETIMEDOUT);
386 		return;
387 	}
388 
389 	if (!tx->attempts_left) {
390 		if (BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
391 			LOG_ERR("Ran out of retransmit attempts");
392 			seg_tx_complete(tx, -ETIMEDOUT);
393 		} else {
394 			/* Segmented sending to groups doesn't have acks, so
395 			 * running out of attempts is the expected behavior.
396 			 */
397 			seg_tx_complete(tx, 0);
398 		}
399 
400 		return;
401 	}
402 
403 	LOG_DBG("SeqZero: 0x%04x Attempts: %u",
404 		(uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK), tx->attempts_left);
405 
406 	while (tx->seg_o <= tx->seg_n) {
407 		struct bt_mesh_adv *seg;
408 		int err;
409 
410 		if (!tx->seg[tx->seg_o]) {
411 			/* Move on to the next segment */
412 			tx->seg_o++;
413 			continue;
414 		}
415 
416 		seg = bt_mesh_adv_create(BT_MESH_ADV_DATA, BT_MESH_ADV_TAG_LOCAL,
417 					 tx->xmit, BUF_TIMEOUT);
418 		if (!seg) {
419 			LOG_DBG("Allocating segment failed");
420 			goto end;
421 		}
422 
423 		net_buf_simple_reserve(&seg->b, BT_MESH_NET_HDR_LEN);
424 		seg_tx_buf_build(tx, tx->seg_o, &seg->b);
425 
426 		LOG_DBG("Sending %u/%u", tx->seg_o, tx->seg_n);
427 
428 		err = bt_mesh_net_send(&net_tx, seg, &seg_sent_cb, tx);
429 		if (err) {
430 			LOG_DBG("Sending segment failed");
431 			goto end;
432 		}
433 
434 		/* Move on to the next segment */
435 		tx->seg_o++;
436 
437 		tx->ack_received = 0U;
438 
439 		/* Return here to let the advertising layer process the message.
440 		 * This function will be called again after Segment Interval.
441 		 */
442 		return;
443 	}
444 
445 
446 	/* All segments have been sent */
447 	tx->seg_o = 0U;
448 	tx->attempts_left--;
449 	if (BT_MESH_ADDR_IS_UNICAST(tx->dst) && !tx->ack_received) {
450 		tx->attempts_left_without_progress--;
451 	}
452 
453 end:
454 	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
455 	    bt_mesh_lpn_established() && !bt_mesh_has_addr(ctx.addr)) {
456 		bt_mesh_lpn_poll();
457 	}
458 
459 	delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);
460 	if (tx->ack_received) {
461 		/* A SegAck was received while sending the last segment. Schedule the
462 		 * retransmission immediately, but keep the SAR segment interval time.
463 		 */
464 		timeout = BT_MESH_SAR_TX_SEG_INT_MS;
465 		tx->ack_received = 0U;
466 	} else {
467 		timeout = BT_MESH_SAR_TX_RETRANS_TIMEOUT_MS(tx->dst, tx->ttl);
468 	}
469 
470 	if (delta_ms < timeout) {
471 		timeout -= delta_ms;
472 	}
473 
474 	/* Schedule a retransmission */
475 	k_work_reschedule(&tx->retransmit, K_MSEC(timeout));
476 }
477 
478 static void seg_retransmit(struct k_work *work)
479 {
480 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
481 	struct seg_tx *tx = CONTAINER_OF(dwork, struct seg_tx, retransmit);
482 
483 	seg_tx_send_unacked(tx);
484 }
485 
486 static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu,
487 		    const struct bt_mesh_send_cb *cb, void *cb_data,
488 		    uint8_t *ctl_op)
489 {
490 	bool blocked = false;
491 	struct seg_tx *tx;
492 	uint8_t seg_o;
493 	int i;
494 
495 	LOG_DBG("src 0x%04x dst 0x%04x app_idx 0x%04x aszmic %u sdu_len %u", net_tx->src,
496 		net_tx->ctx->addr, net_tx->ctx->app_idx, net_tx->aszmic, sdu->len);
497 
498 	for (tx = NULL, i = 0; i < ARRAY_SIZE(seg_tx); i++) {
499 		if (seg_tx[i].nack_count) {
500 			blocked |= seg_tx_blocks(&seg_tx[i], net_tx->src,
501 						 net_tx->ctx->addr);
502 		} else if (!tx) {
503 			tx = &seg_tx[i];
504 		}
505 	}
506 
507 	if (!tx) {
508 		LOG_ERR("No multi-segment message contexts available");
509 		return -EBUSY;
510 	}
511 
512 	if (ctl_op) {
513 		tx->hdr = TRANS_CTL_HDR(*ctl_op, 1);
514 	} else if (BT_MESH_IS_DEV_KEY(net_tx->ctx->app_idx)) {
515 		tx->hdr = SEG_HDR(0, 0);
516 	} else {
517 		tx->hdr = SEG_HDR(1, net_tx->aid);
518 	}
519 
520 	tx->src = net_tx->src;
521 	tx->dst = net_tx->ctx->addr;
522 	tx->seg_n = (sdu->len - 1) / seg_len(!!ctl_op);
523 	tx->seg_o = 0;
524 	tx->len = sdu->len;
525 	tx->nack_count = tx->seg_n + 1;
526 	tx->seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_TX, bt_mesh.seq);
527 	tx->sub = net_tx->sub;
528 	tx->cb = cb;
529 	tx->cb_data = cb_data;
530 	tx->attempts_left = BT_MESH_SAR_TX_RETRANS_COUNT(tx->dst);
531 	tx->attempts_left_without_progress = BT_MESH_SAR_TX_RETRANS_NO_PROGRESS;
532 	tx->xmit = net_tx->xmit;
533 	tx->aszmic = net_tx->aszmic;
534 	tx->friend_cred = net_tx->friend_cred;
535 	tx->blocked = blocked;
536 	tx->started = 0;
537 	tx->seg_send_started = 0;
538 	tx->ctl = !!ctl_op;
539 	tx->ttl = net_tx->ctx->send_ttl;
540 
541 	LOG_DBG("SeqZero 0x%04x (segs: %u)", (uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK),
542 		tx->nack_count);
543 
544 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
545 	    !bt_mesh_friend_queue_has_space(tx->sub->net_idx, net_tx->src,
546 					    tx->dst, &tx->seq_auth,
547 					    tx->seg_n + 1) &&
548 	    BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
549 		LOG_ERR("Not enough space in Friend Queue for %u segments", tx->seg_n + 1);
550 		seg_tx_reset(tx);
551 		return -ENOBUFS;
552 	}
553 
554 	for (seg_o = 0U; sdu->len; seg_o++) {
555 		void *buf;
556 		uint16_t len;
557 		int err;
558 
559 		err = k_mem_slab_alloc(&segs, &buf, BUF_TIMEOUT);
560 		if (err) {
561 			LOG_ERR("Out of segment buffers");
562 			seg_tx_reset(tx);
563 			return -ENOBUFS;
564 		}
565 
566 		len = MIN(sdu->len, seg_len(!!ctl_op));
567 		memcpy(buf, net_buf_simple_pull_mem(sdu, len), len);
568 
569 		LOG_DBG("seg %u: %s", seg_o, bt_hex(buf, len));
570 
571 		tx->seg[seg_o] = buf;
572 
573 		if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
574 			enum bt_mesh_friend_pdu_type type;
575 
576 			NET_BUF_SIMPLE_DEFINE(seg, 16);
577 			seg_tx_buf_build(tx, seg_o, &seg);
578 
579 			if (seg_o == tx->seg_n) {
580 				type = BT_MESH_FRIEND_PDU_COMPLETE;
581 			} else {
582 				type = BT_MESH_FRIEND_PDU_PARTIAL;
583 			}
584 
585 			if (bt_mesh_friend_enqueue_tx(
586 				    net_tx, type, ctl_op ? NULL : &tx->seq_auth,
587 				    tx->seg_n + 1, &seg) &&
588 			    BT_MESH_ADDR_IS_UNICAST(net_tx->ctx->addr)) {
589 				/* PDUs for a specific Friend should only go
590 				 * out through the Friend Queue.
591 				 */
592 				k_mem_slab_free(&segs, buf);
593 				tx->seg[seg_o] = NULL;
594 			}
595 
596 		}
597 
598 	}
599 
600 	/* This can happen if segments only went into the Friend Queue */
601 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !tx->seg[0]) {
602 		seg_tx_reset(tx);
603 
604 		/* If there was a callback, notify about the sending immediately,
605 		 * since there's currently no other way to track this with the
606 		 * Friend Queue.
607 		 */
608 		send_cb_finalize(cb, cb_data);
609 		return 0;
610 	}
611 
612 	if (blocked) {
613 		/* Move the sequence number, so we don't end up creating
614 		 * another segmented transmission with the same SeqZero while
615 		 * this one is blocked.
616 		 */
617 		bt_mesh_next_seq();
618 		LOG_DBG("Blocked.");
619 		return 0;
620 	}
621 
622 	seg_tx_send_unacked(tx);
623 
624 	return 0;
625 }
626 
627 static int trans_encrypt(const struct bt_mesh_net_tx *tx, const struct bt_mesh_key *key,
628 			 struct net_buf_simple *msg)
629 {
630 	struct bt_mesh_app_crypto_ctx crypto = {
631 		.dev_key = BT_MESH_IS_DEV_KEY(tx->ctx->app_idx),
632 		.aszmic = tx->aszmic,
633 		.src = tx->src,
634 		.dst = tx->ctx->addr,
635 		.seq_num = bt_mesh.seq,
636 		.iv_index = BT_MESH_NET_IVI_TX,
637 	};
638 
639 	if (BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) {
640 		crypto.ad = tx->ctx->uuid;
641 	}
642 
643 	return bt_mesh_app_encrypt(key, &crypto, msg);
644 }
645 
646 int bt_mesh_trans_send(struct bt_mesh_net_tx *tx, struct net_buf_simple *msg,
647 		       const struct bt_mesh_send_cb *cb, void *cb_data)
648 {
649 	const struct bt_mesh_key *key;
650 	uint8_t aid;
651 	int err;
652 
653 	if (msg->len < 1) {
654 		LOG_ERR("Zero-length SDU not allowed");
655 		return -EINVAL;
656 	}
657 
658 	if (msg->len > BT_MESH_TX_SDU_MAX - BT_MESH_MIC_SHORT) {
659 		LOG_ERR("Message too big: %u", msg->len);
660 		return -EMSGSIZE;
661 	}
662 
663 	if (net_buf_simple_tailroom(msg) < BT_MESH_MIC_SHORT) {
664 		LOG_ERR("Insufficient tailroom for Transport MIC");
665 		return -EINVAL;
666 	}
667 
668 	if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) {
669 		tx->ctx->send_ttl = bt_mesh_default_ttl_get();
670 	} else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) {
671 		LOG_ERR("TTL too large (max 127)");
672 		return -EINVAL;
673 	}
674 
675 	if (msg->len > BT_MESH_SDU_UNSEG_MAX) {
676 		tx->ctx->send_rel = true;
677 	}
678 
679 	if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED ||
680 	    (!BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr) &&
681 	     BT_MESH_IS_DEV_KEY(tx->ctx->app_idx))) {
682 		LOG_ERR("Invalid destination address");
683 		return -EINVAL;
684 	}
685 
686 	err = bt_mesh_keys_resolve(tx->ctx, &tx->sub, &key, &aid);
687 	if (err) {
688 		return err;
689 	}
690 
691 	LOG_DBG("net_idx 0x%04x app_idx 0x%04x dst 0x%04x", tx->sub->net_idx, tx->ctx->app_idx,
692 		tx->ctx->addr);
693 	LOG_DBG("len %u: %s", msg->len, bt_hex(msg->data, msg->len));
694 
695 	tx->xmit = bt_mesh_net_transmit_get();
696 	tx->aid = aid;
697 
698 	if (!tx->ctx->send_rel || net_buf_simple_tailroom(msg) < 8) {
699 		tx->aszmic = 0U;
700 	} else {
701 		tx->aszmic = 1U;
702 	}
703 
704 	err = trans_encrypt(tx, key, msg);
705 	if (err) {
706 		return err;
707 	}
708 
709 	if (tx->ctx->send_rel) {
710 		err = send_seg(tx, msg, cb, cb_data, NULL);
711 	} else {
712 		err = send_unseg(tx, msg, cb, cb_data, NULL);
713 	}
714 
715 	return err;
716 }
717 
718 static void seg_rx_assemble(struct seg_rx *rx, struct net_buf_simple *buf,
719 			    uint8_t aszmic)
720 {
721 	int i;
722 
723 	net_buf_simple_reset(buf);
724 
725 	for (i = 0; i <= rx->seg_n; i++) {
726 		net_buf_simple_add_mem(buf, rx->seg[i],
727 				       MIN(seg_len(rx->ctl),
728 					   rx->len - (i * seg_len(rx->ctl))));
729 	}
730 
731 	/* Adjust the length to not contain the MIC at the end */
732 	if (!rx->ctl) {
733 		buf->len -= APP_MIC_LEN(aszmic);
734 	}
735 }
736 
737 struct decrypt_ctx {
738 	struct bt_mesh_app_crypto_ctx crypto;
739 	struct net_buf_simple *buf;
740 	struct net_buf_simple *sdu;
741 	struct seg_rx *seg;
742 };
743 
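/* Try to decrypt the SDU with one candidate key. For virtual destination
 * addresses, decryption is retried with every Label UUID matching the address
 * hash. Since decryption happens in place, a segmented payload is reassembled
 * before each attempt.
 */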
744 static int sdu_try_decrypt(struct bt_mesh_net_rx *rx, const struct bt_mesh_key *key,
745 			   void *cb_data)
746 {
747 	struct decrypt_ctx *ctx = cb_data;
748 	int err;
749 
750 	ctx->crypto.ad = NULL;
751 
752 	do {
753 		if (ctx->seg) {
754 			seg_rx_assemble(ctx->seg, ctx->buf, ctx->crypto.aszmic);
755 		}
756 
757 		if (BT_MESH_ADDR_IS_VIRTUAL(rx->ctx.recv_dst)) {
758 			ctx->crypto.ad = bt_mesh_va_uuid_get(rx->ctx.recv_dst, ctx->crypto.ad,
759 							     NULL);
760 
761 			if (!ctx->crypto.ad) {
762 				return -ENOENT;
763 			}
764 		}
765 
766 		net_buf_simple_reset(ctx->sdu);
767 
768 		err = bt_mesh_app_decrypt(key, &ctx->crypto, ctx->buf, ctx->sdu);
769 	} while (err && ctx->crypto.ad != NULL);
770 
771 	if (!err && BT_MESH_ADDR_IS_VIRTUAL(rx->ctx.recv_dst)) {
772 		rx->ctx.uuid = ctx->crypto.ad;
773 	}
774 
775 	return err;
776 }
777 
778 static int sdu_recv(struct bt_mesh_net_rx *rx, uint8_t hdr, uint8_t aszmic,
779 		    struct net_buf_simple *buf, struct net_buf_simple *sdu,
780 		    struct seg_rx *seg)
781 {
782 	struct decrypt_ctx ctx = {
783 		.crypto = {
784 			.dev_key = !AKF(&hdr),
785 			.aszmic = aszmic,
786 			.src = rx->ctx.addr,
787 			.dst = rx->ctx.recv_dst,
788 			.seq_num = seg ? (seg->seq_auth & 0xffffff) : rx->seq,
789 			.iv_index = BT_MESH_NET_IVI_RX(rx),
790 		},
791 		.buf = buf,
792 		.sdu = sdu,
793 		.seg = seg,
794 	};
795 
796 	LOG_DBG("AKF %u AID 0x%02x", !ctx.crypto.dev_key, AID(&hdr));
797 
798 	if (!rx->local_match) {
799 		/* If friend_match was set, the frame is for an LPN that we are friends with. */
800 		return rx->friend_match ? 0 : -ENXIO;
801 	}
802 
803 	rx->ctx.app_idx = bt_mesh_app_key_find(ctx.crypto.dev_key, AID(&hdr),
804 					       rx, sdu_try_decrypt, &ctx);
805 	if (rx->ctx.app_idx == BT_MESH_KEY_UNUSED) {
806 		LOG_DBG("No matching AppKey");
807 		return -EACCES;
808 	}
809 
810 	rx->ctx.uuid = ctx.crypto.ad;
811 
812 	LOG_DBG("Decrypted (AppIdx: 0x%03x)", rx->ctx.app_idx);
813 
814 	return bt_mesh_access_recv(&rx->ctx, sdu);
815 }
816 
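/* Find the TX context that a Segment Acknowledgment refers to, matching on
 * SeqZero and the remote address. See the comment below for how the OBO flag
 * relaxes the address match when a Friend acknowledges on behalf of an LPN.
 */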
817 static struct seg_tx *seg_tx_lookup(uint16_t seq_zero, uint8_t obo, uint16_t addr)
818 {
819 	struct seg_tx *tx;
820 	int i;
821 
822 	for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
823 		tx = &seg_tx[i];
824 
825 		if ((tx->seq_auth & TRANS_SEQ_ZERO_MASK) != seq_zero) {
826 			continue;
827 		}
828 
829 		if (tx->dst == addr) {
830 			return tx;
831 		}
832 
833 		/* If the expected remote address doesn't match,
834 		 * but the OBO flag is set and this is the first
835 		 * acknowledgement, assume it's a Friend that's
836 		 * responding and therefore accept the message.
837 		 */
838 		if (obo && (tx->nack_count == tx->seg_n + 1 || tx->ack_src == addr)) {
839 			tx->ack_src = addr;
840 			return tx;
841 		}
842 	}
843 
844 	return NULL;
845 }
846 
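/* Handle a received Segment Acknowledgment. After the opcode octet, the PDU
 * consists of a big-endian 16-bit field packing OBO (1 bit), SeqZero (13 bits)
 * and 2 RFU bits, followed by the 32-bit BlockAck bitfield.
 */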
847 static int trans_ack(struct bt_mesh_net_rx *rx, uint8_t hdr,
848 		     struct net_buf_simple *buf, uint64_t *seq_auth)
849 {
850 	bool new_seg_ack = false;
851 	struct seg_tx *tx;
852 	unsigned int bit;
853 	uint32_t ack;
854 	uint16_t seq_zero;
855 	uint8_t obo;
856 
857 	if (buf->len < 6) {
858 		LOG_ERR("Too short ack message");
859 		return -EBADMSG;
860 	}
861 
862 	seq_zero = net_buf_simple_pull_be16(buf);
863 	obo = seq_zero >> 15;
864 	seq_zero = (seq_zero >> 2) & TRANS_SEQ_ZERO_MASK;
865 
866 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->friend_match) {
867 		LOG_DBG("Ack for LPN 0x%04x of this Friend", rx->ctx.recv_dst);
868 		/* Best effort - we don't have enough info for true SeqAuth */
869 		*seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_RX(rx), seq_zero);
870 		return 0;
871 	} else if (!rx->local_match) {
872 		return 0;
873 	}
874 
875 	ack = net_buf_simple_pull_be32(buf);
876 
877 	LOG_DBG("OBO %u seq_zero 0x%04x ack 0x%08x", obo, seq_zero, ack);
878 
879 	tx = seg_tx_lookup(seq_zero, obo, rx->ctx.addr);
880 	if (!tx) {
881 		LOG_DBG("No matching TX context for ack");
882 		return -ENOENT;
883 	}
884 
885 	if (!BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
886 		LOG_ERR("Received ack for group seg");
887 		return -EINVAL;
888 	}
889 
890 	*seq_auth = tx->seq_auth;
891 
892 	if (!ack) {
893 		LOG_WRN("SDU canceled");
894 		seg_tx_complete(tx, -ECANCELED);
895 		return 0;
896 	}
897 
898 	if (find_msb_set(ack) - 1 > tx->seg_n) {
899 		LOG_ERR("Too large segment number in ack");
900 		return -EINVAL;
901 	}
902 
903 	while ((bit = find_lsb_set(ack))) {
904 		if (tx->seg[bit - 1]) {
905 			LOG_DBG("seg %u/%u acked", bit - 1, tx->seg_n);
906 			seg_tx_done(tx, bit - 1);
907 			new_seg_ack = true;
908 		}
909 
910 		ack &= ~BIT(bit - 1);
911 	}
912 
913 	if (new_seg_ack) {
914 		tx->attempts_left_without_progress =
915 			BT_MESH_SAR_TX_RETRANS_NO_PROGRESS;
916 	}
917 
918 	if (tx->nack_count) {
919 		/* If transmission is not in progress, it means that the
920 		 * Retransmission Timer is running.
921 		 */
922 		if (tx->seg_o == 0) {
923 			k_timeout_t timeout = K_NO_WAIT;
924 
925 			/* If there are no retransmission attempts left we
926 			 * immediately trigger the retransmit call that will
927 			 * end the transmission.
928 			 */
929 			if ((BT_MESH_ADDR_IS_UNICAST(tx->dst) &&
930 			     !tx->attempts_left_without_progress) ||
931 			    !tx->attempts_left) {
932 				goto reschedule;
933 			}
934 
935 			uint32_t delta_ms = (uint32_t)(k_uptime_get() - tx->adv_start_timestamp);
936 
937 			/* According to MshPRTv1.1: 3.5.3.3.2, we should reset the retransmit timer
938 			 * and retransmit immediately when receiving a valid ack message while the
939 			 * Retransmission timer is running. However, the transport should still keep
940 			 * the segment transmission interval between transmissions of each segment.
941 			 */
942 			if (delta_ms < BT_MESH_SAR_TX_SEG_INT_MS) {
943 				timeout = K_MSEC(BT_MESH_SAR_TX_SEG_INT_MS - delta_ms);
944 			}
945 
946 reschedule:
947 			k_work_reschedule(&tx->retransmit, timeout);
948 		} else {
949 			tx->ack_received = 1U;
950 		}
951 	} else {
952 		LOG_DBG("SDU TX complete");
953 		seg_tx_complete(tx, 0);
954 	}
955 
956 	return 0;
957 }
958 
959 static int ctl_recv(struct bt_mesh_net_rx *rx, uint8_t hdr,
960 		    struct net_buf_simple *buf, uint64_t *seq_auth)
961 {
962 	uint8_t ctl_op = TRANS_CTL_OP(&hdr);
963 
964 	LOG_DBG("OpCode 0x%02x len %u", ctl_op, buf->len);
965 
966 	switch (ctl_op) {
967 	case TRANS_CTL_OP_ACK:
968 		return trans_ack(rx, hdr, buf, seq_auth);
969 	case TRANS_CTL_OP_HEARTBEAT:
970 		return bt_mesh_hb_recv(rx, buf);
971 	}
972 
973 	/* Only acks for friendship and heartbeats may need processing without local_match */
974 	if (!rx->local_match) {
975 		return 0;
976 	}
977 
978 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !bt_mesh_lpn_established()) {
979 		switch (ctl_op) {
980 		case TRANS_CTL_OP_FRIEND_POLL:
981 			return bt_mesh_friend_poll(rx, buf);
982 		case TRANS_CTL_OP_FRIEND_REQ:
983 			return bt_mesh_friend_req(rx, buf);
984 		case TRANS_CTL_OP_FRIEND_CLEAR:
985 			return bt_mesh_friend_clear(rx, buf);
986 		case TRANS_CTL_OP_FRIEND_CLEAR_CFM:
987 			return bt_mesh_friend_clear_cfm(rx, buf);
988 		case TRANS_CTL_OP_FRIEND_SUB_ADD:
989 			return bt_mesh_friend_sub_add(rx, buf);
990 		case TRANS_CTL_OP_FRIEND_SUB_REM:
991 			return bt_mesh_friend_sub_rem(rx, buf);
992 		}
993 	}
994 
995 #if defined(CONFIG_BT_MESH_LOW_POWER)
996 	if (ctl_op == TRANS_CTL_OP_FRIEND_OFFER) {
997 		return bt_mesh_lpn_friend_offer(rx, buf);
998 	}
999 
1000 	if (rx->ctx.addr == bt_mesh.lpn.frnd) {
1001 		if (ctl_op == TRANS_CTL_OP_FRIEND_CLEAR_CFM) {
1002 			return bt_mesh_lpn_friend_clear_cfm(rx, buf);
1003 		}
1004 
1005 		if (!rx->friend_cred) {
1006 			LOG_WRN("Message from friend with wrong credentials");
1007 			return -EINVAL;
1008 		}
1009 
1010 		switch (ctl_op) {
1011 		case TRANS_CTL_OP_FRIEND_UPDATE:
1012 			return bt_mesh_lpn_friend_update(rx, buf);
1013 		case TRANS_CTL_OP_FRIEND_SUB_CFM:
1014 			return bt_mesh_lpn_friend_sub_cfm(rx, buf);
1015 		}
1016 	}
1017 #endif /* CONFIG_BT_MESH_LOW_POWER */
1018 
1019 	LOG_WRN("Unhandled TransOpCode 0x%02x", ctl_op);
1020 
1021 	return -EBADMSG;
1022 }
1023 
1024 static int trans_unseg(struct net_buf_simple *buf, struct bt_mesh_net_rx *rx,
1025 		       uint64_t *seq_auth)
1026 {
1027 	NET_BUF_SIMPLE_DEFINE_STATIC(sdu, BT_MESH_SDU_UNSEG_MAX);
1028 	uint8_t hdr;
1029 	struct bt_mesh_rpl *rpl = NULL;
1030 	int err;
1031 
1032 	LOG_DBG("AKF %u AID 0x%02x", AKF(buf->data), AID(buf->data));
1033 
1034 	if (buf->len < 1) {
1035 		LOG_ERR("Too small unsegmented PDU");
1036 		return -EBADMSG;
1037 	}
1038 
1039 	if (bt_mesh_rpl_check(rx, &rpl)) {
1040 		LOG_WRN("Replay: src 0x%04x dst 0x%04x seq 0x%06x", rx->ctx.addr, rx->ctx.recv_dst,
1041 			rx->seq);
1042 		return -EINVAL;
1043 	}
1044 
1045 	hdr = net_buf_simple_pull_u8(buf);
1046 
1047 	if (rx->ctl) {
1048 		err = ctl_recv(rx, hdr, buf, seq_auth);
1049 	} else if (buf->len < 1 + APP_MIC_LEN(0)) {
1050 		LOG_ERR("Too short SDU + MIC");
1051 		err = -EINVAL;
1052 	} else {
1053 		/* Adjust the length to not contain the MIC at the end */
1054 		buf->len -= APP_MIC_LEN(0);
1055 		err = sdu_recv(rx, hdr, 0, buf, &sdu, NULL);
1056 	}
1057 
1058 	/* Update the RPL only if an entry is available and the upper layer accepted the data. */
1059 	if (err == 0 && rpl != NULL) {
1060 		bt_mesh_rpl_update(rpl, rx);
1061 	}
1062 
1063 	return err;
1064 }
1065 
1066 int bt_mesh_ctl_send(struct bt_mesh_net_tx *tx, uint8_t ctl_op, void *data,
1067 		     size_t data_len,
1068 		     const struct bt_mesh_send_cb *cb, void *cb_data)
1069 {
1070 	struct net_buf_simple buf;
1071 
1072 	if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) {
1073 		tx->ctx->send_ttl = bt_mesh_default_ttl_get();
1074 	} else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) {
1075 		LOG_ERR("TTL too large (max 127)");
1076 		return -EINVAL;
1077 	}
1078 
1079 	net_buf_simple_init_with_data(&buf, data, data_len);
1080 
1081 	if (data_len > BT_MESH_SDU_UNSEG_MAX) {
1082 		tx->ctx->send_rel = true;
1083 	}
1084 
1085 	tx->ctx->app_idx = BT_MESH_KEY_UNUSED;
1086 
1087 	if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED ||
1088 	    BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) {
1089 		LOG_ERR("Invalid destination address");
1090 		return -EINVAL;
1091 	}
1092 
1093 	LOG_DBG("src 0x%04x dst 0x%04x ttl 0x%02x ctl 0x%02x", tx->src, tx->ctx->addr,
1094 		tx->ctx->send_ttl, ctl_op);
1095 	LOG_DBG("len %zu: %s", data_len, bt_hex(data, data_len));
1096 
1097 	if (tx->ctx->send_rel) {
1098 		return send_seg(tx, &buf, cb, cb_data, &ctl_op);
1099 	} else {
1100 		return send_unseg(tx, &buf, cb, cb_data, &ctl_op);
1101 	}
1102 }
1103 
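/* Send a Segment Acknowledgment for the given SeqAuth and block. The 6-octet
 * payload mirrors the layout parsed in trans_ack(): OBO and SeqZero packed
 * into the first two octets, followed by the 32-bit BlockAck.
 */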
1104 static int send_ack(struct bt_mesh_subnet *sub, uint16_t src, uint16_t dst,
1105 		    uint8_t ttl, uint64_t *seq_auth, uint32_t block, uint8_t obo)
1106 {
1107 	struct bt_mesh_msg_ctx ctx = {
1108 		.net_idx = sub->net_idx,
1109 		.app_idx = BT_MESH_KEY_UNUSED,
1110 		.addr = dst,
1111 		.send_ttl = ttl,
1112 	};
1113 	struct bt_mesh_net_tx tx = {
1114 		.sub = sub,
1115 		.ctx = &ctx,
1116 		.src = obo ? bt_mesh_primary_addr() : src,
1117 		.xmit = bt_mesh_net_transmit_get(),
1118 	};
1119 	uint16_t seq_zero = *seq_auth & TRANS_SEQ_ZERO_MASK;
1120 	uint8_t buf[6];
1121 
1122 	LOG_DBG("SeqZero 0x%04x Block 0x%08x OBO %u", seq_zero, block, obo);
1123 
1124 	if (bt_mesh_lpn_established() && !bt_mesh_has_addr(ctx.addr)) {
1125 		LOG_WRN("Not sending ack when LPN is enabled");
1126 		return 0;
1127 	}
1128 
1129 	/* This can happen if the segmented message was destined for a group
1130 	 * or virtual address.
1131 	 */
1132 	if (!BT_MESH_ADDR_IS_UNICAST(src)) {
1133 		LOG_DBG("Not sending ack for non-unicast address");
1134 		return 0;
1135 	}
1136 
1137 	sys_put_be16(((seq_zero << 2) & 0x7ffc) | (obo << 15), buf);
1138 	sys_put_be32(block, &buf[2]);
1139 
1140 	return bt_mesh_ctl_send(&tx, TRANS_CTL_OP_ACK, buf, sizeof(buf),
1141 				NULL, NULL);
1142 }
1143 
1144 static void seg_rx_reset(struct seg_rx *rx, bool full_reset)
1145 {
1146 	int i;
1147 
1148 	LOG_DBG("rx %p", rx);
1149 
1150 	/* If this fails, the handler will exit early on the next execution, as
1151 	 * it checks rx->in_use.
1152 	 */
1153 	(void)k_work_cancel_delayable(&rx->ack);
1154 	(void)k_work_cancel_delayable(&rx->discard);
1155 
1156 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->obo &&
1157 	    rx->block != BLOCK_COMPLETE(rx->seg_n)) {
1158 		LOG_WRN("Clearing incomplete buffers from Friend queue");
1159 		bt_mesh_friend_clear_incomplete(rx->sub, rx->src, rx->dst,
1160 						&rx->seq_auth);
1161 	}
1162 
1163 	for (i = 0; i <= rx->seg_n; i++) {
1164 		if (!rx->seg[i]) {
1165 			continue;
1166 		}
1167 
1168 		k_mem_slab_free(&segs, rx->seg[i]);
1169 		rx->seg[i] = NULL;
1170 	}
1171 
1172 	rx->in_use = 0U;
1173 
1174 	/* We don't always reset these values since we need to be able to
1175 	 * send an ack if we receive a segment after we've already received
1176 	 * the full SDU.
1177 	 */
1178 	if (full_reset) {
1179 		rx->seq_auth = 0U;
1180 		rx->sub = NULL;
1181 		rx->src = BT_MESH_ADDR_UNASSIGNED;
1182 		rx->dst = BT_MESH_ADDR_UNASSIGNED;
1183 	}
1184 }
1185 
1186 static void seg_discard(struct k_work *work)
1187 {
1188 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1189 	struct seg_rx *rx = CONTAINER_OF(dwork, struct seg_rx, discard);
1190 
1191 	LOG_WRN("SAR Discard timeout expired");
1192 	seg_rx_reset(rx, false);
1193 	rx->block = 0U;
1194 
1195 	if (IS_ENABLED(CONFIG_BT_TESTING)) {
1196 		bt_test_mesh_trans_incomp_timer_exp();
1197 	}
1198 }
1199 
1200 static void seg_ack(struct k_work *work)
1201 {
1202 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1203 	struct seg_rx *rx = CONTAINER_OF(dwork, struct seg_rx, ack);
1204 
1205 	if (!rx->in_use || rx->block == BLOCK_COMPLETE(rx->seg_n)) {
1206 		/* Cancellation of this timer may have failed. If it fails as
1207 		 * part of seg_rx_reset(), in_use will be false.
1208 		 * If it fails as part of the processing of a fully received
1209 		 * SDU, the ack is already being sent from the receive handler,
1210 		 * and the timer based ack sending can be ignored.
1211 		 */
1212 		return;
1213 	}
1214 
1215 	LOG_DBG("rx %p", rx);
1216 
1217 	send_ack(rx->sub, rx->dst, rx->src, rx->ttl, &rx->seq_auth,
1218 		 rx->block, rx->obo);
1219 
1220 	rx->last_ack = k_uptime_get_32();
1221 
1222 	if (rx->attempts_left == 0) {
1223 		LOG_DBG("Ran out of retransmit attempts");
1224 		return;
1225 	}
1226 
1227 	if (rx->seg_n > BT_MESH_SAR_RX_SEG_THRESHOLD) {
1228 		--rx->attempts_left;
1229 		k_work_schedule(&rx->ack, K_MSEC(BT_MESH_SAR_RX_SEG_INT_MS));
1230 	}
1231 }
1232 
1233 static inline bool sdu_len_is_ok(bool ctl, uint8_t seg_n)
1234 {
1235 	return (seg_n < BT_MESH_RX_SEG_MAX);
1236 }
1237 
1238 static struct seg_rx *seg_rx_find(struct bt_mesh_net_rx *net_rx,
1239 				  const uint64_t *seq_auth)
1240 {
1241 	int i;
1242 
1243 	for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1244 		struct seg_rx *rx = &seg_rx[i];
1245 
1246 		if (rx->src != net_rx->ctx.addr ||
1247 		    rx->dst != net_rx->ctx.recv_dst) {
1248 			continue;
1249 		}
1250 
1251 		/* Return newer RX context in addition to an exact match, so
1252 		 * the calling function can properly discard an old SeqAuth.
1253 		 */
1254 		if (rx->seq_auth >= *seq_auth) {
1255 			return rx;
1256 		}
1257 
1258 		if (rx->in_use) {
1259 			LOG_WRN("Duplicate SDU from src 0x%04x", net_rx->ctx.addr);
1260 
1261 			/* Clear out the old context since the sender
1262 			 * has apparently started sending a new SDU.
1263 			 */
1264 			seg_rx_reset(rx, true);
1265 
1266 			/* Return non-match so caller can re-allocate */
1267 			return NULL;
1268 		}
1269 	}
1270 
1271 	return NULL;
1272 }
1273 
1274 static bool seg_rx_is_valid(struct seg_rx *rx, struct bt_mesh_net_rx *net_rx,
1275 			    const uint8_t *hdr, uint8_t seg_n)
1276 {
1277 	if (rx->hdr != *hdr || rx->seg_n != seg_n) {
1278 		LOG_ERR("Invalid segment for ongoing session");
1279 		return false;
1280 	}
1281 
1282 	if (rx->src != net_rx->ctx.addr || rx->dst != net_rx->ctx.recv_dst) {
1283 		LOG_ERR("Invalid source or destination for segment");
1284 		return false;
1285 	}
1286 
1287 	if (rx->ctl != net_rx->ctl) {
1288 		LOG_ERR("Inconsistent CTL in segment");
1289 		return false;
1290 	}
1291 
1292 	return true;
1293 }
1294 
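/* Reserve a free RX context for a new segmented SDU. Bail out early if the
 * segment slab is already exhausted, since not even the first segment could
 * be stored in that case.
 */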
1295 static struct seg_rx *seg_rx_alloc(struct bt_mesh_net_rx *net_rx,
1296 				   const uint8_t *hdr, const uint64_t *seq_auth,
1297 				   uint8_t seg_n)
1298 {
1299 	int i;
1300 
1301 	/* No race condition on this check, as this function only executes in
1302 	 * the collaborative Bluetooth rx thread:
1303 	 */
1304 	if (k_mem_slab_num_free_get(&segs) < 1) {
1305 		LOG_WRN("Not enough segments for incoming message");
1306 		return NULL;
1307 	}
1308 
1309 	for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1310 		struct seg_rx *rx = &seg_rx[i];
1311 
1312 		if (rx->in_use) {
1313 			continue;
1314 		}
1315 
1316 		rx->in_use = 1U;
1317 		rx->sub = net_rx->sub;
1318 		rx->ctl = net_rx->ctl;
1319 		rx->seq_auth = *seq_auth;
1320 		rx->seg_n = seg_n;
1321 		rx->hdr = *hdr;
1322 		rx->ttl = net_rx->ctx.send_ttl;
1323 		rx->src = net_rx->ctx.addr;
1324 		rx->dst = net_rx->ctx.recv_dst;
1325 		rx->block = 0U;
1326 
1327 		LOG_DBG("New RX context. Block Complete 0x%08x", BLOCK_COMPLETE(seg_n));
1328 
1329 		return rx;
1330 	}
1331 
1332 	return NULL;
1333 }
1334 
1335 static int trans_seg(struct net_buf_simple *buf, struct bt_mesh_net_rx *net_rx,
1336 		     enum bt_mesh_friend_pdu_type *pdu_type, uint64_t *seq_auth,
1337 		     uint8_t *seg_count)
1338 {
1339 	struct bt_mesh_rpl *rpl = NULL;
1340 	struct seg_rx *rx;
1341 	uint8_t *hdr = buf->data;
1342 	uint16_t seq_zero;
1343 	uint32_t auth_seqnum;
1344 	uint8_t seg_n;
1345 	uint8_t seg_o;
1346 	int err;
1347 
1348 	if (buf->len < 5) {
1349 		LOG_ERR("Too short segmented message (len %u)", buf->len);
1350 		return -EBADMSG;
1351 	}
1352 
1353 	if (bt_mesh_rpl_check(net_rx, &rpl)) {
1354 		LOG_WRN("Replay: src 0x%04x dst 0x%04x seq 0x%06x", net_rx->ctx.addr,
1355 			net_rx->ctx.recv_dst, net_rx->seq);
1356 		return -EINVAL;
1357 	}
1358 
1359 	LOG_DBG("ASZMIC %u AKF %u AID 0x%02x", ASZMIC(hdr), AKF(hdr), AID(hdr));
1360 
1361 	net_buf_simple_pull(buf, 1);
1362 
1363 	seq_zero = net_buf_simple_pull_be16(buf);
1364 	seg_o = (seq_zero & 0x03) << 3;
1365 	seq_zero = (seq_zero >> 2) & TRANS_SEQ_ZERO_MASK;
1366 	seg_n = net_buf_simple_pull_u8(buf);
1367 	seg_o |= seg_n >> 5;
1368 	seg_n &= 0x1f;
1369 
1370 	LOG_DBG("SeqZero 0x%04x SegO %u SegN %u", seq_zero, seg_o, seg_n);
1371 
1372 	if (seg_o > seg_n) {
1373 		LOG_ERR("SegO greater than SegN (%u > %u)", seg_o, seg_n);
1374 		return -EBADMSG;
1375 	}
1376 
1377 	/* According to MshPRTv1.1:
1378 	 * "The SeqAuth is composed of the IV Index and the sequence number
1379 	 *  (SEQ) of the first segment"
1380 	 *
1381 	 * Therefore we need to calculate the very first SEQ in order to find
1382 	 * the SeqAuth. We can calculate it as follows:
1383 	 *
1384 	 * SEQ(0) = SEQ(n) - (delta between seqZero and SEQ(n), derived from the
1385 	 * 14 least significant bits of SEQ(n))
1386 	 *
1387 	 * The delta must be >= 0; if it is not, the resulting seq_auth is
1388 	 * invalid, which is caught by the checks below.
1389 	 */
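	/* For example, with SEQ(n) = 0x000105 and seqZero = 0x0100, the delta is
	 * (0x0105 - 0x0100) & BIT_MASK(13) = 5, so SEQ(0) = 0x000100 and the
	 * SeqAuth is composed of the RX IV Index and 0x000100.
	 */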
1390 	*seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_RX(net_rx),
1391 			     (net_rx->seq -
1392 			      ((((net_rx->seq & BIT_MASK(14)) - seq_zero)) &
1393 			       BIT_MASK(13))));
1394 	auth_seqnum = *seq_auth & BIT_MASK(24);
1395 	*seg_count = seg_n + 1;
1396 
1397 	/* Look for old RX sessions */
1398 	rx = seg_rx_find(net_rx, seq_auth);
1399 	if (rx) {
1400 		/* Discard old SeqAuth packet */
1401 		if (rx->seq_auth > *seq_auth) {
1402 			LOG_WRN("Ignoring old SeqAuth");
1403 			return -EINVAL;
1404 		}
1405 
1406 		if (!seg_rx_is_valid(rx, net_rx, hdr, seg_n)) {
1407 			return -EINVAL;
1408 		}
1409 
1410 		if (rx->in_use) {
1411 			LOG_DBG("Existing RX context. Block 0x%08x", rx->block);
1412 			goto found_rx;
1413 		}
1414 
1415 		if (rx->block == BLOCK_COMPLETE(rx->seg_n)) {
1416 			LOG_DBG("Got segment for already complete SDU");
1417 
1418 			/* We should not send more than one Segment Acknowledgment message
1419 			 * for the same SeqAuth in a period of:
1420 			 * [acknowledgment delay increment * segment transmission interval]
1421 			 *  milliseconds
1422 			 */
1423 			if (k_uptime_get_32() - rx->last_ack >
1424 			    SEQAUTH_ALREADY_PROCESSED_TIMEOUT) {
1425 				send_ack(net_rx->sub, net_rx->ctx.recv_dst,
1426 					 net_rx->ctx.addr, net_rx->ctx.send_ttl,
1427 					 seq_auth, rx->block, rx->obo);
1428 				rx->last_ack = k_uptime_get_32();
1429 			}
1430 
1431 			if (rpl) {
1432 				bt_mesh_rpl_update(rpl, net_rx);
1433 			}
1434 
1435 			return -EALREADY;
1436 		}
1437 
1438 		/* We ignore instead of sending block ack 0 since the
1439 		 * ack timer is always smaller than the incomplete
1440 		 * timer, i.e. the sender is misbehaving.
1441 		 */
1442 		LOG_WRN("Got segment for canceled SDU");
1443 		return -EINVAL;
1444 	}
1445 
1446 	/* Bail out early if we're not ready to receive such a large SDU */
1447 	if (!sdu_len_is_ok(net_rx->ctl, seg_n)) {
1448 		LOG_ERR("Too big incoming SDU length");
1449 		send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
1450 			 net_rx->ctx.send_ttl, seq_auth, 0,
1451 			 net_rx->friend_match);
1452 		return -EMSGSIZE;
1453 	}
1454 
1455 	/* Verify early that there will be space in the Friend Queue(s) in
1456 	 * case this message is destined to an LPN of ours.
1457 	 */
1458 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
1459 	    net_rx->friend_match && !net_rx->local_match &&
1460 	    !bt_mesh_friend_queue_has_space(net_rx->sub->net_idx,
1461 					    net_rx->ctx.addr,
1462 					    net_rx->ctx.recv_dst, seq_auth,
1463 					    *seg_count)) {
1464 		LOG_ERR("No space in Friend Queue for %u segments", *seg_count);
1465 		send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
1466 			 net_rx->ctx.send_ttl, seq_auth, 0,
1467 			 net_rx->friend_match);
1468 		return -ENOBUFS;
1469 	}
1470 
1471 	/* Keep track of the received SeqAuth values received from this address
1472 	 * and discard segmented messages that are not newer, as described in
1473 	 * MshPRTv1.1: 3.5.3.4.
1474 	 *
1475 	 * The logic on the first segmented receive is a bit special, since the
1476 	 * initial value of rpl->seg is 0, which would normally fail the
1477 	 * comparison check with auth_seqnum:
1478 	 * - If this is the first time we receive from this source, rpl->src
1479 	 *   will be 0, and we can skip this check.
1480 	 * - If this is the first time we receive from this source on the new IV
1481 	 *   index, rpl->old_iv will be set, and the check is also skipped.
1482 	 * - If this is the first segmented message on the new IV index, but we
1483 	 *   have received an unsegmented message already, the unsegmented
1484 	 *   message will have reset rpl->seg to 0, and this message's SeqAuth
1485 	 *   cannot be zero.
1486 	 */
1487 	if (rpl && rpl->src && auth_seqnum <= rpl->seg &&
1488 	    (!rpl->old_iv || net_rx->old_iv)) {
1489 		LOG_WRN("Ignoring old SeqAuth 0x%06x", auth_seqnum);
1490 		return -EALREADY;
1491 	}
1492 
1493 	/* Look for free slot for a new RX session */
1494 	rx = seg_rx_alloc(net_rx, hdr, seq_auth, seg_n);
1495 	if (!rx) {
1496 		/* Warn but don't cancel since the existing slots will
1497 		 * eventually be freed up and we'll be able to process
1498 		 * this one.
1499 		 */
1500 		LOG_WRN("No free slots for new incoming segmented messages");
1501 		return -ENOMEM;
1502 	}
1503 
1504 	rx->obo = net_rx->friend_match;
1505 
1506 found_rx:
1507 	if (BIT(seg_o) & rx->block) {
1508 		LOG_DBG("Received already received fragment");
1509 		return -EALREADY;
1510 	}
1511 
1512 	/* All segments, except the last one, must either have 8 bytes of
1513 	 * payload (for 64bit Net MIC) or 12 bytes of payload (for 32bit
1514 	 * Net MIC).
1515 	 */
1516 	if (seg_o == seg_n) {
1517 		/* Set the expected final buffer length */
1518 		rx->len = seg_n * seg_len(rx->ctl) + buf->len;
1519 		LOG_DBG("Target len %u * %u + %u = %u", seg_n, seg_len(rx->ctl), buf->len, rx->len);
1520 
1521 		if (rx->len > BT_MESH_RX_SDU_MAX) {
1522 			LOG_ERR("Too large SDU len");
1523 			send_ack(net_rx->sub, net_rx->ctx.recv_dst,
1524 				 net_rx->ctx.addr, net_rx->ctx.send_ttl,
1525 				 seq_auth, 0, rx->obo);
1526 			seg_rx_reset(rx, true);
1527 			return -EMSGSIZE;
1528 		}
1529 	} else {
1530 		if (buf->len != seg_len(rx->ctl)) {
1531 			LOG_ERR("Incorrect segment size for message type");
1532 			return -EINVAL;
1533 		}
1534 	}
1535 
1536 	LOG_DBG("discard timeout %u", BT_MESH_SAR_RX_DISCARD_TIMEOUT_MS);
1537 	k_work_schedule(&rx->discard,
1538 			K_MSEC(BT_MESH_SAR_RX_DISCARD_TIMEOUT_MS));
1539 	rx->attempts_left = BT_MESH_SAR_RX_ACK_RETRANS_COUNT;
1540 
1541 	if (!bt_mesh_lpn_established() && BT_MESH_ADDR_IS_UNICAST(rx->dst)) {
1542 		LOG_DBG("ack delay %u", ACK_DELAY(rx->seg_n));
1543 		k_work_reschedule(&rx->ack, K_MSEC(ACK_DELAY(rx->seg_n)));
1544 	}
1545 
1546 	/* Allocate the segment buffer here */
1547 	err = k_mem_slab_alloc(&segs, &rx->seg[seg_o], K_NO_WAIT);
1548 	if (err) {
1549 		LOG_WRN("Unable to allocate buffer for Seg %u", seg_o);
1550 		return -ENOBUFS;
1551 	}
1552 
1553 	memcpy(rx->seg[seg_o], buf->data, buf->len);
1554 
1555 	LOG_DBG("Received %u/%u", seg_o, seg_n);
1556 
1557 	/* Mark segment as received */
1558 	rx->block |= BIT(seg_o);
1559 
1560 	if (rx->block != BLOCK_COMPLETE(seg_n)) {
1561 		*pdu_type = BT_MESH_FRIEND_PDU_PARTIAL;
1562 		return 0;
1563 	}
1564 
1565 	LOG_DBG("Complete SDU");
1566 	*pdu_type = BT_MESH_FRIEND_PDU_COMPLETE;
1567 
1568 	/* If this fails, the work handler will either exit early because the
1569 	 * block is fully received, or rx->in_use is false.
1570 	 */
1571 	(void)k_work_cancel_delayable(&rx->ack);
1572 
1573 	send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
1574 		 net_rx->ctx.send_ttl, seq_auth, rx->block, rx->obo);
1575 	rx->last_ack = k_uptime_get_32();
1576 
1577 	if (net_rx->ctl) {
1578 		NET_BUF_SIMPLE_DEFINE(sdu, BT_MESH_RX_CTL_MAX);
1579 		seg_rx_assemble(rx, &sdu, 0U);
1580 		err = ctl_recv(net_rx, *hdr, &sdu, seq_auth);
1581 	} else if (rx->len < 1 + APP_MIC_LEN(ASZMIC(hdr))) {
1582 		LOG_ERR("Too short SDU + MIC");
1583 		err = -EINVAL;
1584 	} else {
1585 		NET_BUF_SIMPLE_DEFINE_STATIC(seg_buf, BT_MESH_RX_SDU_MAX);
1586 		struct net_buf_simple sdu;
1587 
1588 		/* Decrypting in place to avoid creating two assembly buffers.
1589 		 * We'll reassemble the buffer from the segments before each
1590 		 * decryption attempt.
1591 		 */
1592 		net_buf_simple_init(&seg_buf, 0);
1593 		net_buf_simple_init_with_data(
1594 			&sdu, seg_buf.data, rx->len - APP_MIC_LEN(ASZMIC(hdr)));
1595 
1596 		err = sdu_recv(net_rx, *hdr, ASZMIC(hdr), &seg_buf, &sdu, rx);
1597 	}
1598 
1599 	/* Update the RPL only if an entry is available and the upper layer accepted the data. */
1600 	if (err == 0 && rpl != NULL) {
1601 		bt_mesh_rpl_update(rpl, net_rx);
1602 		/* Update the seg, unless it has already been surpassed:
1603 		 * This needs to happen after rpl_update to ensure that the IV
1604 		 * update reset logic inside rpl_update doesn't overwrite the
1605 		 * change.
1606 		 */
1607 		rpl->seg = MAX(rpl->seg, auth_seqnum);
1608 	}
1609 
1610 	seg_rx_reset(rx, false);
1611 
1612 	return err;
1613 }
1614 
1615 int bt_mesh_trans_recv(struct net_buf_simple *buf, struct bt_mesh_net_rx *rx)
1616 {
1617 	uint64_t seq_auth = TRANS_SEQ_AUTH_NVAL;
1618 	enum bt_mesh_friend_pdu_type pdu_type = BT_MESH_FRIEND_PDU_SINGLE;
1619 	struct net_buf_simple_state state;
1620 	uint8_t seg_count = 0;
1621 	int err;
1622 
1623 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
1624 		rx->friend_match = bt_mesh_friend_match(rx->sub->net_idx,
1625 							rx->ctx.recv_dst);
1626 	} else {
1627 		rx->friend_match = false;
1628 	}
1629 
1630 	LOG_DBG("src 0x%04x dst 0x%04x seq 0x%08x friend_match %u", rx->ctx.addr, rx->ctx.recv_dst,
1631 		rx->seq, rx->friend_match);
1632 
1633 	/* Remove network headers */
1634 	net_buf_simple_pull(buf, BT_MESH_NET_HDR_LEN);
1635 
1636 	LOG_DBG("Payload %s", bt_hex(buf->data, buf->len));
1637 
1638 	if (IS_ENABLED(CONFIG_BT_TESTING)) {
1639 		bt_test_mesh_net_recv(rx->ctx.recv_ttl, rx->ctl, rx->ctx.addr,
1640 				      rx->ctx.recv_dst, buf->data, buf->len);
1641 	}
1642 
1643 	/* If LPN mode is enabled messages are only accepted when we've
1644 	 * requested the Friend to send them. The messages must also
1645 	 * be encrypted using the Friend Credentials.
1646 	 */
1647 	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
1648 	    bt_mesh_lpn_established() && rx->net_if == BT_MESH_NET_IF_ADV &&
1649 	    (!bt_mesh_lpn_waiting_update() || !rx->friend_cred)) {
1650 		LOG_WRN("Ignoring unexpected message in Low Power mode");
1651 		return -EAGAIN;
1652 	}
1653 
1654 	/* Save the app-level state so the buffer can later be placed in
1655 	 * the Friend Queue.
1656 	 */
1657 	net_buf_simple_save(buf, &state);
1658 
1659 	if (SEG(buf->data)) {
1660 		/* Segmented messages must match a local element or an
1661 		 * LPN of this Friend.
1662 		 */
1663 		if (!rx->local_match && !rx->friend_match) {
1664 			return 0;
1665 		}
1666 
1667 		err = trans_seg(buf, rx, &pdu_type, &seq_auth, &seg_count);
1668 	} else {
1669 		seg_count = 1;
1670 		err = trans_unseg(buf, rx, &seq_auth);
1671 	}
1672 
1673 	/* Notify LPN state machine so a Friend Poll will be sent. */
1674 	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER)) {
1675 		bt_mesh_lpn_msg_received(rx);
1676 	}
1677 
1678 	net_buf_simple_restore(buf, &state);
1679 
1680 	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->friend_match && !err) {
1681 		if (seq_auth == TRANS_SEQ_AUTH_NVAL) {
1682 			bt_mesh_friend_enqueue_rx(rx, pdu_type, NULL,
1683 						  seg_count, buf);
1684 		} else {
1685 			bt_mesh_friend_enqueue_rx(rx, pdu_type, &seq_auth,
1686 						  seg_count, buf);
1687 		}
1688 	}
1689 
1690 	return err;
1691 }
1692 
1693 void bt_mesh_rx_reset(void)
1694 {
1695 	int i;
1696 
1697 	LOG_DBG("");
1698 
1699 	for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1700 		seg_rx_reset(&seg_rx[i], true);
1701 	}
1702 }
1703 
1704 void bt_mesh_trans_reset(void)
1705 {
1706 	int i;
1707 
1708 	bt_mesh_rx_reset();
1709 
1710 	LOG_DBG("");
1711 
1712 	for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
1713 		seg_tx_reset(&seg_tx[i]);
1714 	}
1715 
1716 	bt_mesh_rpl_clear();
1717 	bt_mesh_va_clear();
1718 }
1719 
1720 void bt_mesh_trans_init(void)
1721 {
1722 	int i;
1723 
1724 	for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
1725 		k_work_init_delayable(&seg_tx[i].retransmit, seg_retransmit);
1726 	}
1727 
1728 	for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
1729 		k_work_init_delayable(&seg_rx[i].ack, seg_ack);
1730 		k_work_init_delayable(&seg_rx[i].discard, seg_discard);
1731 	}
1732 }
1733