/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <string.h>
#include <zephyr/bluetooth/mesh.h>
#include <common/bt_str.h>
#include "net.h"
#include "access.h"
#include "transport.h"
#include "lpn.h"
#include "blob.h"

#define LOG_LEVEL CONFIG_BT_MESH_MODEL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_mesh_blob_srv);

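/* Largest MTU this server will accept: the maximum incoming access SDU minus
 * the short (4-octet) transport MIC.
 */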
#define MTU_SIZE_MAX (BT_MESH_RX_SDU_MAX - BT_MESH_MIC_SHORT)

/* The Receive BLOB Timeout Timer */
#define SERVER_TIMEOUT_SECS(srv) (10 * (1 + (srv)->state.timeout_base))
/* The initial timer value used by an instance of the Pull BLOB State machine - T_BPI */
#define REPORT_TIMER_TIMEOUT K_SECONDS(CONFIG_BT_MESH_BLOB_REPORT_TIMEOUT)

BUILD_ASSERT(BLOB_BLOCK_SIZE_LOG_MIN <= BLOB_BLOCK_SIZE_LOG_MAX,
	     "There must be at least one number between the min and "
	     "max block size that is a power of two.");

BUILD_ASSERT((BLOB_XFER_STATUS_MSG_MAXLEN + BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_XFER_STATUS) +
	      BT_MESH_MIC_SHORT) <= BT_MESH_TX_SDU_MAX,
	     "The BLOB Transfer Status message does not fit into the maximum outgoing SDU size.");

BUILD_ASSERT((BLOB_BLOCK_REPORT_STATUS_MSG_MAXLEN +
	      BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_BLOCK_REPORT) + BT_MESH_MIC_SHORT)
	     <= BT_MESH_TX_SDU_MAX,
	     "The BLOB Partial Block Report message does not fit into the maximum outgoing SDU "
	     "size.");

BUILD_ASSERT((BLOB_BLOCK_STATUS_MSG_MAXLEN + BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_BLOCK_STATUS) +
	      BT_MESH_MIC_SHORT) <= BT_MESH_TX_SDU_MAX,
	     "The BLOB Block Status message does not fit into the maximum outgoing SDU size.");

static void cancel(struct bt_mesh_blob_srv *srv);
static void suspend(struct bt_mesh_blob_srv *srv);

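/* Total number of blocks in the transfer, rounding up for a partial last block. */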
static inline uint32_t block_count_get(const struct bt_mesh_blob_srv *srv)
{
	return DIV_ROUND_UP(srv->state.xfer.size,
			    (1U << srv->state.xfer.block_size_log));
}

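/* Largest chunk the server can receive: the negotiated MTU minus the chunk
 * message overhead (opcode and 2-octet chunk number), capped by the local
 * chunk buffer size.
 */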
static inline uint32_t max_chunk_size(const struct bt_mesh_blob_srv *srv)
{
	return MIN((srv->state.mtu_size - 2 - BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_CHUNK)),
		   BLOB_RX_CHUNK_SIZE);
}

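/* Largest chunk count per block: the missing-chunks bitfield in the Block
 * Status message must fit within the MTU (8 chunks per payload byte), and may
 * never exceed the configured maximum.
 */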
static inline uint32_t max_chunk_count(const struct bt_mesh_blob_srv *srv)
{
	return MIN(8 * (srv->state.mtu_size - 6),
		   CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX);
}

static inline uint32_t missing_chunks(const struct bt_mesh_blob_block *block)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < ARRAY_SIZE(block->missing); ++i) {
		count += POPCOUNT(block->missing[i]);
	}

	return count;
}

static void store_state(const struct bt_mesh_blob_srv *srv)
{
	if (!IS_ENABLED(CONFIG_BT_SETTINGS)) {
		return;
	}

	/* Convert bit count to byte count: */
	uint32_t block_len = DIV_ROUND_UP(block_count_get(srv), 8);

	bt_mesh_model_data_store(
		srv->mod, false, NULL, &srv->state,
		offsetof(struct bt_mesh_blob_srv_state, blocks) + block_len);
}

static void erase_state(struct bt_mesh_blob_srv *srv)
{
	if (!IS_ENABLED(CONFIG_BT_SETTINGS)) {
		return;
	}

	bt_mesh_model_data_store(srv->mod, false, NULL, NULL, 0);
}

static int io_open(struct bt_mesh_blob_srv *srv)
{
	if (!srv->io->open) {
		return 0;
	}

	return srv->io->open(srv->io, &srv->state.xfer, BT_MESH_BLOB_WRITE);
}

static void io_close(struct bt_mesh_blob_srv *srv)
{
	if (!srv->io->close) {
		return;
	}

	srv->io->close(srv->io, &srv->state.xfer);
}

static void reset_timer(struct bt_mesh_blob_srv *srv)
{
	uint32_t timeout_secs =
		srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL ?
			MAX(SERVER_TIMEOUT_SECS(srv),
			    CONFIG_BT_MESH_BLOB_REPORT_TIMEOUT + 1) :
			SERVER_TIMEOUT_SECS(srv);
	k_work_reschedule(&srv->rx_timeout, K_SECONDS(timeout_secs));
}

static void buf_chunk_index_add(struct net_buf_simple *buf, uint16_t chunk)
{
	/* Chunk indices are added using UTF-8 style variable length encoding: */
	if (chunk < 0x80) {
		net_buf_simple_add_u8(buf, chunk);
	} else if (chunk < 0x8000) {
		net_buf_simple_add_u8(buf, 0xc0 | chunk >> 6);
		net_buf_simple_add_u8(buf, 0x80 | (chunk & BIT_MASK(6)));
	} else {
		net_buf_simple_add_u8(buf, 0xe0 | chunk >> 12);
		net_buf_simple_add_u8(buf, 0x80 | ((chunk >> 6) & BIT_MASK(6)));
		net_buf_simple_add_u8(buf, 0x80 | (chunk & BIT_MASK(6)));
	}
}

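/* Number of missing chunks to request in a single Partial Block Report,
 * limited by configuration and, on an LPN, by the Friend queue size.
 */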
static int pull_req_max(const struct bt_mesh_blob_srv *srv)
{
	int count = CONFIG_BT_MESH_BLOB_SRV_PULL_REQ_COUNT;

#if defined(CONFIG_BT_MESH_LOW_POWER)
	/* No point in requesting more than the friend node can hold: */
	if (bt_mesh_lpn_established()) {
		uint32_t segments_per_chunk = DIV_ROUND_UP(
			BLOB_CHUNK_SDU_LEN(srv->state.xfer.chunk_size),
			BT_MESH_APP_SEG_SDU_MAX);

		count = MIN(CONFIG_BT_MESH_BLOB_SRV_PULL_REQ_COUNT,
			    bt_mesh.lpn.queue_size / segments_per_chunk);
	}
#endif

	return MIN(count, missing_chunks(&srv->block));
}

static void report_sent(int err, void *cb_data)
{
	struct bt_mesh_blob_srv *srv = cb_data;

	LOG_DBG("");

	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) && bt_mesh_lpn_established()) {
		bt_mesh_lpn_poll();
	}

	if (k_work_delayable_is_pending(&srv->rx_timeout)) {
		k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
	}
}

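/* Send a BLOB Partial Block Report listing up to pull_req_max() missing chunk
 * indices of the current block back to the client.
 */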
static void block_report(struct bt_mesh_blob_srv *srv)
{
	static const struct bt_mesh_send_cb report_cb = { .end = report_sent };
	struct bt_mesh_msg_ctx ctx = {
		.app_idx = srv->state.app_idx,
		.send_ttl = srv->state.ttl,
		.addr = srv->state.cli,
	};
	int count;
	int i;

	LOG_DBG("rx BLOB Timeout Timer: %i", k_work_delayable_is_pending(&srv->rx_timeout));

	BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_BLOCK_REPORT,
				 BLOB_BLOCK_REPORT_STATUS_MSG_MAXLEN);
	bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_BLOCK_REPORT);

	count = pull_req_max(srv);

	for (i = 0; i < srv->block.chunk_count && count; ++i) {
		if (blob_chunk_missing_get(srv->block.missing, i)) {
			buf_chunk_index_add(&buf, i);
			count--;
		}
	}

	(void)bt_mesh_model_send(srv->mod, &ctx, &buf, &report_cb, srv);
}

static void phase_set(struct bt_mesh_blob_srv *srv,
		      enum bt_mesh_blob_xfer_phase phase)
{
	srv->phase = phase;
	LOG_DBG("Phase: %u", phase);
}

static void cancel(struct bt_mesh_blob_srv *srv)
{
	/* TODO: Could this state be preserved instead somehow? Wiping the
	 * entire transfer state is a bit overkill
	 */
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
	srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
	srv->state.ttl = BT_MESH_TTL_DEFAULT;
	srv->block.number = 0xffff;
	memset(srv->block.missing, 0, sizeof(srv->block.missing));
	srv->state.xfer.chunk_size = 0xffff;
	k_work_cancel_delayable(&srv->rx_timeout);
	k_work_cancel_delayable(&srv->pull.report);
	io_close(srv);
	erase_state(srv);

	if (srv->cb && srv->cb->end) {
		srv->cb->end(srv, srv->state.xfer.id, false);
	}
}

static void suspend(struct bt_mesh_blob_srv *srv)
{
	LOG_DBG("");
	k_work_cancel_delayable(&srv->rx_timeout);
	k_work_cancel_delayable(&srv->pull.report);
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_SUSPENDED);
	if (srv->cb && srv->cb->suspended) {
		srv->cb->suspended(srv);
	}
}

static void resume(struct bt_mesh_blob_srv *srv)
{
	LOG_DBG("Resuming");

	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
	reset_timer(srv);
}

static void end(struct bt_mesh_blob_srv *srv)
{
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_COMPLETE);
	k_work_cancel_delayable(&srv->rx_timeout);
	k_work_cancel_delayable(&srv->pull.report);
	io_close(srv);
	erase_state(srv);

	if (srv->cb && srv->cb->end) {
		srv->cb->end(srv, srv->state.xfer.id, true);
	}
}

static bool all_blocks_received(struct bt_mesh_blob_srv *srv)
{
	for (int i = 0; i < ARRAY_SIZE(srv->state.blocks); ++i) {
		if (srv->state.blocks[i]) {
			return false;
		}
	}

	return true;
}

static bool pull_mode_xfer_complete(struct bt_mesh_blob_srv *srv)
{
	return srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL &&
	       srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK &&
	       all_blocks_received(srv);
}

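/* Receive BLOB Timeout: an unstarted transfer is cancelled, a Pull transfer
 * with all blocks received is completed, and anything else is suspended until
 * the client resumes it.
 */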
static void timeout(struct k_work *work)
{
	struct bt_mesh_blob_srv *srv =
		CONTAINER_OF(work, struct bt_mesh_blob_srv, rx_timeout.work);

	LOG_DBG("");

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		cancel(srv);
	} else if (pull_mode_xfer_complete(srv)) {
		end(srv);
	} else {
		suspend(srv);
	}
}

static void report_timeout(struct k_work *work)
{
	struct bt_mesh_blob_srv *srv =
		CONTAINER_OF(work, struct bt_mesh_blob_srv, pull.report.work);

	LOG_DBG("");

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK &&
	    srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK) {
		return;
	}

	block_report(srv);
}

/*******************************************************************************
 * Message handling
 ******************************************************************************/

static void xfer_status_rsp(struct bt_mesh_blob_srv *srv,
			    struct bt_mesh_msg_ctx *ctx,
			    enum bt_mesh_blob_status status)
{
	BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_XFER_STATUS,
				 BLOB_XFER_STATUS_MSG_MAXLEN);
	bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_XFER_STATUS);

	net_buf_simple_add_u8(&buf, ((status & BIT_MASK(4)) |
				     (srv->state.xfer.mode << 6)));
	net_buf_simple_add_u8(&buf, srv->phase);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		goto send;
	}

	net_buf_simple_add_le64(&buf, srv->state.xfer.id);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		goto send;
	}

	net_buf_simple_add_le32(&buf, srv->state.xfer.size);
	net_buf_simple_add_u8(&buf, srv->state.xfer.block_size_log);
	net_buf_simple_add_le16(&buf, srv->state.mtu_size);
	net_buf_simple_add_mem(&buf, srv->state.blocks,
			       DIV_ROUND_UP(block_count_get(srv), 8));

send:
	ctx->send_ttl = srv->state.ttl;
	(void)bt_mesh_model_send(srv->mod, ctx, &buf, NULL, NULL);
}

static void block_status_rsp(struct bt_mesh_blob_srv *srv,
			     struct bt_mesh_msg_ctx *ctx,
			     enum bt_mesh_blob_status status)
{
	enum bt_mesh_blob_chunks_missing format;
	uint32_t missing;
	int i;

	BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_BLOCK_STATUS,
				 BLOB_BLOCK_STATUS_MSG_MAXLEN);
	bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_BLOCK_STATUS);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE ||
	    srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		missing = srv->block.chunk_count;
	} else if (srv->phase == BT_MESH_BLOB_XFER_PHASE_COMPLETE) {
		missing = 0U;
	} else {
		missing = missing_chunks(&srv->block);
	}

	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		format = BT_MESH_BLOB_CHUNKS_MISSING_ENCODED;
	} else if (missing == srv->block.chunk_count) {
		format = BT_MESH_BLOB_CHUNKS_MISSING_ALL;
	} else if (missing == 0) {
		format = BT_MESH_BLOB_CHUNKS_MISSING_NONE;
	} else {
		format = BT_MESH_BLOB_CHUNKS_MISSING_SOME;
	}

	LOG_DBG("Status: %u, missing: %u/%u", status, missing, srv->block.chunk_count);

	net_buf_simple_add_u8(&buf, (status & BIT_MASK(4)) | (format << 6));
	net_buf_simple_add_le16(&buf, srv->block.number);
	net_buf_simple_add_le16(&buf, srv->state.xfer.chunk_size);

	if (format == BT_MESH_BLOB_CHUNKS_MISSING_SOME) {
		net_buf_simple_add_mem(&buf, srv->block.missing,
				       DIV_ROUND_UP(srv->block.chunk_count, 8));

		LOG_DBG("Bits: %s",
			bt_hex(srv->block.missing,
			       DIV_ROUND_UP(srv->block.chunk_count, 8)));

	} else if (format == BT_MESH_BLOB_CHUNKS_MISSING_ENCODED) {
		int count = pull_req_max(srv);

		for (i = 0; (i < srv->block.chunk_count) && count; ++i) {
			if (blob_chunk_missing_get(srv->block.missing, i)) {
				LOG_DBG("Missing %u", i);
				buf_chunk_index_add(&buf, i);
				count--;
			}
		}
	}

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		ctx->send_ttl = srv->state.ttl;
	}

	(void)bt_mesh_model_send(srv->mod, ctx, &buf, NULL, NULL);
}

static int handle_xfer_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			   struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	LOG_DBG("");

	if (pull_mode_xfer_complete(srv)) {
		/* The client requested the transfer status. If we are in Pull
		 * mode and all blocks have been received, change the transfer
		 * phase to Complete here so that the client receives the
		 * correct state.
		 */
		end(srv);
	}

	xfer_status_rsp(srv, ctx, BT_MESH_BLOB_SUCCESS);

	return 0;
}

static int handle_xfer_start(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			     struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	enum bt_mesh_blob_status status;
	enum bt_mesh_blob_xfer_mode mode;
	uint64_t id;
	size_t size;
	uint8_t block_size_log;
	uint32_t block_count;
	uint16_t mtu_size;
	int err;

	mode = (net_buf_simple_pull_u8(buf) >> 6);
	id = net_buf_simple_pull_le64(buf);
	size = net_buf_simple_pull_le32(buf);
	block_size_log = net_buf_simple_pull_u8(buf);
	mtu_size = net_buf_simple_pull_le16(buf);

	LOG_DBG("\n\tsize: %u block size: %u\n\tmtu_size: %u\n\tmode: %s",
		size, (1U << block_size_log), mtu_size,
		mode == BT_MESH_BLOB_XFER_MODE_PUSH ? "push" : "pull");

	if (mode != BT_MESH_BLOB_XFER_MODE_PULL &&
	    mode != BT_MESH_BLOB_XFER_MODE_PUSH) {
		LOG_WRN("Invalid mode 0x%x", mode);
		return -EINVAL;
	}

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		status = BT_MESH_BLOB_ERR_WRONG_PHASE;
		LOG_WRN("Uninitialized");
		goto rsp;
	}

	if (srv->state.xfer.id != id) {
		status = BT_MESH_BLOB_ERR_WRONG_BLOB_ID;
		/* bt_hex uses a static array for the resulting hex string, so
		 * it cannot be used twice in the same logging call.
		 */
		LOG_WRN("Invalid ID: %s", bt_hex(&id, sizeof(uint64_t)));
		LOG_WRN("Expected ID: %s", bt_hex(&srv->state.xfer.id, sizeof(uint64_t)));
		goto rsp;
	}

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		if (srv->state.xfer.mode != mode ||
		    srv->state.xfer.size != size ||
		    srv->state.xfer.block_size_log != block_size_log ||
		    srv->state.mtu_size > mtu_size) {
			status = BT_MESH_BLOB_ERR_WRONG_PHASE;
			LOG_WRN("Busy");
			goto rsp;
		}

		if (srv->phase == BT_MESH_BLOB_XFER_PHASE_SUSPENDED) {
			resume(srv);
			store_state(srv);
		} else {
			LOG_DBG("Duplicate");
		}

		status = BT_MESH_BLOB_SUCCESS;
		goto rsp;
	}

	if (size > CONFIG_BT_MESH_BLOB_SIZE_MAX) {
		LOG_WRN("Too large");
		status = BT_MESH_BLOB_ERR_BLOB_TOO_LARGE;
		goto rsp;
	}

	if (((1U << block_size_log) < CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MIN) ||
	    ((1U << block_size_log) > CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX)) {
		LOG_WRN("Invalid block size: %u", block_size_log);
		status = BT_MESH_BLOB_ERR_INVALID_BLOCK_SIZE;
		goto rsp;
	}

	srv->state.cli = ctx->addr;
	srv->state.app_idx = ctx->app_idx;
	srv->state.mtu_size = MIN(mtu_size, MTU_SIZE_MAX);
	srv->state.xfer.id = id;
	srv->state.xfer.size = size;
	srv->state.xfer.mode = mode;
	srv->state.xfer.block_size_log = block_size_log;
	srv->state.xfer.chunk_size = 0xffff;
	srv->block.number = 0xffff;

	block_count = block_count_get(srv);
	if (block_count > BT_MESH_BLOB_BLOCKS_MAX) {
		LOG_WRN("Invalid block count (%u)", block_count);
		status = BT_MESH_BLOB_ERR_INVALID_PARAM;
		cancel(srv);
		goto rsp;
	}

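	/* Mark every block as missing; bits are cleared as blocks complete in
	 * handle_chunk().
	 */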
	memset(srv->state.blocks, 0, sizeof(srv->state.blocks));
	for (int i = 0; i < block_count; i++) {
		atomic_set_bit(srv->state.blocks, i);
	}

	err = io_open(srv);
	if (err) {
		LOG_ERR("Couldn't open stream (err: %d)", err);
		status = BT_MESH_BLOB_ERR_INTERNAL;
		cancel(srv);
		goto rsp;
	}

	if (srv->cb && srv->cb->start) {
		err = srv->cb->start(srv, ctx, &srv->state.xfer);
		if (err) {
			LOG_ERR("Couldn't start transfer (err: %d)", err);
			status = BT_MESH_BLOB_ERR_INTERNAL;
			cancel(srv);
			goto rsp;
		}
	}

	reset_timer(srv);
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
	store_state(srv);
	status = BT_MESH_BLOB_SUCCESS;

rsp:
	xfer_status_rsp(srv, ctx, status);

	return 0;
}

static int handle_xfer_cancel(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			      struct net_buf_simple *buf)
{
	enum bt_mesh_blob_status status = BT_MESH_BLOB_SUCCESS;
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	uint64_t id;

	id = net_buf_simple_pull_le64(buf);

	LOG_DBG("%u", (uint32_t)id);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		goto rsp;
	}

	if (srv->state.xfer.id != id) {
		status = BT_MESH_BLOB_ERR_WRONG_BLOB_ID;
		goto rsp;
	}

	cancel(srv);

rsp:
	xfer_status_rsp(srv, ctx, status);

	return 0;
}

static int handle_block_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			    struct net_buf_simple *buf)
{
	enum bt_mesh_blob_status status;
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	switch (srv->phase) {
	case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK:
	case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK:
	case BT_MESH_BLOB_XFER_PHASE_COMPLETE:
		status = BT_MESH_BLOB_SUCCESS;
		break;
	case BT_MESH_BLOB_XFER_PHASE_SUSPENDED:
		status = BT_MESH_BLOB_ERR_INFO_UNAVAILABLE;
		break;
	case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START:
	case BT_MESH_BLOB_XFER_PHASE_INACTIVE:
		status = BT_MESH_BLOB_ERR_WRONG_PHASE;
		break;
	default:
		status = BT_MESH_BLOB_ERR_INTERNAL;
		break;
	}

	LOG_DBG("");

	block_status_rsp(srv, ctx, status);

	return 0;
}

static int handle_block_start(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			      struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	enum bt_mesh_blob_status status;
	uint16_t block_number, chunk_size;
	int err;

	block_number = net_buf_simple_pull_le16(buf);
	chunk_size = net_buf_simple_pull_le16(buf);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START ||
	    srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		status = BT_MESH_BLOB_ERR_WRONG_PHASE;
		goto rsp;
	}

	reset_timer(srv);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK) {
		if (block_number != srv->block.number ||
		    chunk_size != srv->state.xfer.chunk_size) {
			status = BT_MESH_BLOB_ERR_WRONG_PHASE;
		} else {
			status = BT_MESH_BLOB_SUCCESS;
		}

		goto rsp;
	}

	if (block_number >= block_count_get(srv)) {
		status = BT_MESH_BLOB_ERR_INVALID_BLOCK_NUM;
		goto rsp;
	}

	if (!chunk_size || chunk_size > max_chunk_size(srv) ||
	    (DIV_ROUND_UP((1 << srv->state.xfer.block_size_log), chunk_size) >
	     max_chunk_count(srv))) {
		LOG_WRN("Invalid chunk size: (chunk size: %u, max: %u, block log: %u, count: %u)",
			chunk_size, max_chunk_size(srv),
			srv->state.xfer.block_size_log,
			max_chunk_count(srv));
		status = BT_MESH_BLOB_ERR_INVALID_CHUNK_SIZE;
		goto rsp;
	}

	srv->block.size = blob_block_size(
		srv->state.xfer.size, srv->state.xfer.block_size_log, block_number);
	srv->block.number = block_number;
	srv->block.chunk_count = DIV_ROUND_UP(srv->block.size, chunk_size);
	srv->state.xfer.chunk_size = chunk_size;
	srv->block.offset = block_number * (1UL << srv->state.xfer.block_size_log);

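	/* A block that has already been received (or a completed transfer) is
	 * reported back with an empty missing-chunks mask so the client can
	 * skip it.
	 */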
	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_COMPLETE ||
	    !atomic_test_bit(srv->state.blocks, block_number)) {
		memset(srv->block.missing, 0, sizeof(srv->block.missing));
		status = BT_MESH_BLOB_SUCCESS;
		goto rsp;
	}

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_SUSPENDED && srv->cb &&
	    srv->cb->resume) {
		srv->cb->resume(srv);
	}

	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK);
	blob_chunk_missing_set_all(&srv->block);

	LOG_DBG("%u: (%u/%u)\n\tsize: %u\n\tchunk size: %u\n\tchunk count: %u",
		srv->block.number, srv->block.number + 1, block_count_get(srv),
		srv->block.size, chunk_size, srv->block.chunk_count);

	if (srv->io->block_start) {
		err = srv->io->block_start(srv->io, &srv->state.xfer,
					   &srv->block);
		if (err) {
			cancel(srv);
			status = BT_MESH_BLOB_ERR_INTERNAL;
			goto rsp;
		}
	}

	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		/* Wait for the client to send the first chunk */
		k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
	}

	status = BT_MESH_BLOB_SUCCESS;

rsp:
	block_status_rsp(srv, ctx, status);

	return 0;
}

static int handle_chunk(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	struct bt_mesh_blob_chunk chunk;
	size_t expected_size = 0;
	uint16_t idx;
	int err;

	idx = net_buf_simple_pull_le16(buf);
	chunk.size = buf->len;
	chunk.data = net_buf_simple_pull_mem(buf, chunk.size);
	chunk.offset = idx * srv->state.xfer.chunk_size;

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK ||
	    idx >= srv->block.chunk_count) {
		LOG_ERR("Invalid phase or index (%u %u)", srv->phase, idx);
		return -EINVAL;
	}

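	/* Only the last chunk of a block may be shorter than the chunk size. A
	 * zero remainder means the block divides evenly, so a full-sized chunk
	 * is expected even for the last index.
	 */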
	if (idx == srv->block.chunk_count - 1) {
		expected_size = srv->block.size % srv->state.xfer.chunk_size;
	}

	if (expected_size == 0) {
		expected_size = srv->state.xfer.chunk_size;
	}

	if (chunk.size != expected_size) {
		LOG_ERR("Unexpected size: %u != %u", expected_size, chunk.size);
		return -EINVAL;
	}

	LOG_DBG("%u/%u (%u bytes)", idx + 1, srv->block.chunk_count, chunk.size);

	reset_timer(srv);
	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
	}

	if (!blob_chunk_missing_get(srv->block.missing, idx)) {
		LOG_DBG("Duplicate chunk %u", idx);
		return -EALREADY;
	}

	err = srv->io->wr(srv->io, &srv->state.xfer, &srv->block, &chunk);
	if (err) {
		return err;
	}

	blob_chunk_missing_set(srv->block.missing, idx, false);
	if (missing_chunks(&srv->block)) {
		return 0;
	}

	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		block_report(srv);
	}

	if (srv->io->block_end) {
		srv->io->block_end(srv->io, &srv->state.xfer, &srv->block);
	}

	atomic_clear_bit(srv->state.blocks, srv->block.number);

	if (!all_blocks_received(srv)) {
		phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
		store_state(srv);
		return 0;
	}

	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		/* By spec (section 5.2.4), the BLOB Server stops sending BLOB Partial Block Report
		 * messages "If the current block is the last block, then the server determines that
		 * the client knows the transfer is complete. For example, a higher-layer model may
		 * indicate that the client considers the transfer complete."
		 *
		 * We don't have any way for a higher-layer model to indicate that the transfer is
		 * complete. Therefore we need to keep sending Partial Block Report messages until
		 * the client sends a BLOB Transfer Get message or the Block Timer expires.
		 */
		return 0;
	}

	end(srv);
	return 0;
}

static int handle_info_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			   struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	LOG_DBG("");

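	/* BLOB Information Status: min/max block size logs, max chunk count,
	 * max chunk size, max BLOB size, server MTU size and supported
	 * transfer modes.
	 */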
	BT_MESH_MODEL_BUF_DEFINE(rsp, BT_MESH_BLOB_OP_INFO_STATUS, 15);
	bt_mesh_model_msg_init(&rsp, BT_MESH_BLOB_OP_INFO_STATUS);
	net_buf_simple_add_u8(&rsp, BLOB_BLOCK_SIZE_LOG_MIN);
	net_buf_simple_add_u8(&rsp, BLOB_BLOCK_SIZE_LOG_MAX);
	net_buf_simple_add_le16(&rsp, CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX);
	net_buf_simple_add_le16(&rsp, BLOB_RX_CHUNK_SIZE);
	net_buf_simple_add_le32(&rsp, CONFIG_BT_MESH_BLOB_SIZE_MAX);
	net_buf_simple_add_le16(&rsp, MTU_SIZE_MAX);
	net_buf_simple_add_u8(&rsp, BT_MESH_BLOB_XFER_MODE_ALL);

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		ctx->send_ttl = srv->state.ttl;
	}

	(void)bt_mesh_model_send(srv->mod, ctx, &rsp, NULL, NULL);

	return 0;
}

const struct bt_mesh_model_op _bt_mesh_blob_srv_op[] = {
	{ BT_MESH_BLOB_OP_XFER_GET, BT_MESH_LEN_EXACT(0), handle_xfer_get },
	{ BT_MESH_BLOB_OP_XFER_START, BT_MESH_LEN_EXACT(16), handle_xfer_start },
	{ BT_MESH_BLOB_OP_XFER_CANCEL, BT_MESH_LEN_EXACT(8), handle_xfer_cancel },
	{ BT_MESH_BLOB_OP_BLOCK_GET, BT_MESH_LEN_EXACT(0), handle_block_get },
	{ BT_MESH_BLOB_OP_BLOCK_START, BT_MESH_LEN_EXACT(4), handle_block_start },
	{ BT_MESH_BLOB_OP_CHUNK, BT_MESH_LEN_MIN(2), handle_chunk },
	{ BT_MESH_BLOB_OP_INFO_GET, BT_MESH_LEN_EXACT(0), handle_info_get },
	BT_MESH_MODEL_OP_END,
};

static int blob_srv_init(const struct bt_mesh_model *mod)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	srv->mod = mod;
	srv->state.ttl = BT_MESH_TTL_DEFAULT;
	srv->block.number = 0xffff;
	srv->state.xfer.chunk_size = 0xffff;
	k_work_init_delayable(&srv->rx_timeout, timeout);
	k_work_init_delayable(&srv->pull.report, report_timeout);

	return 0;
}

static int blob_srv_settings_set(const struct bt_mesh_model *mod, const char *name,
				 size_t len_rd, settings_read_cb read_cb,
				 void *cb_arg)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	ssize_t len;

	if (len_rd < offsetof(struct bt_mesh_blob_srv_state, blocks)) {
		return -EINVAL;
	}

	len = read_cb(cb_arg, &srv->state, sizeof(srv->state));
	if (len < 0) {
		return len;
	}

	srv->block.number = 0xffff;
	srv->state.xfer.chunk_size = 0xffff;

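	/* A stored block count above the supported maximum indicates a corrupt
	 * or incompatible settings entry; leave the server inactive.
	 */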
	if (block_count_get(srv) > BT_MESH_BLOB_BLOCKS_MAX) {
		LOG_WRN("Loaded block count too high (%u, max: %u)",
			block_count_get(srv), BT_MESH_BLOB_BLOCKS_MAX);
		return 0;
	}

	/* If the device restarted before it handled `XFER_START`, restore the
	 * state to the BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START phase so that
	 * `XFER_START` can be accepted as it would have been before the reboot.
	 */
	if (srv->state.cli == BT_MESH_ADDR_UNASSIGNED) {
		LOG_DBG("Transfer (id=%llu) waiting for start", srv->state.xfer.id);
		phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START);
	} else {
		phase_set(srv, BT_MESH_BLOB_XFER_PHASE_SUSPENDED);

		LOG_DBG("Recovered transfer from 0x%04x (%llu)", srv->state.cli,
			srv->state.xfer.id);
	}

	return 0;
}

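/* A transfer restored from settings can only continue if the application
 * provides a recover callback that supplies a BLOB stream we can reopen;
 * otherwise the stored transfer is abandoned.
 */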
static int blob_srv_start(const struct bt_mesh_model *mod)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	int err = -ENOTSUP;

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		return 0;
	}

	if (srv->cb && srv->cb->recover) {
		srv->io = NULL;
		err = srv->cb->recover(srv, &srv->state.xfer, &srv->io);
		if (!err && srv->io) {
			err = io_open(srv);
		}
	}

	if (err || !srv->io) {
		LOG_WRN("Abandoning transfer.");
		phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
		srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
		srv->state.ttl = BT_MESH_TTL_DEFAULT;
		erase_state(srv);
	}

	return 0;
}

static void blob_srv_reset(const struct bt_mesh_model *mod)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
	srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
	k_work_cancel_delayable(&srv->rx_timeout);
	k_work_cancel_delayable(&srv->pull.report);
	erase_state(srv);
}

const struct bt_mesh_model_cb _bt_mesh_blob_srv_cb = {
	.init = blob_srv_init,
	.settings_set = blob_srv_settings_set,
	.start = blob_srv_start,
	.reset = blob_srv_reset,
};

int bt_mesh_blob_srv_recv(struct bt_mesh_blob_srv *srv, uint64_t id,
			  const struct bt_mesh_blob_io *io, uint8_t ttl,
			  uint16_t timeout_base)
{
	if (bt_mesh_blob_srv_is_busy(srv)) {
		return -EBUSY;
	}

	if (!io || !io->wr) {
		return -EINVAL;
	}

	srv->state.xfer.id = id;
	srv->state.ttl = ttl;
	srv->state.timeout_base = timeout_base;
	srv->io = io;
	srv->block.number = 0xffff;
	srv->state.xfer.chunk_size = 0xffff;
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START);
	store_state(srv);

	return 0;
}

int bt_mesh_blob_srv_cancel(struct bt_mesh_blob_srv *srv)
{
	if (!bt_mesh_blob_srv_is_busy(srv)) {
		return -EALREADY;
	}

	cancel(srv);

	return 0;
}

bool bt_mesh_blob_srv_is_busy(const struct bt_mesh_blob_srv *srv)
{
	return srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE &&
	       srv->phase != BT_MESH_BLOB_XFER_PHASE_SUSPENDED &&
	       srv->phase != BT_MESH_BLOB_XFER_PHASE_COMPLETE;
}

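/* Transfer progress in percent, based on the number of fully received blocks. */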
uint8_t bt_mesh_blob_srv_progress(const struct bt_mesh_blob_srv *srv)
{
	uint32_t total;
	uint32_t received;

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE ||
	    srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		return 0;
	}

	total = block_count_get(srv);

	received = 0;
	for (int i = 0; i < total; ++i) {
		if (!atomic_test_bit(srv->state.blocks, i)) {
			received++;
		}
	}

	return (100U * received) / total;
}