/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <string.h>
#include <zephyr/bluetooth/mesh.h>
#include <common/bt_str.h>
#include "net.h"
#include "access.h"
#include "transport.h"
#include "lpn.h"
#include "blob.h"

#define LOG_LEVEL CONFIG_BT_MESH_MODEL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_mesh_blob_srv);

#define CHUNK_SIZE_MAX BLOB_CHUNK_SIZE_MAX(BT_MESH_RX_SDU_MAX)
#define MTU_SIZE_MAX (BT_MESH_RX_SDU_MAX - BT_MESH_MIC_SHORT)

/* The Receive BLOB Timeout Timer */
#define SERVER_TIMEOUT_SECS(srv) (10 * (1 + (srv)->state.timeout_base))
/* The initial timer value used by an instance of the Pull BLOB State machine - T_BPI */
#define REPORT_TIMER_TIMEOUT K_SECONDS(CONFIG_BT_MESH_BLOB_REPORT_TIMEOUT)

BUILD_ASSERT(BLOB_BLOCK_SIZE_LOG_MIN <= BLOB_BLOCK_SIZE_LOG_MAX,
	     "There must be at least one power-of-two block size between the "
	     "minimum and maximum block sizes.");

BUILD_ASSERT((BLOB_XFER_STATUS_MSG_MAXLEN + BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_XFER_STATUS) +
	      BT_MESH_MIC_SHORT) <= BT_MESH_TX_SDU_MAX,
	     "The BLOB Transfer Status message does not fit into the maximum outgoing SDU size.");

BUILD_ASSERT((BLOB_BLOCK_REPORT_STATUS_MSG_MAXLEN +
	      BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_BLOCK_REPORT) + BT_MESH_MIC_SHORT)
	     <= BT_MESH_TX_SDU_MAX,
	     "The BLOB Partial Block Report message does not fit into the maximum outgoing SDU "
	     "size.");

BUILD_ASSERT((BLOB_BLOCK_STATUS_MSG_MAXLEN + BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_BLOCK_STATUS) +
	      BT_MESH_MIC_SHORT) <= BT_MESH_TX_SDU_MAX,
	     "The BLOB Block Status message does not fit into the maximum outgoing SDU size.");

static void cancel(struct bt_mesh_blob_srv *srv);
static void suspend(struct bt_mesh_blob_srv *srv);

static inline uint32_t block_count_get(const struct bt_mesh_blob_srv *srv)
{
	return DIV_ROUND_UP(srv->state.xfer.size,
			    (1U << srv->state.xfer.block_size_log));
}

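/* Largest chunk payload the server can accept: the negotiated MTU minus the
 * opcode and the 2-octet chunk number field of the BLOB Chunk Transfer
 * message, capped by the compile-time maximum.
 */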
static inline uint32_t max_chunk_size(const struct bt_mesh_blob_srv *srv)
{
	return MIN((srv->state.mtu_size - 2 -
		    BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_CHUNK)),
		   CHUNK_SIZE_MAX);
}

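/* Upper bound on the number of chunks per block. The 8 * (mtu_size - 6) term
 * presumably keeps the missing-chunks bit field of the BLOB Block Status
 * message within the negotiated MTU; the Kconfig option bounds the local
 * bookkeeping.
 */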
static inline uint32_t max_chunk_count(const struct bt_mesh_blob_srv *srv)
{
	return MIN(8 * (srv->state.mtu_size - 6),
		   CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX);
}

static inline uint32_t missing_chunks(const struct bt_mesh_blob_block *block)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < ARRAY_SIZE(block->missing); ++i) {
		count += POPCOUNT(block->missing[i]);
	}

	return count;
}

static void store_state(const struct bt_mesh_blob_srv *srv)
{
	if (!IS_ENABLED(CONFIG_BT_SETTINGS)) {
		return;
	}

	/* Convert bit count to byte count: */
	uint32_t block_len = DIV_ROUND_UP(block_count_get(srv), 8);

	bt_mesh_model_data_store(
		srv->mod, false, NULL, &srv->state,
		offsetof(struct bt_mesh_blob_srv_state, blocks) + block_len);
}

static void erase_state(struct bt_mesh_blob_srv *srv)
{
	if (!IS_ENABLED(CONFIG_BT_SETTINGS)) {
		return;
	}

	bt_mesh_model_data_store(srv->mod, false, NULL, NULL, 0);
}

static int io_open(struct bt_mesh_blob_srv *srv)
{
	if (!srv->io->open) {
		return 0;
	}

	return srv->io->open(srv->io, &srv->state.xfer, BT_MESH_BLOB_WRITE);
}

static void io_close(struct bt_mesh_blob_srv *srv)
{
	if (!srv->io->close) {
		return;
	}

	srv->io->close(srv->io, &srv->state.xfer);
}

static void reset_timer(struct bt_mesh_blob_srv *srv)
{
	uint32_t timeout_secs =
		srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL ?
			MAX(SERVER_TIMEOUT_SECS(srv),
			    CONFIG_BT_MESH_BLOB_REPORT_TIMEOUT + 1) :
			SERVER_TIMEOUT_SECS(srv);

	k_work_reschedule(&srv->rx_timeout, K_SECONDS(timeout_secs));
}

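/* Append a chunk number to a Partial Block Report or an encoded Block Status
 * using the UTF-8 style variable length encoding: one octet for values below
 * 0x80, two octets below 0x800, three octets otherwise.
 */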
static void buf_chunk_index_add(struct net_buf_simple *buf, uint16_t chunk)
{
	/* utf-8 encoded: */
	if (chunk < 0x80) {
		net_buf_simple_add_u8(buf, chunk);
	} else if (chunk < 0x800) {
		net_buf_simple_add_u8(buf, 0xc0 | chunk >> 6);
		net_buf_simple_add_u8(buf, 0x80 | (chunk & BIT_MASK(6)));
	} else {
		net_buf_simple_add_u8(buf, 0xe0 | chunk >> 12);
		net_buf_simple_add_u8(buf, 0x80 | ((chunk >> 6) & BIT_MASK(6)));
		net_buf_simple_add_u8(buf, 0x80 | (chunk & BIT_MASK(6)));
	}
}

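/* Number of missing chunks to request in a single Partial Block Report,
 * limited by Kconfig and, when running as a Low Power node, by how many chunk
 * segments the Friend queue can hold.
 */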
static int pull_req_max(const struct bt_mesh_blob_srv *srv)
{
	int count = CONFIG_BT_MESH_BLOB_SRV_PULL_REQ_COUNT;

#if defined(CONFIG_BT_MESH_LOW_POWER)
	/* No point in requesting more than the friend node can hold: */
	if (bt_mesh_lpn_established()) {
		uint32_t segments_per_chunk = DIV_ROUND_UP(
			BLOB_CHUNK_SDU_LEN(srv->state.xfer.chunk_size),
			BT_MESH_APP_SEG_SDU_MAX);

		count = MIN(CONFIG_BT_MESH_BLOB_SRV_PULL_REQ_COUNT,
			    bt_mesh.lpn.queue_size / segments_per_chunk);
	}
#endif

	return MIN(count, missing_chunks(&srv->block));
}

static void report_sent(int err, void *cb_data)
{
	struct bt_mesh_blob_srv *srv = cb_data;

	LOG_DBG("");

	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) && bt_mesh_lpn_established()) {
		bt_mesh_lpn_poll();
	}

	if (k_work_delayable_is_pending(&srv->rx_timeout)) {
		k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
	}
}

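/* Send a BLOB Partial Block Report listing up to pull_req_max() missing chunk
 * numbers. When running as a Low Power node, the Friend is polled once the
 * report has been sent.
 */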
static void block_report(struct bt_mesh_blob_srv *srv)
{
	static const struct bt_mesh_send_cb report_cb = { .end = report_sent };
	struct bt_mesh_msg_ctx ctx = {
		.app_idx = srv->state.app_idx,
		.send_ttl = srv->state.ttl,
		.addr = srv->state.cli,
	};
	int count;
	int i;

	LOG_DBG("rx BLOB Timeout Timer: %i", k_work_delayable_is_pending(&srv->rx_timeout));

	BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_BLOCK_REPORT,
				 BLOB_BLOCK_REPORT_STATUS_MSG_MAXLEN);
	bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_BLOCK_REPORT);

	count = pull_req_max(srv);

	for (i = 0; i < srv->block.chunk_count && count; ++i) {
		if (blob_chunk_missing_get(srv->block.missing, i)) {
			buf_chunk_index_add(&buf, i);
			count--;
		}
	}

	(void)bt_mesh_model_send(srv->mod, &ctx, &buf, &report_cb, srv);
}

static void phase_set(struct bt_mesh_blob_srv *srv,
		      enum bt_mesh_blob_xfer_phase phase)
{
	srv->phase = phase;
	LOG_DBG("Phase: %u", phase);
}

static void cancel(struct bt_mesh_blob_srv *srv)
{
	/* TODO: Could this state be preserved instead somehow? Wiping the
	 * entire transfer state is a bit overkill
	 */
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
	srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
	srv->state.ttl = BT_MESH_TTL_DEFAULT;
	srv->block.number = 0xffff;
	srv->state.xfer.chunk_size = 0xffff;
	k_work_cancel_delayable(&srv->rx_timeout);
	k_work_cancel_delayable(&srv->pull.report);
	io_close(srv);
	erase_state(srv);

	if (srv->cb && srv->cb->end) {
		srv->cb->end(srv, srv->state.xfer.id, false);
	}
}

static void suspend(struct bt_mesh_blob_srv *srv)
{
	LOG_DBG("");
	k_work_cancel_delayable(&srv->rx_timeout);
	k_work_cancel_delayable(&srv->pull.report);
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_SUSPENDED);
	if (srv->cb && srv->cb->suspended) {
		srv->cb->suspended(srv);
	}
}

static void resume(struct bt_mesh_blob_srv *srv)
{
	LOG_DBG("Resuming");

	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
	reset_timer(srv);
}

static void end(struct bt_mesh_blob_srv *srv)
{
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_COMPLETE);
	k_work_cancel_delayable(&srv->rx_timeout);
	k_work_cancel_delayable(&srv->pull.report);
	io_close(srv);
	erase_state(srv);

	if (srv->cb && srv->cb->end) {
		srv->cb->end(srv, srv->state.xfer.id, true);
	}
}

static bool all_blocks_received(struct bt_mesh_blob_srv *srv)
{
	for (int i = 0; i < ARRAY_SIZE(srv->state.blocks); ++i) {
		if (srv->state.blocks[i]) {
			return false;
		}
	}

	return true;
}

static bool pull_mode_xfer_complete(struct bt_mesh_blob_srv *srv)
{
	return srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL &&
	       srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK &&
	       all_blocks_received(srv);
}

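/* Receive BLOB Timeout Timer expiry: a transfer that never got past the
 * Waiting for Transfer Start phase is cancelled, a completed Pull mode
 * transfer is ended, and anything else is suspended until the client resumes
 * it.
 */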
static void timeout(struct k_work *work)
{
	struct bt_mesh_blob_srv *srv =
		CONTAINER_OF(work, struct bt_mesh_blob_srv, rx_timeout.work);

	LOG_DBG("");

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		cancel(srv);
	} else if (pull_mode_xfer_complete(srv)) {
		end(srv);
	} else {
		suspend(srv);
	}
}

static void report_timeout(struct k_work *work)
{
	struct bt_mesh_blob_srv *srv =
		CONTAINER_OF(work, struct bt_mesh_blob_srv, pull.report.work);

	LOG_DBG("");

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK &&
	    srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK) {
		return;
	}

	block_report(srv);
}

/*******************************************************************************
 * Message handling
 ******************************************************************************/

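/* Send a BLOB Transfer Status message. The fields are added progressively:
 * status, mode and phase are always present, the BLOB ID is added once a
 * transfer is expected, and the size, block size, MTU and block bitmap are
 * added once the transfer parameters are known.
 */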
static void xfer_status_rsp(struct bt_mesh_blob_srv *srv,
			    struct bt_mesh_msg_ctx *ctx,
			    enum bt_mesh_blob_status status)
{
	BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_XFER_STATUS,
				 BLOB_XFER_STATUS_MSG_MAXLEN);
	bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_XFER_STATUS);

	net_buf_simple_add_u8(&buf, ((status & BIT_MASK(4)) |
				     (srv->state.xfer.mode << 6)));
	net_buf_simple_add_u8(&buf, srv->phase);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		goto send;
	}

	net_buf_simple_add_le64(&buf, srv->state.xfer.id);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		goto send;
	}

	net_buf_simple_add_le32(&buf, srv->state.xfer.size);
	net_buf_simple_add_u8(&buf, srv->state.xfer.block_size_log);
	net_buf_simple_add_le16(&buf, srv->state.mtu_size);
	net_buf_simple_add_mem(&buf, srv->state.blocks,
			       DIV_ROUND_UP(block_count_get(srv), 8));

send:
	ctx->send_ttl = srv->state.ttl;
	(void)bt_mesh_model_send(srv->mod, ctx, &buf, NULL, NULL);
}

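/* Send a BLOB Block Status message. In Pull mode the missing chunks are
 * reported in the encoded (variable length) format; otherwise the format is
 * All, None or Some, depending on how much of the block has been received.
 */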
static void block_status_rsp(struct bt_mesh_blob_srv *srv,
			     struct bt_mesh_msg_ctx *ctx,
			     enum bt_mesh_blob_status status)
{
	enum bt_mesh_blob_chunks_missing format;
	uint32_t missing;
	int i;

	BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_BLOCK_STATUS,
				 BLOB_BLOCK_STATUS_MSG_MAXLEN);
	bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_BLOCK_STATUS);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE ||
	    srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		missing = srv->block.chunk_count;
	} else if (srv->phase == BT_MESH_BLOB_XFER_PHASE_COMPLETE) {
		missing = 0U;
	} else {
		missing = missing_chunks(&srv->block);
	}

	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		format = BT_MESH_BLOB_CHUNKS_MISSING_ENCODED;
	} else if (missing == srv->block.chunk_count) {
		format = BT_MESH_BLOB_CHUNKS_MISSING_ALL;
	} else if (missing == 0) {
		format = BT_MESH_BLOB_CHUNKS_MISSING_NONE;
	} else {
		format = BT_MESH_BLOB_CHUNKS_MISSING_SOME;
	}

	LOG_DBG("Status: %u, missing: %u/%u", status, missing, srv->block.chunk_count);

	net_buf_simple_add_u8(&buf, (status & BIT_MASK(4)) | (format << 6));
	net_buf_simple_add_le16(&buf, srv->block.number);
	net_buf_simple_add_le16(&buf, srv->state.xfer.chunk_size);

	if (format == BT_MESH_BLOB_CHUNKS_MISSING_SOME) {
		net_buf_simple_add_mem(&buf, srv->block.missing,
				       DIV_ROUND_UP(srv->block.chunk_count,
						    8));

		LOG_DBG("Bits: %s",
			bt_hex(srv->block.missing,
			       DIV_ROUND_UP(srv->block.chunk_count, 8)));

	} else if (format == BT_MESH_BLOB_CHUNKS_MISSING_ENCODED) {
		int count = pull_req_max(srv);

		for (i = 0; (i < srv->block.chunk_count) && count; ++i) {
			if (blob_chunk_missing_get(srv->block.missing, i)) {
				LOG_DBG("Missing %u", i);
				buf_chunk_index_add(&buf, i);
				count--;
			}
		}
	}

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		ctx->send_ttl = srv->state.ttl;
	}

	(void)bt_mesh_model_send(srv->mod, ctx, &buf, NULL, NULL);
}

static int handle_xfer_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			   struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	LOG_DBG("");

	if (pull_mode_xfer_complete(srv)) {
		/* The client requested the transfer status. If we are in Pull
		 * mode and all blocks have been received, change the transfer
		 * phase to Complete here so that the client receives the
		 * correct state.
		 */
		end(srv);
	}

	xfer_status_rsp(srv, ctx, BT_MESH_BLOB_SUCCESS);

	return 0;
}

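/* BLOB Transfer Start handler. A start is only accepted after the application
 * has prepared the server for the given BLOB ID (see bt_mesh_blob_srv_recv()).
 * A repeated start for the ongoing transfer with matching parameters is simply
 * acknowledged, and a start while suspended resumes the transfer.
 */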
static int handle_xfer_start(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			     struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	enum bt_mesh_blob_status status;
	enum bt_mesh_blob_xfer_mode mode;
	uint64_t id;
	size_t size;
	uint8_t block_size_log;
	uint32_t block_count;
	uint16_t mtu_size;
	int err;

	mode = (net_buf_simple_pull_u8(buf) >> 6);
	id = net_buf_simple_pull_le64(buf);
	size = net_buf_simple_pull_le32(buf);
	block_size_log = net_buf_simple_pull_u8(buf);
	mtu_size = net_buf_simple_pull_le16(buf);

	LOG_DBG("\n\tsize: %u block size: %u\n\tmtu_size: %u\n\tmode: %s",
		size, (1U << block_size_log), mtu_size,
		mode == BT_MESH_BLOB_XFER_MODE_PUSH ? "push" : "pull");

	if (mode != BT_MESH_BLOB_XFER_MODE_PULL &&
	    mode != BT_MESH_BLOB_XFER_MODE_PUSH) {
		LOG_WRN("Invalid mode 0x%x", mode);
		return -EINVAL;
	}

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		status = BT_MESH_BLOB_ERR_WRONG_PHASE;
		LOG_WRN("Uninitialized");
		goto rsp;
	}

	if (srv->state.xfer.id != id) {
		status = BT_MESH_BLOB_ERR_WRONG_BLOB_ID;
		/* bt_hex uses a static array for the resulting hex string, so
		 * it cannot be called twice within the same logging statement.
		 */
		LOG_WRN("Invalid ID: %s", bt_hex(&id, sizeof(uint64_t)));
		LOG_WRN("Expected ID: %s", bt_hex(&srv->state.xfer.id, sizeof(uint64_t)));
		goto rsp;
	}

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		if (srv->state.xfer.mode != mode ||
		    srv->state.xfer.size != size ||
		    srv->state.xfer.block_size_log != block_size_log ||
		    srv->state.mtu_size > mtu_size) {
			status = BT_MESH_BLOB_ERR_WRONG_PHASE;
			LOG_WRN("Busy");
			goto rsp;
		}

		if (srv->phase == BT_MESH_BLOB_XFER_PHASE_SUSPENDED) {
			resume(srv);
			store_state(srv);
		} else {
			LOG_DBG("Duplicate");
		}

		status = BT_MESH_BLOB_SUCCESS;
		goto rsp;
	}

	if (size > CONFIG_BT_MESH_BLOB_SIZE_MAX) {
		LOG_WRN("Too large");
		status = BT_MESH_BLOB_ERR_BLOB_TOO_LARGE;
		goto rsp;
	}

	if (((1U << block_size_log) < CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MIN) ||
	    ((1U << block_size_log) > CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX)) {
		LOG_WRN("Invalid block size: %u", block_size_log);
		status = BT_MESH_BLOB_ERR_INVALID_BLOCK_SIZE;
		goto rsp;
	}

	srv->state.cli = ctx->addr;
	srv->state.app_idx = ctx->app_idx;
	srv->state.mtu_size = MIN(mtu_size, MTU_SIZE_MAX);
	srv->state.xfer.id = id;
	srv->state.xfer.size = size;
	srv->state.xfer.mode = mode;
	srv->state.xfer.block_size_log = block_size_log;
	srv->state.xfer.chunk_size = 0xffff;
	srv->block.number = 0xffff;

	block_count = block_count_get(srv);
	if (block_count > BT_MESH_BLOB_BLOCKS_MAX) {
		LOG_WRN("Invalid block count (%u)", block_count);
		status = BT_MESH_BLOB_ERR_INVALID_PARAM;
		cancel(srv);
		goto rsp;
	}

	memset(srv->state.blocks, 0, sizeof(srv->state.blocks));
	for (int i = 0; i < block_count; i++) {
		atomic_set_bit(srv->state.blocks, i);
	}

	err = io_open(srv);
	if (err) {
		LOG_ERR("Couldn't open stream (err: %d)", err);
		status = BT_MESH_BLOB_ERR_INTERNAL;
		cancel(srv);
		goto rsp;
	}

	if (srv->cb && srv->cb->start) {
		err = srv->cb->start(srv, ctx, &srv->state.xfer);
		if (err) {
			LOG_ERR("Couldn't start transfer (err: %d)", err);
			status = BT_MESH_BLOB_ERR_INTERNAL;
			cancel(srv);
			goto rsp;
		}
	}

	reset_timer(srv);
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
	store_state(srv);
	status = BT_MESH_BLOB_SUCCESS;

rsp:
	xfer_status_rsp(srv, ctx, status);

	return 0;
}

static int handle_xfer_cancel(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			      struct net_buf_simple *buf)
{
	enum bt_mesh_blob_status status = BT_MESH_BLOB_SUCCESS;
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	uint64_t id;

	id = net_buf_simple_pull_le64(buf);

	LOG_DBG("%u", (uint32_t)id);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		goto rsp;
	}

	if (srv->state.xfer.id != id) {
		status = BT_MESH_BLOB_ERR_WRONG_BLOB_ID;
		goto rsp;
	}

	cancel(srv);

rsp:
	xfer_status_rsp(srv, ctx, status);

	return 0;
}

static int handle_block_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			    struct net_buf_simple *buf)
{
	enum bt_mesh_blob_status status;
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	switch (srv->phase) {
	case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK:
	case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK:
	case BT_MESH_BLOB_XFER_PHASE_COMPLETE:
		status = BT_MESH_BLOB_SUCCESS;
		break;
	case BT_MESH_BLOB_XFER_PHASE_SUSPENDED:
		status = BT_MESH_BLOB_ERR_INFO_UNAVAILABLE;
		break;
	case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START:
	case BT_MESH_BLOB_XFER_PHASE_INACTIVE:
		status = BT_MESH_BLOB_ERR_WRONG_PHASE;
		break;
	default:
		status = BT_MESH_BLOB_ERR_INTERNAL;
		break;
	}

	LOG_DBG("");

	block_status_rsp(srv, ctx, status);

	return 0;
}

static int handle_block_start(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			      struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	enum bt_mesh_blob_status status;
	uint16_t block_number, chunk_size;
	int err;

	block_number = net_buf_simple_pull_le16(buf);
	chunk_size = net_buf_simple_pull_le16(buf);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START ||
	    srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		status = BT_MESH_BLOB_ERR_WRONG_PHASE;
		goto rsp;
	}

	reset_timer(srv);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK) {
		if (block_number != srv->block.number ||
		    chunk_size != srv->state.xfer.chunk_size) {
			status = BT_MESH_BLOB_ERR_WRONG_PHASE;
		} else {
			status = BT_MESH_BLOB_SUCCESS;
		}

		goto rsp;
	}

	if (block_number >= block_count_get(srv)) {
		status = BT_MESH_BLOB_ERR_INVALID_BLOCK_NUM;
		goto rsp;
	}

	if (!chunk_size || chunk_size > max_chunk_size(srv) ||
	    (DIV_ROUND_UP((1 << srv->state.xfer.block_size_log), chunk_size) >
	     max_chunk_count(srv))) {
		LOG_WRN("Invalid chunk size: (chunk size: %u, max: %u, block log: %u, count: %u)",
			chunk_size, max_chunk_size(srv),
			srv->state.xfer.block_size_log,
			max_chunk_count(srv));
		status = BT_MESH_BLOB_ERR_INVALID_CHUNK_SIZE;
		goto rsp;
	}

	srv->block.size = blob_block_size(
		srv->state.xfer.size, srv->state.xfer.block_size_log, block_number);
	srv->block.number = block_number;
	srv->block.chunk_count = DIV_ROUND_UP(srv->block.size, chunk_size);
	srv->state.xfer.chunk_size = chunk_size;
	srv->block.offset = block_number * (1UL << srv->state.xfer.block_size_log);

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_COMPLETE ||
	    !atomic_test_bit(srv->state.blocks, block_number)) {
		memset(srv->block.missing, 0, sizeof(srv->block.missing));
		status = BT_MESH_BLOB_SUCCESS;
		goto rsp;
	}

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_SUSPENDED && srv->cb &&
	    srv->cb->resume) {
		srv->cb->resume(srv);
	}

	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK);
	blob_chunk_missing_set_all(&srv->block);

	LOG_DBG("%u: (%u/%u)\n\tsize: %u\n\tchunk size: %u\n\tchunk count: %u",
		srv->block.number, srv->block.number + 1, block_count_get(srv),
		srv->block.size, chunk_size, srv->block.chunk_count);

	if (srv->io->block_start) {
		err = srv->io->block_start(srv->io, &srv->state.xfer,
					   &srv->block);
		if (err) {
			cancel(srv);
			status = BT_MESH_BLOB_ERR_INTERNAL;
			goto rsp;
		}
	}

	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		/* Wait for the client to send the first chunk */
		k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
	}

	status = BT_MESH_BLOB_SUCCESS;

rsp:
	block_status_rsp(srv, ctx, status);

	return 0;
}

static int handle_chunk(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	struct bt_mesh_blob_chunk chunk;
	size_t expected_size = 0;
	uint16_t idx;
	int err;

	idx = net_buf_simple_pull_le16(buf);
	chunk.size = buf->len;
	chunk.data = net_buf_simple_pull_mem(buf, chunk.size);
	chunk.offset = idx * srv->state.xfer.chunk_size;

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK ||
	    idx >= srv->block.chunk_count) {
		LOG_ERR("Invalid phase or index (%u %u)", srv->phase,
			idx);
		return -EINVAL;
	}

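	/* Every chunk is chunk_size long, except that the last chunk of the
	 * block is truncated to the remaining block size. If the block size is
	 * an exact multiple of the chunk size, the remainder below is zero and
	 * the last chunk is full sized as well.
	 */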
	if (idx == srv->block.chunk_count - 1) {
		expected_size = srv->block.size % srv->state.xfer.chunk_size;
	}

	if (expected_size == 0) {
		expected_size = srv->state.xfer.chunk_size;
	}

	if (chunk.size != expected_size) {
		LOG_ERR("Unexpected size: %u != %u", expected_size, chunk.size);
		return -EINVAL;
	}

	LOG_DBG("%u/%u (%u bytes)", idx + 1, srv->block.chunk_count,
		chunk.size);

	reset_timer(srv);
	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
	}

	if (!blob_chunk_missing_get(srv->block.missing, idx)) {
		LOG_DBG("Duplicate chunk %u", idx);
		return -EALREADY;
	}

	err = srv->io->wr(srv->io, &srv->state.xfer, &srv->block, &chunk);
	if (err) {
		return err;
	}

	blob_chunk_missing_set(srv->block.missing, idx, false);
	if (missing_chunks(&srv->block)) {
		return 0;
	}

	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		block_report(srv);
	}

	if (srv->io->block_end) {
		srv->io->block_end(srv->io, &srv->state.xfer, &srv->block);
	}

	atomic_clear_bit(srv->state.blocks, srv->block.number);

	if (!all_blocks_received(srv)) {
		phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
		store_state(srv);
		return 0;
	}

	if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
		/* By spec (section 5.2.4), the BLOB Server stops sending BLOB Partial Block Report
		 * messages "If the current block is the last block, then the server determines that
		 * the client knows the transfer is complete. For example, a higher-layer model may
		 * indicate that the client considers the transfer complete."
		 *
		 * We don't have any way for a higher-layer model to indicate that the transfer is
		 * complete. Therefore we need to keep sending Partial Block Report messages until
		 * the client sends a BLOB Transfer Get message or the Block Timer expires.
		 */
		return 0;
	}

	end(srv);
	return 0;
}

static int handle_info_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
			   struct net_buf_simple *buf)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	LOG_DBG("");

	BT_MESH_MODEL_BUF_DEFINE(rsp, BT_MESH_BLOB_OP_INFO_STATUS, 15);
	bt_mesh_model_msg_init(&rsp, BT_MESH_BLOB_OP_INFO_STATUS);
	net_buf_simple_add_u8(&rsp, BLOB_BLOCK_SIZE_LOG_MIN);
	net_buf_simple_add_u8(&rsp, BLOB_BLOCK_SIZE_LOG_MAX);
	net_buf_simple_add_le16(&rsp, CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX);
	net_buf_simple_add_le16(&rsp, CHUNK_SIZE_MAX);
	net_buf_simple_add_le32(&rsp, CONFIG_BT_MESH_BLOB_SIZE_MAX);
	net_buf_simple_add_le16(&rsp, MTU_SIZE_MAX);
	net_buf_simple_add_u8(&rsp, BT_MESH_BLOB_XFER_MODE_ALL);

	if (srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		ctx->send_ttl = srv->state.ttl;
	}

	(void)bt_mesh_model_send(srv->mod, ctx, &rsp, NULL, NULL);

	return 0;
}

const struct bt_mesh_model_op _bt_mesh_blob_srv_op[] = {
	{ BT_MESH_BLOB_OP_XFER_GET, BT_MESH_LEN_EXACT(0), handle_xfer_get },
	{ BT_MESH_BLOB_OP_XFER_START, BT_MESH_LEN_EXACT(16), handle_xfer_start },
	{ BT_MESH_BLOB_OP_XFER_CANCEL, BT_MESH_LEN_EXACT(8), handle_xfer_cancel },
	{ BT_MESH_BLOB_OP_BLOCK_GET, BT_MESH_LEN_EXACT(0), handle_block_get },
	{ BT_MESH_BLOB_OP_BLOCK_START, BT_MESH_LEN_EXACT(4), handle_block_start },
	{ BT_MESH_BLOB_OP_CHUNK, BT_MESH_LEN_MIN(2), handle_chunk },
	{ BT_MESH_BLOB_OP_INFO_GET, BT_MESH_LEN_EXACT(0), handle_info_get },
	BT_MESH_MODEL_OP_END,
};

static int blob_srv_init(const struct bt_mesh_model *mod)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	srv->mod = mod;
	srv->state.ttl = BT_MESH_TTL_DEFAULT;
	srv->block.number = 0xffff;
	srv->state.xfer.chunk_size = 0xffff;
	k_work_init_delayable(&srv->rx_timeout, timeout);
	k_work_init_delayable(&srv->pull.report, report_timeout);

	return 0;
}

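/* Restore transfer state from persistent storage. store_state() only persists
 * the fixed fields plus the used portion of the block bitmap, so the stored
 * record may be shorter than sizeof(srv->state).
 */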
static int blob_srv_settings_set(const struct bt_mesh_model *mod, const char *name,
				 size_t len_rd, settings_read_cb read_cb,
				 void *cb_arg)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	ssize_t len;

	if (len_rd < offsetof(struct bt_mesh_blob_srv_state, blocks)) {
		return -EINVAL;
	}

	len = read_cb(cb_arg, &srv->state, sizeof(srv->state));
	if (len < 0) {
		return len;
	}

	srv->block.number = 0xffff;
	srv->state.xfer.chunk_size = 0xffff;

	if (block_count_get(srv) > BT_MESH_BLOB_BLOCKS_MAX) {
		LOG_WRN("Loaded block count too high (%u, max: %u)",
			block_count_get(srv), BT_MESH_BLOB_BLOCKS_MAX);
		return 0;
	}

	/* If the device restarted before it handled `XFER_START`, restore the state to the
	 * BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START phase, so that `XFER_START` can be accepted
	 * as it would have been before the reboot.
	 */
	if (srv->state.cli == BT_MESH_ADDR_UNASSIGNED) {
		LOG_DBG("Transfer (id=%llu) waiting for start", srv->state.xfer.id);
		phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START);
	} else {
		phase_set(srv, BT_MESH_BLOB_XFER_PHASE_SUSPENDED);

		LOG_DBG("Recovered transfer from 0x%04x (%llu)", srv->state.cli,
			srv->state.xfer.id);
	}

	return 0;
}

static int blob_srv_start(const struct bt_mesh_model *mod)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;
	int err = -ENOTSUP;

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
		return 0;
	}

	if (srv->cb && srv->cb->recover) {
		srv->io = NULL;
		err = srv->cb->recover(srv, &srv->state.xfer, &srv->io);
		if (!err && srv->io) {
			err = io_open(srv);
		}
	}

	if (err || !srv->io) {
		LOG_WRN("Abandoning transfer.");
		phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
		srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
		srv->state.ttl = BT_MESH_TTL_DEFAULT;
		erase_state(srv);
	}

	return 0;
}

static void blob_srv_reset(const struct bt_mesh_model *mod)
{
	struct bt_mesh_blob_srv *srv = mod->rt->user_data;

	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
	srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
	k_work_cancel_delayable(&srv->rx_timeout);
	k_work_cancel_delayable(&srv->pull.report);
	erase_state(srv);
}

const struct bt_mesh_model_cb _bt_mesh_blob_srv_cb = {
	.init = blob_srv_init,
	.settings_set = blob_srv_settings_set,
	.start = blob_srv_start,
	.reset = blob_srv_reset,
};

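/* Prepare the server to receive the BLOB with the given ID. The transfer
 * itself is driven by the BLOB Transfer Client: until a matching BLOB Transfer
 * Start arrives, the server remains in the Waiting for Transfer Start phase.
 */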
int bt_mesh_blob_srv_recv(struct bt_mesh_blob_srv *srv, uint64_t id,
			  const struct bt_mesh_blob_io *io, uint8_t ttl,
			  uint16_t timeout_base)
{
	if (bt_mesh_blob_srv_is_busy(srv)) {
		return -EBUSY;
	}

	if (!io || !io->wr) {
		return -EINVAL;
	}

	srv->state.xfer.id = id;
	srv->state.ttl = ttl;
	srv->state.timeout_base = timeout_base;
	srv->io = io;
	srv->block.number = 0xffff;
	srv->state.xfer.chunk_size = 0xffff;
	phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START);
	store_state(srv);

	return 0;
}

int bt_mesh_blob_srv_cancel(struct bt_mesh_blob_srv *srv)
{
	if (!bt_mesh_blob_srv_is_busy(srv)) {
		return -EALREADY;
	}

	cancel(srv);

	return 0;
}

bool bt_mesh_blob_srv_is_busy(const struct bt_mesh_blob_srv *srv)
{
	return srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE &&
	       srv->phase != BT_MESH_BLOB_XFER_PHASE_SUSPENDED &&
	       srv->phase != BT_MESH_BLOB_XFER_PHASE_COMPLETE;
}

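/* Transfer progress in percent, i.e. the share of blocks that have been fully
 * received (cleared in the block bitmap).
 */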
uint8_t bt_mesh_blob_srv_progress(const struct bt_mesh_blob_srv *srv)
{
	uint32_t total;
	uint32_t received;

	if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE ||
	    srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
		return 0;
	}

	total = block_count_get(srv);

	received = 0;
	for (int i = 0; i < total; ++i) {
		if (!atomic_test_bit(srv->state.blocks, i)) {
			received++;
		}
	}

	return (100U * received) / total;
}