/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <errno.h>
#include <stdlib.h>
#include <zephyr/sys/slist.h>

#include <zephyr/net/buf.h>
#include <zephyr/bluetooth/mesh.h>

#include "msg.h"
#include "access.h"
#include "net.h"

#define LOG_LEVEL CONFIG_BT_MESH_ACCESS_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_mesh_delayable_msg);

static void delayable_msg_handler(struct k_work *w);
static bool push_msg_from_delayable_msgs(void);

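/* Storage for queued message payloads, carved into fixed-size chunks that
 * are chained together per message through @ref node.
 */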
static struct delayable_msg_chunk {
	sys_snode_t node;
	uint8_t data[CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_CHUNK_SIZE];
} delayable_msg_chunks[CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_CHUNK_COUNT];

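/* Bookkeeping for one delayed message: its send parameters, the chunk chain
 * holding the payload, and the uptime (in milliseconds) at which it fires.
 */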
static struct delayable_msg_ctx {
	sys_snode_t node;
	sys_slist_t chunks;
	struct bt_mesh_msg_ctx ctx;
	uint16_t src_addr;
	const struct bt_mesh_send_cb *cb;
	void *cb_data;
	uint32_t fired_time;
	uint16_t len;
} delayable_msgs_ctx[CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_COUNT];

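/* Lists of busy and free contexts plus free chunks, and the single delayable
 * work item that is always scheduled for the earliest pending message.
 */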
static struct {
	sys_slist_t busy_ctx;
	sys_slist_t free_ctx;
	sys_slist_t free_chunks;
	struct k_work_delayable random_delay;
} access_delayable_msg = {.random_delay = Z_WORK_DELAYABLE_INITIALIZER(delayable_msg_handler)};

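/* Insert the context into the busy list, keeping the list sorted by
 * ascending fired_time so the head is always the next message to send.
 */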
static void put_ctx_to_busy_list(struct delayable_msg_ctx *ctx)
{
	struct delayable_msg_ctx *curr_ctx;
	sys_slist_t *list = &access_delayable_msg.busy_ctx;
	sys_snode_t *head = sys_slist_peek_head(list);
	sys_snode_t *curr = head;
	sys_snode_t *prev = curr;

	if (!head) {
		sys_slist_append(list, &ctx->node);
		return;
	}

	do {
		curr_ctx = CONTAINER_OF(curr, struct delayable_msg_ctx, node);
		if (ctx->fired_time < curr_ctx->fired_time) {
			if (curr == head) {
				sys_slist_prepend(list, &ctx->node);
			} else {
				sys_slist_insert(list, prev, &ctx->node);
			}
			return;
		}
		prev = curr;
	} while ((curr = sys_slist_peek_next(curr)));

	sys_slist_append(list, &ctx->node);
}

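/* Return the earliest pending message, or NULL if the busy list is empty. */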
static struct delayable_msg_ctx *peek_pending_msg(void)
{
	struct delayable_msg_ctx *pending_msg = NULL;
	sys_snode_t *node = sys_slist_peek_head(&access_delayable_msg.busy_ctx);

	if (node) {
		pending_msg = CONTAINER_OF(node, struct delayable_msg_ctx, node);
	}

	return pending_msg;
}

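/* Re-queue the context (if any) and reschedule the work item for the
 * earliest pending message. If that message's fire time has already
 * passed, the work is submitted with no wait.
 */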
static void reschedule_delayable_msg(struct delayable_msg_ctx *msg)
{
	uint32_t curr_time;
	k_timeout_t delay = K_NO_WAIT;
	struct delayable_msg_ctx *pending_msg;

	if (msg) {
		put_ctx_to_busy_list(msg);
	}

	pending_msg = peek_pending_msg();

	if (!pending_msg) {
		return;
	}

	curr_time = k_uptime_get_32();
	if (curr_time < pending_msg->fired_time) {
		delay = K_MSEC(pending_msg->fired_time - curr_time);
	}

	k_work_reschedule(&access_delayable_msg.random_delay, delay);
}

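/* Move up to @a number chunks from the free list to the message's chunk
 * list. Returns the number of chunks actually allocated.
 */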
static int allocate_delayable_msg_chunks(struct delayable_msg_ctx *msg, int number)
{
	sys_snode_t *node;

	for (int i = 0; i < number; i++) {
		node = sys_slist_get(&access_delayable_msg.free_chunks);
		if (!node) {
			LOG_WRN("Unable to allocate %d chunks, allocated %d", number, i);
			return i;
		}
		sys_slist_append(&msg->chunks, node);
	}

	return number;
}

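/* Return all of the message's chunks to the free list. */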
static void release_delayable_msg_chunks(struct delayable_msg_ctx *msg)
{
	sys_snode_t *node;

	while ((node = sys_slist_get(&msg->chunks))) {
		sys_slist_append(&access_delayable_msg.free_chunks, node);
	}
}

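/* Allocate a message context from the free list. If none is free, try to
 * push the earliest pending message out immediately to reclaim one.
 */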
static struct delayable_msg_ctx *allocate_delayable_msg_ctx(void)
{
	struct delayable_msg_ctx *msg;
	sys_snode_t *node;

	if (sys_slist_is_empty(&access_delayable_msg.free_ctx)) {
		LOG_WRN("Purging earliest pending delayable message.");
		if (!push_msg_from_delayable_msgs()) {
			return NULL;
		}
	}

	node = sys_slist_get(&access_delayable_msg.free_ctx);
	msg = CONTAINER_OF(node, struct delayable_msg_ctx, node);
	sys_slist_init(&msg->chunks);

	return msg;
}

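/* Remove the context from the busy list (if it is there) and return it to
 * the free list.
 */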
static void release_delayable_msg_ctx(struct delayable_msg_ctx *ctx)
{
	/* The context might not be on the busy list yet, e.g. when releasing
	 * a freshly allocated context on a chunk allocation failure. Append
	 * it to the free list unconditionally so it is never leaked.
	 */
	(void)sys_slist_find_and_remove(&access_delayable_msg.busy_ctx, &ctx->node);
	sys_slist_append(&access_delayable_msg.free_ctx, &ctx->node);
}

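/* Reassemble the earliest pending message from its chunks and hand it to the
 * access layer with the random delay disabled. Returns false if there was
 * nothing to send or the stack is temporarily out of buffers; in the latter
 * case the message stays queued.
 */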
static bool push_msg_from_delayable_msgs(void)
{
	sys_snode_t *node;
	struct delayable_msg_chunk *chunk;
	struct delayable_msg_ctx *msg = peek_pending_msg();
	uint16_t len;
	int err;

	if (!msg) {
		return false;
	}

	len = msg->len;

	NET_BUF_SIMPLE_DEFINE(buf, BT_MESH_TX_SDU_MAX);

	SYS_SLIST_FOR_EACH_NODE(&msg->chunks, node) {
		uint16_t tmp = MIN(CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_CHUNK_SIZE, len);

		chunk = CONTAINER_OF(node, struct delayable_msg_chunk, node);
		memcpy(net_buf_simple_add(&buf, tmp), chunk->data, tmp);
		len -= tmp;
	}

	/* Disable the random delay for this send so the message is not
	 * queued again by the access layer.
	 */
	msg->ctx.rnd_delay = false;
	err = bt_mesh_access_send(&msg->ctx, &buf, msg->src_addr, msg->cb, msg->cb_data);
	msg->ctx.rnd_delay = true;

	if (err == -EBUSY || err == -ENOBUFS) {
		/* Transient failure (e.g. out of buffers): keep the message queued. */
		return false;
	}

	release_delayable_msg_chunks(msg);
	release_delayable_msg_ctx(msg);

	if (err && msg->cb && msg->cb->start) {
		msg->cb->start(0, err, msg->cb_data);
	}

	return true;
}

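/* Work item handler: send the earliest pending message, or, if sending is
 * temporarily blocked, postpone that message by 10 ms and try again.
 */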
static void delayable_msg_handler(struct k_work *w)
{
	if (!push_msg_from_delayable_msgs()) {
		sys_snode_t *node = sys_slist_get(&access_delayable_msg.busy_ctx);
		struct delayable_msg_ctx *pending_msg;

		if (!node) {
			return;
		}

		pending_msg = CONTAINER_OF(node, struct delayable_msg_ctx, node);
		pending_msg->fired_time += 10;
		reschedule_delayable_msg(pending_msg);
	} else {
		reschedule_delayable_msg(NULL);
	}
}

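/* Queue an access message for delayed transmission. The payload is copied
 * into chunks and the message is scheduled after a random delay: 20 to 50 ms
 * for unicast destinations, 20 to 500 ms otherwise.
 */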
int bt_mesh_delayable_msg_manage(struct bt_mesh_msg_ctx *ctx, struct net_buf_simple *buf,
				 uint16_t src_addr, const struct bt_mesh_send_cb *cb, void *cb_data)
{
	sys_snode_t *node;
	struct delayable_msg_ctx *msg;
	uint16_t random_delay;
	int total_number = DIV_ROUND_UP(buf->size, CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_CHUNK_SIZE);
	int allocated_number = 0;
	uint16_t len = buf->len;

	if (atomic_test_bit(bt_mesh.flags, BT_MESH_SUSPENDED)) {
		LOG_WRN("Refusing to allocate message context while suspended");
		return -ENODEV;
	}

	if (total_number > CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_CHUNK_COUNT) {
		return -EINVAL;
	}

	msg = allocate_delayable_msg_ctx();
	if (!msg) {
		LOG_WRN("No available free delayable message context.");
		return -ENOMEM;
	}

	do {
		allocated_number +=
			allocate_delayable_msg_chunks(msg, total_number - allocated_number);

		if (total_number > allocated_number) {
			LOG_DBG("Unable to allocate %d chunks, allocated %d", total_number,
				allocated_number);
			/* Try to reclaim chunks by pushing out the earliest
			 * pending message.
			 */
			if (!push_msg_from_delayable_msgs()) {
				LOG_WRN("No available chunk memory.");
				release_delayable_msg_chunks(msg);
				release_delayable_msg_ctx(msg);
				return -ENOMEM;
			}
		}
	} while (total_number > allocated_number);

	SYS_SLIST_FOR_EACH_NODE(&msg->chunks, node) {
		uint16_t tmp = MIN(CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_CHUNK_SIZE, buf->len);

		struct delayable_msg_chunk *chunk =
			CONTAINER_OF(node, struct delayable_msg_chunk, node);

		memcpy(chunk->data, net_buf_simple_pull_mem(buf, tmp), tmp);
	}

	bt_rand(&random_delay, sizeof(uint16_t));
	random_delay = 20 + random_delay % (BT_MESH_ADDR_IS_UNICAST(ctx->recv_dst) ? 30 : 480);
	msg->fired_time = k_uptime_get_32() + random_delay;
	msg->ctx = *ctx;
	msg->src_addr = src_addr;
	msg->cb = cb;
	msg->cb_data = cb_data;
	msg->len = len;

	reschedule_delayable_msg(msg);

	return 0;
}

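/* Populate the free context and free chunk lists. */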
void bt_mesh_delayable_msg_init(void)
{
	sys_slist_init(&access_delayable_msg.busy_ctx);
	sys_slist_init(&access_delayable_msg.free_ctx);
	sys_slist_init(&access_delayable_msg.free_chunks);

	for (int i = 0; i < CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_COUNT; i++) {
		sys_slist_append(&access_delayable_msg.free_ctx, &delayable_msgs_ctx[i].node);
	}

	for (int i = 0; i < CONFIG_BT_MESH_ACCESS_DELAYABLE_MSG_CHUNK_COUNT; i++) {
		sys_slist_append(&access_delayable_msg.free_chunks, &delayable_msg_chunks[i].node);
	}
}

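/* Cancel the scheduled work and drop all pending messages, notifying their
 * owners through the start callback with -ENODEV.
 */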
void bt_mesh_delayable_msg_stop(void)
{
	sys_snode_t *node;
	struct delayable_msg_ctx *ctx;

	k_work_cancel_delayable(&access_delayable_msg.random_delay);

	while ((node = sys_slist_peek_head(&access_delayable_msg.busy_ctx))) {
		ctx = CONTAINER_OF(node, struct delayable_msg_ctx, node);
		release_delayable_msg_chunks(ctx);
		release_delayable_msg_ctx(ctx);

		if (ctx->cb && ctx->cb->start) {
			ctx->cb->start(0, -ENODEV, ctx->cb_data);
		}
	}
}