/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ipc/icmsg.h>

#include <string.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/spsc_pbuf.h>
#include <zephyr/init.h>

#define BOND_NOTIFY_REPEAT_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS)
#define SHMEM_ACCESS_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS)

enum rx_buffer_state {
	RX_BUFFER_STATE_RELEASED,
	RX_BUFFER_STATE_RELEASING,
	RX_BUFFER_STATE_HELD
};

enum tx_buffer_state {
	TX_BUFFER_STATE_UNUSED,
	TX_BUFFER_STATE_RESERVED
};

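/* Handshake pattern: written to the TX packet buffer in icmsg_open() and
 * expected as the first message from the remote side while the instance is
 * still bonding (ICMSG_STATE_BUSY).
 */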
static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
				0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34};

#if IS_ENABLED(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)
static K_THREAD_STACK_DEFINE(icmsg_stack, CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE);
static struct k_work_q icmsg_workq;
static struct k_work_q *const workq = &icmsg_workq;
#else
static struct k_work_q *const workq = &k_sys_work_q;
#endif

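/* Disable the RX mailbox channel, detach its callback and cancel any work
 * items that might still reference this instance.
 */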
static int mbox_deinit(const struct icmsg_config_t *conf,
		       struct icmsg_data_t *dev_data)
{
	int err;

	err = mbox_set_enabled(&conf->mbox_rx, 0);
	if (err != 0) {
		return err;
	}

	err = mbox_register_callback(&conf->mbox_rx, NULL, NULL);
	if (err != 0) {
		return err;
	}

	(void)k_work_cancel(&dev_data->mbox_work);
	(void)k_work_cancel_delayable(&dev_data->notify_work);

	return 0;
}

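/* Delayable work item: keep signalling the remote core over the TX mailbox
 * until the local instance leaves the bonding phase (ICMSG_STATE_READY),
 * rescheduling itself every BOND_NOTIFY_REPEAT_TO.
 */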
static void notify_process(struct k_work *item)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(item);
	struct icmsg_data_t *dev_data =
		CONTAINER_OF(dwork, struct icmsg_data_t, notify_work);

	(void)mbox_send(&dev_data->cfg->mbox_tx, NULL);

	atomic_t state = atomic_get(&dev_data->state);

	if (state != ICMSG_STATE_READY) {
		int ret;

		ret = k_work_reschedule_for_queue(workq, dwork, BOND_NOTIFY_REPEAT_TO);
		__ASSERT_NO_MSG(ret >= 0);
		(void)ret;
	}
}

static bool is_endpoint_ready(struct icmsg_data_t *dev_data)
{
	return atomic_get(&dev_data->state) == ICMSG_STATE_READY;
}

static bool is_tx_buffer_reserved(struct icmsg_data_t *dev_data)
{
	return atomic_get(&dev_data->tx_buffer_state) ==
		TX_BUFFER_STATE_RESERVED;
}

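/* Claim exclusive use of the TX packet buffer. With
 * CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC the shared memory is additionally
 * guarded by a mutex taken with the SHMEM_ACCESS_TO timeout; otherwise the
 * atomic state flag alone serializes writers.
 */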
static int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data)
{
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	int ret = k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO);

	if (ret < 0) {
		return ret;
	}
#endif

	bool was_unused = atomic_cas(&dev_data->tx_buffer_state,
				     TX_BUFFER_STATE_UNUSED, TX_BUFFER_STATE_RESERVED);

	return was_unused ? 0 : -EALREADY;
}

static int release_tx_buffer(struct icmsg_data_t *dev_data)
{
	bool was_reserved = atomic_cas(&dev_data->tx_buffer_state,
				       TX_BUFFER_STATE_RESERVED, TX_BUFFER_STATE_UNUSED);

	if (!was_reserved) {
		return -EALREADY;
	}

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	return k_mutex_unlock(&dev_data->tx_lock);
#else
	return 0;
#endif
}

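/* RX buffer state helpers. Without CONFIG_IPC_SERVICE_ICMSG_NOCOPY_RX the RX
 * buffer is always freed right after the received() callback returns, so it
 * is reported as free and never as held.
 */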
static bool is_rx_buffer_free(struct icmsg_data_t *dev_data)
{
#ifdef CONFIG_IPC_SERVICE_ICMSG_NOCOPY_RX
	return atomic_get(&dev_data->rx_buffer_state) == RX_BUFFER_STATE_RELEASED;
#else
	return true;
#endif
}

static bool is_rx_buffer_held(struct icmsg_data_t *dev_data)
{
#ifdef CONFIG_IPC_SERVICE_ICMSG_NOCOPY_RX
	return atomic_get(&dev_data->rx_buffer_state) == RX_BUFFER_STATE_HELD;
#else
	return false;
#endif
}

static bool is_rx_data_available(struct icmsg_data_t *dev_data)
{
	int len = spsc_pbuf_read(dev_data->rx_ib, NULL, 0);

	return len > 0;
}

static void submit_mbox_work(struct icmsg_data_t *dev_data)
{
	if (k_work_submit_to_queue(workq, &dev_data->mbox_work) < 0) {
		/* The mbox processing work is never canceled.
		 * The negative error code should never be seen.
		 */
		__ASSERT_NO_MSG(false);
	}
}

static void submit_work_if_buffer_free(struct icmsg_data_t *dev_data)
{
	if (!is_rx_buffer_free(dev_data)) {
		return;
	}

	submit_mbox_work(dev_data);
}

static void submit_work_if_buffer_free_and_data_available(
		struct icmsg_data_t *dev_data)
{
	if (!is_rx_buffer_free(dev_data)) {
		return;
	}
	if (!is_rx_data_available(dev_data)) {
		return;
	}

	submit_mbox_work(dev_data);
}

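/* Work item triggered by an incoming mailbox signal. While bonding
 * (ICMSG_STATE_BUSY) it validates the magic handshake and moves the instance
 * to ICMSG_STATE_READY; once ready it claims the next packet from the RX
 * packet buffer and hands it to the user's received() callback, freeing the
 * packet unless the user held it (nocopy RX).
 */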
static void mbox_callback_process(struct k_work *item)
{
	char *rx_buffer;
	struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work);

	atomic_t state = atomic_get(&dev_data->state);

	uint16_t len = spsc_pbuf_claim(dev_data->rx_ib, &rx_buffer);

	if (len == 0) {
		/* Unlikely, no data in buffer. */
		return;
	}

	if (state == ICMSG_STATE_READY) {
		if (dev_data->cb->received) {
#if CONFIG_IPC_SERVICE_ICMSG_NOCOPY_RX
			dev_data->rx_buffer = rx_buffer;
			dev_data->rx_len = len;
#endif

			dev_data->cb->received(rx_buffer, len,
					       dev_data->ctx);

			/* Release the Rx buffer here only if the user did not
			 * request to hold it.
			 */
			if (!is_rx_buffer_held(dev_data)) {
				spsc_pbuf_free(dev_data->rx_ib, len);

#if CONFIG_IPC_SERVICE_ICMSG_NOCOPY_RX
				dev_data->rx_buffer = NULL;
				dev_data->rx_len = 0;
#endif
			}
		}
	} else {
		__ASSERT_NO_MSG(state == ICMSG_STATE_BUSY);

		bool endpoint_invalid = (len != sizeof(magic) || memcmp(magic, rx_buffer, len));

		spsc_pbuf_free(dev_data->rx_ib, len);

		if (endpoint_invalid) {
			__ASSERT_NO_MSG(false);
			return;
		}

		if (dev_data->cb->bound) {
			dev_data->cb->bound(dev_data->ctx);
		}

		atomic_set(&dev_data->state, ICMSG_STATE_READY);
	}

	submit_work_if_buffer_free_and_data_available(dev_data);
}

static void mbox_callback(const struct device *instance, uint32_t channel,
			  void *user_data, struct mbox_msg *msg_data)
{
	struct icmsg_data_t *dev_data = user_data;
	submit_work_if_buffer_free(dev_data);
}

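/* Wire the RX mailbox channel to mbox_callback() and initialize the work
 * items used for deferred packet processing and bond notifications.
 */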
static int mbox_init(const struct icmsg_config_t *conf,
		     struct icmsg_data_t *dev_data)
{
	int err;

	k_work_init(&dev_data->mbox_work, mbox_callback_process);
	k_work_init_delayable(&dev_data->notify_work, notify_process);

	err = mbox_register_callback(&conf->mbox_rx, mbox_callback, dev_data);
	if (err != 0) {
		return err;
	}

	return mbox_set_enabled(&conf->mbox_rx, 1);
}

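/* Open the icmsg instance: initialize the TX/RX packet buffers in shared
 * memory, write the magic handshake to the TX side, enable the mailbox and
 * start the periodic bond notification. The bound() callback is invoked from
 * mbox_callback_process() once the remote side's magic is received.
 */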
int icmsg_open(const struct icmsg_config_t *conf,
	       struct icmsg_data_t *dev_data,
	       const struct ipc_service_cb *cb, void *ctx)
{
	__ASSERT_NO_MSG(conf->tx_shm_size > sizeof(struct spsc_pbuf));

	if (!atomic_cas(&dev_data->state, ICMSG_STATE_OFF, ICMSG_STATE_BUSY)) {
		/* Already opened. */
		return -EALREADY;
	}

	dev_data->cb = cb;
	dev_data->ctx = ctx;
	dev_data->cfg = conf;

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	k_mutex_init(&dev_data->tx_lock);
#endif

	dev_data->tx_ib = spsc_pbuf_init((void *)conf->tx_shm_addr,
					 conf->tx_shm_size,
					 SPSC_PBUF_CACHE);
	dev_data->rx_ib = (void *)conf->rx_shm_addr;

	int ret = spsc_pbuf_write(dev_data->tx_ib, magic, sizeof(magic));

	if (ret < 0) {
		__ASSERT_NO_MSG(false);
		return ret;
	}

	if (ret < (int)sizeof(magic)) {
		__ASSERT_NO_MSG(ret == sizeof(magic));
		return ret;
	}

	ret = mbox_init(conf, dev_data);
	if (ret) {
		return ret;
	}

	ret = k_work_schedule_for_queue(workq, &dev_data->notify_work, K_NO_WAIT);
	if (ret < 0) {
		return ret;
	}

	return 0;
}

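/* Close the instance: detach from the mailbox and mark the endpoint as off so
 * it can be opened again.
 */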
int icmsg_close(const struct icmsg_config_t *conf,
		struct icmsg_data_t *dev_data)
{
	int ret;

	ret = mbox_deinit(conf, dev_data);
	if (ret) {
		return ret;
	}

	atomic_set(&dev_data->state, ICMSG_STATE_OFF);

	return 0;
}

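/* Copying send path: reserve the TX buffer, copy the message into the shared
 * memory packet buffer and signal the remote core over the TX mailbox.
 * Returns the number of bytes sent or a negative errno.
 */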
int icmsg_send(const struct icmsg_config_t *conf,
	       struct icmsg_data_t *dev_data,
	       const void *msg, size_t len)
{
	int ret;
	int write_ret;
	int release_ret;
	int sent_bytes;

	if (!is_endpoint_ready(dev_data)) {
		return -EBUSY;
	}

	/* Empty message is not allowed */
	if (len == 0) {
		return -ENODATA;
	}

	ret = reserve_tx_buffer_if_unused(dev_data);
	if (ret < 0) {
		return -ENOBUFS;
	}

	write_ret = spsc_pbuf_write(dev_data->tx_ib, msg, len);
	release_ret = release_tx_buffer(dev_data);
	__ASSERT_NO_MSG(!release_ret);

	if (write_ret < 0) {
		return write_ret;
	} else if (write_ret < len) {
		return -EBADMSG;
	}
	sent_bytes = write_ret;

	__ASSERT_NO_MSG(conf->mbox_tx.dev != NULL);

	ret = mbox_send(&conf->mbox_tx, NULL);
	if (ret) {
		return ret;
	}

	return sent_bytes;
}

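/* Nocopy send path, step one: reserve the TX buffer and allocate space
 * directly in the shared memory packet buffer. Passing *size == 0 requests
 * the largest buffer currently available. If less space than requested can be
 * allocated, the allocation is dropped, *size is updated with the achievable
 * length and -ENOMEM is returned.
 */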
int icmsg_get_tx_buffer(const struct icmsg_config_t *conf,
			struct icmsg_data_t *dev_data,
			void **data, size_t *size)
{
	int ret;
	int release_ret;
	uint16_t requested_size;
	int allocated_len;
	char *allocated_buf;

	if (*size == 0) {
		/* Requested allocation of maximal size.
		 * Try to allocate the maximal buffer size from the spsc packet
		 * buffer, potentially after the wrapping marker.
		 */
		requested_size = SPSC_PBUF_MAX_LEN - 1;
	} else {
		requested_size = *size;
	}

	ret = reserve_tx_buffer_if_unused(dev_data);
	if (ret < 0) {
		return -ENOBUFS;
	}

	ret = spsc_pbuf_alloc(dev_data->tx_ib, requested_size, &allocated_buf);
	if (ret < 0) {
		release_ret = release_tx_buffer(dev_data);
		__ASSERT_NO_MSG(!release_ret);
		return ret;
	}
	allocated_len = ret;

	if (*size == 0) {
		/* Requested allocation of maximal size.
		 * Pass the buffer that was allocated.
		 */
		*size = allocated_len;
		*data = allocated_buf;
		return 0;
	}

	if (*size == allocated_len) {
		/* Allocated buffer is of requested size. */
		*data = allocated_buf;
		return 0;
	}

	/* A smaller buffer than requested was allocated.
	 * Silently stop using the allocated buffer, which is allowed by the
	 * SPSC API.
	 */
	release_tx_buffer(dev_data);
	*size = allocated_len;
	return -ENOMEM;
}

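/* Nocopy send path, abort: give up a buffer obtained from
 * icmsg_get_tx_buffer() without sending it.
 */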
int icmsg_drop_tx_buffer(const struct icmsg_config_t *conf,
			 struct icmsg_data_t *dev_data,
			 const void *data)
{
	/* Silently stop using the allocated buffer, which is allowed by the
	 * SPSC API.
	 */
	return release_tx_buffer(dev_data);
}

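/* Nocopy send path, step two: commit the data written into the buffer
 * obtained from icmsg_get_tx_buffer(), release the TX reservation and signal
 * the remote core. Returns the number of bytes sent or a negative errno.
 */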
int icmsg_send_nocopy(const struct icmsg_config_t *conf,
		      struct icmsg_data_t *dev_data,
		      const void *msg, size_t len)
{
	int ret;
	int sent_bytes;

	if (!is_endpoint_ready(dev_data)) {
		return -EBUSY;
	}

	/* Empty message is not allowed */
	if (len == 0) {
		return -ENODATA;
	}

	if (!is_tx_buffer_reserved(dev_data)) {
		return -ENXIO;
	}

	spsc_pbuf_commit(dev_data->tx_ib, len);
	sent_bytes = len;

	ret = release_tx_buffer(dev_data);
	__ASSERT_NO_MSG(!ret);

	__ASSERT_NO_MSG(conf->mbox_tx.dev != NULL);

	ret = mbox_send(&conf->mbox_tx, NULL);
	if (ret) {
		return ret;
	}

	return sent_bytes;
}

#ifdef CONFIG_IPC_SERVICE_ICMSG_NOCOPY_RX
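/* Nocopy RX: called from the received() callback to keep the currently
 * delivered RX buffer valid after the callback returns. The buffer is
 * expected to be handed back later with icmsg_release_rx_buffer().
 */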
int icmsg_hold_rx_buffer(const struct icmsg_config_t *conf,
			 struct icmsg_data_t *dev_data,
			 const void *data)
{
	bool was_released;

	if (!is_endpoint_ready(dev_data)) {
		return -EBUSY;
	}

	if (data != dev_data->rx_buffer) {
		return -EINVAL;
	}

	was_released = atomic_cas(&dev_data->rx_buffer_state,
				  RX_BUFFER_STATE_RELEASED, RX_BUFFER_STATE_HELD);
	if (!was_released) {
		return -EALREADY;
	}

	return 0;
}

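/* Nocopy RX: return a previously held RX buffer to the packet buffer and
 * resume processing of any packets that arrived in the meantime.
 */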
int icmsg_release_rx_buffer(const struct icmsg_config_t *conf,
			    struct icmsg_data_t *dev_data,
			    const void *data)
{
	bool was_held;

	if (!is_endpoint_ready(dev_data)) {
		return -EBUSY;
	}

	if (data != dev_data->rx_buffer) {
		return -EINVAL;
	}

	/* Do not schedule new packet processing until the buffer is released.
	 * Protect the buffer against being freed multiple times.
	 */
	was_held = atomic_cas(&dev_data->rx_buffer_state,
			      RX_BUFFER_STATE_HELD, RX_BUFFER_STATE_RELEASING);
	if (!was_held) {
		return -EALREADY;
	}

	spsc_pbuf_free(dev_data->rx_ib, dev_data->rx_len);

#if CONFIG_IPC_SERVICE_ICMSG_NOCOPY_RX
	dev_data->rx_buffer = NULL;
	dev_data->rx_len = 0;
#endif

	atomic_set(&dev_data->rx_buffer_state, RX_BUFFER_STATE_RELEASED);

	submit_work_if_buffer_free_and_data_available(dev_data);

	return 0;
}
#endif /* CONFIG_IPC_SERVICE_ICMSG_NOCOPY_RX */

#if IS_ENABLED(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)

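/* Start the dedicated icmsg work queue at boot when
 * CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE is set; otherwise the system
 * work queue handles all deferred processing.
 */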
static int work_q_init(void)
{
	struct k_work_queue_config cfg = {
		.name = "icmsg_workq",
	};

	k_work_queue_start(&icmsg_workq,
			   icmsg_stack,
			   K_KERNEL_STACK_SIZEOF(icmsg_stack),
			   CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY, &cfg);
	return 0;
}

SYS_INIT(work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#endif