/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ipc/icmsg.h>

#include <string.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/ipc/pbuf.h>
#include <zephyr/init.h>

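/* Compile-time gates telling which unbound modes were allowed in Kconfig.
 * The unbound mode requested in icmsg_config_t must match one of them.
 */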
#define UNBOUND_DISABLED IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_DISABLED_ALLOWED)
#define UNBOUND_ENABLED IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_ENABLED_ALLOWED)
#define UNBOUND_DETECT IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_DETECT_ALLOWED)

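/* Each direction uses one 32-bit handshake word: bits 0..15 carry a session
 * id request and bits 16..31 a session id acknowledge, as encoded by the
 * macros below. For example (hypothetical values), a side requesting session
 * 0x0001 while acknowledging remote session 0x0002 stores 0x00020001.
 */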
/** Get local session id request from RX handshake word.
 */
#define LOCAL_SID_REQ_FROM_RX(rx_handshake) ((rx_handshake) & 0xFFFF)

/** Get remote session id request from TX handshake word.
 */
#define REMOTE_SID_REQ_FROM_TX(tx_handshake) ((tx_handshake) & 0xFFFF)

/** Get local session id acknowledge from TX handshake word.
 */
#define LOCAL_SID_ACK_FROM_TX(tx_handshake) ((tx_handshake) >> 16)

/** Create RX handshake word from local session id request
 * and remote session id acknowledge.
 */
#define MAKE_RX_HANDSHAKE(local_sid_req, remote_sid_ack) \
	((local_sid_req) | ((remote_sid_ack) << 16))

/** Create TX handshake word from remote session id request
 * and local session id acknowledge.
 */
#define MAKE_TX_HANDSHAKE(remote_sid_req, local_sid_ack) \
	((remote_sid_req) | ((local_sid_ack) << 16))

/** Special session id indicating that peers are disconnected.
 */
#define SID_DISCONNECTED 0

#define SHMEM_ACCESS_TO		K_MSEC(CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS)

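/* Magic bytes (ASCII "Em1l1K0rn3li4") sent as the first message when the
 * session id handshake is not used. Receiving them means that the remote
 * has initialized its TX buffer.
 */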
static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
				0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34};

#ifdef CONFIG_MULTITHREADING
#if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)
static K_THREAD_STACK_DEFINE(icmsg_stack, CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE);
static struct k_work_q icmsg_workq;
static struct k_work_q *const workq = &icmsg_workq;
#else /* defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE) */
static struct k_work_q *const workq = &k_sys_work_q;
#endif /* defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE) */
#endif /* def CONFIG_MULTITHREADING */

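/* Disable the RX channel, unregister its callback and cancel any pending
 * processing work.
 */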
static int mbox_deinit(const struct icmsg_config_t *conf,
		       struct icmsg_data_t *dev_data)
{
	int err;

	err = mbox_set_enabled_dt(&conf->mbox_rx, 0);
	if (err != 0) {
		return err;
	}

	err = mbox_register_callback_dt(&conf->mbox_rx, NULL, NULL);
	if (err != 0) {
		return err;
	}

#ifdef CONFIG_MULTITHREADING
	(void)k_work_cancel(&dev_data->mbox_work);
#endif

	return 0;
}

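/* The endpoint is ready once it reached one of the connected states. */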
static bool is_endpoint_ready(atomic_t state)
{
	return state >= MIN(ICMSG_STATE_CONNECTED_SID_DISABLED, ICMSG_STATE_CONNECTED_SID_ENABLED);
}

static inline int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data)
{
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	return k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO);
#else
	return 0;
#endif
}

static inline int release_tx_buffer(struct icmsg_data_t *dev_data)
{
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	return k_mutex_unlock(&dev_data->tx_lock);
#else
	return 0;
#endif
}

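/* Peek the length of the next incoming message. pbuf_read() with a NULL
 * destination only reports the number of available bytes without consuming
 * them.
 */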
static uint32_t data_available(struct icmsg_data_t *dev_data)
{
	return pbuf_read(dev_data->rx_pb, NULL, 0);
}

#ifdef CONFIG_MULTITHREADING
static void submit_mbox_work(struct icmsg_data_t *dev_data)
{
	if (k_work_submit_to_queue(workq, &dev_data->mbox_work) < 0) {
		/* The mbox processing work is never canceled.
		 * The negative error code should never be seen.
		 */
		__ASSERT_NO_MSG(false);
	}
}

#endif

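/* Initialize the TX packet buffer and announce readiness by writing the
 * magic sequence (session-unaware initialization).
 */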
static int initialize_tx_with_sid_disabled(struct icmsg_data_t *dev_data)
{
	int ret;

	ret = pbuf_tx_init(dev_data->tx_pb);

	if (ret < 0) {
		__ASSERT(false, "Incorrect Tx configuration");
		return ret;
	}

	ret = pbuf_write(dev_data->tx_pb, magic, sizeof(magic));

	if (ret < 0) {
		__ASSERT_NO_MSG(false);
		return ret;
	}

	if (ret < (int)sizeof(magic)) {
		__ASSERT_NO_MSG(ret == sizeof(magic));
		return -EINVAL;
	}

	return 0;
}

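/* RX processing state machine. Returns true when it should be re-run
 * because more data may be pending in the RX buffer.
 */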
static bool callback_process(struct icmsg_data_t *dev_data)
{
	int ret;
	uint8_t rx_buffer[CONFIG_PBUF_RX_READ_BUF_SIZE] __aligned(4);
	uint32_t len = 0;
	uint32_t len_available;
	bool rerun = false;
	bool notify_remote = false;
	atomic_t state = atomic_get(&dev_data->state);

	switch (state) {

#if UNBOUND_DETECT
	case ICMSG_STATE_INITIALIZING_SID_DETECT: {
		/* Initialization with detection of remote session awareness */
		volatile char *magic_buf;
		uint16_t magic_len;

		ret = pbuf_get_initial_buf(dev_data->rx_pb, &magic_buf, &magic_len);

		if (ret == 0 && magic_len == sizeof(magic) &&
		    memcmp((void *)magic_buf, magic, sizeof(magic)) == 0) {
			/* The remote initialized in session-unaware mode, so we fall back
			 * to the old way of initialization.
			 */
			ret = initialize_tx_with_sid_disabled(dev_data);
			if (ret < 0) {
				if (dev_data->cb->error) {
					dev_data->cb->error("Incorrect Tx configuration",
							    dev_data->ctx);
				}
				__ASSERT(false, "Incorrect Tx configuration");
				atomic_set(&dev_data->state, ICMSG_STATE_OFF);
				return false;
			}
			/* We got the magic data, so we can handle it later. */
			notify_remote = true;
			rerun = true;
			atomic_set(&dev_data->state, ICMSG_STATE_INITIALIZING_SID_DISABLED);
			break;
		}
		/* The remote did not initialize RX in session-unaware mode, so we can
		 * try session-aware initialization.
		 */
		__fallthrough;
	}
#endif /* UNBOUND_DETECT */

#if UNBOUND_ENABLED || UNBOUND_DETECT
	case ICMSG_STATE_INITIALIZING_SID_ENABLED: {
		uint32_t tx_handshake = pbuf_handshake_read(dev_data->tx_pb);
		uint32_t remote_sid_req = REMOTE_SID_REQ_FROM_TX(tx_handshake);
		uint32_t local_sid_ack = LOCAL_SID_ACK_FROM_TX(tx_handshake);

		if (remote_sid_req != dev_data->remote_sid && remote_sid_req != SID_DISCONNECTED) {
			/* We can now initialize TX, since we know that the remote, during
			 * receiving, first reads the FIFO indexes and later checks whether
			 * the session has changed before using the indexes to receive the
			 * message. Additionally, we know that after a session request change
			 * the remote will not try to receive more data.
			 */
			ret = pbuf_tx_init(dev_data->tx_pb);
			if (ret < 0) {
				if (dev_data->cb->error) {
					dev_data->cb->error("Incorrect Tx configuration",
						dev_data->ctx);
				}
				__ASSERT(false, "Incorrect Tx configuration");
				atomic_set(&dev_data->state, ICMSG_STATE_DISCONNECTED);
				return false;
			}
			/* Acknowledge the remote session. */
			dev_data->remote_sid = remote_sid_req;
			pbuf_handshake_write(dev_data->rx_pb,
				MAKE_RX_HANDSHAKE(dev_data->local_sid, dev_data->remote_sid));
			notify_remote = true;
		}

		if (local_sid_ack == dev_data->local_sid &&
		    dev_data->remote_sid != SID_DISCONNECTED) {
			/* We have sent an acknowledgement to the remote and received an
			 * acknowledgement from it, so we are ready.
			 */
			atomic_set(&dev_data->state, ICMSG_STATE_CONNECTED_SID_ENABLED);

			if (dev_data->cb->bound) {
				dev_data->cb->bound(dev_data->ctx);
			}
			/* Re-run this handler, because the remote may have already sent
			 * something.
			 */
			rerun = true;
			notify_remote = true;
		}

		break;
	}
#endif /* UNBOUND_ENABLED || UNBOUND_DETECT */

#if UNBOUND_ENABLED || UNBOUND_DETECT
	case ICMSG_STATE_CONNECTED_SID_ENABLED:
#endif
#if UNBOUND_DISABLED || UNBOUND_DETECT
	case ICMSG_STATE_CONNECTED_SID_DISABLED:
#endif
#if UNBOUND_DISABLED
	case ICMSG_STATE_INITIALIZING_SID_DISABLED:
#endif

		len_available = data_available(dev_data);

		if (len_available > 0 && sizeof(rx_buffer) >= len_available) {
			len = pbuf_read(dev_data->rx_pb, rx_buffer, sizeof(rx_buffer));
		}

		if (state == ICMSG_STATE_CONNECTED_SID_ENABLED &&
		    (UNBOUND_ENABLED || UNBOUND_DETECT)) {
			/* The incoming message is valid only if the remote session is as
			 * expected, so we need to check the remote session now.
			 */
			uint32_t remote_sid_req = REMOTE_SID_REQ_FROM_TX(
				pbuf_handshake_read(dev_data->tx_pb));

			if (remote_sid_req != dev_data->remote_sid) {
				atomic_set(&dev_data->state, ICMSG_STATE_DISCONNECTED);
				if (dev_data->cb->unbound) {
					dev_data->cb->unbound(dev_data->ctx);
				}
				return false;
			}
		}

		if (len_available == 0) {
			/* Unlikely, no data in buffer. */
			return false;
		}

		__ASSERT_NO_MSG(len_available <= sizeof(rx_buffer));

		if (sizeof(rx_buffer) < len_available) {
			return false;
		}

		if (state != ICMSG_STATE_INITIALIZING_SID_DISABLED || !UNBOUND_DISABLED) {
			if (dev_data->cb->received) {
				dev_data->cb->received(rx_buffer, len, dev_data->ctx);
			}
		} else {
			/* Allow a magic number longer than sizeof(magic) for future
			 * protocol versions.
			 */
			bool endpoint_invalid = (len < sizeof(magic) ||
						memcmp(magic, rx_buffer, sizeof(magic)));

			if (endpoint_invalid) {
				__ASSERT_NO_MSG(false);
				return false;
			}

			if (dev_data->cb->bound) {
				dev_data->cb->bound(dev_data->ctx);
			}

			atomic_set(&dev_data->state, ICMSG_STATE_CONNECTED_SID_DISABLED);
			notify_remote = true;
		}

		rerun = (data_available(dev_data) > 0);
		break;

	case ICMSG_STATE_OFF:
	case ICMSG_STATE_DISCONNECTED:
	default:
		/* Nothing to do in this state. */
		return false;
	}

	if (notify_remote) {
		(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
	}

	return rerun;
}

#ifdef CONFIG_MULTITHREADING
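/* Work queue handler that processes the RX buffer and resubmits itself
 * while more data may be pending.
 */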
static void workq_callback_process(struct k_work *item)
{
	bool rerun;
	struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work);

	rerun = callback_process(dev_data);
	if (rerun) {
		submit_mbox_work(dev_data);
	}
}
#endif /* def CONFIG_MULTITHREADING */

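/* MBOX RX callback. It may be invoked from ISR context, so with
 * multithreading enabled the actual processing is deferred to the work
 * queue.
 */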
static void mbox_callback(const struct device *instance, uint32_t channel,
			  void *user_data, struct mbox_msg *msg_data)
{
	bool rerun;
	struct icmsg_data_t *dev_data = user_data;

#ifdef CONFIG_MULTITHREADING
	ARG_UNUSED(rerun);
	submit_mbox_work(dev_data);
#else
	do {
		rerun = callback_process(dev_data);
	} while (rerun);
#endif
}

static int mbox_init(const struct icmsg_config_t *conf,
		     struct icmsg_data_t *dev_data)
{
	int err;

#ifdef CONFIG_MULTITHREADING
	k_work_init(&dev_data->mbox_work, workq_callback_process);
#endif

	err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, dev_data);
	if (err != 0) {
		return err;
	}

	return mbox_set_enabled_dt(&conf->mbox_rx, 1);
}

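/* Typical call flow (a sketch only; the conf, dev_data and callback
 * instances are hypothetical and normally come from the backend glue code):
 *
 *	static const struct ipc_service_cb cbs = {
 *		.bound = bound_handler,
 *		.received = received_handler,
 *	};
 *
 *	ret = icmsg_open(&conf, &dev_data, &cbs, NULL);
 *	... wait until cbs.bound() is called ...
 *	ret = icmsg_send(&conf, &dev_data, msg, msg_len);
 *	ret = icmsg_close(&conf, &dev_data);
 */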
int icmsg_open(const struct icmsg_config_t *conf,
	       struct icmsg_data_t *dev_data,
	       const struct ipc_service_cb *cb, void *ctx)
{
	int ret;
	enum icmsg_state old_state;

	__ASSERT(conf->unbound_mode != ICMSG_UNBOUND_MODE_DISABLE || UNBOUND_DISABLED,
		 "Unbound mode \"disabled\" is forbidden in Kconfig.");
	__ASSERT(conf->unbound_mode != ICMSG_UNBOUND_MODE_ENABLE || UNBOUND_ENABLED,
		 "Unbound mode \"enabled\" is forbidden in Kconfig.");
	__ASSERT(conf->unbound_mode != ICMSG_UNBOUND_MODE_DETECT || UNBOUND_DETECT,
		 "Unbound mode \"detect\" is forbidden in Kconfig.");

	if (conf->unbound_mode == ICMSG_UNBOUND_MODE_DISABLE ||
	    !(UNBOUND_ENABLED || UNBOUND_DETECT)) {
		if (!atomic_cas(&dev_data->state, ICMSG_STATE_OFF,
				ICMSG_STATE_INITIALIZING_SID_DISABLED)) {
			/* Already opened. */
			return -EALREADY;
		}
		old_state = ICMSG_STATE_OFF;
	} else {
		/* Unbound mode has the same values as ICMSG_STATE_INITIALIZING_* */
		old_state = atomic_set(&dev_data->state, conf->unbound_mode);
	}

	dev_data->cb = cb;
	dev_data->ctx = ctx;
	dev_data->cfg = conf;

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	k_mutex_init(&dev_data->tx_lock);
#endif

#if defined(CONFIG_ARCH_POSIX)
	pbuf_native_addr_remap(dev_data->tx_pb);
#endif

	ret = pbuf_rx_init(dev_data->rx_pb);

	if (ret < 0) {
		__ASSERT(false, "Incorrect Rx configuration");
		goto cleanup_and_exit;
	}

	if (conf->unbound_mode != ICMSG_UNBOUND_MODE_DISABLE &&
	    (UNBOUND_ENABLED || UNBOUND_DETECT)) {
		/* Increment the local session id, avoiding the forbidden values. */
		uint32_t local_sid_ack =
			LOCAL_SID_ACK_FROM_TX(pbuf_handshake_read(dev_data->tx_pb));
		dev_data->local_sid =
			LOCAL_SID_REQ_FROM_RX(pbuf_handshake_read(dev_data->rx_pb));
		dev_data->remote_sid = SID_DISCONNECTED;
		do {
			dev_data->local_sid = (dev_data->local_sid + 1) & 0xFFFF;
		} while (dev_data->local_sid == local_sid_ack ||
			 dev_data->local_sid == SID_DISCONNECTED);
		/* Write local session id request without remote acknowledge */
		pbuf_handshake_write(dev_data->rx_pb,
			MAKE_RX_HANDSHAKE(dev_data->local_sid, SID_DISCONNECTED));
	} else if (UNBOUND_DISABLED) {
		ret = initialize_tx_with_sid_disabled(dev_data);
		if (ret < 0) {
			goto cleanup_and_exit;
		}
	}

	if (old_state == ICMSG_STATE_OFF && (UNBOUND_ENABLED || UNBOUND_DETECT)) {
		/* Initialize the mbox only on a first-time open (not a re-open
		 * after unbound).
		 */
		ret = mbox_init(conf, dev_data);
		if (ret) {
			goto cleanup_and_exit;
		}
	}

	/* Send a notification to the remote. It may not be delivered if the
	 * remote is not initialized yet, but when the remote finishes its
	 * initialization, we will get a notification from it. The notification
	 * is sent again from the callback to make sure that it arrives.
	 */
	ret = mbox_send_dt(&conf->mbox_tx, NULL);

	if (ret < 0) {
		__ASSERT(false, "Cannot send mbox notification");
		goto cleanup_and_exit;
	}

	return ret;

cleanup_and_exit:
	atomic_set(&dev_data->state, ICMSG_STATE_OFF);
	return ret;
}

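/* Close the instance: in session-aware modes, announce disconnection to the
 * remote first, then disable the mbox if the instance was open.
 */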
int icmsg_close(const struct icmsg_config_t *conf,
		struct icmsg_data_t *dev_data)
{
	int ret = 0;
	enum icmsg_state old_state;

	if (conf->unbound_mode != ICMSG_UNBOUND_MODE_DISABLE &&
	    (UNBOUND_ENABLED || UNBOUND_DETECT)) {
		pbuf_handshake_write(dev_data->rx_pb,
			MAKE_RX_HANDSHAKE(SID_DISCONNECTED, SID_DISCONNECTED));
	}

	(void)mbox_send_dt(&conf->mbox_tx, NULL);

	old_state = atomic_set(&dev_data->state, ICMSG_STATE_OFF);

	if (old_state != ICMSG_STATE_OFF) {
		ret = mbox_deinit(conf, dev_data);
	}

	return ret;
}

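/* Send a message. On success, the number of written bytes is returned and
 * the remote is signaled over the TX mbox channel. If the remote has
 * already disconnected, the message is silently dropped and len is
 * returned.
 */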
int icmsg_send(const struct icmsg_config_t *conf,
	       struct icmsg_data_t *dev_data,
	       const void *msg, size_t len)
{
	int ret;
	int write_ret;
	int release_ret;
	int sent_bytes;
	uint32_t state = atomic_get(&dev_data->state);

	if (!is_endpoint_ready(state)) {
		/* If the instance was disconnected on the remote side, some threads
		 * may not know it yet and may still try to send messages.
		 */
		return (state == ICMSG_STATE_DISCONNECTED) ? len : -EBUSY;
	}

	/* Empty message is not allowed */
	if (len == 0) {
		return -ENODATA;
	}

	ret = reserve_tx_buffer_if_unused(dev_data);
	if (ret < 0) {
		return -ENOBUFS;
	}

	write_ret = pbuf_write(dev_data->tx_pb, msg, len);

	release_ret = release_tx_buffer(dev_data);
	__ASSERT_NO_MSG(!release_ret);

	if (write_ret < 0) {
		return write_ret;
	} else if (write_ret < len) {
		return -EBADMSG;
	}
	sent_bytes = write_ret;

	__ASSERT_NO_MSG(conf->mbox_tx.dev != NULL);

	ret = mbox_send_dt(&conf->mbox_tx, NULL);
	if (ret) {
		return ret;
	}

	return sent_bytes;
}

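/* Optional dedicated work queue for RX processing, started during the
 * POST_KERNEL initialization phase.
 */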
#if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)

static int work_q_init(void)
{
	struct k_work_queue_config cfg = {
		.name = "icmsg_workq",
	};

	k_work_queue_start(&icmsg_workq,
			    icmsg_stack,
			    K_KERNEL_STACK_SIZEOF(icmsg_stack),
			    CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY, &cfg);
	return 0;
}

SYS_INIT(work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#endif