/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ipc/icmsg.h>

#include <string.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/ipc/pbuf.h>
#include <zephyr/init.h>

#define BOND_NOTIFY_REPEAT_TO	K_MSEC(CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS)
#define SHMEM_ACCESS_TO		K_MSEC(CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS)

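/* Magic bytes (ASCII "Em1l1K0rn3li4") used as the bonding handshake: they are
 * written to the TX buffer in icmsg_open() and expected as the first message
 * from the remote side before the endpoint is marked ready.
 */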
static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
				0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34};

#ifdef CONFIG_MULTITHREADING
#if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)
static K_THREAD_STACK_DEFINE(icmsg_stack, CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE);
static struct k_work_q icmsg_workq;
static struct k_work_q *const workq = &icmsg_workq;
#else
static struct k_work_q *const workq = &k_sys_work_q;
#endif
static void mbox_callback_process(struct k_work *item);
#else
static void mbox_callback_process(struct icmsg_data_t *dev_data);
#endif

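/* Disable the RX channel, deregister its callback and cancel any pending
 * work items before the session is torn down.
 */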
static int mbox_deinit(const struct icmsg_config_t *conf,
		       struct icmsg_data_t *dev_data)
{
	int err;

	err = mbox_set_enabled_dt(&conf->mbox_rx, 0);
	if (err != 0) {
		return err;
	}

	err = mbox_register_callback_dt(&conf->mbox_rx, NULL, NULL);
	if (err != 0) {
		return err;
	}

#ifdef CONFIG_MULTITHREADING
	(void)k_work_cancel(&dev_data->mbox_work);
	(void)k_work_cancel_delayable(&dev_data->notify_work);
#endif

	return 0;
}

static bool is_endpoint_ready(struct icmsg_data_t *dev_data)
{
	return atomic_get(&dev_data->state) == ICMSG_STATE_READY;
}

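/* Notify the remote side that the local session is initialized and keep
 * repeating the notification until bonding completes.
 */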
#ifdef CONFIG_MULTITHREADING
static void notify_process(struct k_work *item)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(item);
	struct icmsg_data_t *dev_data =
		CONTAINER_OF(dwork, struct icmsg_data_t, notify_work);

	(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);

	atomic_t state = atomic_get(&dev_data->state);

	if (state != ICMSG_STATE_READY) {
		int ret;

		ret = k_work_reschedule_for_queue(workq, dwork, BOND_NOTIFY_REPEAT_TO);
		__ASSERT_NO_MSG(ret >= 0);
		(void)ret;
	}
}
#else
static void notify_process(struct icmsg_data_t *dev_data)
{
	(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
#if defined(CONFIG_SYS_CLOCK_EXISTS)
	int64_t start = k_uptime_get();
#endif

	while (false == is_endpoint_ready(dev_data)) {
		mbox_callback_process(dev_data);

#if defined(CONFIG_SYS_CLOCK_EXISTS)
		if ((k_uptime_get() - start) > CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS) {
#endif
			(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
#if defined(CONFIG_SYS_CLOCK_EXISTS)
			start = k_uptime_get();
		}
#endif
	}
}
#endif

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
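/* Serialize access to the shared TX buffer with a mutex, waiting up to
 * SHMEM_ACCESS_TO for the buffer to become available.
 */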
static int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data)
{
	int ret = k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO);

	if (ret < 0) {
		return ret;
	}

	return 0;
}

static int release_tx_buffer(struct icmsg_data_t *dev_data)
{
	return k_mutex_unlock(&dev_data->tx_lock);
}
#endif

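/* Query the length of the next pending RX message; reading with a NULL
 * destination does not consume the message.
 */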
static uint32_t data_available(struct icmsg_data_t *dev_data)
{
	return pbuf_read(dev_data->rx_pb, NULL, 0);
}

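/* Helpers that hand incoming-data processing to the work queue, or run it
 * inline when multithreading is disabled.
 */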
#ifdef CONFIG_MULTITHREADING
static void submit_mbox_work(struct icmsg_data_t *dev_data)
{
	if (k_work_submit_to_queue(workq, &dev_data->mbox_work) < 0) {
		/* The mbox processing work is never canceled.
		 * The negative error code should never be seen.
		 */
		__ASSERT_NO_MSG(false);
	}
}

static void submit_work_if_buffer_free(struct icmsg_data_t *dev_data)
{
	submit_mbox_work(dev_data);
}

static void submit_work_if_buffer_free_and_data_available(
		struct icmsg_data_t *dev_data)
{
	if (!data_available(dev_data)) {
		return;
	}

	submit_mbox_work(dev_data);
}
#else
static void submit_if_buffer_free(struct icmsg_data_t *dev_data)
{
	mbox_callback_process(dev_data);
}

static void submit_if_buffer_free_and_data_available(
		struct icmsg_data_t *dev_data)
{
	if (!data_available(dev_data)) {
		return;
	}

	mbox_callback_process(dev_data);
}
#endif

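/* Process a single incoming message: during bonding, validate the remote
 * magic and invoke the bound() callback; once ready, deliver data through
 * the received() callback. Resubmits itself while more data is pending.
 */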
#ifdef CONFIG_MULTITHREADING
static void mbox_callback_process(struct k_work *item)
#else
static void mbox_callback_process(struct icmsg_data_t *dev_data)
#endif
{
#ifdef CONFIG_MULTITHREADING
	struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work);
#endif
	uint8_t rx_buffer[CONFIG_PBUF_RX_READ_BUF_SIZE] __aligned(4);

	atomic_t state = atomic_get(&dev_data->state);

	uint32_t len = data_available(dev_data);

	if (len == 0) {
		/* Unlikely, no data in buffer. */
		return;
	}

	__ASSERT_NO_MSG(len <= sizeof(rx_buffer));

	if (sizeof(rx_buffer) < len) {
		return;
	}

	len = pbuf_read(dev_data->rx_pb, rx_buffer, sizeof(rx_buffer));

	if (state == ICMSG_STATE_READY) {
		if (dev_data->cb->received) {
			dev_data->cb->received(rx_buffer, len, dev_data->ctx);
		}
	} else {
		__ASSERT_NO_MSG(state == ICMSG_STATE_BUSY);

		/* Allow magic number longer than sizeof(magic) for future protocol version. */
		bool endpoint_invalid = (len < sizeof(magic) ||
					memcmp(magic, rx_buffer, sizeof(magic)));

		if (endpoint_invalid) {
			__ASSERT_NO_MSG(false);
			return;
		}

		if (dev_data->cb->bound) {
			dev_data->cb->bound(dev_data->ctx);
		}

		atomic_set(&dev_data->state, ICMSG_STATE_READY);
	}
#ifdef CONFIG_MULTITHREADING
	submit_work_if_buffer_free_and_data_available(dev_data);
#else
	submit_if_buffer_free_and_data_available(dev_data);
#endif
}

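/* Notification callback registered with the mbox driver; defers processing
 * of the incoming signal to the work queue, or handles it inline when
 * multithreading is disabled.
 */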
static void mbox_callback(const struct device *instance, uint32_t channel,
			  void *user_data, struct mbox_msg *msg_data)
{
	struct icmsg_data_t *dev_data = user_data;
#ifdef CONFIG_MULTITHREADING
	submit_work_if_buffer_free(dev_data);
#else
	submit_if_buffer_free(dev_data);
#endif
}

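/* Initialize the work items (when multithreading is enabled), register the
 * RX callback and enable the RX mbox channel.
 */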
static int mbox_init(const struct icmsg_config_t *conf,
		     struct icmsg_data_t *dev_data)
{
	int err;

#ifdef CONFIG_MULTITHREADING
	k_work_init(&dev_data->mbox_work, mbox_callback_process);
	k_work_init_delayable(&dev_data->notify_work, notify_process);
#endif

	err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, dev_data);
	if (err != 0) {
		return err;
	}

	return mbox_set_enabled_dt(&conf->mbox_rx, 1);
}

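/* Open the session: initialize both packet buffers, write the magic
 * handshake into the TX buffer, enable the mbox and keep notifying the
 * remote side until it answers with its own magic (see notify_process()).
 */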
int icmsg_open(const struct icmsg_config_t *conf,
	       struct icmsg_data_t *dev_data,
	       const struct ipc_service_cb *cb, void *ctx)
{
	if (!atomic_cas(&dev_data->state, ICMSG_STATE_OFF, ICMSG_STATE_BUSY)) {
		/* Already opened. */
		return -EALREADY;
	}

	dev_data->cb = cb;
	dev_data->ctx = ctx;
	dev_data->cfg = conf;

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	k_mutex_init(&dev_data->tx_lock);
#endif

	int ret = pbuf_tx_init(dev_data->tx_pb);

	if (ret < 0) {
		__ASSERT(false, "Incorrect Tx configuration");
		return ret;
	}

	ret = pbuf_rx_init(dev_data->rx_pb);

	if (ret < 0) {
		__ASSERT(false, "Incorrect Rx configuration");
		return ret;
	}

	ret = pbuf_write(dev_data->tx_pb, magic, sizeof(magic));

	if (ret < 0) {
		__ASSERT_NO_MSG(false);
		return ret;
	}

	if (ret < (int)sizeof(magic)) {
		__ASSERT_NO_MSG(ret == sizeof(magic));
		return ret;
	}

	ret = mbox_init(conf, dev_data);
	if (ret) {
		return ret;
	}
#ifdef CONFIG_MULTITHREADING
	ret = k_work_schedule_for_queue(workq, &dev_data->notify_work, K_NO_WAIT);
	if (ret < 0) {
		return ret;
	}
#else
	notify_process(dev_data);
#endif
	return 0;
}

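/* Close the session: disable and deregister the mbox, then mark the endpoint
 * as off so it can be reopened.
 */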
int icmsg_close(const struct icmsg_config_t *conf,
		struct icmsg_data_t *dev_data)
{
	int ret;

	ret = mbox_deinit(conf, dev_data);
	if (ret) {
		return ret;
	}

	atomic_set(&dev_data->state, ICMSG_STATE_OFF);

	return 0;
}

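/* Write a message into the TX packet buffer and signal the remote side over
 * the TX mbox channel. Returns the number of bytes sent, or a negative error
 * code.
 */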
int icmsg_send(const struct icmsg_config_t *conf,
	       struct icmsg_data_t *dev_data,
	       const void *msg, size_t len)
{
	int ret;
	int write_ret;
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	int release_ret;
#endif
	int sent_bytes;

	if (!is_endpoint_ready(dev_data)) {
		return -EBUSY;
	}

	/* Empty message is not allowed */
	if (len == 0) {
		return -ENODATA;
	}

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	ret = reserve_tx_buffer_if_unused(dev_data);
	if (ret < 0) {
		return -ENOBUFS;
	}
#endif

	write_ret = pbuf_write(dev_data->tx_pb, msg, len);

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	release_ret = release_tx_buffer(dev_data);
	__ASSERT_NO_MSG(!release_ret);
#endif

	if (write_ret < 0) {
		return write_ret;
	} else if (write_ret < len) {
		return -EBADMSG;
	}
	sent_bytes = write_ret;

	__ASSERT_NO_MSG(conf->mbox_tx.dev != NULL);

	ret = mbox_send_dt(&conf->mbox_tx, NULL);
	if (ret) {
		return ret;
	}

	return sent_bytes;
}

#if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)

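/* Start the dedicated ICMsg work queue during system initialization. */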
static int work_q_init(void)
{
	struct k_work_queue_config cfg = {
		.name = "icmsg_workq",
	};

	k_work_queue_start(&icmsg_workq,
			   icmsg_stack,
			   K_KERNEL_STACK_SIZEOF(icmsg_stack),
			   CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY, &cfg);
	return 0;
}

SYS_INIT(work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#endif