1 /*
2  * Copyright (c) 2024 Felipe Neves.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT linaro_ivshmem_mbox
8 
9 #include <stdint.h>
10 #include <string.h>
11 #include <zephyr/device.h>
12 #include <zephyr/drivers/mbox.h>
13 #include <zephyr/drivers/virtualization/ivshmem.h>
14 #include <zephyr/logging/log.h>
15 LOG_MODULE_REGISTER(mbox_ivshmem, CONFIG_MBOX_LOG_LEVEL);
16 
/* Dedicated stack and thread object for the event-loop that waits for
 * ivshmem doorbell notifications from the peer. Single static instance:
 * shared by every device created from this driver.
 */
K_THREAD_STACK_DEFINE(ivshmem_ev_loop_stack, CONFIG_MBOX_IVSHMEM_EVENT_LOOP_STACK_SIZE);
static struct k_thread ivshmem_ev_loop_thread;
19 
/* Per-instance mutable state: the callback registered through the mbox API. */
struct ivshmem_mbox_data {
	mbox_callback_t cb;   /* invoked from the event loop on each notification */
	void *user_data;      /* opaque pointer forwarded untouched to cb */
};
24 
/* Per-instance constant configuration resolved from devicetree. */
struct ivshmem_mbox_config {
	const struct device *ivshmem_dev; /* underlying ivshmem (doorbell) device */
	/* NOTE(review): peer_id is never initialized (MBOX_IVSHMEM_INIT does not
	 * set it) and never read in this file — the send path uses the mbox
	 * channel id as the peer id instead. Confirm whether this field is
	 * dead or intended for future use.
	 */
	int peer_id;
};
29 
ivshmem_mbox_event_loop_thread(void * arg,void * p2,void * p3)30 static void ivshmem_mbox_event_loop_thread(void *arg, void *p2, void *p3)
31 {
32 	ARG_UNUSED(p2);
33 	ARG_UNUSED(p3);
34 
35 	unsigned int poll_signaled;
36 	int ivshmem_vector_rx;
37 	struct k_poll_signal sig;
38 	struct k_poll_event events[] = {
39 		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig),
40 	};
41 
42 	const struct device *dev = (const struct device *)arg;
43 	struct ivshmem_mbox_data *dev_data = (struct ivshmem_mbox_data *)dev->data;
44 	struct ivshmem_mbox_config *dev_cfg = (struct ivshmem_mbox_config *)dev->config;
45 
46 	k_poll_signal_init(&sig);
47 	int ret = ivshmem_register_handler(dev_cfg->ivshmem_dev, &sig, 0);
48 
49 	if (ret < 0) {
50 		LOG_ERR("registering handlers must be supported: %d\n", ret);
51 		k_panic();
52 	}
53 
54 	while (1) {
55 		LOG_DBG("%s: waiting interrupt from client...\n", __func__);
56 		ret = k_poll(events, ARRAY_SIZE(events), K_FOREVER);
57 
58 		k_poll_signal_check(&sig, &poll_signaled, &ivshmem_vector_rx);
59 		/* get ready for next signal */
60 		k_poll_signal_reset(&sig);
61 
62 		if (dev_data->cb) {
63 			dev_data->cb(dev, 0, dev_data->user_data, NULL);
64 		}
65 	}
66 }
67 
/*
 * Send a doorbell notification to a peer.
 *
 * The mbox channel id doubles as the ivshmem peer id; msg is ignored because
 * ivshmem doorbells carry no payload (see ivshmem_mbox_mtu_get()).
 *
 * @param dev     Mbox device.
 * @param channel Mbox channel id, used directly as the ivshmem peer id.
 * @param msg     Ignored.
 *
 * @return 0 on success, negative errno from ivshmem_int_peer() otherwise.
 */
static int ivshmem_mbox_send(const struct device *dev, mbox_channel_id_t channel,
			     const struct mbox_msg *msg)
{
	/* Fixed: the original also marked 'channel' with ARG_UNUSED() even
	 * though it is used twice below — contradictory and misleading.
	 */
	ARG_UNUSED(msg);

	struct ivshmem_mbox_config *dev_cfg = (struct ivshmem_mbox_config *)dev->config;

	LOG_DBG("sending notification to the peer id 0x%x\n", (int)channel);
	return ivshmem_int_peer(dev_cfg->ivshmem_dev, (int)channel, 0);
}
79 
/*
 * Register the notification callback for this device.
 *
 * The callback is invoked from the event-loop thread each time a doorbell
 * notification arrives; the channel argument is irrelevant here since all
 * notifications are reported on channel 0.
 *
 * @param dev       Mbox device.
 * @param channel   Ignored.
 * @param cb        Callback to invoke; must not be NULL.
 * @param user_data Opaque pointer handed back to cb.
 *
 * @return 0 on success, -EINVAL if cb is NULL.
 */
static int ivshmem_mbox_register_callback(const struct device *dev, mbox_channel_id_t channel,
					  mbox_callback_t cb, void *user_data)
{
	struct ivshmem_mbox_data *dev_data = (struct ivshmem_mbox_data *)dev->data;

	ARG_UNUSED(channel);

	if (cb == NULL) {
		LOG_ERR("Must provide a callback");
		return -EINVAL;
	}

	dev_data->user_data = user_data;
	dev_data->cb = cb;

	return 0;
}
97 
98 /* some subsystems needs those functions to be at least implemented,
99  * returning some valid values instead of errors, just provide them.
100  */
101 
/* Report an MTU of 0: ivshmem doorbells are signalling-only, no payload. */
static int ivshmem_mbox_mtu_get(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}
108 
/* Channel ids map to ivshmem peer ids, which are 16-bit values, hence
 * UINT16_MAX usable "channels".
 */
static uint32_t ivshmem_mbox_max_channels_get(const struct device *dev)
{
	ARG_UNUSED(dev);

	return UINT16_MAX;
}
115 
/* No-op: notifications are always enabled once the event loop has registered
 * its handler; succeed so generic subsystems layered on mbox keep working.
 */
static int ivshmem_mbox_set_enabled(const struct device *dev, mbox_channel_id_t channel,
				    bool enable)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(channel);
	ARG_UNUSED(enable);

	return 0;
}
125 
/* Device init: spawn the event-loop thread, handing it the device pointer.
 *
 * NOTE(review): the thread object and stack are single static globals; if more
 * than one instance of this driver is enabled, each init re-creates the same
 * thread and only the last device would receive notifications — confirm the
 * driver is intended to be single-instance.
 */
static int ivshmem_mbox_init(const struct device *dev)
{
	k_thread_create(&ivshmem_ev_loop_thread, ivshmem_ev_loop_stack,
			CONFIG_MBOX_IVSHMEM_EVENT_LOOP_STACK_SIZE, ivshmem_mbox_event_loop_thread,
			(void *)dev, NULL, NULL, CONFIG_MBOX_IVSHMEM_EVENT_LOOP_PRIO, 0, K_NO_WAIT);

	return 0;
}
134 
/* Mbox driver API vtable: doorbell send/receive plus stub capability hooks. */
static DEVICE_API(mbox, ivshmem_mbox_driver_api) = {
	.send = ivshmem_mbox_send,
	.register_callback = ivshmem_mbox_register_callback,
	.mtu_get = ivshmem_mbox_mtu_get,
	.max_channels_get = ivshmem_mbox_max_channels_get,
	.set_enabled = ivshmem_mbox_set_enabled,
};
142 
/* Per-instance boilerplate: resolve the backing ivshmem device from the
 * 'ivshmem' devicetree phandle, zero-init the callback state, and define the
 * device at POST_KERNEL / application priority (the ivshmem device must be
 * ready first). Expanded once per status-okay devicetree instance below.
 */
#define MBOX_IVSHMEM_INIT(inst)                                                                    \
	static const struct ivshmem_mbox_config ivshmem_mbox_cfg_##inst = {                        \
		.ivshmem_dev = DEVICE_DT_GET(DT_INST_PHANDLE(inst, ivshmem)),                      \
	};                                                                                         \
	static struct ivshmem_mbox_data ivshmem_mbox_data_##inst = {                               \
		.cb = NULL,                                                                        \
		.user_data = NULL,                                                                 \
	};                                                                                         \
	DEVICE_DT_INST_DEFINE(inst, ivshmem_mbox_init, NULL, &ivshmem_mbox_data_##inst,            \
			      &ivshmem_mbox_cfg_##inst, POST_KERNEL,                               \
			      CONFIG_APPLICATION_INIT_PRIORITY, &ivshmem_mbox_driver_api);

DT_INST_FOREACH_STATUS_OKAY(MBOX_IVSHMEM_INIT);
156