1 /*
2 * Copyright (c) 2022 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <string.h>
8
9 #include <zephyr/device.h>
10 #include <zephyr/ipc/icmsg.h>
11 #include <zephyr/ipc/icmsg_me.h>
12 #include <zephyr/ipc/ipc_service_backend.h>
13
14 #define DT_DRV_COMPAT zephyr_ipc_icmsg_me_follower
15
16 #define INVALID_EPT_ID 255
17 #define SEND_BUF_SIZE CONFIG_IPC_SERVICE_BACKEND_ICMSG_ME_SEND_BUF_SIZE
18 #define NUM_EP CONFIG_IPC_SERVICE_BACKEND_ICMSG_ME_NUM_EP
19 #define EP_NAME_LEN CONFIG_IPC_SERVICE_BACKEND_ICMSG_ME_EP_NAME_LEN
20
21 #define EVENT_BOUND 0x01
22
/* Cache entry for an endpoint announced by the remote (initiator) side. */
struct ept_disc_rmt_cache_t {
	icmsg_me_ept_id_t id;	/* Remote endpoint id; INVALID_EPT_ID until known */
	char name[EP_NAME_LEN];	/* NUL-terminated endpoint name; "" marks a free slot */
};
27
/* Per-instance state of the icmsg_me follower backend. */
struct backend_data_t {
	struct icmsg_me_data_t icmsg_me_data;	/* Shared icmsg multi-endpoint state */

	struct k_mutex cache_mutex;		/* Guards both discovery caches below */
	/* Endpoints registered locally but not yet bound to a remote id. */
	const struct ipc_ept_cfg *ept_disc_loc_cache[NUM_EP];
	/* Endpoints announced by the remote side (or reserved for token storage). */
	struct ept_disc_rmt_cache_t ept_disc_rmt_cache[NUM_EP];
};
35
get_ept_cached_loc(const struct backend_data_t * data,const char * name,size_t len)36 static const struct ipc_ept_cfg *get_ept_cached_loc(
37 const struct backend_data_t *data, const char *name, size_t len)
38 {
39 for (int i = 0; i < NUM_EP; i++) {
40 if (data->ept_disc_loc_cache[i] == NULL) {
41 continue;
42 }
43 if (strncmp(data->ept_disc_loc_cache[i]->name, name,
44 MIN(EP_NAME_LEN, len)) == 0) {
45 return data->ept_disc_loc_cache[i];
46 }
47 }
48
49 return NULL;
50 }
51
get_ept_cached_rmt(struct backend_data_t * data,const char * name,size_t len)52 static struct ept_disc_rmt_cache_t *get_ept_cached_rmt(
53 struct backend_data_t *data, const char *name, size_t len)
54 {
55 size_t cmp_len = MIN(EP_NAME_LEN, len);
56
57 for (int i = 0; i < NUM_EP; i++) {
58 if (strncmp(data->ept_disc_rmt_cache[i].name, name,
59 cmp_len) == 0 &&
60 strlen(data->ept_disc_rmt_cache[i].name) == len) {
61 return &data->ept_disc_rmt_cache[i];
62 }
63 }
64
65 return NULL;
66 }
67
cache_ept_loc(struct backend_data_t * data,const struct ipc_ept_cfg * ept)68 static int cache_ept_loc(struct backend_data_t *data, const struct ipc_ept_cfg *ept)
69 {
70 for (int i = 0; i < NUM_EP; i++) {
71 if (data->ept_disc_loc_cache[i] == NULL) {
72 data->ept_disc_loc_cache[i] = ept;
73 return 0;
74 }
75 }
76
77 return -ENOMEM;
78 }
79
/* Store a remote endpoint name/id pair in the first free cache slot
 * (a slot is free when its name is the empty string). The name is
 * truncated to EP_NAME_LEN - 1 characters and always NUL-terminated.
 *
 * Returns 0 on success, -ENOMEM when all NUM_EP slots are occupied.
 * Caller must hold data->cache_mutex.
 */
static int cache_ept_rmt(struct backend_data_t *data, const char *name,
			 size_t len, icmsg_me_ept_id_t id)
{
	for (int i = 0; i < NUM_EP; i++) {
		struct ept_disc_rmt_cache_t *entry = &data->ept_disc_rmt_cache[i];

		if (entry->name[0] != '\0') {
			continue;
		}

		size_t copy_len = MIN(EP_NAME_LEN - 1, len);

		strncpy(entry->name, name, copy_len);
		entry->name[copy_len] = '\0';
		entry->id = id;
		return 0;
	}

	return -ENOMEM;
}
97
/* Bind a local endpoint to the remote endpoint id @id: register the
 * configuration with the icmsg_me core, send a discovery confirmation
 * (on the reserved discovery endpoint 0) back to the initiator, and
 * invoke the endpoint's bound() callback if one is set.
 *
 * Returns 0 on success or a negative errno from the icmsg layer.
 */
static int bind_ept(const struct icmsg_config_t *conf,
		    struct backend_data_t *data, const struct ipc_ept_cfg *ept,
		    icmsg_me_ept_id_t id)
{
	int ret;
	const uint8_t confirmation[] = {
		0,	/* EP discovery endpoint id */
		id,	/* Bound endpoint id */
	};

	__ASSERT_NO_MSG(id <= NUM_EP);

	ret = icmsg_me_set_ept_cfg(&data->icmsg_me_data, id, ept);
	if (ret < 0) {
		return ret;
	}

	/* The underlying icmsg instance must be bound before we may send. */
	icmsg_me_wait_for_icmsg_bind(&data->icmsg_me_data);

	ret = icmsg_send(conf, &data->icmsg_me_data.icmsg_data, confirmation,
			 sizeof(confirmation));
	if (ret < 0) {
		return ret;
	}

	if (ept->cb.bound != NULL) {
		ept->cb.bound(ept->priv);
	}

	return 0;
}
128
bound(void * priv)129 static void bound(void *priv)
130 {
131 const struct device *instance = priv;
132 struct backend_data_t *dev_data = instance->data;
133
134 icmsg_me_icmsg_bound(&dev_data->icmsg_me_data);
135 }
136
received(const void * data,size_t len,void * priv)137 static void received(const void *data, size_t len, void *priv)
138 {
139 const struct device *instance = priv;
140 const struct icmsg_config_t *conf = instance->config;
141 struct backend_data_t *dev_data = instance->data;
142
143 const icmsg_me_ept_id_t *id = data;
144
145 __ASSERT_NO_MSG(len > 0);
146
147 if (*id == 0) {
148 __ASSERT_NO_MSG(len > 1);
149
150 id++;
151 icmsg_me_ept_id_t ept_id = *id;
152 const char *name = id + 1;
153 size_t name_len = len - 2 * sizeof(icmsg_me_ept_id_t);
154
155 k_mutex_lock(&dev_data->cache_mutex, K_FOREVER);
156
157 const struct ipc_ept_cfg *ept =
158 get_ept_cached_loc(dev_data, name, name_len);
159 if (ept == NULL) {
160 cache_ept_rmt(dev_data, name, name_len, ept_id);
161 } else {
162 /* Remote cache entry should be already filled during
163 * the local cache entry registration. Update its
164 * id for correct token value.
165 */
166 struct ept_disc_rmt_cache_t *rmt_cache_entry;
167
168 rmt_cache_entry = get_ept_cached_rmt(dev_data, name,
169 name_len);
170
171 __ASSERT_NO_MSG(rmt_cache_entry != NULL);
172 __ASSERT_NO_MSG(rmt_cache_entry->id == INVALID_EPT_ID);
173 rmt_cache_entry->id = ept_id;
174
175 bind_ept(conf, dev_data, ept, ept_id);
176 }
177
178 k_mutex_unlock(&dev_data->cache_mutex);
179 } else {
180 icmsg_me_received_data(&dev_data->icmsg_me_data, *id,
181 data, len);
182 }
183 }
184
/* Callbacks handed to the underlying icmsg transport. */
static const struct ipc_service_cb cb = {
	.bound = bound,
	.received = received,
	.error = NULL,
};
190
open(const struct device * instance)191 static int open(const struct device *instance)
192 {
193 const struct icmsg_config_t *conf = instance->config;
194 struct backend_data_t *dev_data = instance->data;
195
196 return icmsg_me_open(conf, &dev_data->icmsg_me_data, &cb,
197 (void *)instance);
198 }
199
/* Backend API: register a local endpoint.
 *
 * If the remote side has not announced this endpoint yet, the configuration
 * is cached locally and a placeholder remote entry (id == INVALID_EPT_ID) is
 * created so the token has stable id storage; binding completes later in
 * received(). If the remote announcement already arrived with a valid id,
 * the endpoint is bound immediately. Returns -EAGAIN when the remote entry
 * exists but its id is still unknown, -ENOMEM when a cache is full, or a
 * negative errno from bind_ept(). *token points at the cached id.
 */
static int register_ept(const struct device *instance, void **token,
			const struct ipc_ept_cfg *cfg)
{
	const struct icmsg_config_t *conf = instance->config;
	struct backend_data_t *data = instance->data;
	struct ept_disc_rmt_cache_t *rmt_cache_entry;
	int r;

	k_mutex_lock(&data->cache_mutex, K_FOREVER);

	rmt_cache_entry = get_ept_cached_rmt(data, cfg->name,
					     strlen(cfg->name));
	if (rmt_cache_entry == NULL) {
		r = cache_ept_loc(data, cfg);
		if (r) {
			goto exit;
		}

		/* Register remote cache entry alongside the local one to
		 * make available endpoint id storage required for token.
		 */
		r = cache_ept_rmt(data, cfg->name, strlen(cfg->name),
				  INVALID_EPT_ID);
		if (r) {
			goto exit;
		}

		rmt_cache_entry = get_ept_cached_rmt(data, cfg->name,
						     strlen(cfg->name));
		__ASSERT_NO_MSG(rmt_cache_entry != NULL);
		*token = &rmt_cache_entry->id;
	} else {
		icmsg_me_ept_id_t ept_id = rmt_cache_entry->id;

		if (ept_id == INVALID_EPT_ID) {
			/* Placeholder created earlier; remote id not yet known. */
			r = -EAGAIN;
			goto exit;
		}

		*token = &rmt_cache_entry->id;
		r = bind_ept(conf, data, cfg, ept_id);
	}

exit:
	k_mutex_unlock(&data->cache_mutex);
	return r;
}
247
send(const struct device * instance,void * token,const void * msg,size_t user_len)248 static int send(const struct device *instance, void *token,
249 const void *msg, size_t user_len)
250 {
251 const struct icmsg_config_t *conf = instance->config;
252 struct backend_data_t *dev_data = instance->data;
253 icmsg_me_ept_id_t *id = token;
254
255 if (*id == INVALID_EPT_ID) {
256 return -ENOTCONN;
257 }
258
259 return icmsg_me_send(conf, &dev_data->icmsg_me_data, *id, msg,
260 user_len);
261 }
262
263 const static struct ipc_service_backend backend_ops = {
264 .open_instance = open,
265 .register_endpoint = register_ept,
266 .send = send,
267 };
268
backend_init(const struct device * instance)269 static int backend_init(const struct device *instance)
270 {
271 const struct icmsg_config_t *conf = instance->config;
272 struct backend_data_t *dev_data = instance->data;
273
274 k_mutex_init(&dev_data->cache_mutex);
275
276 return icmsg_me_init(conf, &dev_data->icmsg_me_data);
277 }
278
/* Instantiate one backend per enabled devicetree node: mailbox channels for
 * signalling, packet buffers placed in the tx/rx shared-memory regions
 * (aligned for data-cache maintenance), per-instance data, and the device
 * registered at POST_KERNEL with the configured backend priority.
 */
#define DEFINE_BACKEND_DEVICE(i)					\
	static const struct icmsg_config_t backend_config_##i = {	\
		.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx),		\
		.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx),		\
	};								\
									\
	PBUF_DEFINE(tx_pb_##i,						\
			DT_REG_ADDR(DT_INST_PHANDLE(i, tx_region)),	\
			DT_REG_SIZE(DT_INST_PHANDLE(i, tx_region)),	\
			DT_INST_PROP_OR(i, dcache_alignment, 0));	\
	PBUF_DEFINE(rx_pb_##i,						\
			DT_REG_ADDR(DT_INST_PHANDLE(i, rx_region)),	\
			DT_REG_SIZE(DT_INST_PHANDLE(i, rx_region)),	\
			DT_INST_PROP_OR(i, dcache_alignment, 0));	\
									\
	static struct backend_data_t backend_data_##i = {		\
		.icmsg_me_data = {					\
			.icmsg_data = {					\
				.tx_pb = &tx_pb_##i,			\
				.rx_pb = &rx_pb_##i,			\
			}						\
		}							\
	};								\
									\
	DEVICE_DT_INST_DEFINE(i,					\
			 &backend_init,					\
			 NULL,						\
			 &backend_data_##i,				\
			 &backend_config_##i,				\
			 POST_KERNEL,					\
			 CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY,	\
			 &backend_ops);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)
313