/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdio.h>
#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/iterable_sections.h>

#include "uvb.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(uvb, CONFIG_UVB_LOG_LEVEL);

static struct k_fifo uvb_queue;
static void uvb_work_handler(struct k_work *work);
static K_WORK_DEFINE(uvb_work, uvb_work_handler);

enum uvb_msg_type {
	UVB_MSG_ADVERT,
	UVB_MSG_TO_HOST,
	UVB_MSG_SUBSCRIBE,
	UVB_MSG_UNSUBSCRIBE,
};

struct uvb_msg {
	sys_snode_t node;
	enum uvb_msg_type type;
	const struct uvb_node *source;
	union {
		struct uvb_node *sink;
		struct {
			enum uvb_event_type type;
			const void *data;
		} event;
	};
};

K_MEM_SLAB_DEFINE_STATIC(uvb_msg_slab, sizeof(struct uvb_msg),
			 CONFIG_UVB_MAX_MESSAGES, sizeof(void *));

K_MEM_SLAB_DEFINE_STATIC(uvb_pkt_slab, sizeof(struct uvb_packet),
			 CONFIG_UVB_MAX_MESSAGES, sizeof(void *));

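/*
 * Allocate a packet from the packet slab and stamp it with a new sequence
 * number. Allocation never blocks; returns NULL if no slot is free.
 */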
struct uvb_packet *uvb_alloc_pkt(const enum uvb_request request,
				 const uint8_t addr, const uint8_t ep,
				 uint8_t *const data,
				 const size_t length)
{
	static uint32_t seq;
	struct uvb_packet *pkt;

	if (k_mem_slab_alloc(&uvb_pkt_slab, (void **)&pkt, K_NO_WAIT)) {
		LOG_ERR("Failed to allocate packet memory");
		return NULL;
	}

	seq++;
	pkt->seq = seq;
	pkt->request = request;
	pkt->reply = UVB_REPLY_TIMEOUT;
	pkt->addr = addr;
	pkt->ep = ep;
	pkt->data = data;
	pkt->length = length;

	return pkt;
}

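/* Return a packet to the packet slab. */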
void uvb_free_pkt(struct uvb_packet *const pkt)
{
	k_mem_slab_free(&uvb_pkt_slab, (void *)pkt);
}

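/* Queue a message and (re)submit the work item that drains the queue. */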
static ALWAYS_INLINE int submit_new_work(struct uvb_msg *const msg)
{
	k_fifo_put(&uvb_queue, msg);
	return k_work_submit(&uvb_work);
}

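/*
 * Allocate a zeroed message from the message slab and record the
 * originating node. Allocation never blocks; returns NULL if no slot is free.
 */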
static struct uvb_msg *uvb_alloc_msg(const struct uvb_node *const node)
{
	struct uvb_msg *msg;

	if (k_mem_slab_alloc(&uvb_msg_slab, (void **)&msg, K_NO_WAIT)) {
		LOG_ERR("Failed to allocate msg memory");
		return NULL;
	}

	memset(msg, 0, sizeof(struct uvb_msg));
	msg->source = node;

	return msg;
}

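/* Queue an event from a host node for broadcast to its subscribed device nodes. */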
int uvb_advert(const struct uvb_node *const host_node,
	       const enum uvb_event_type type,
	       const struct uvb_packet *const pkt)
{
	struct uvb_msg *msg;
	int err;

	msg = uvb_alloc_msg(host_node);
	if (msg == NULL) {
		return -ENOMEM;
	}

	msg->type = UVB_MSG_ADVERT;
	msg->event.type = type;
	msg->event.data = (void *)pkt;
	err = submit_new_work(msg);

	return err < 0 ? err : 0;
}

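/* Queue an event from a device node for delivery to its host. */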
int uvb_to_host(const struct uvb_node *const dev_node,
		const enum uvb_event_type type,
		const struct uvb_packet *const pkt)
{
	struct uvb_msg *msg;
	int err;

	msg = uvb_alloc_msg(dev_node);
	if (msg == NULL) {
		return -ENOMEM;
	}

	msg->type = UVB_MSG_TO_HOST;
	msg->event.type = type;
	msg->event.data = (void *)pkt;
	err = submit_new_work(msg);

	return err < 0 ? err : 0;
}

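/* Queue a subscribe request linking dev_node to host_node. */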
static int subscribe_msg(const struct uvb_node *const host_node,
			 struct uvb_node *const dev_node,
			 const enum uvb_msg_type type)
{
	struct uvb_msg *msg;
	int err;

	msg = uvb_alloc_msg(host_node);
	if (msg == NULL) {
		return -ENOMEM;
	}

	msg->type = type;
	msg->sink = dev_node;
	err = submit_new_work(msg);

	return err < 0 ? err : 0;
}

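/* Queue an unsubscribe request detaching dev_node from host_node. */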
static int unsubscribe_msg(const struct uvb_node *const host_node,
			   struct uvb_node *const dev_node)
{
	struct uvb_msg *msg;
	int err;

	msg = uvb_alloc_msg(host_node);
	if (msg == NULL) {
		return -ENOMEM;
	}

	msg->type = UVB_MSG_UNSUBSCRIBE;
	msg->sink = dev_node;
	err = submit_new_work(msg);

	return err < 0 ? err : 0;
}

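/* Look up a registered host node by name in the uvb_node iterable section. */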
static struct uvb_node *find_host_node(const char *name)
{
	if (name == NULL || name[0] == '\0') {
		return NULL;
	}

	STRUCT_SECTION_FOREACH(uvb_node, host) {
		if (strcmp(name, host->name) == 0) {
			return host;
		}
	}

	return NULL;
}

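/* Subscribe a device node to the host identified by name. */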
int uvb_subscribe(const char *name, struct uvb_node *const dev_node)
{
	const struct uvb_node *host_node;

	host_node = find_host_node(name);
	if (host_node == NULL) {
		return -ENOENT;
	}

	return subscribe_msg(host_node, dev_node, UVB_MSG_SUBSCRIBE);
}

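/* Unsubscribe a device node from the host identified by name. */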
int uvb_unsubscribe(const char *name, struct uvb_node *const dev_node)
{
	const struct uvb_node *host_node;

	host_node = find_host_node(name);
	if (host_node == NULL) {
		return -ENOENT;
	}

	return unsubscribe_msg(host_node, dev_node);
}

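/*
 * Prepend the device node to the host's subscriber list, rejecting
 * nodes that are already subscribed.
 */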
static ALWAYS_INLINE void handle_msg_subscribe(struct uvb_msg *const msg)
{
	struct uvb_node *host_node;
	struct uvb_node *dev_node;

	host_node = (struct uvb_node *)msg->source;
	dev_node = msg->sink;
	if (atomic_get(&dev_node->subscribed)) {
		LOG_ERR("%p already subscribed", dev_node);
		return;
	}

	LOG_DBG("%p -> %p", dev_node, host_node);
	sys_dnode_init(&dev_node->node);
	if (msg->type == UVB_MSG_SUBSCRIBE) {
		sys_dlist_prepend(&host_node->list, &dev_node->node);
	}

	atomic_inc(&dev_node->subscribed);
}

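/* Remove the device node from its host's subscriber list, if subscribed. */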
static ALWAYS_INLINE void handle_msg_unsubscribe(struct uvb_msg *const msg)
{
	struct uvb_node *dev_node;
	atomic_t tmp;

	dev_node = msg->sink;
	tmp = atomic_clear(&dev_node->subscribed);
	if (tmp) {
		LOG_DBG("unsubscribe %p", dev_node);
		sys_dlist_remove(&dev_node->node);
	} else {
		LOG_ERR("%p is not subscribed", dev_node);
	}
}

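/* Deliver a host event to the notify handler of every subscribed device node. */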
static ALWAYS_INLINE void handle_msg_event(struct uvb_msg *const msg)
{
	struct uvb_node *host_node;
	struct uvb_node *dev_node;

	host_node = (struct uvb_node *)msg->source;
	SYS_DLIST_FOR_EACH_CONTAINER(&host_node->list, dev_node, node) {
		LOG_DBG("%p from %p to %p", msg, host_node, dev_node);
		if (dev_node->notify) {
			dev_node->notify(dev_node->priv,
					 msg->event.type,
					 msg->event.data);
		}
	}
}

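/*
 * Deliver a device event to the host: walk the list starting at the device
 * node and notify only the entry flagged as list head, i.e. the host.
 */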
static ALWAYS_INLINE void handle_msg_to_host(struct uvb_msg *const msg)
{
	struct uvb_node *host_node;
	struct uvb_node *source;

	source = (struct uvb_node *)msg->source;
	if (source->head) {
		LOG_ERR("Host may not reply");
	}

	SYS_DLIST_FOR_EACH_CONTAINER(&source->node, host_node, node) {
		LOG_DBG("%p from %p to %p", msg, source, host_node);
		if (host_node->head && host_node->notify) {
			host_node->notify(host_node->priv,
					  msg->event.type,
					  msg->event.data);
		}
	}
}

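/*
 * Handle one queued message per work item invocation and resubmit the
 * work item as long as the queue is not empty.
 */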
static void uvb_work_handler(struct k_work *work)
{
	struct uvb_msg *msg;

	msg = k_fifo_get(&uvb_queue, K_NO_WAIT);
	if (msg == NULL) {
		return;
	}

	LOG_DBG("Message %p %s", msg->source, msg->source->name);
	switch (msg->type) {
	case UVB_MSG_SUBSCRIBE:
		handle_msg_subscribe(msg);
		break;
	case UVB_MSG_UNSUBSCRIBE:
		handle_msg_unsubscribe(msg);
		break;
	case UVB_MSG_ADVERT:
		handle_msg_event(msg);
		break;
	case UVB_MSG_TO_HOST:
		handle_msg_to_host(msg);
		break;
	default:
		break;
	}

	k_mem_slab_free(&uvb_msg_slab, (void *)msg);
	if (!k_fifo_is_empty(&uvb_queue)) {
		(void)k_work_submit(work);
	}
}

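/* Initialize the subscriber list of each registered host node and the message queue. */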
static int uvb_init(void)
{
	STRUCT_SECTION_FOREACH(uvb_node, host) {
		LOG_DBG("Host %p - %s", host, host->name);
		sys_dlist_init(&host->list);
	}

	k_fifo_init(&uvb_queue);

	return 0;
}

SYS_INIT(uvb_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);