/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * @file  uhc_virtual.c
 * @brief Virtual USB host controller (UHC) driver
 *
 * The virtual host controller does not emulate any hardware
 * and can only communicate with virtual device controllers
 * through the virtual bus (UVB).
 */

#include "uhc_common.h"
#include "../uvb/uvb.h"

#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/drivers/usb/uhc.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(uhc_vrt, CONFIG_UHC_DRIVER_LOG_LEVEL);

struct uhc_vrt_config {
};

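/* Driver private data. Only one transfer (last_xfer) is in flight at
 * a time; the busy flag is set while a packet advertised on the bus
 * is waiting for a reply.
 */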
struct uhc_vrt_data {
	const struct device *dev;
	struct uvb_node *host_node;
	struct k_work work;
	struct k_fifo fifo;
	struct uhc_transfer *last_xfer;
	struct k_timer sof_timer;
	bool busy;
	uint8_t req;
};

enum uhc_vrt_event_type {
	/* Trigger next transfer */
	UHC_VRT_EVT_XFER,
	/* SoF generator event */
	UHC_VRT_EVT_SOF,
	/* Request reply received */
	UHC_VRT_EVT_REPLY,
};

/* Structure for driver's events */
struct uhc_vrt_event {
	sys_snode_t node;
	enum uhc_vrt_event_type type;
	struct uvb_packet *pkt;
};

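/* Event slab; up to 16 events can be queued at a time. */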
K_MEM_SLAB_DEFINE(uhc_vrt_slab, sizeof(struct uhc_vrt_event),
		  16, sizeof(void *));

static void vrt_event_submit(const struct device *dev,
			     const enum uhc_vrt_event_type type,
			     const void *data)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_vrt_event *event;
	int ret;

	ret = k_mem_slab_alloc(&uhc_vrt_slab, (void **)&event, K_NO_WAIT);
	__ASSERT(ret == 0, "Failed to allocate slab");

	event->type = type;
	event->pkt = (struct uvb_packet *const)data;
	k_fifo_put(&priv->fifo, event);
	k_work_submit(&priv->work);
}

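/* Map the current control transfer stage to a virtual bus request:
 * SETUP is advertised as an UVB_REQUEST_SETUP packet on the control
 * OUT endpoint, the data stage as an MPS-limited UVB_REQUEST_DATA
 * chunk, and the status stage as a zero-length UVB_REQUEST_DATA
 * packet in the direction opposite to the preceding stage.
 */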
static int vrt_xfer_control(const struct device *dev,
			    struct uhc_transfer *const xfer)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct net_buf *buf = xfer->buf;
	struct uvb_packet *uvb_pkt;
	uint8_t *data = NULL;
	size_t length = 0;

	if (xfer->stage == UHC_CONTROL_STAGE_SETUP) {
		LOG_DBG("Handle SETUP stage");
		uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_SETUP,
					xfer->addr, USB_CONTROL_EP_OUT,
					xfer->setup_pkt, sizeof(xfer->setup_pkt));
		if (uvb_pkt == NULL) {
			LOG_ERR("Failed to allocate UVB packet");
			return -ENOMEM;
		}

		priv->req = UVB_REQUEST_SETUP;
		priv->busy = true;

		return uvb_advert_pkt(priv->host_node, uvb_pkt);
	}

	if (buf != NULL && xfer->stage == UHC_CONTROL_STAGE_DATA) {
		if (USB_EP_DIR_IS_IN(xfer->ep)) {
			length = MIN(net_buf_tailroom(buf), xfer->mps);
			data = net_buf_tail(buf);
		} else {
			length = MIN(buf->len, xfer->mps);
			data = buf->data;
		}

		LOG_DBG("Handle DATA stage");
		uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA,
					xfer->addr, xfer->ep,
					data, length);
		if (uvb_pkt == NULL) {
			LOG_ERR("Failed to allocate UVB packet");
			return -ENOMEM;
		}

		priv->req = UVB_REQUEST_DATA;
		priv->busy = true;

		return uvb_advert_pkt(priv->host_node, uvb_pkt);
	}

	if (xfer->stage == UHC_CONTROL_STAGE_STATUS) {
		uint8_t ep;

		LOG_DBG("Handle STATUS stage");
		if (USB_EP_DIR_IS_IN(xfer->ep)) {
			ep = USB_CONTROL_EP_OUT;
		} else {
			ep = USB_CONTROL_EP_IN;
		}

		uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA,
					xfer->addr, ep,
					NULL, 0);
		if (uvb_pkt == NULL) {
			LOG_ERR("Failed to allocate UVB packet");
			return -ENOMEM;
		}

		priv->req = UVB_REQUEST_DATA;
		priv->busy = true;

		return uvb_advert_pkt(priv->host_node, uvb_pkt);
	}

	return -EINVAL;
}

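/* Advertise a single MPS-limited chunk of a non-control transfer on
 * the virtual bus. For IN endpoints the packet points at the net_buf
 * tailroom, which the device peer fills in directly.
 */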
static int vrt_xfer_bulk(const struct device *dev,
			 struct uhc_transfer *const xfer)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct net_buf *buf = xfer->buf;
	struct uvb_packet *uvb_pkt;
	uint8_t *data;
	size_t length;

	if (USB_EP_DIR_IS_IN(xfer->ep)) {
		length = MIN(net_buf_tailroom(buf), xfer->mps);
		data = net_buf_tail(buf);
	} else {
		length = MIN(buf->len, xfer->mps);
		data = buf->data;
	}

	uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA, xfer->addr, xfer->ep,
				data, length);
	if (uvb_pkt == NULL) {
		LOG_ERR("Failed to allocate UVB packet");
		return -ENOMEM;
	}

	return uvb_advert_pkt(priv->host_node, uvb_pkt);
}

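/* Schedule the next chunk: pick up a new transfer if none is active,
 * then dispatch endpoint 0 to the control stage handler and all other
 * endpoints to the bulk handler.
 */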
static int vrt_schedule_xfer(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	if (priv->last_xfer == NULL) {
		priv->last_xfer = uhc_xfer_get_next(dev);
		if (priv->last_xfer == NULL) {
			LOG_DBG("Nothing to transfer");
			return 0;
		}

		LOG_DBG("Next transfer is %p", priv->last_xfer);
	}

	if (USB_EP_GET_IDX(priv->last_xfer->ep) == 0) {
		return vrt_xfer_control(dev, priv->last_xfer);
	}

	/* TODO: Isochronous transfers */
	return vrt_xfer_bulk(dev, priv->last_xfer);
}

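/* Handle an ACKed packet: advance the control stage state machine and
 * account the transferred data. A transfer finishes when the status
 * stage completes, an OUT buffer is drained, or an IN endpoint sends
 * a short packet or fills the buffer.
 */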
static void vrt_hrslt_success(const struct device *dev,
			      struct uvb_packet *const pkt)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_transfer *const xfer = priv->last_xfer;
	struct net_buf *buf = xfer->buf;
	bool finished = false;
	size_t length;

	switch (pkt->request) {
	case UVB_REQUEST_SETUP:
		if (xfer->buf != NULL) {
			xfer->stage = UHC_CONTROL_STAGE_DATA;
		} else {
			xfer->stage = UHC_CONTROL_STAGE_STATUS;
		}

		break;
	case UVB_REQUEST_DATA:
		if (xfer->stage == UHC_CONTROL_STAGE_STATUS) {
			LOG_DBG("Status stage finished");
			finished = true;
			break;
		}

		if (USB_EP_DIR_IS_OUT(pkt->ep)) {
			length = MIN(buf->len, xfer->mps);
			net_buf_pull(buf, length);
			LOG_DBG("OUT chunk %zu out of %u", length, buf->len);
			if (buf->len == 0) {
				if (pkt->ep == USB_CONTROL_EP_OUT) {
					xfer->stage = UHC_CONTROL_STAGE_STATUS;
				} else {
					finished = true;
				}
			}
		} else {
			length = MIN(net_buf_tailroom(buf), pkt->length);
			net_buf_add(buf, length);
			if (pkt->length > xfer->mps) {
				LOG_ERR("Ambiguous packet, length %zu exceeds MPS",
					pkt->length);
			}

			LOG_DBG("IN chunk %zu out of %zu", length, net_buf_tailroom(buf));
			if (pkt->length < xfer->mps || !net_buf_tailroom(buf)) {
				if (pkt->ep == USB_CONTROL_EP_IN) {
					xfer->stage = UHC_CONTROL_STAGE_STATUS;
				} else {
					finished = true;
				}
			}
		}
		break;
	}

	if (finished) {
		LOG_DBG("Transfer finished");
		uhc_xfer_return(dev, xfer, 0);
		priv->last_xfer = NULL;
	}
}

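/* Return the active transfer, if any, with an error and clear it. */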
static void vrt_xfer_drop_active(const struct device *dev, int err)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	if (priv->last_xfer) {
		uhc_xfer_return(dev, priv->last_xfer, err);
		priv->last_xfer = NULL;
	}
}

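/* Dispatch a reply from the device peer: NACK leaves the transfer in
 * place so the last transaction is retried, STALL drops it with
 * -EPIPE, and ACK advances it. The reply packet is always freed here.
 */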
static int vrt_handle_reply(const struct device *dev,
			    struct uvb_packet *const pkt)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_transfer *const xfer = priv->last_xfer;
	int ret = 0;

	if (xfer == NULL) {
		LOG_ERR("No transfers to handle");
		ret = -ENODATA;
		goto handle_reply_err;
	}

	priv->busy = false;

	switch (pkt->reply) {
	case UVB_REPLY_NACK:
		/* Restart last transaction */
		break;
	case UVB_REPLY_STALL:
		vrt_xfer_drop_active(dev, -EPIPE);
		break;
	case UVB_REPLY_ACK:
		vrt_hrslt_success(dev, pkt);
		break;
	default:
		vrt_xfer_drop_active(dev, -EINVAL);
		ret = -EINVAL;
		break;
	}

handle_reply_err:
	uvb_free_pkt(pkt);
	return ret;
}

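/* Work queue handler: drain the event FIFO. Replies may finish the
 * active transfer, SoF events tick down its timeout (one SoF per
 * millisecond), and a new chunk is scheduled whenever the bus is not
 * busy waiting for a reply.
 */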
static void xfer_work_handler(struct k_work *work)
{
	struct uhc_vrt_data *priv = CONTAINER_OF(work, struct uhc_vrt_data, work);
	const struct device *dev = priv->dev;
	struct uhc_vrt_event *ev;

	while ((ev = k_fifo_get(&priv->fifo, K_NO_WAIT)) != NULL) {
		bool schedule = false;
		int err;

		switch (ev->type) {
		case UHC_VRT_EVT_REPLY:
			err = vrt_handle_reply(dev, ev->pkt);
			if (unlikely(err)) {
				uhc_submit_event(dev, UHC_EVT_ERROR, err);
			}

			schedule = true;
			break;
		case UHC_VRT_EVT_XFER:
			LOG_DBG("Transfer triggered for %p", dev);
			schedule = true;
			break;
		case UHC_VRT_EVT_SOF:
			if (priv->last_xfer != NULL) {
				if (priv->last_xfer->timeout) {
					priv->last_xfer->timeout--;
				} else {
					vrt_xfer_drop_active(dev, -ETIMEDOUT);
					priv->busy = false;
					LOG_WRN("Transfer timeout");
				}
			}
			break;
		default:
			break;
		}

		if (schedule && !priv->busy) {
			err = vrt_schedule_xfer(dev);
			if (unlikely(err)) {
				uhc_submit_event(dev, UHC_EVT_ERROR, err);
			}
		}

		k_mem_slab_free(&uhc_vrt_slab, (void *)ev);
	}
}

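/* Periodic 1 ms timer emulating SoF generation while the bus is active. */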
static void sof_timer_handler(struct k_timer *timer)
{
	struct uhc_vrt_data *priv = CONTAINER_OF(timer, struct uhc_vrt_data, sof_timer);

	vrt_event_submit(priv->dev, UHC_VRT_EVT_SOF, NULL);
}

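/* Map device activity on the virtual bus to a UHC event. */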
static void vrt_device_act(const struct device *dev,
			   const enum uvb_device_act act)
{
	enum uhc_event_type type;

	switch (act) {
	case UVB_DEVICE_ACT_RWUP:
		type = UHC_EVT_RWUP;
		break;
	case UVB_DEVICE_ACT_FS:
		type = UHC_EVT_DEV_CONNECTED_FS;
		break;
	case UVB_DEVICE_ACT_HS:
		type = UHC_EVT_DEV_CONNECTED_HS;
		break;
	case UVB_DEVICE_ACT_REMOVED:
		type = UHC_EVT_DEV_REMOVED;
		break;
	default:
		type = UHC_EVT_ERROR;
	}

	uhc_submit_event(dev, type, 0);
}

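/* UVB callback: replies are deferred to the work queue, device
 * activity (connect, remove, remote wakeup) is reported directly.
 */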
static void uhc_vrt_uvb_cb(const void *const vrt_priv,
			   const enum uvb_event_type type,
			   const void *data)
{
	const struct device *dev = vrt_priv;

	if (type == UVB_EVT_REPLY) {
		vrt_event_submit(dev, UHC_VRT_EVT_REPLY, data);
	} else if (type == UVB_EVT_DEVICE_ACT) {
		vrt_device_act(dev, POINTER_TO_INT(data));
	} else {
		LOG_ERR("Unknown event %d for %p", type, dev);
	}
}

static int uhc_vrt_sof_enable(const struct device *dev)
{
	/* TODO */
	return 0;
}

/* Disable SoF generator and suspend the bus */
static int uhc_vrt_bus_suspend(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	k_timer_stop(&priv->sof_timer);

	return uvb_advert(priv->host_node, UVB_EVT_SUSPEND, NULL);
}

static int uhc_vrt_bus_reset(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	k_timer_stop(&priv->sof_timer);

	return uvb_advert(priv->host_node, UVB_EVT_RESET, NULL);
}

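/* Re-arm the 1 ms SoF timer before signaling resume on the bus */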
static int uhc_vrt_bus_resume(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	k_timer_init(&priv->sof_timer, sof_timer_handler, NULL);
	k_timer_start(&priv->sof_timer, K_MSEC(1), K_MSEC(1));

	return uvb_advert(priv->host_node, UVB_EVT_RESUME, NULL);
}

static int uhc_vrt_enqueue(const struct device *dev,
			   struct uhc_transfer *const xfer)
{
	uhc_xfer_append(dev, xfer);
	vrt_event_submit(dev, UHC_VRT_EVT_XFER, NULL);

	return 0;
}

static int uhc_vrt_dequeue(const struct device *dev,
			   struct uhc_transfer *const xfer)
{
	/* TODO */
	return 0;
}

static int uhc_vrt_init(const struct device *dev)
{
	return 0;
}

static int uhc_vrt_enable(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	return uvb_advert(priv->host_node, UVB_EVT_VBUS_READY, NULL);
}

static int uhc_vrt_disable(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	return uvb_advert(priv->host_node, UVB_EVT_VBUS_REMOVED, NULL);
}

static int uhc_vrt_shutdown(const struct device *dev)
{
	return 0;
}

static int uhc_vrt_lock(const struct device *dev)
{
	return uhc_lock_internal(dev, K_FOREVER);
}

static int uhc_vrt_unlock(const struct device *dev)
{
	return uhc_unlock_internal(dev);
}

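/* Pre-initialization: link the UVB host node back to the device
 * instance and set up the mutex, event FIFO, work item, and SoF timer.
 */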
static int uhc_vrt_driver_preinit(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_data *data = dev->data;

	priv->dev = dev;
	k_mutex_init(&data->mutex);

	priv->host_node->priv = dev;
	k_fifo_init(&priv->fifo);
	k_work_init(&priv->work, xfer_work_handler);
	k_timer_init(&priv->sof_timer, sof_timer_handler, NULL);

	LOG_DBG("Virtual UHC pre-initialized");

	return 0;
}

static const struct uhc_api uhc_vrt_api = {
	.lock = uhc_vrt_lock,
	.unlock = uhc_vrt_unlock,
	.init = uhc_vrt_init,
	.enable = uhc_vrt_enable,
	.disable = uhc_vrt_disable,
	.shutdown = uhc_vrt_shutdown,

	.bus_reset = uhc_vrt_bus_reset,
	.sof_enable = uhc_vrt_sof_enable,
	.bus_suspend = uhc_vrt_bus_suspend,
	.bus_resume = uhc_vrt_bus_resume,

	.ep_enqueue = uhc_vrt_enqueue,
	.ep_dequeue = uhc_vrt_dequeue,
};

#define DT_DRV_COMPAT zephyr_uhc_virtual

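/*
 * An instance is defined for every enabled devicetree node with the
 * "zephyr,uhc-virtual" compatible. A minimal devicetree sketch (node
 * name, label, and placement are illustrative, not taken from this
 * file):
 *
 *	uhc0: uhc_virtual {
 *		compatible = "zephyr,uhc-virtual";
 *	};
 */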
#define UHC_VRT_DEVICE_DEFINE(n)						\
	UVB_HOST_NODE_DEFINE(uhc_bc_##n,					\
			     DT_NODE_FULL_NAME(DT_DRV_INST(n)),			\
			     uhc_vrt_uvb_cb);					\
										\
	static const struct uhc_vrt_config uhc_vrt_config_##n = {		\
	};									\
										\
	static struct uhc_vrt_data uhc_priv_##n = {				\
		.host_node = &uhc_bc_##n,					\
	};									\
										\
	static struct uhc_data uhc_data_##n = {					\
		.priv = &uhc_priv_##n,						\
	};									\
										\
	DEVICE_DT_INST_DEFINE(n, uhc_vrt_driver_preinit, NULL,			\
			      &uhc_data_##n, &uhc_vrt_config_##n,		\
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,	\
			      &uhc_vrt_api);

DT_INST_FOREACH_STATUS_OKAY(UHC_VRT_DEVICE_DEFINE)