/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file  udc_virtual.c
 * @brief Virtual USB device controller (UDC) driver
 *
 * The virtual device controller does not emulate any hardware;
 * it can only communicate with the virtual host controller
 * through the virtual bus (UVB).
 */

#include "udc_common.h"
#include "../uvb/uvb.h"

#include <string.h>
#include <stdio.h>

#include <zephyr/kernel.h>
#include <zephyr/drivers/usb/udc.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(udc_vrt, CONFIG_UDC_DRIVER_LOG_LEVEL);

struct udc_vrt_config {
	size_t num_of_eps;
	struct udc_ep_config *ep_cfg_in;
	struct udc_ep_config *ep_cfg_out;
	void (*make_thread)(const struct device *dev);
	struct uvb_node *dev_node;
	int speed_idx;
	const char *uhc_name;
};

struct udc_vrt_data {
	struct k_fifo fifo;
	struct k_thread thread_data;
	uint8_t addr;
};

struct udc_vrt_event {
	sys_snode_t node;
	enum uvb_event_type type;
	struct uvb_packet *pkt;
};

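/* Pool of events passed from the UVB callback to the driver thread */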
K_MEM_SLAB_DEFINE(udc_vrt_slab, sizeof(struct udc_vrt_event),
		  16, sizeof(void *));

/* Reuse request packet for reply */
static int vrt_request_reply(const struct device *dev,
			     struct uvb_packet *const pkt,
			     const enum uvb_reply reply)
{
	const struct udc_vrt_config *config = dev->config;

	pkt->reply = reply;

	return uvb_reply_pkt(config->dev_node, pkt);
}

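/* Clear halt on both control endpoints before handling a new setup packet */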
static void ctrl_ep_clear_halt(const struct device *dev)
{
	struct udc_ep_config *cfg;

	cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
	cfg->stat.halted = false;

	cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_IN);
	cfg->stat.halted = false;
}

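/* Allocate a buffer for the control OUT (data or status) stage and queue it */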
static int vrt_ctrl_feed_dout(const struct device *dev,
			      const size_t length)
{
	struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
	struct net_buf *buf;

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length);
	if (buf == NULL) {
		return -ENOMEM;
	}

	udc_buf_put(ep_cfg, buf);

	return 0;
}

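/*
 * Handle a setup packet from the virtual host: copy it into a fresh
 * buffer, advance the control transfer state machine, and prepare the
 * next stage (data OUT, data IN, or status) before ACKing the packet.
 */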
static int vrt_handle_setup(const struct device *dev,
			    struct uvb_packet *const pkt)
{
	struct net_buf *buf;
	int err, ret;

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, 8);
	if (buf == NULL) {
		return -ENOMEM;
	}

	net_buf_add_mem(buf, pkt->data, pkt->length);
	udc_ep_buf_set_setup(buf);
	ctrl_ep_clear_halt(dev);

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_data_out(dev)) {
		/* Allocate and feed buffer for data OUT stage */
		LOG_DBG("s: %p | feed for -out-", buf);
		err = vrt_ctrl_feed_dout(dev, udc_data_stage_length(buf));
		if (err == -ENOMEM) {
			/*
			 * Pass it on to the higher level which will
			 * halt the control OUT endpoint.
			 */
			err = udc_submit_ep_event(dev, buf, err);
		}
	} else if (udc_ctrl_stage_is_data_in(dev)) {
		LOG_DBG("s: %p | submit for -in-", buf);
		/* Allocate buffer for data IN and submit to upper layer */
		err = udc_ctrl_submit_s_in_status(dev);
	} else {
		LOG_DBG("s: %p | submit for -status-", buf);
		/*
		 * For all other cases we feed with a buffer
		 * large enough for the setup packet.
		 */
		err = udc_ctrl_submit_s_status(dev);
	}

	ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK);

	return ret ? ret : err;
}

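/* Advance the control transfer state after a control OUT buffer completes */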
static int vrt_handle_ctrl_out(const struct device *dev,
			       struct net_buf *const buf)
{
	int err = 0;

	if (udc_ctrl_stage_is_status_out(dev)) {
		/* Status stage finished, notify upper layer */
		err = udc_ctrl_submit_status(dev, buf);
	}

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_status_in(dev)) {
		return udc_ctrl_submit_s_out_status(dev, buf);
	}

	return err;
}

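/*
 * Handle an OUT token: copy the packet payload into the queued buffer
 * and reply with ACK, or with NACK/STALL if no buffer is available or
 * the endpoint is halted.
 */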
static int vrt_handle_out(const struct device *dev,
			  struct uvb_packet *const pkt)
{
	struct udc_ep_config *ep_cfg;
	const uint8_t ep = pkt->ep;
	struct net_buf *buf;
	size_t min_len;
	int err = 0;
	int ret;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg->stat.halted) {
		LOG_DBG("reply STALL ep 0x%02x", ep);
		return vrt_request_reply(dev, pkt, UVB_REPLY_STALL);
	}

	buf = udc_buf_peek(dev, ep);
	if (buf == NULL) {
		LOG_DBG("reply NACK ep 0x%02x", ep);
		return vrt_request_reply(dev, pkt, UVB_REPLY_NACK);
	}

	min_len = MIN(pkt->length, net_buf_tailroom(buf));
	net_buf_add_mem(buf, pkt->data, min_len);

	LOG_DBG("Handle data OUT, %zu | %zu", pkt->length, net_buf_tailroom(buf));

	if (net_buf_tailroom(buf) == 0 || pkt->length < udc_mps_ep_size(ep_cfg)) {
		buf = udc_buf_get(dev, ep);

		if (ep == USB_CONTROL_EP_OUT) {
			err = vrt_handle_ctrl_out(dev, buf);
		} else {
			err = udc_submit_ep_event(dev, buf, 0);
		}
	}

	ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK);

	return ret ? ret : err;
}

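/* Advance the control transfer state after a control IN buffer completes */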
static int vrt_handle_ctrl_in(const struct device *dev,
			      struct net_buf *const buf)
{
	int err = 0;

	if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) {
		/* Status stage finished, notify upper layer */
		err = udc_ctrl_submit_status(dev, buf);
	}

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_status_out(dev)) {
		/*
		 * IN transfer finished, release buffer,
		 * feed control OUT buffer for status stage.
		 */
		net_buf_unref(buf);
		return vrt_ctrl_feed_dout(dev, 0);
	}

	return err;
}

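/*
 * Handle an IN token: copy data from the queued buffer into the packet
 * and reply with ACK, or with NACK/STALL if no buffer is available or
 * the endpoint is halted.
 */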
static int vrt_handle_in(const struct device *dev,
			 struct uvb_packet *const pkt)
{
	struct udc_ep_config *ep_cfg;
	const uint8_t ep = pkt->ep;
	struct net_buf *buf;
	size_t min_len;
	int err = 0;
	int ret;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg->stat.halted) {
		LOG_DBG("reply STALL ep 0x%02x", ep);
		return vrt_request_reply(dev, pkt, UVB_REPLY_STALL);
	}

	buf = udc_buf_peek(dev, ep);
	if (buf == NULL) {
		LOG_DBG("reply NACK ep 0x%02x", ep);
		return vrt_request_reply(dev, pkt, UVB_REPLY_NACK);
	}

	LOG_DBG("Handle data IN, %zu | %u | %u",
		pkt->length, buf->len, udc_mps_ep_size(ep_cfg));
	min_len = MIN(pkt->length, buf->len);
	memcpy(pkt->data, buf->data, min_len);
	net_buf_pull(buf, min_len);
	pkt->length = min_len;

	if (buf->len == 0 || pkt->length < udc_mps_ep_size(ep_cfg)) {
		if (udc_ep_buf_has_zlp(buf)) {
			udc_ep_buf_clear_zlp(buf);
			goto continue_in;
		}

		LOG_DBG("Finish data IN %zu | %u", pkt->length, buf->len);
		buf = udc_buf_get(dev, ep);

		if (ep == USB_CONTROL_EP_IN) {
			err = vrt_handle_ctrl_in(dev, buf);
		} else {
			err = udc_submit_ep_event(dev, buf, 0);
		}
	}

continue_in:
	ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK);

	return ret ? ret : err;
}

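/* Dispatch a host request by endpoint, direction, and request type */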
static int vrt_handle_request(const struct device *dev,
			      struct uvb_packet *const pkt)
{
	LOG_DBG("REQUEST event for %p pkt %p", dev, pkt);

	if (USB_EP_GET_IDX(pkt->ep) == 0 && pkt->request == UVB_REQUEST_SETUP) {
		return vrt_handle_setup(dev, pkt);
	}

	if (USB_EP_DIR_IS_OUT(pkt->ep) && pkt->request == UVB_REQUEST_DATA) {
		return vrt_handle_out(dev, pkt);
	}

	if (USB_EP_DIR_IS_IN(pkt->ep) && pkt->request == UVB_REQUEST_DATA) {
		return vrt_handle_in(dev, pkt);
	}

	return -ENOTSUP;
}

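/* Event processing loop; inlined into each instance's driver thread */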
static ALWAYS_INLINE void udc_vrt_thread_handler(void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct udc_vrt_data *priv = udc_get_private(dev);

	while (true) {
		struct udc_vrt_event *vrt_ev;
		int err = 0;

		vrt_ev = k_fifo_get(&priv->fifo, K_FOREVER);

		switch (vrt_ev->type) {
		case UVB_EVT_VBUS_REMOVED:
			err = udc_submit_event(dev, UDC_EVT_VBUS_REMOVED, 0);
			break;
		case UVB_EVT_VBUS_READY:
			err = udc_submit_event(dev, UDC_EVT_VBUS_READY, 0);
			break;
		case UVB_EVT_SUSPEND:
			err = udc_submit_event(dev, UDC_EVT_SUSPEND, 0);
			break;
		case UVB_EVT_RESUME:
			err = udc_submit_event(dev, UDC_EVT_RESUME, 0);
			break;
		case UVB_EVT_RESET:
			err = udc_submit_event(dev, UDC_EVT_RESET, 0);
			break;
		case UVB_EVT_REQUEST:
			err = vrt_handle_request(dev, vrt_ev->pkt);
			break;
		default:
			break;
		}

		if (err) {
			udc_submit_event(dev, UDC_EVT_ERROR, err);
		}

		k_mem_slab_free(&udc_vrt_slab, (void *)vrt_ev);
	}
}

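/* Queue a UVB event for the driver thread; called from the bus callback */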
static void vrt_submit_uvb_event(const struct device *dev,
				 const enum uvb_event_type type,
				 struct uvb_packet *const pkt)
{
	struct udc_vrt_data *priv = udc_get_private(dev);
	struct udc_vrt_event *vrt_ev;
	int ret;

	ret = k_mem_slab_alloc(&udc_vrt_slab, (void **)&vrt_ev, K_NO_WAIT);
	__ASSERT(ret == 0, "Failed to allocate slab");

	vrt_ev->type = type;
	vrt_ev->pkt = pkt;
	k_fifo_put(&priv->fifo, vrt_ev);
}

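/* UVB notification callback: filter events by device state and address */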
static void udc_vrt_uvb_cb(const void *const vrt_priv,
			   const enum uvb_event_type type,
			   const void *data)
{
	const struct device *dev = vrt_priv;
	struct udc_vrt_data *priv = udc_get_private(dev);
	struct uvb_packet *const pkt = (void *)data;

	switch (type) {
	case UVB_EVT_VBUS_REMOVED:
		__fallthrough;
	case UVB_EVT_VBUS_READY:
		if (udc_is_initialized(dev)) {
			vrt_submit_uvb_event(dev, type, NULL);
		}
		break;
	case UVB_EVT_SUSPEND:
		__fallthrough;
	case UVB_EVT_RESUME:
		__fallthrough;
	case UVB_EVT_RESET:
		if (udc_is_enabled(dev)) {
			vrt_submit_uvb_event(dev, type, NULL);
		}
		break;
	case UVB_EVT_REQUEST:
		if (udc_is_enabled(dev) && priv->addr == pkt->addr) {
			vrt_submit_uvb_event(dev, type, pkt);
		}
		break;
	default:
		LOG_ERR("Unknown event for %p", dev);
		break;
	}
}

static int udc_vrt_ep_enqueue(const struct device *dev,
			      struct udc_ep_config *cfg,
			      struct net_buf *buf)
{
	LOG_DBG("%p enqueue %p", dev, buf);
	udc_buf_put(cfg, buf);

	if (cfg->stat.halted) {
		/* Buffer stays queued; the next host request picks it up
		 * once the halt is cleared.
		 */
		LOG_DBG("ep 0x%02x halted", cfg->addr);
	}

	return 0;
}

static int udc_vrt_ep_dequeue(const struct device *dev,
			      struct udc_ep_config *cfg)
{
	unsigned int lock_key;
	struct net_buf *buf;

	lock_key = irq_lock();
	/* Draft dequeue implementation */
	buf = udc_buf_get_all(dev, cfg->addr);
	if (buf) {
		udc_submit_ep_event(dev, buf, -ECONNABORTED);
	}
	irq_unlock(lock_key);

	return 0;
}

static int udc_vrt_ep_enable(const struct device *dev,
			     struct udc_ep_config *cfg)
{
	return 0;
}

static int udc_vrt_ep_disable(const struct device *dev,
			      struct udc_ep_config *cfg)
{
	return 0;
}

static int udc_vrt_ep_set_halt(const struct device *dev,
			       struct udc_ep_config *cfg)
{
	LOG_DBG("Set halt ep 0x%02x", cfg->addr);

	cfg->stat.halted = true;

	return 0;
}

static int udc_vrt_ep_clear_halt(const struct device *dev,
				 struct udc_ep_config *cfg)
{
	cfg->stat.halted = false;

	return 0;
}

static int udc_vrt_set_address(const struct device *dev, const uint8_t addr)
{
	struct udc_vrt_data *priv = udc_get_private(dev);

	priv->addr = addr;
	LOG_DBG("Set new address %u for %p", priv->addr, dev);

	return 0;
}

static int udc_vrt_host_wakeup(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;

	return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT,
			   INT_TO_POINTER(UVB_DEVICE_ACT_RWUP));
}

static enum udc_bus_speed udc_vrt_device_speed(const struct device *dev)
{
	struct udc_data *data = dev->data;

	/* FIXME: get actual device speed */
	return data->caps.hs ? UDC_BUS_SPEED_HS : UDC_BUS_SPEED_FS;
}

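/* Announce the device connection speed to the virtual host */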
static int udc_vrt_enable(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;
	enum uvb_device_act act;

	switch (config->speed_idx) {
	case 1:
		act = UVB_DEVICE_ACT_FS;
		break;
	case 2:
		act = UVB_DEVICE_ACT_HS;
		break;
	case 3:
		act = UVB_DEVICE_ACT_SS;
		break;
	case 0:
	default:
		act = UVB_DEVICE_ACT_LS;
		break;
	}

	return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT,
			   INT_TO_POINTER(act));
}

static int udc_vrt_disable(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;

	return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT,
			   INT_TO_POINTER(UVB_DEVICE_ACT_REMOVED));
}

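/* Enable both control endpoints and subscribe to the virtual bus */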
static int udc_vrt_init(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;

	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT,
				   USB_EP_TYPE_CONTROL, 64, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}

	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN,
				   USB_EP_TYPE_CONTROL, 64, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}

	return uvb_subscribe(config->uhc_name, config->dev_node);
}

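/* Disable both control endpoints and unsubscribe from the virtual bus */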
static int udc_vrt_shutdown(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) {
		LOG_ERR("Failed to disable control endpoint");
		return -EIO;
	}

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) {
		LOG_ERR("Failed to disable control endpoint");
		return -EIO;
	}

	return uvb_unsubscribe(config->uhc_name, config->dev_node);
}

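/* One-time driver init: register endpoints and create the handler thread */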
static int udc_vrt_driver_preinit(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;
	struct udc_data *data = dev->data;
	struct udc_vrt_data *priv = data->priv;
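	/* 1023 bytes is the full-speed limit (isochronous); high speed allows 1024 */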
	uint16_t mps = 1023;
	int err;

	k_mutex_init(&data->mutex);
	k_fifo_init(&priv->fifo);

	data->caps.rwup = true;
	data->caps.mps0 = UDC_MPS0_64;
	if (config->speed_idx == 2) {
		data->caps.hs = true;
		mps = 1024;
	}

	for (int i = 0; i < config->num_of_eps; i++) {
		config->ep_cfg_out[i].caps.out = 1;
		if (i == 0) {
			config->ep_cfg_out[i].caps.control = 1;
			config->ep_cfg_out[i].caps.mps = 64;
		} else {
			config->ep_cfg_out[i].caps.bulk = 1;
			config->ep_cfg_out[i].caps.interrupt = 1;
			config->ep_cfg_out[i].caps.iso = 1;
			config->ep_cfg_out[i].caps.mps = mps;
		}

		config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i;
		err = udc_register_ep(dev, &config->ep_cfg_out[i]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}
	}

	for (int i = 0; i < config->num_of_eps; i++) {
		config->ep_cfg_in[i].caps.in = 1;
		if (i == 0) {
			config->ep_cfg_in[i].caps.control = 1;
			config->ep_cfg_in[i].caps.mps = 64;
		} else {
			config->ep_cfg_in[i].caps.bulk = 1;
			config->ep_cfg_in[i].caps.interrupt = 1;
			config->ep_cfg_in[i].caps.iso = 1;
			config->ep_cfg_in[i].caps.mps = mps;
		}

		config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i;
		err = udc_register_ep(dev, &config->ep_cfg_in[i]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}
	}

	config->dev_node->priv = dev;
	config->make_thread(dev);
	LOG_INF("Device %p (max. speed %d) belongs to %s",
		dev, config->speed_idx, config->uhc_name);

	return 0;
}

static int udc_vrt_lock(const struct device *dev)
{
	return udc_lock_internal(dev, K_FOREVER);
}

static int udc_vrt_unlock(const struct device *dev)
{
	return udc_unlock_internal(dev);
}

static const struct udc_api udc_vrt_api = {
	.lock = udc_vrt_lock,
	.unlock = udc_vrt_unlock,
	.device_speed = udc_vrt_device_speed,
	.init = udc_vrt_init,
	.enable = udc_vrt_enable,
	.disable = udc_vrt_disable,
	.shutdown = udc_vrt_shutdown,
	.set_address = udc_vrt_set_address,
	.host_wakeup = udc_vrt_host_wakeup,
	.ep_enable = udc_vrt_ep_enable,
	.ep_disable = udc_vrt_ep_disable,
	.ep_set_halt = udc_vrt_ep_set_halt,
	.ep_clear_halt = udc_vrt_ep_clear_halt,
	.ep_enqueue = udc_vrt_ep_enqueue,
	.ep_dequeue = udc_vrt_ep_dequeue,
};

#define DT_DRV_COMPAT zephyr_udc_virtual

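/*
 * Define a per-instance stack, thread, endpoint configuration arrays,
 * UVB device node, and the device itself for each zephyr,udc-virtual
 * devicetree instance.
 */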
#define UDC_VRT_DEVICE_DEFINE(n)						\
	K_THREAD_STACK_DEFINE(udc_vrt_stack_area_##n,				\
			      CONFIG_UDC_VIRTUAL_STACK_SIZE);			\
										\
	static void udc_vrt_thread_##n(void *dev, void *unused1, void *unused2)	\
	{									\
		ARG_UNUSED(unused1);						\
		ARG_UNUSED(unused2);						\
										\
		/* The handler loops forever; no outer loop is needed */	\
		udc_vrt_thread_handler(dev);					\
	}									\
										\
	static void udc_vrt_make_thread_##n(const struct device *dev)		\
	{									\
		struct udc_vrt_data *priv = udc_get_private(dev);		\
										\
		k_thread_create(&priv->thread_data,				\
			    udc_vrt_stack_area_##n,				\
			    K_THREAD_STACK_SIZEOF(udc_vrt_stack_area_##n),	\
			    udc_vrt_thread_##n,					\
			    (void *)dev, NULL, NULL,				\
			    K_PRIO_COOP(CONFIG_UDC_VIRTUAL_THREAD_PRIORITY),	\
			    K_ESSENTIAL,					\
			    K_NO_WAIT);						\
		k_thread_name_set(&priv->thread_data, dev->name);		\
	}									\
										\
	static struct udc_ep_config						\
		ep_cfg_out_##n[DT_INST_PROP(n, num_bidir_endpoints)];		\
	static struct udc_ep_config						\
		ep_cfg_in_##n[DT_INST_PROP(n, num_bidir_endpoints)];		\
										\
	static struct uvb_node udc_vrt_dev_node##n = {				\
		.name = DT_NODE_FULL_NAME(DT_DRV_INST(n)),			\
		.notify = udc_vrt_uvb_cb,					\
	};									\
										\
	static const struct udc_vrt_config udc_vrt_config_##n = {		\
		.num_of_eps = DT_INST_PROP(n, num_bidir_endpoints),		\
		.ep_cfg_in = ep_cfg_in_##n,					\
		.ep_cfg_out = ep_cfg_out_##n,					\
		.make_thread = udc_vrt_make_thread_##n,				\
		.dev_node = &udc_vrt_dev_node##n,				\
		.speed_idx = DT_ENUM_IDX(DT_DRV_INST(n), maximum_speed),	\
		.uhc_name = DT_NODE_FULL_NAME(DT_INST_PARENT(n)),		\
	};									\
										\
	static struct udc_vrt_data udc_priv_##n = {				\
	};									\
										\
	static struct udc_data udc_data_##n = {					\
		.mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex),		\
		.priv = &udc_priv_##n,						\
	};									\
										\
	DEVICE_DT_INST_DEFINE(n, udc_vrt_driver_preinit, NULL,			\
			      &udc_data_##n, &udc_vrt_config_##n,		\
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,	\
			      &udc_vrt_api);

DT_INST_FOREACH_STATUS_OKAY(UDC_VRT_DEVICE_DEFINE)