/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file uhc_virtual.c
 * @brief Virtual USB host controller (UHC) driver
 *
 * The virtual host controller does not emulate any hardware
 * and can only communicate with virtual device controllers
 * through the virtual bus (UVB).
 */

#include "uhc_common.h"
#include "../uvb/uvb.h"

#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/drivers/usb/uhc.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(uhc_vrt, CONFIG_UHC_DRIVER_LOG_LEVEL);

#define FRAME_MAX_TRANSFERS 16

struct uhc_vrt_config {
};

struct uhc_vrt_slot {
	sys_dnode_t node;
	struct uhc_transfer *xfer;
};

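/*
 * Transfers picked for the current frame. Up to FRAME_MAX_TRANSFERS
 * slots are chained into a dlist by vrt_assemble_frame(); ptr points
 * to the node of the slot currently being processed and count tracks
 * how many transfers have been scheduled in the frame so far.
 */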
struct uhc_vrt_frame {
	struct uhc_vrt_slot slots[FRAME_MAX_TRANSFERS];
	sys_dnode_t *ptr;
	sys_dlist_t list;
	uint8_t count;
};

struct uhc_vrt_data {
	const struct device *dev;
	struct uvb_node *host_node;
	struct k_work work;
	struct k_fifo fifo;
	struct uhc_transfer *last_xfer;
	struct uhc_vrt_frame frame;
	struct k_timer sof_timer;
	uint16_t frame_number;
	uint8_t req;
};

enum uhc_vrt_event_type {
	/* SoF generator event */
	UHC_VRT_EVT_SOF,
	/* Request reply received */
	UHC_VRT_EVT_REPLY,
};

/* Structure for driver's endpoint events */
struct uhc_vrt_event {
	sys_snode_t node;
	enum uhc_vrt_event_type type;
	struct uvb_packet *pkt;
};

K_MEM_SLAB_DEFINE(uhc_vrt_slab, sizeof(struct uhc_vrt_event),
		  16, sizeof(void *));

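/*
 * Queue a driver event for the bottom half and kick the work item.
 * May be called from any context, e.g. the SoF timer handler or the
 * UVB callback.
 */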
static void vrt_event_submit(const struct device *dev,
			     const enum uhc_vrt_event_type type,
			     const void *data)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_vrt_event *event;
	int ret;

	ret = k_mem_slab_alloc(&uhc_vrt_slab, (void **)&event, K_NO_WAIT);
	__ASSERT(ret == 0, "Failed to allocate slab");

	event->type = type;
	event->pkt = (struct uvb_packet *)data;
	k_fifo_put(&priv->fifo, event);
	k_work_submit(&priv->work);
}

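/*
 * Advance a control transfer by one stage: advertise a SETUP, DATA,
 * or STATUS request packet on the virtual bus, depending on the
 * current stage of the transfer.
 */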
static int vrt_xfer_control(const struct device *dev,
			    struct uhc_transfer *const xfer)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct net_buf *buf = xfer->buf;
	struct uvb_packet *uvb_pkt;
	uint8_t *data = NULL;
	size_t length = 0;

	if (xfer->stage == UHC_CONTROL_STAGE_SETUP) {
		LOG_DBG("Handle SETUP stage");
		uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_SETUP,
					xfer->udev->addr, USB_CONTROL_EP_OUT,
					xfer->setup_pkt, sizeof(xfer->setup_pkt));
		if (uvb_pkt == NULL) {
			LOG_ERR("Failed to allocate UVB packet");
			return -ENOMEM;
		}

		priv->req = UVB_REQUEST_SETUP;

		return uvb_advert_pkt(priv->host_node, uvb_pkt);
	}

	if (buf != NULL && xfer->stage == UHC_CONTROL_STAGE_DATA) {
		if (USB_EP_DIR_IS_IN(xfer->ep)) {
			length = MIN(net_buf_tailroom(buf), xfer->mps);
			data = net_buf_tail(buf);
		} else {
			length = MIN(buf->len, xfer->mps);
			data = buf->data;
		}

		LOG_DBG("Handle DATA stage");
		uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA,
					xfer->udev->addr, xfer->ep,
					data, length);
		if (uvb_pkt == NULL) {
			LOG_ERR("Failed to allocate UVB packet");
			return -ENOMEM;
		}

		priv->req = UVB_REQUEST_DATA;

		return uvb_advert_pkt(priv->host_node, uvb_pkt);
	}

	if (xfer->stage == UHC_CONTROL_STAGE_STATUS) {
		uint8_t ep;

		LOG_DBG("Handle STATUS stage");
		if (USB_EP_DIR_IS_IN(xfer->ep)) {
			ep = USB_CONTROL_EP_OUT;
		} else {
			ep = USB_CONTROL_EP_IN;
		}

		uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA,
					xfer->udev->addr, ep,
					NULL, 0);
		if (uvb_pkt == NULL) {
			LOG_ERR("Failed to allocate UVB packet");
			return -ENOMEM;
		}

		priv->req = UVB_REQUEST_DATA;

		return uvb_advert_pkt(priv->host_node, uvb_pkt);
	}

	return -EINVAL;
}

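/*
 * Advertise a single data request of up to MPS bytes for a bulk or
 * interrupt transfer on the virtual bus.
 */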
static int vrt_xfer_bulk(const struct device *dev,
			 struct uhc_transfer *const xfer)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct net_buf *buf = xfer->buf;
	struct uvb_packet *uvb_pkt;
	uint8_t *data;
	size_t length;

	if (USB_EP_DIR_IS_IN(xfer->ep)) {
		length = MIN(net_buf_tailroom(buf), xfer->mps);
		data = net_buf_tail(buf);
	} else {
		length = MIN(buf->len, xfer->mps);
		data = buf->data;
	}

	uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA, xfer->udev->addr, xfer->ep,
				data, length);
	if (uvb_pkt == NULL) {
		LOG_ERR("Failed to allocate UVB packet");
		return -ENOMEM;
	}

	return uvb_advert_pkt(priv->host_node, uvb_pkt);
}

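/*
 * Map an endpoint address to a unique index in the range 0..31 used
 * for the per-frame endpoint bitmap: OUT endpoints and the control
 * endpoint map to 0..15, IN endpoints to 16..31.
 */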
static inline uint8_t get_xfer_ep_idx(const uint8_t ep)
{
	/* We do not need to differentiate the direction for the control
	 * transfers because they are handled as a whole.
	 */
	if (USB_EP_DIR_IS_OUT(ep) || USB_EP_GET_IDX(ep) == 0) {
		return USB_EP_GET_IDX(ep & BIT_MASK(4));
	}

	return USB_EP_GET_IDX(ep & BIT_MASK(4)) + 16U;
}

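/*
 * Collect the transfers to be processed in the next frame. Only one
 * transfer per endpoint is picked, and interrupt transfers are only
 * picked in the frame matching their start_frame.
 */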
static void vrt_assemble_frame(const struct device *dev)
{
	struct uhc_vrt_data *const priv = uhc_get_private(dev);
	struct uhc_vrt_frame *const frame = &priv->frame;
	struct uhc_data *const data = dev->data;
	struct uhc_transfer *tmp;
	unsigned int n = 0;
	unsigned int key;
	uint32_t bm = 0;

	sys_dlist_init(&frame->list);
	frame->ptr = NULL;
	frame->count = 0;
	key = irq_lock();

	/* TODO: add periodic transfers up to 90% */
	SYS_DLIST_FOR_EACH_CONTAINER(&data->ctrl_xfers, tmp, node) {
		uint8_t idx = get_xfer_ep_idx(tmp->ep);

		/* There could be multiple transfers queued for the same
		 * endpoint, for now we only allow one to be scheduled per frame.
		 */
		if (bm & BIT(idx)) {
			continue;
		}

		if (tmp->interval) {
			if (tmp->start_frame != priv->frame_number) {
				continue;
			}

			tmp->start_frame = priv->frame_number + tmp->interval;
			LOG_DBG("Interrupt transfer s.f. %u f.n. %u interval %u",
				tmp->start_frame, priv->frame_number, tmp->interval);
		}

		bm |= BIT(idx);
		frame->slots[n].xfer = tmp;
		sys_dlist_append(&frame->list, &frame->slots[n].node);
		n++;

		if (n >= FRAME_MAX_TRANSFERS) {
			/* No more free slots */
			break;
		}
	}

	irq_unlock(key);
}

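/*
 * Start or continue processing of the current frame: pick the next
 * slot from the frame list (unless a transfer is already in progress)
 * and advertise the corresponding request on the virtual bus.
 */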
static int vrt_schedule_frame(const struct device *dev)
{
	struct uhc_vrt_data *const priv = uhc_get_private(dev);
	struct uhc_vrt_frame *const frame = &priv->frame;
	struct uhc_vrt_slot *slot;

	if (priv->last_xfer == NULL) {
		if (frame->count >= FRAME_MAX_TRANSFERS) {
			LOG_DBG("Frame finished");
			return 0;
		}

		frame->ptr = sys_dlist_get(&frame->list);
		slot = SYS_DLIST_CONTAINER(frame->ptr, slot, node);
		if (slot == NULL) {
			LOG_DBG("No more transfers for the frame");
			return 0;
		}

		priv->last_xfer = slot->xfer;
		frame->count++;
		LOG_DBG("Next transfer is %p (count %u)",
			(void *)priv->last_xfer, frame->count);
	}

	if (USB_EP_GET_IDX(priv->last_xfer->ep) == 0) {
		return vrt_xfer_control(dev, priv->last_xfer);
	}

	return vrt_xfer_bulk(dev, priv->last_xfer);
}

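/*
 * Handle an ACK reply: update the transfer buffer, advance a control
 * transfer to its next stage, and return the transfer to the host
 * stack when it is finished.
 */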
static void vrt_hrslt_success(const struct device *dev,
			      struct uvb_packet *const pkt)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_transfer *const xfer = priv->last_xfer;
	struct net_buf *buf = xfer->buf;
	bool finished = false;
	size_t length;

	switch (pkt->request) {
	case UVB_REQUEST_SETUP:
		if (xfer->buf != NULL) {
			xfer->stage = UHC_CONTROL_STAGE_DATA;
		} else {
			xfer->stage = UHC_CONTROL_STAGE_STATUS;
		}

		break;
	case UVB_REQUEST_DATA:
		if (xfer->stage == UHC_CONTROL_STAGE_STATUS) {
			LOG_DBG("Status stage finished");
			finished = true;
			break;
		}

		if (USB_EP_DIR_IS_OUT(pkt->ep)) {
			length = MIN(buf->len, xfer->mps);
			net_buf_pull(buf, length);
			LOG_DBG("OUT chunk %zu out of %u", length, buf->len);
			if (buf->len == 0) {
				if (pkt->ep == USB_CONTROL_EP_OUT) {
					xfer->stage = UHC_CONTROL_STAGE_STATUS;
				} else {
					finished = true;
				}
			}
		} else {
			length = MIN(net_buf_tailroom(buf), pkt->length);
			net_buf_add(buf, length);
			if (pkt->length > xfer->mps) {
				LOG_ERR("Ambiguous packet with the length %zu",
					pkt->length);
			}

			LOG_DBG("IN chunk %zu out of %zu", length, net_buf_tailroom(buf));
			if (pkt->length < xfer->mps || !net_buf_tailroom(buf)) {
				if (pkt->ep == USB_CONTROL_EP_IN) {
					xfer->stage = UHC_CONTROL_STAGE_STATUS;
				} else {
					finished = true;
				}
			}
		}
		break;
	}

	if (finished) {
		LOG_DBG("Transfer finished");
		uhc_xfer_return(dev, xfer, 0);
		priv->last_xfer = NULL;
	}
}

static void vrt_xfer_drop_active(const struct device *dev, int err)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	if (priv->last_xfer) {
		uhc_xfer_return(dev, priv->last_xfer, err);
		priv->last_xfer = NULL;
	}
}

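/*
 * Dispatch a device reply: reschedule the active transfer on NACK,
 * drop it with -EPIPE on STALL, and process the result on ACK. The
 * reply packet is always freed here.
 */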
static int vrt_handle_reply(const struct device *dev,
			    struct uvb_packet *const pkt)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_vrt_frame *const frame = &priv->frame;
	struct uhc_transfer *const xfer = priv->last_xfer;
	int ret = 0;

	if (xfer == NULL) {
		LOG_ERR("No transfers to handle");
		ret = -ENODATA;
		goto handle_reply_err;
	}

	switch (pkt->reply) {
	case UVB_REPLY_NACK:
		/* Move the transfer back to the list. */
		sys_dlist_append(&frame->list, frame->ptr);
		priv->last_xfer = NULL;
		LOG_DBG("NACK 0x%02x count %u", xfer->ep, frame->count);
		break;
	case UVB_REPLY_STALL:
		vrt_xfer_drop_active(dev, -EPIPE);
		break;
	case UVB_REPLY_ACK:
		vrt_hrslt_success(dev, pkt);
		break;
	default:
		vrt_xfer_drop_active(dev, -EINVAL);
		ret = -EINVAL;
		break;
	}

handle_reply_err:
	uvb_free_pkt(pkt);
	return ret;
}

static void vrt_xfer_cleanup_cancelled(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_data *data = dev->data;
	struct uhc_transfer *tmp;

	if (priv->last_xfer != NULL && priv->last_xfer->err == -ECONNRESET) {
		vrt_xfer_drop_active(dev, -ECONNRESET);
	}

	SYS_DLIST_FOR_EACH_CONTAINER(&data->ctrl_xfers, tmp, node) {
		if (tmp->err == -ECONNRESET) {
			uhc_xfer_return(dev, tmp, -ECONNRESET);
		}
	}
}

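/*
 * Bottom half of the driver. Runs in the system workqueue and
 * processes SoF and reply events: on SoF it assembles a new frame,
 * and after every event it tries to schedule the next transfer.
 */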
static void xfer_work_handler(struct k_work *work)
{
	struct uhc_vrt_data *priv = CONTAINER_OF(work, struct uhc_vrt_data, work);
	const struct device *dev = priv->dev;
	struct uhc_vrt_event *ev;

	while ((ev = k_fifo_get(&priv->fifo, K_NO_WAIT)) != NULL) {
		bool schedule = false;
		int err;

		switch (ev->type) {
		case UHC_VRT_EVT_SOF:
			priv->frame_number++;
			vrt_xfer_cleanup_cancelled(dev);
			vrt_assemble_frame(dev);
			schedule = true;
			break;
		case UHC_VRT_EVT_REPLY:
			err = vrt_handle_reply(dev, ev->pkt);
			if (unlikely(err)) {
				uhc_submit_event(dev, UHC_EVT_ERROR, err);
			}

			schedule = true;
			break;
		default:
			break;
		}

		if (schedule) {
			err = vrt_schedule_frame(dev);
			if (unlikely(err)) {
				uhc_submit_event(dev, UHC_EVT_ERROR, err);
			}
		}

		k_mem_slab_free(&uhc_vrt_slab, (void *)ev);
	}
}

static void sof_timer_handler(struct k_timer *timer)
{
	struct uhc_vrt_data *priv = CONTAINER_OF(timer, struct uhc_vrt_data, sof_timer);

	vrt_event_submit(priv->dev, UHC_VRT_EVT_SOF, NULL);
}

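/*
 * Translate device activity on the virtual bus into UHC events and
 * start the SoF timer with the interval matching the device speed,
 * 1 ms frames for full speed and 125 us microframes for high speed.
 */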
static void vrt_device_act(const struct device *dev,
			   const enum uvb_device_act act)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	enum uhc_event_type type;

	switch (act) {
	case UVB_DEVICE_ACT_RWUP:
		type = UHC_EVT_RWUP;
		break;
	case UVB_DEVICE_ACT_FS:
		type = UHC_EVT_DEV_CONNECTED_FS;
		k_timer_start(&priv->sof_timer, K_MSEC(1), K_MSEC(1));
		break;
	case UVB_DEVICE_ACT_HS:
		type = UHC_EVT_DEV_CONNECTED_HS;
		k_timer_start(&priv->sof_timer, K_MSEC(1), K_USEC(125));
		break;
	case UVB_DEVICE_ACT_REMOVED:
		type = UHC_EVT_DEV_REMOVED;
		break;
	default:
		type = UHC_EVT_ERROR;
	}

	uhc_submit_event(dev, type, 0);
}

static void uhc_vrt_uvb_cb(const void *const vrt_priv,
			   const enum uvb_event_type type,
			   const void *data)
{
	const struct device *dev = vrt_priv;

	if (type == UVB_EVT_REPLY) {
		vrt_event_submit(dev, UHC_VRT_EVT_REPLY, data);
	} else if (type == UVB_EVT_DEVICE_ACT) {
		vrt_device_act(dev, POINTER_TO_INT(data));
	} else {
		LOG_ERR("Unknown event %d for %p", type, dev);
	}
}

static int uhc_vrt_sof_enable(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	k_timer_start(&priv->sof_timer, K_MSEC(1), K_MSEC(1));

	return 0;
}

/* Disable SOF generator and suspend the bus */
static int uhc_vrt_bus_suspend(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	k_timer_stop(&priv->sof_timer);

	return uvb_advert(priv->host_node, UVB_EVT_SUSPEND, NULL);
}

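/*
 * Signal a bus reset on the virtual bus. The SoF timer is stopped for
 * the duration of the reset, and TDRSTR (the minimum root-port reset
 * time, 50 ms per the USB 2.0 specification) is emulated with a sleep.
 */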
static int uhc_vrt_bus_reset(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	int ret;

	k_timer_stop(&priv->sof_timer);
	ret = uvb_advert(priv->host_node, UVB_EVT_RESET, NULL);
	/* TDRSTR */
	k_msleep(50);
	k_timer_start(&priv->sof_timer, K_MSEC(1), K_MSEC(1));

	return ret;
}

static int uhc_vrt_bus_resume(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	k_timer_start(&priv->sof_timer, K_MSEC(1), K_MSEC(1));

	return uvb_advert(priv->host_node, UVB_EVT_RESUME, NULL);
}

static int uhc_vrt_enqueue(const struct device *dev,
			   struct uhc_transfer *const xfer)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	if (xfer->interval) {
		xfer->start_frame = priv->frame_number + xfer->interval;
		LOG_DBG("New interrupt transfer s.f. %u f.n. %u interval %u",
			xfer->start_frame, priv->frame_number, xfer->interval);
	}

	uhc_xfer_append(dev, xfer);

	return 0;
}

static int uhc_vrt_dequeue(const struct device *dev,
			   struct uhc_transfer *const xfer)
{
	struct uhc_data *data = dev->data;
	struct uhc_transfer *tmp;
	unsigned int key;

	key = irq_lock();

	SYS_DLIST_FOR_EACH_CONTAINER(&data->ctrl_xfers, tmp, node) {
		if (xfer == tmp) {
			tmp->err = -ECONNRESET;
		}
	}

	irq_unlock(key);

	return 0;
}

static int uhc_vrt_init(const struct device *dev)
{
	return 0;
}

static int uhc_vrt_enable(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	return uvb_advert(priv->host_node, UVB_EVT_VBUS_READY, NULL);
}

static int uhc_vrt_disable(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);

	return uvb_advert(priv->host_node, UVB_EVT_VBUS_REMOVED, NULL);
}

static int uhc_vrt_shutdown(const struct device *dev)
{
	return 0;
}

static int uhc_vrt_lock(const struct device *dev)
{
	return uhc_lock_internal(dev, K_FOREVER);
}

static int uhc_vrt_unlock(const struct device *dev)
{
	return uhc_unlock_internal(dev);
}

static int uhc_vrt_driver_preinit(const struct device *dev)
{
	struct uhc_vrt_data *priv = uhc_get_private(dev);
	struct uhc_data *data = dev->data;

	priv->dev = dev;
	k_mutex_init(&data->mutex);

	priv->host_node->priv = dev;
	k_fifo_init(&priv->fifo);
	k_work_init(&priv->work, xfer_work_handler);
	k_timer_init(&priv->sof_timer, sof_timer_handler, NULL);

	LOG_DBG("Virtual UHC pre-initialized");

	return 0;
}

static const struct uhc_api uhc_vrt_api = {
	.lock = uhc_vrt_lock,
	.unlock = uhc_vrt_unlock,
	.init = uhc_vrt_init,
	.enable = uhc_vrt_enable,
	.disable = uhc_vrt_disable,
	.shutdown = uhc_vrt_shutdown,

	.bus_reset = uhc_vrt_bus_reset,
	.sof_enable = uhc_vrt_sof_enable,
	.bus_suspend = uhc_vrt_bus_suspend,
	.bus_resume = uhc_vrt_bus_resume,

	.ep_enqueue = uhc_vrt_enqueue,
	.ep_dequeue = uhc_vrt_dequeue,
};

#define DT_DRV_COMPAT zephyr_uhc_virtual

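/*
 * A minimal devicetree node sketch for this driver. The node label and
 * placement are illustrative, actual board files may differ and may
 * nest the controller under a virtual bus parent node:
 *
 *	zephyr_uhc0: uhc_virtual {
 *		compatible = "zephyr,uhc-virtual";
 *		status = "okay";
 *	};
 */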
#define UHC_VRT_DEVICE_DEFINE(n)					\
	UVB_HOST_NODE_DEFINE(uhc_bc_##n,				\
			     DT_NODE_FULL_NAME(DT_DRV_INST(n)),		\
			     uhc_vrt_uvb_cb);				\
									\
	static const struct uhc_vrt_config uhc_vrt_config_##n = {	\
	};								\
									\
	static struct uhc_vrt_data uhc_priv_##n = {			\
		.host_node = &uhc_bc_##n,				\
	};								\
									\
	static struct uhc_data uhc_data_##n = {				\
		.priv = &uhc_priv_##n,					\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n, uhc_vrt_driver_preinit, NULL,		\
			      &uhc_data_##n, &uhc_vrt_config_##n,	\
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
			      &uhc_vrt_api);

DT_INST_FOREACH_STATUS_OKAY(UHC_VRT_DEVICE_DEFINE)