/*
 * Copyright (c) 2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file  udc_nrf.c
 * @brief Nordic USB device controller (UDC) driver
 *
 *        The driver implements the interface between the nRF USBD peripheral
 *        driver from the nrfx package and the UDC API.
 */

#include <string.h>
#include <stdio.h>
#include <soc.h>

#include <zephyr/kernel.h>
#include <zephyr/drivers/usb/udc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/nrf_clock_control.h>
#include <zephyr/dt-bindings/regulator/nrf5x.h>

#include <nrf_usbd_common.h>
#include <hal/nrf_usbd.h>
#include <nrfx_power.h>
#include "udc_common.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(udc_nrf, CONFIG_UDC_DRIVER_LOG_LEVEL);

/*
 * There is no real advantage in changing the control endpoint size,
 * but we can use it for testing the UDC driver API and higher layers.
 */
#define UDC_NRF_MPS0		UDC_MPS0_64
#define UDC_NRF_EP0_SIZE	64

enum udc_nrf_event_type {
	/* An event generated by the HAL driver */
	UDC_NRF_EVT_HAL,
	/* Shim driver event to trigger next transfer */
	UDC_NRF_EVT_XFER,
	/* Let controller perform status stage */
	UDC_NRF_EVT_STATUS_IN,
};

struct udc_nrf_evt {
	enum udc_nrf_event_type type;
	union {
		nrf_usbd_common_evt_t hal_evt;
		uint8_t ep;
	};
};

K_MSGQ_DEFINE(drv_msgq, sizeof(struct udc_nrf_evt),
	      CONFIG_UDC_NRF_MAX_QMESSAGES, sizeof(uint32_t));

static K_KERNEL_STACK_DEFINE(drv_stack, CONFIG_UDC_NRF_THREAD_STACK_SIZE);
static struct k_thread drv_stack_data;

/* USB device controller access from devicetree */
#define DT_DRV_COMPAT nordic_nrf_usbd

#define CFG_EPIN_CNT		DT_INST_PROP(0, num_in_endpoints)
#define CFG_EPOUT_CNT		DT_INST_PROP(0, num_out_endpoints)
#define CFG_EP_ISOIN_CNT	DT_INST_PROP(0, num_isoin_endpoints)
#define CFG_EP_ISOOUT_CNT	DT_INST_PROP(0, num_isoout_endpoints)

static struct udc_ep_config ep_cfg_out[CFG_EPOUT_CNT + CFG_EP_ISOOUT_CNT + 1];
static struct udc_ep_config ep_cfg_in[CFG_EPIN_CNT + CFG_EP_ISOIN_CNT + 1];
static bool udc_nrf_setup_rcvd, udc_nrf_setup_set_addr, udc_nrf_fake_setup;
static uint8_t udc_nrf_address;
static const struct device *udc_nrf_dev;

struct udc_nrf_config {
	clock_control_subsys_t clock;
	nrfx_power_config_t pwr;
	nrfx_power_usbevt_config_t evt;
};

static struct onoff_manager *hfxo_mgr;
static struct onoff_client hfxo_cli;

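/* Allow the controller to accept the data stage of a control OUT (write)
 * transfer, but only after a SETUP packet directed OUT has been received.
 */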
static void udc_nrf_clear_control_out(const struct device *dev)
{
	if (nrf_usbd_common_last_setup_dir_get() == USB_CONTROL_EP_OUT &&
	    udc_nrf_setup_rcvd) {
		/* Allow data chunk on EP0 OUT */
		nrf_usbd_common_setup_data_clear();
		udc_nrf_setup_rcvd = false;
		LOG_INF("Allow data OUT");
	}
}

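/* Start the next queued IN transfer on the endpoint, if the endpoint is not
 * already busy and a buffer is available.
 */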
static void udc_event_xfer_in_next(const struct device *dev, const uint8_t ep)
{
	struct net_buf *buf;

	if (udc_ep_is_busy(dev, ep)) {
		return;
	}

	buf = udc_buf_peek(dev, ep);
	if (buf != NULL) {
		nrf_usbd_common_transfer_t xfer = {
			.p_data = {.tx = buf->data},
			.size = buf->len,
			.flags = udc_ep_buf_has_zlp(buf) ?
				 NRF_USBD_COMMON_TRANSFER_ZLP_FLAG : 0,
		};
		nrfx_err_t err;

		err = nrf_usbd_common_ep_transfer(ep, &xfer);
		if (err != NRFX_SUCCESS) {
			LOG_ERR("ep 0x%02x nrfx error: %x", ep, err);
			/* REVISE: remove from endpoint queue? ASSERT? */
			udc_submit_ep_event(dev, buf, -ECONNREFUSED);
		} else {
			udc_ep_set_busy(dev, ep, true);
		}
	}
}

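/* Handle completion of a transfer on the control IN endpoint and advance the
 * control transfer state machine to the next stage.
 */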
static void udc_event_xfer_ctrl_in(const struct device *dev,
				   struct net_buf *const buf)
{
	if (udc_ctrl_stage_is_status_in(dev) ||
	    udc_ctrl_stage_is_no_data(dev)) {
		/* Status stage finished, notify upper layer */
		udc_ctrl_submit_status(dev, buf);
	}

	if (udc_ctrl_stage_is_data_in(dev)) {
		/*
		 * s-in-[status] finished, release buffer.
		 * Since the controller supports auto-status we cannot use
		 * if (udc_ctrl_stage_is_status_out()) after state update.
		 */
		net_buf_unref(buf);
	}

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (!udc_nrf_setup_set_addr) {
		nrf_usbd_common_setup_clear();
	}
}

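/* Complete a status IN stage that the controller has already handled in
 * hardware (auto-status), using the zero-length buffer queued by the stack.
 */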
static void udc_event_fake_status_in(const struct device *dev)
{
	struct net_buf *buf;

	buf = udc_buf_get(dev, USB_CONTROL_EP_IN);
	if (unlikely(buf == NULL)) {
		LOG_DBG("ep 0x%02x queue is empty", USB_CONTROL_EP_IN);
		return;
	}

	LOG_DBG("Fake status IN %p", buf);
	udc_event_xfer_ctrl_in(dev, buf);
}

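/* Handle an IN endpoint transfer event reported by the common USBD driver and
 * pass the completed or aborted buffer to the upper layer.
 */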
static void udc_event_xfer_in(const struct device *dev,
			      nrf_usbd_common_evt_t const *const event)
{
	uint8_t ep = event->data.eptransfer.ep;
	struct net_buf *buf;

	switch (event->data.eptransfer.status) {
	case NRF_USBD_COMMON_EP_OK:
		buf = udc_buf_get(dev, ep);
		if (buf == NULL) {
			LOG_ERR("ep 0x%02x queue is empty", ep);
			__ASSERT_NO_MSG(false);
			return;
		}

		udc_ep_set_busy(dev, ep, false);
		if (ep == USB_CONTROL_EP_IN) {
			udc_event_xfer_ctrl_in(dev, buf);
			return;
		}

		udc_submit_ep_event(dev, buf, 0);
		break;

	case NRF_USBD_COMMON_EP_ABORTED:
		LOG_WRN("aborted IN ep 0x%02x", ep);
		buf = udc_buf_get_all(dev, ep);

		if (buf == NULL) {
			LOG_DBG("ep 0x%02x queue is empty", ep);
			return;
		}

		udc_ep_set_busy(dev, ep, false);
		udc_submit_ep_event(dev, buf, -ECONNABORTED);
		break;

	default:
		LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
			event->data.eptransfer.status, ep);
		udc_submit_event(dev, UDC_EVT_ERROR, -EIO);
		break;
	}
}

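/* Handle completion of a transfer on the control OUT endpoint and advance the
 * control transfer state machine to the next stage.
 */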
static void udc_event_xfer_ctrl_out(const struct device *dev,
				    struct net_buf *const buf)
{
	/*
	 * In the s-in-status case the controller supports auto-status,
	 * therefore we do not have to call udc_ctrl_stage_is_status_out().
	 */

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_status_in(dev)) {
		udc_ctrl_submit_s_out_status(dev, buf);
	}
}

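/* Start the next queued OUT transfer on the endpoint, if the endpoint is not
 * already busy and a buffer is available.
 */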
static void udc_event_xfer_out_next(const struct device *dev, const uint8_t ep)
{
	struct net_buf *buf;

	if (udc_ep_is_busy(dev, ep)) {
		return;
	}

	buf = udc_buf_peek(dev, ep);
	if (buf != NULL) {
		nrf_usbd_common_transfer_t xfer = {
			.p_data = {.rx = buf->data},
			.size = buf->size,
			.flags = 0,
		};
		nrfx_err_t err;

		err = nrf_usbd_common_ep_transfer(ep, &xfer);
		if (err != NRFX_SUCCESS) {
			LOG_ERR("ep 0x%02x nrfx error: %x", ep, err);
			/* REVISE: remove from endpoint queue? ASSERT? */
			udc_submit_ep_event(dev, buf, -ECONNREFUSED);
		} else {
			udc_ep_set_busy(dev, ep, true);
		}
	} else {
		LOG_DBG("ep 0x%02x waiting, queue is empty", ep);
	}
}

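/* Handle an OUT endpoint transfer event reported by the common USBD driver
 * and pass the received data to the upper layer.
 */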
static void udc_event_xfer_out(const struct device *dev,
			       nrf_usbd_common_evt_t const *const event)
{
	uint8_t ep = event->data.eptransfer.ep;
	nrf_usbd_common_ep_status_t err_code;
	struct net_buf *buf;
	size_t len;

	switch (event->data.eptransfer.status) {
	case NRF_USBD_COMMON_EP_WAITING:
		/*
		 * There is nothing to do here, new transfer
		 * will be tried in both cases later.
		 */
		break;

	case NRF_USBD_COMMON_EP_OK:
		err_code = nrf_usbd_common_ep_status_get(ep, &len);
		if (err_code != NRF_USBD_COMMON_EP_OK) {
			LOG_ERR("OUT transfer failed %d", err_code);
		}

		buf = udc_buf_get(dev, ep);
		if (buf == NULL) {
			LOG_ERR("ep 0x%02x ok, queue is empty", ep);
			return;
		}

		net_buf_add(buf, len);
		udc_ep_set_busy(dev, ep, false);
		if (ep == USB_CONTROL_EP_OUT) {
			udc_event_xfer_ctrl_out(dev, buf);
		} else {
			udc_submit_ep_event(dev, buf, 0);
		}

		break;

	default:
		LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
			event->data.eptransfer.status, ep);
		udc_submit_event(dev, UDC_EVT_ERROR, -EIO);
		break;
	}
}

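/* Allocate a buffer for the control OUT data stage, queue it on EP0 OUT, and
 * allow the controller to accept the data stage.
 */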
static int usbd_ctrl_feed_dout(const struct device *dev,
			       const size_t length)
{
	struct udc_ep_config *cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
	struct net_buf *buf;

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length);
	if (buf == NULL) {
		return -ENOMEM;
	}

	k_fifo_put(&cfg->fifo, buf);
	udc_nrf_clear_control_out(dev);

	return 0;
}

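/* Handle a SETUP packet: read it from the controller, work around the
 * peripheral's automatic Set Address handling, and submit the request to the
 * upper layer according to the control transfer stage.
 */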
static int udc_event_xfer_setup(const struct device *dev)
{
	nrf_usbd_common_setup_t *setup;
	struct net_buf *buf;
	int err;

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT,
			     sizeof(struct usb_setup_packet));
	if (buf == NULL) {
		LOG_ERR("Failed to allocate for setup");
		return -ENOMEM;
	}

	udc_ep_buf_set_setup(buf);
	setup = (nrf_usbd_common_setup_t *)buf->data;
	nrf_usbd_common_setup_get(setup);

	/* The USBD peripheral automatically handles Set Address in a slightly
	 * different manner than the USB stack.
	 *
	 * The USBD peripheral doesn't care about wLength, but the peripheral
	 * switches to the new address only after the status stage. The device
	 * won't automatically accept Data Stage packets.
	 *
	 * However, in the case the host:
	 *   * sends SETUP Set Address with non-zero wLength
	 *   * does not send corresponding OUT DATA packets (to match wLength)
	 *     or sends the packets but disregards NAK
	 *     or sends the packets that device ACKs
	 *   * sends IN token (either incorrectly proceeds to status stage, or
	 *     manages to send IN before SW sets STALL)
	 * then the USBD peripheral will accept the address and the USB stack
	 * won't. This will lead to a state mismatch between the stack and the
	 * peripheral.
	 *
	 * In cases where the USB stack would like to STALL the request there
	 * is a race condition between the host issuing the Set Address status
	 * stage (IN token) and SW setting the STALL bit. If the host wins the
	 * race, the device ACKs the status stage and uses the new address. If
	 * the device wins the race, the device STALLs the status stage and
	 * the address remains unchanged.
	 */
	udc_nrf_setup_set_addr =
		setup->bmRequestType == 0 &&
		setup->bRequest == USB_SREQ_SET_ADDRESS;
	if (udc_nrf_setup_set_addr) {
		if (setup->wLength) {
			/* Currently the USB stack only STALLs the OUT Data
			 * Stage when buffer allocation fails. To prevent the
			 * device from ACKing the Data Stage, simply ignore
			 * the request completely.
			 *
			 * If the host incorrectly proceeds to the status
			 * stage there will be an address mismatch (unless the
			 * new address is equal to the current device
			 * address). If the host does not issue an IN token
			 * then the mismatch will be avoided.
			 */
			net_buf_unref(buf);
			return 0;
		}

		/* nRF52/nRF53 USBD doesn't care about wValue bits 8..15 and
		 * the wIndex value but the USB device stack does.
		 *
		 * Just clear the bits so the stack will handle the request in
		 * the same way as the USBD peripheral does, avoiding the
		 * mismatch.
		 */
		setup->wValue &= 0x7F;
		setup->wIndex = 0;
	}

	if (!udc_nrf_setup_set_addr && udc_nrf_address != NRF_USBD->USBADDR) {
		/* Address mismatch detected. Fake Set Address handling to
		 * correct the situation, then repeat handling.
		 */
		udc_nrf_fake_setup = true;
		udc_nrf_setup_set_addr = true;

		setup->bmRequestType = 0;
		setup->bRequest = USB_SREQ_SET_ADDRESS;
		setup->wValue = NRF_USBD->USBADDR;
		setup->wIndex = 0;
		setup->wLength = 0;
	} else {
		udc_nrf_fake_setup = false;
	}

	net_buf_add(buf, sizeof(nrf_usbd_common_setup_t));
	udc_nrf_setup_rcvd = true;

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_data_out(dev)) {
		/* Allocate and feed buffer for data OUT stage */
		LOG_DBG("s:%p|feed for -out-", buf);
		err = usbd_ctrl_feed_dout(dev, udc_data_stage_length(buf));
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}
	} else if (udc_ctrl_stage_is_data_in(dev)) {
		err = udc_ctrl_submit_s_in_status(dev);
	} else {
		err = udc_ctrl_submit_s_status(dev);
	}

	return err;
}

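/* Driver thread: processes events queued by the HAL event handler and the
 * shim, serializing access to the controller.
 */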
static void udc_nrf_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	const struct device *dev = p1;

	while (true) {
		bool start_xfer = false;
		struct udc_nrf_evt evt;
		uint8_t ep;

		k_msgq_get(&drv_msgq, &evt, K_FOREVER);

		switch (evt.type) {
		case UDC_NRF_EVT_HAL:
			ep = evt.hal_evt.data.eptransfer.ep;
			switch (evt.hal_evt.type) {
			case NRF_USBD_COMMON_EVT_SUSPEND:
				LOG_INF("SUSPEND state detected");
				nrf_usbd_common_suspend();
				udc_set_suspended(udc_nrf_dev, true);
				udc_submit_event(udc_nrf_dev, UDC_EVT_SUSPEND, 0);
				break;
			case NRF_USBD_COMMON_EVT_RESUME:
				LOG_INF("RESUMING from suspend");
				udc_set_suspended(udc_nrf_dev, false);
				udc_submit_event(udc_nrf_dev, UDC_EVT_RESUME, 0);
				break;
			case NRF_USBD_COMMON_EVT_WUREQ:
				LOG_INF("Remote wakeup initiated");
				udc_set_suspended(udc_nrf_dev, false);
				udc_submit_event(udc_nrf_dev, UDC_EVT_RESUME, 0);
				break;
			case NRF_USBD_COMMON_EVT_EPTRANSFER:
				start_xfer = true;
				if (USB_EP_DIR_IS_IN(ep)) {
					udc_event_xfer_in(dev, &evt.hal_evt);
				} else {
					udc_event_xfer_out(dev, &evt.hal_evt);
				}
				break;
			case NRF_USBD_COMMON_EVT_SETUP:
				udc_event_xfer_setup(dev);
				break;
			default:
				break;
			}
			break;
		case UDC_NRF_EVT_XFER:
			start_xfer = true;
			ep = evt.ep;
			break;
		case UDC_NRF_EVT_STATUS_IN:
			udc_event_fake_status_in(dev);
			break;
		}

		if (start_xfer) {
			if (USB_EP_DIR_IS_IN(ep)) {
				udc_event_xfer_in_next(dev, ep);
			} else {
				udc_event_xfer_out_next(dev, ep);
			}
		}
	}
}

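/* On SOF, trigger the next transfer on the ISO OUT endpoint (0x08) if it is
 * enabled and has a buffer queued.
 */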
static void udc_sof_check_iso_out(const struct device *dev)
{
	const uint8_t iso_out_addr = 0x08;
	struct udc_nrf_evt evt = {
		.type = UDC_NRF_EVT_XFER,
		.ep = iso_out_addr,
	};
	struct udc_ep_config *ep_cfg;

	ep_cfg = udc_get_ep_cfg(dev, iso_out_addr);
	if (ep_cfg == NULL) {
		return;
	}

	if (ep_cfg->stat.enabled && !k_fifo_is_empty(&ep_cfg->fifo)) {
		k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
	}
}

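/* Event handler registered with the common USBD driver. Reset and SOF are
 * reported directly to the UDC layer; suspend, resume, wakeup, endpoint
 * transfer, and SETUP events are forwarded to the driver thread.
 */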
static void usbd_event_handler(nrf_usbd_common_evt_t const *const hal_evt)
{
	switch (hal_evt->type) {
	case NRF_USBD_COMMON_EVT_RESET:
		LOG_INF("Reset");
		udc_submit_event(udc_nrf_dev, UDC_EVT_RESET, 0);
		break;
	case NRF_USBD_COMMON_EVT_SOF:
		udc_submit_event(udc_nrf_dev, UDC_EVT_SOF, 0);
		udc_sof_check_iso_out(udc_nrf_dev);
		break;
	case NRF_USBD_COMMON_EVT_SUSPEND:
	case NRF_USBD_COMMON_EVT_RESUME:
	case NRF_USBD_COMMON_EVT_WUREQ:
	case NRF_USBD_COMMON_EVT_EPTRANSFER:
	case NRF_USBD_COMMON_EVT_SETUP: {
		struct udc_nrf_evt evt = {
			.type = UDC_NRF_EVT_HAL,
			.hal_evt = *hal_evt,
		};

		/* Forward these events to the thread since mutually exclusive
		 * access to the controller is necessary.
		 */
		k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
		break;
	}
	default:
		break;
	}
}

static void udc_nrf_power_handler(nrfx_power_usb_evt_t pwr_evt)
{
	switch (pwr_evt) {
	case NRFX_POWER_USB_EVT_DETECTED:
		LOG_DBG("POWER event detected");
		udc_submit_event(udc_nrf_dev, UDC_EVT_VBUS_READY, 0);
		break;
	case NRFX_POWER_USB_EVT_READY:
		LOG_DBG("POWER event ready");
		nrf_usbd_common_start(true);
		break;
	case NRFX_POWER_USB_EVT_REMOVED:
		LOG_DBG("POWER event removed");
		udc_submit_event(udc_nrf_dev, UDC_EVT_VBUS_REMOVED, 0);
		break;
	default:
		LOG_ERR("Unknown power event %d", pwr_evt);
	}
}

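/* Queue a request for the driver thread to complete the status IN stage in
 * software when the controller handles it automatically (control OUT
 * transfers and fake Set Address handling). Returns true if queued.
 */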
static bool udc_nrf_fake_status_in(const struct device *dev)
{
	struct udc_nrf_evt evt = {
		.type = UDC_NRF_EVT_STATUS_IN,
		.ep = USB_CONTROL_EP_IN,
	};

	if (nrf_usbd_common_last_setup_dir_get() == USB_CONTROL_EP_OUT ||
	    udc_nrf_fake_setup) {
		/* Let controller perform status IN stage */
		k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
		return true;
	}

	return false;
}

static int udc_nrf_ep_enqueue(const struct device *dev,
			      struct udc_ep_config *cfg,
			      struct net_buf *buf)
{
	struct udc_nrf_evt evt = {
		.type = UDC_NRF_EVT_XFER,
		.ep = cfg->addr,
	};

	udc_buf_put(cfg, buf);

	if (cfg->addr == USB_CONTROL_EP_IN && buf->len == 0) {
		if (udc_nrf_fake_status_in(dev)) {
			return 0;
		}
	}

	k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);

	return 0;
}

static int udc_nrf_ep_dequeue(const struct device *dev,
			      struct udc_ep_config *cfg)
{
	bool busy = nrf_usbd_common_ep_is_busy(cfg->addr);

	nrf_usbd_common_ep_abort(cfg->addr);
	if (USB_EP_DIR_IS_OUT(cfg->addr) || !busy) {
		struct net_buf *buf;

		/*
		 * The HAL driver does not generate an event for an OUT
		 * endpoint or when the IN endpoint is not busy.
		 */
		buf = udc_buf_get_all(dev, cfg->addr);
		if (buf) {
			udc_submit_ep_event(dev, buf, -ECONNABORTED);
		} else {
			LOG_INF("ep 0x%02x queue is empty", cfg->addr);
		}
	}

	udc_ep_set_busy(dev, cfg->addr, false);

	return 0;
}

static int udc_nrf_ep_enable(const struct device *dev,
			     struct udc_ep_config *cfg)
{
	uint16_t mps;

	__ASSERT_NO_MSG(cfg);
	mps = (udc_mps_ep_size(cfg) == 0) ? cfg->caps.mps : udc_mps_ep_size(cfg);
	nrf_usbd_common_ep_max_packet_size_set(cfg->addr, mps);
	nrf_usbd_common_ep_enable(cfg->addr);
	if (!NRF_USBD_EPISO_CHECK(cfg->addr)) {
		/* ISO transactions for a full-speed device do not support
		 * toggle sequencing and should only send DATA0 PID.
		 */
		nrf_usbd_common_ep_dtoggle_clear(cfg->addr);
		nrf_usbd_common_ep_stall_clear(cfg->addr);
	}

	LOG_DBG("Enable ep 0x%02x", cfg->addr);

	return 0;
}

static int udc_nrf_ep_disable(const struct device *dev,
			      struct udc_ep_config *cfg)
{
	__ASSERT_NO_MSG(cfg);
	nrf_usbd_common_ep_disable(cfg->addr);
	LOG_DBG("Disable ep 0x%02x", cfg->addr);

	return 0;
}

static int udc_nrf_ep_set_halt(const struct device *dev,
			       struct udc_ep_config *cfg)
{
	LOG_DBG("Halt ep 0x%02x", cfg->addr);

	if (cfg->addr == USB_CONTROL_EP_OUT ||
	    cfg->addr == USB_CONTROL_EP_IN) {
		nrf_usbd_common_setup_stall();
	} else {
		nrf_usbd_common_ep_stall(cfg->addr);
	}

	return 0;
}

static int udc_nrf_ep_clear_halt(const struct device *dev,
				 struct udc_ep_config *cfg)
{
	LOG_DBG("Clear halt ep 0x%02x", cfg->addr);

	nrf_usbd_common_ep_dtoggle_clear(cfg->addr);
	nrf_usbd_common_ep_stall_clear(cfg->addr);

	return 0;
}

static int udc_nrf_set_address(const struct device *dev, const uint8_t addr)
{
	/*
	 * If the status stage has already finished (which depends entirely on
	 * when the host sends the IN token) then NRF_USBD->USBADDR will have
	 * the same address, otherwise it won't (unless the new address is
	 * unchanged).
	 *
	 * Store the address so the driver can detect address mismatches
	 * between the USB stack and the USBD peripheral. The mismatches can
	 * occur if:
	 *   * SW has high enough latency in SETUP handling, or
	 *   * the host did not issue a Status Stage after the Set Address
	 *     request
	 *
	 * The SETUP handling latency is a problem because Set Address is
	 * automatically handled by the device. Because the whole Set Address
	 * handling can finish in less than 21 us, the latency required (with
	 * perfect timing) to hit the issue is relatively short (2 ms Set
	 * Address recovery interval + negligible Set Address handling time).
	 * If the host sends a new SETUP before SW had a chance to read the
	 * Set Address one, the Set Address one will be overwritten without a
	 * trace.
	 */
	udc_nrf_address = addr;

	if (udc_nrf_fake_setup) {
		struct udc_nrf_evt evt = {
			.type = UDC_NRF_EVT_HAL,
			.hal_evt = {
				.type = NRF_USBD_COMMON_EVT_SETUP,
			},
		};

		/* Finished handling lost Set Address, now handle the pending
		 * SETUP transfer.
		 */
		k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
	}

	return 0;
}

static int udc_nrf_host_wakeup(const struct device *dev)
{
	bool res = nrf_usbd_common_wakeup_req();

	LOG_DBG("Host wakeup request");
	if (!res) {
		return -EAGAIN;
	}

	return 0;
}

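/* Enable the controller: initialize the common USBD driver, enable both
 * control endpoints, request the HFXO, and enable the USBD peripheral.
 */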
static int udc_nrf_enable(const struct device *dev)
{
	unsigned int key;
	int ret;

	ret = nrf_usbd_common_init(usbd_event_handler);
	if (ret != NRFX_SUCCESS) {
		LOG_ERR("nRF USBD driver initialization failed");
		return -EIO;
	}

	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT,
				   USB_EP_TYPE_CONTROL, UDC_NRF_EP0_SIZE, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}

	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN,
				   USB_EP_TYPE_CONTROL, UDC_NRF_EP0_SIZE, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}

	sys_notify_init_spinwait(&hfxo_cli.notify);
	ret = onoff_request(hfxo_mgr, &hfxo_cli);
	if (ret < 0) {
		LOG_ERR("Failed to start HFXO %d", ret);
		return ret;
	}

	/* Disable interrupts until USBD is enabled */
	key = irq_lock();
	nrf_usbd_common_enable();
	irq_unlock(key);

	return 0;
}

static int udc_nrf_disable(const struct device *dev)
{
	int ret;

	nrf_usbd_common_disable();

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) {
		LOG_ERR("Failed to disable control endpoint");
		return -EIO;
	}

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) {
		LOG_ERR("Failed to disable control endpoint");
		return -EIO;
	}

	nrf_usbd_common_uninit();

	ret = onoff_cancel_or_release(hfxo_mgr, &hfxo_cli);
	if (ret < 0) {
		LOG_ERR("Failed to stop HFXO %d", ret);
		return ret;
	}

	return 0;
}

static int udc_nrf_init(const struct device *dev)
{
	const struct udc_nrf_config *cfg = dev->config;

	hfxo_mgr = z_nrf_clock_control_get_onoff(cfg->clock);

#ifdef CONFIG_HAS_HW_NRF_USBREG
	/* Use CLOCK/POWER priority for compatibility with other series where
	 * USB events are handled by CLOCK interrupt handler.
	 */
	IRQ_CONNECT(USBREGULATOR_IRQn,
		    DT_IRQ(DT_INST(0, nordic_nrf_clock), priority),
		    nrfx_isr, nrfx_usbreg_irq_handler, 0);
	irq_enable(USBREGULATOR_IRQn);
#endif

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    nrfx_isr, nrf_usbd_common_irq_handler, 0);

	(void)nrfx_power_init(&cfg->pwr);
	nrfx_power_usbevt_init(&cfg->evt);

	nrfx_power_usbevt_enable();
	LOG_INF("Initialized");

	return 0;
}

static int udc_nrf_shutdown(const struct device *dev)
{
	LOG_INF("shutdown");

	nrfx_power_usbevt_disable();
	nrfx_power_usbevt_uninit();
#ifdef CONFIG_HAS_HW_NRF_USBREG
	irq_disable(USBREGULATOR_IRQn);
#endif

	return 0;
}

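/* Driver pre-initialization: start the driver thread, register all endpoint
 * configurations, and set the controller capabilities.
 */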
static int udc_nrf_driver_init(const struct device *dev)
{
	struct udc_data *data = dev->data;
	int err;

	LOG_INF("Preinit");
	udc_nrf_dev = dev;
	k_mutex_init(&data->mutex);
	k_thread_create(&drv_stack_data, drv_stack,
			K_KERNEL_STACK_SIZEOF(drv_stack),
			udc_nrf_thread,
			(void *)dev, NULL, NULL,
			K_PRIO_COOP(8), 0, K_NO_WAIT);

	k_thread_name_set(&drv_stack_data, "udc_nrfx");

	for (int i = 0; i < ARRAY_SIZE(ep_cfg_out); i++) {
		ep_cfg_out[i].caps.out = 1;
		if (i == 0) {
			ep_cfg_out[i].caps.control = 1;
			ep_cfg_out[i].caps.mps = NRF_USBD_COMMON_EPSIZE;
		} else if (i < (CFG_EPOUT_CNT + 1)) {
			ep_cfg_out[i].caps.bulk = 1;
			ep_cfg_out[i].caps.interrupt = 1;
			ep_cfg_out[i].caps.mps = NRF_USBD_COMMON_EPSIZE;
		} else {
			ep_cfg_out[i].caps.iso = 1;
			ep_cfg_out[i].caps.mps = NRF_USBD_COMMON_ISOSIZE / 2;
		}

		ep_cfg_out[i].addr = USB_EP_DIR_OUT | i;
		err = udc_register_ep(dev, &ep_cfg_out[i]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}
	}

	for (int i = 0; i < ARRAY_SIZE(ep_cfg_in); i++) {
		ep_cfg_in[i].caps.in = 1;
		if (i == 0) {
			ep_cfg_in[i].caps.control = 1;
			ep_cfg_in[i].caps.mps = NRF_USBD_COMMON_EPSIZE;
		} else if (i < (CFG_EPIN_CNT + 1)) {
			ep_cfg_in[i].caps.bulk = 1;
			ep_cfg_in[i].caps.interrupt = 1;
			ep_cfg_in[i].caps.mps = NRF_USBD_COMMON_EPSIZE;
		} else {
			ep_cfg_in[i].caps.iso = 1;
			ep_cfg_in[i].caps.mps = NRF_USBD_COMMON_ISOSIZE / 2;
		}

		ep_cfg_in[i].addr = USB_EP_DIR_IN | i;
		err = udc_register_ep(dev, &ep_cfg_in[i]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}
	}

	data->caps.rwup = true;
	data->caps.out_ack = true;
	data->caps.mps0 = UDC_NRF_MPS0;
	data->caps.can_detect_vbus = true;

	return 0;
}

static int udc_nrf_lock(const struct device *dev)
{
	return udc_lock_internal(dev, K_FOREVER);
}

static int udc_nrf_unlock(const struct device *dev)
{
	return udc_unlock_internal(dev);
}

static const struct udc_nrf_config udc_nrf_cfg = {
	.clock = COND_CODE_1(NRF_CLOCK_HAS_HFCLK192M,
			     (CLOCK_CONTROL_NRF_SUBSYS_HF192M),
			     (CLOCK_CONTROL_NRF_SUBSYS_HF)),
	.pwr = {
		.dcdcen = (DT_PROP(DT_INST(0, nordic_nrf5x_regulator), regulator_initial_mode)
			   == NRF5X_REG_MODE_DCDC),
#if NRFX_POWER_SUPPORTS_DCDCEN_VDDH
		.dcdcenhv = COND_CODE_1(CONFIG_SOC_SERIES_NRF52X,
			(DT_NODE_HAS_STATUS_OKAY(DT_INST(0, nordic_nrf52x_regulator_hv))),
			(DT_NODE_HAS_STATUS_OKAY(DT_INST(0, nordic_nrf53x_regulator_hv)))),
#endif
	},

	.evt = {
		.handler = udc_nrf_power_handler
	},
};

static struct udc_data udc_nrf_data = {
	.mutex = Z_MUTEX_INITIALIZER(udc_nrf_data.mutex),
	.priv = NULL,
};

static const struct udc_api udc_nrf_api = {
	.lock = udc_nrf_lock,
	.unlock = udc_nrf_unlock,
	.init = udc_nrf_init,
	.enable = udc_nrf_enable,
	.disable = udc_nrf_disable,
	.shutdown = udc_nrf_shutdown,
	.set_address = udc_nrf_set_address,
	.host_wakeup = udc_nrf_host_wakeup,
	.ep_try_config = NULL,
	.ep_enable = udc_nrf_ep_enable,
	.ep_disable = udc_nrf_ep_disable,
	.ep_set_halt = udc_nrf_ep_set_halt,
	.ep_clear_halt = udc_nrf_ep_clear_halt,
	.ep_enqueue = udc_nrf_ep_enqueue,
	.ep_dequeue = udc_nrf_ep_dequeue,
};

DEVICE_DT_INST_DEFINE(0, udc_nrf_driver_init, NULL,
		      &udc_nrf_data, &udc_nrf_cfg,
		      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
		      &udc_nrf_api);