1 /*
2 * Copyright (c) 2018, Nordic Semiconductor ASA
3 * Copyright (c) 2018 Sundar Subramaniyan <sundar.subramaniyan@gmail.com>
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 /**
9 * @file usb_dc_nrfx.c
10 * @brief Nordic USB device controller driver
11 *
12 * The driver implements the interface between the USBD peripheral
13 * driver from the nrfx package and the operating system.
14 */
15
16 #include <soc.h>
17 #include <string.h>
18 #include <stdio.h>
19 #include <zephyr/init.h>
20 #include <zephyr/kernel.h>
21 #include <zephyr/drivers/usb/usb_dc.h>
22 #include <zephyr/usb/usb_device.h>
23 #include <zephyr/drivers/clock_control.h>
24 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
25 #include <zephyr/dt-bindings/regulator/nrf5x.h>
26 #include <nrf_usbd_common.h>
27 #include <hal/nrf_usbd.h>
28 #include <nrfx_power.h>
29
30
31 #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL
32 #include <zephyr/logging/log.h>
33 #include <zephyr/irq.h>
34 LOG_MODULE_REGISTER(usb_nrfx);
35
36 /* USB device controller access from devicetree */
37 #define DT_DRV_COMPAT nordic_nrf_usbd
38
39 /**
40 * @brief nRF USBD peripheral states
41 */
42 enum usbd_periph_state {
43 USBD_DETACHED,
44 USBD_ATTACHED,
45 USBD_POWERED,
46 USBD_SUSPENDED,
47 USBD_RESUMED,
48 USBD_DEFAULT,
49 USBD_ADDRESS_SET,
50 USBD_CONFIGURED,
51 };
52
53 /**
54 * @brief Endpoint event types.
55 */
56 enum usbd_ep_event_type {
57 EP_EVT_SETUP_RECV,
58 EP_EVT_RECV_REQ,
59 EP_EVT_RECV_COMPLETE,
60 EP_EVT_WRITE_COMPLETE,
61 };
62
63 /**
64 * @brief USBD peripheral event types.
65 */
66 enum usbd_event_type {
67 USBD_EVT_POWER,
68 USBD_EVT_EP,
69 USBD_EVT_RESET,
70 USBD_EVT_SOF,
71 USBD_EVT_REINIT
72 };
73
74 /**
75 * @brief Endpoint configuration.
76 *
77 * @param cb Endpoint callback.
78 * @param max_sz Max packet size supported by endpoint.
79 * @param en Enable/Disable flag.
80 * @param addr Endpoint address.
81 * @param type Endpoint transfer type.
82 */
83 struct nrf_usbd_ep_cfg {
84 usb_dc_ep_callback cb;
85 uint32_t max_sz;
86 bool en;
87 uint8_t addr;
88 enum usb_dc_ep_transfer_type type;
89
90 };
91
92 struct usbd_mem_block {
93 void *data;
94 };
95
96 /**
97 * @brief Endpoint buffer
98 *
99 * @param len Remaining length to be read/written.
100 * @param block Mempool block, for freeing up buffer after use.
101 * @param data Pointer to the data buffer for the endpoint.
102 * @param curr Pointer to the current offset in the endpoint buffer.
103 */
104 struct nrf_usbd_ep_buf {
105 uint32_t len;
106 struct usbd_mem_block block;
107 uint8_t *data;
108 uint8_t *curr;
109 };
110
111 /**
112 * @brief Endpoint context
113 *
114 * @param cfg Endpoint configuration
115 * @param buf Endpoint buffer
116 * @param read_complete A flag indicating that DMA read operation
117 * has been completed.
118 * @param read_pending A flag indicating that the Host has requested
119 * a data transfer.
120 * @param write_in_progress A flag indicating that write operation has
121 * been scheduled.
122 * @param trans_zlp Flag required for Control IN Endpoint. It
123 * indicates that ZLP is required to end data
124 * stage of the control request.
125 */
126 struct nrf_usbd_ep_ctx {
127 struct nrf_usbd_ep_cfg cfg;
128 struct nrf_usbd_ep_buf buf;
129 volatile bool read_complete;
130 volatile bool read_pending;
131 volatile bool write_in_progress;
132 bool trans_zlp;
133 };
134
135 /**
136 * @brief Endpoint event structure
137 *
138 * @param ep Endpoint control block pointer
139 * @param evt_type Event type
140 */
141 struct usbd_ep_event {
142 struct nrf_usbd_ep_ctx *ep;
143 enum usbd_ep_event_type evt_type;
144 };
145
146 /**
147 * @brief Power event structure
148 *
149 * @param state New USBD peripheral state.
150 */
151 struct usbd_pwr_event {
152 enum usbd_periph_state state;
153 };
154
155 /**
156 * @brief Endpoint USB event
157 * Used by ISR to send events to work handler
158 *
159 * @param node Used by the kernel for FIFO management
160 * @param block Mempool block pointer for freeing up after use
161 * @param evt Event data field
162 * @param evt_type Type of event that has occurred from the USBD peripheral
163 */
164 struct usbd_event {
165 sys_snode_t node;
166 struct usbd_mem_block block;
167 union {
168 struct usbd_ep_event ep_evt;
169 struct usbd_pwr_event pwr_evt;
170 } evt;
171 enum usbd_event_type evt_type;
172 };
173
174 /**
175 * @brief Fifo element slab
176 * Used for allocating fifo elements to pass from ISR to work handler
177 * TODO: The number of FIFO elements is an arbitrary number now but it should
178 * be derived from the theoretical number of backlog events possible depending
179 * on the number of endpoints configured.
180 */
181 #define FIFO_ELEM_SZ sizeof(struct usbd_event)
182 #define FIFO_ELEM_ALIGN sizeof(unsigned int)
183
184 K_MEM_SLAB_DEFINE(fifo_elem_slab, FIFO_ELEM_SZ,
185 CONFIG_USB_NRFX_EVT_QUEUE_SIZE, FIFO_ELEM_ALIGN);
186
187
188 /** Number of IN Endpoints configured (including control) */
189 #define CFG_EPIN_CNT (DT_INST_PROP(0, num_in_endpoints) + \
190 DT_INST_PROP(0, num_bidir_endpoints))
191
192 /** Number of OUT Endpoints configured (including control) */
193 #define CFG_EPOUT_CNT (DT_INST_PROP(0, num_out_endpoints) + \
194 DT_INST_PROP(0, num_bidir_endpoints))
195
196 /** Number of ISO IN Endpoints */
197 #define CFG_EP_ISOIN_CNT DT_INST_PROP(0, num_isoin_endpoints)
198
199 /** Number of ISO OUT Endpoints */
200 #define CFG_EP_ISOOUT_CNT DT_INST_PROP(0, num_isoout_endpoints)
201
202 /** ISO endpoint index */
203 #define EP_ISOIN_INDEX CFG_EPIN_CNT
204 #define EP_ISOOUT_INDEX (CFG_EPIN_CNT + CFG_EP_ISOIN_CNT + CFG_EPOUT_CNT)
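/* Endpoint contexts are kept in a single array ordered as: IN endpoints,
 * ISO IN endpoint, OUT endpoints, ISO OUT endpoint. The indices above
 * follow that layout (see endpoint_ctx()).
 */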
205
206 #define EP_BUF_MAX_SZ 64UL
207 #define ISO_EP_BUF_MAX_SZ 1024UL
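/* 64 bytes is the full-speed maximum packet size for non-isochronous
 * endpoints; the ISO OUT buffer accommodates up to 1024 bytes per frame.
 */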
208
209 /**
210 * @brief Output endpoint buffers
211 * Used as buffers for the endpoints' data transfer
212 * Maximum total buffer size: 1536 bytes (8 EP * 64 B + 1 ISO * 1024 B)
213 */
214 static uint8_t ep_out_bufs[CFG_EPOUT_CNT][EP_BUF_MAX_SZ]
215 __aligned(sizeof(uint32_t));
216 static uint8_t ep_isoout_bufs[CFG_EP_ISOOUT_CNT][ISO_EP_BUF_MAX_SZ]
217 __aligned(sizeof(uint32_t));
218
219 /** Total endpoints configured */
220 #define CFG_EP_CNT (CFG_EPIN_CNT + CFG_EP_ISOIN_CNT + \
221 CFG_EPOUT_CNT + CFG_EP_ISOOUT_CNT)
222
223 /**
224 * @brief USBD control structure
225 *
226 * @param status_cb Status callback for USB DC notifications
227 * @param setup Setup packet for Control requests
228 * @param hfxo_cli Onoff client used to control HFXO
229 * @param hfxo_mgr Pointer to onoff manager associated with HFXO.
230 * @param clk_requested Flag used to protect against double stop.
231 * @param attached USBD Attached flag
232 * @param ready USBD Ready flag set after pullup
233 * @param usb_work USBD work item
234 * @param drv_lock Mutex for thread-safe nrfx driver use
235 * @param ep_ctx Endpoint contexts
236 * @param ctrl_read_len State of control read operation (EP0).
237 */
238 struct nrf_usbd_ctx {
239 usb_dc_status_callback status_cb;
240 struct usb_setup_packet setup;
241 struct onoff_client hfxo_cli;
242 struct onoff_manager *hfxo_mgr;
243 atomic_t clk_requested;
244
245 bool attached;
246 bool ready;
247
248 struct k_work usb_work;
249 struct k_mutex drv_lock;
250
251 struct nrf_usbd_ep_ctx ep_ctx[CFG_EP_CNT];
252
253 uint16_t ctrl_read_len;
254 };
255
256
257 /* FIFO used for queuing up events from ISR. */
258 K_FIFO_DEFINE(usbd_evt_fifo);
259
260 /* Work queue used for handling the ISR events (i.e. for notifying the USB
261 * device stack, for executing the endpoints callbacks, etc.) out of the ISR
262 * context.
263 * The system work queue cannot be used for this purpose as it might be used in
264 * applications for scheduling USB transfers and this could lead to a deadlock
265 * when the USB device stack would not be notified about certain event because
266 * of a system work queue item waiting for a USB transfer to be finished.
267 */
268 static struct k_work_q usbd_work_queue;
269 static K_KERNEL_STACK_DEFINE(usbd_work_queue_stack,
270 CONFIG_USB_NRFX_WORK_QUEUE_STACK_SIZE);
271
272
273 static struct nrf_usbd_ctx usbd_ctx = {
274 .attached = false,
275 .ready = false,
276 };
277
278 static inline struct nrf_usbd_ctx *get_usbd_ctx(void)
279 {
280 return &usbd_ctx;
281 }
282
283 static inline bool dev_attached(void)
284 {
285 return get_usbd_ctx()->attached;
286 }
287
288 static inline bool dev_ready(void)
289 {
290 return get_usbd_ctx()->ready;
291 }
292
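/* Zephyr endpoint addresses use the same encoding as nrf_usbd_common_ep_t
 * values (direction bit 0x80 plus endpoint number), so the conversions
 * below are plain casts.
 */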
293 static inline nrf_usbd_common_ep_t ep_addr_to_nrfx(uint8_t ep)
294 {
295 return (nrf_usbd_common_ep_t)ep;
296 }
297
298 static inline uint8_t nrfx_addr_to_ep(nrf_usbd_common_ep_t ep)
299 {
300 return (uint8_t)ep;
301 }
302
303 static inline bool ep_is_valid(const uint8_t ep)
304 {
305 uint8_t ep_num = USB_EP_GET_IDX(ep);
306
307 if (NRF_USBD_EPIN_CHECK(ep)) {
308 if (unlikely(ep_num == NRF_USBD_EPISO_FIRST)) {
309 if (CFG_EP_ISOIN_CNT == 0) {
310 return false;
311 }
312 } else {
313 if (ep_num >= CFG_EPIN_CNT) {
314 return false;
315 }
316 }
317 } else {
318 if (unlikely(ep_num == NRF_USBD_EPISO_FIRST)) {
319 if (CFG_EP_ISOOUT_CNT == 0) {
320 return false;
321 }
322 } else {
323 if (ep_num >= CFG_EPOUT_CNT) {
324 return false;
325 }
326 }
327 }
328
329 return true;
330 }
331
332 static struct nrf_usbd_ep_ctx *endpoint_ctx(const uint8_t ep)
333 {
334 struct nrf_usbd_ctx *ctx;
335 uint8_t ep_num;
336
337 if (!ep_is_valid(ep)) {
338 return NULL;
339 }
340
341 ctx = get_usbd_ctx();
342 ep_num = NRF_USBD_EP_NR_GET(ep);
343
344 if (NRF_USBD_EPIN_CHECK(ep)) {
345 if (unlikely(NRF_USBD_EPISO_CHECK(ep))) {
346 return &ctx->ep_ctx[EP_ISOIN_INDEX];
347 } else {
348 return &ctx->ep_ctx[ep_num];
349 }
350 } else {
351 if (unlikely(NRF_USBD_EPISO_CHECK(ep))) {
352 return &ctx->ep_ctx[EP_ISOOUT_INDEX];
353 } else {
354 return &ctx->ep_ctx[CFG_EPIN_CNT +
355 CFG_EP_ISOIN_CNT +
356 ep_num];
357 }
358 }
359
360 return NULL;
361 }
362
363 static struct nrf_usbd_ep_ctx *in_endpoint_ctx(const uint8_t ep)
364 {
365 return endpoint_ctx(NRF_USBD_EPIN(ep));
366 }
367
368 static struct nrf_usbd_ep_ctx *out_endpoint_ctx(const uint8_t ep)
369 {
370 return endpoint_ctx(NRF_USBD_EPOUT(ep));
371 }
372
373 /**
374 * @brief Schedule USBD event processing.
375 *
376 * Should be called after usbd_evt_put().
377 */
378 static inline void usbd_work_schedule(void)
379 {
380 k_work_submit_to_queue(&usbd_work_queue, &get_usbd_ctx()->usb_work);
381 }
382
383 /**
384 * @brief Free previously allocated USBD event.
385 *
386 * Should be called after usbd_evt_get().
387 *
388 * @param ev Pointer to the USBD event structure.
389 */
390 static inline void usbd_evt_free(struct usbd_event *ev)
391 {
392 k_mem_slab_free(&fifo_elem_slab, (void *)ev->block.data);
393 }
394
395 /**
396 * @brief Enqueue USBD event.
397 *
398 * @param ev Pointer to the previously allocated and filled event structure.
399 */
400 static inline void usbd_evt_put(struct usbd_event *ev)
401 {
402 k_fifo_put(&usbd_evt_fifo, ev);
403 }
404
405 /**
406 * @brief Get next enqueued USBD event if present.
407 */
408 static inline struct usbd_event *usbd_evt_get(void)
409 {
410 return k_fifo_get(&usbd_evt_fifo, K_NO_WAIT);
411 }
412
413 /**
414 * @brief Drop all enqueued events.
415 */
416 static inline void usbd_evt_flush(void)
417 {
418 struct usbd_event *ev;
419
420 do {
421 ev = usbd_evt_get();
422 if (ev) {
423 usbd_evt_free(ev);
424 }
425 } while (ev != NULL);
426 }
427
428 /**
429 * @brief Allocate USBD event.
430 *
431 * This function should be called prior to usbd_evt_put().
432 *
433 * @returns Pointer to the allocated event or NULL if there was no space left.
434 */
435 static inline struct usbd_event *usbd_evt_alloc(void)
436 {
437 struct usbd_event *ev;
438 struct usbd_mem_block block;
439
440 if (k_mem_slab_alloc(&fifo_elem_slab,
441 (void **)&block.data, K_NO_WAIT)) {
442 LOG_ERR("USBD event allocation failed!");
443
444 /*
445 * Allocation may fail if workqueue thread is starved or event
446 * queue size is too small (CONFIG_USB_NRFX_EVT_QUEUE_SIZE).
447 * Wipe all events, free the space and schedule
448 * reinitialization.
449 */
450 usbd_evt_flush();
451
452 if (k_mem_slab_alloc(&fifo_elem_slab, (void **)&block.data, K_NO_WAIT)) {
453 LOG_ERR("USBD event memory corrupted");
454 __ASSERT_NO_MSG(0);
455 return NULL;
456 }
457
458 ev = (struct usbd_event *)block.data;
459 ev->block = block;
460 ev->evt_type = USBD_EVT_REINIT;
461 usbd_evt_put(ev);
462 usbd_work_schedule();
463
464 return NULL;
465 }
466
467 ev = (struct usbd_event *)block.data;
468 ev->block = block;
469
470 return ev;
471 }
472
473 static void submit_dc_power_event(enum usbd_periph_state state)
474 {
475 struct usbd_event *ev = usbd_evt_alloc();
476
477 if (!ev) {
478 return;
479 }
480
481 ev->evt_type = USBD_EVT_POWER;
482 ev->evt.pwr_evt.state = state;
483
484 usbd_evt_put(ev);
485
486 if (usbd_ctx.attached) {
487 usbd_work_schedule();
488 }
489 }
490
491 #if CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY
492 static void attached_evt_delay_handler(struct k_timer *timer)
493 {
494 LOG_DBG("ATTACHED event delay done");
495 submit_dc_power_event(USBD_ATTACHED);
496 }
497
498 static K_TIMER_DEFINE(delay_timer, attached_evt_delay_handler, NULL);
499 #endif
500
501 static void usb_dc_power_event_handler(nrfx_power_usb_evt_t event)
502 {
503 enum usbd_periph_state new_state;
504
505 switch (event) {
506 case NRFX_POWER_USB_EVT_DETECTED:
507 #if !CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY
508 new_state = USBD_ATTACHED;
509 break;
510 #else
511 LOG_DBG("ATTACHED event delayed");
512 k_timer_start(&delay_timer,
513 K_MSEC(CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY),
514 K_NO_WAIT);
515 return;
516 #endif
517 case NRFX_POWER_USB_EVT_READY:
518 new_state = USBD_POWERED;
519 break;
520 case NRFX_POWER_USB_EVT_REMOVED:
521 new_state = USBD_DETACHED;
522 break;
523 default:
524 LOG_ERR("Unknown USB power event %d", event);
525 return;
526 }
527
528 submit_dc_power_event(new_state);
529 }
530
531 /* Stop the HFXO. The algorithm supports the case where stop is requested
532 * before the clock is started; it is then stopped from the callback context.
533 */
534 static int hfxo_stop(struct nrf_usbd_ctx *ctx)
535 {
536 if (atomic_cas(&ctx->clk_requested, 1, 0)) {
537 return onoff_cancel_or_release(ctx->hfxo_mgr, &ctx->hfxo_cli);
538 }
539
540 return 0;
541 }
542
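/* Request the HFXO needed by USBD; the atomic flag prevents a double
 * request and pairs with hfxo_stop().
 */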
543 static int hfxo_start(struct nrf_usbd_ctx *ctx)
544 {
545 if (atomic_cas(&ctx->clk_requested, 0, 1)) {
546 sys_notify_init_spinwait(&ctx->hfxo_cli.notify);
547
548 return onoff_request(ctx->hfxo_mgr, &ctx->hfxo_cli);
549 }
550
551 return 0;
552 }
553
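/**
 * @brief Enable in hardware all endpoints enabled by the stack.
 *
 * Called when the peripheral becomes powered, so that endpoints marked
 * as enabled before the cable was attached are enabled in the peripheral
 * as well.
 */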
554 static void usbd_enable_endpoints(struct nrf_usbd_ctx *ctx)
555 {
556 struct nrf_usbd_ep_ctx *ep_ctx;
557 int i;
558
559 for (i = 0; i < CFG_EPIN_CNT; i++) {
560 ep_ctx = in_endpoint_ctx(i);
561 __ASSERT_NO_MSG(ep_ctx);
562
563 if (ep_ctx->cfg.en) {
564 nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
565 }
566 }
567
568 if (CFG_EP_ISOIN_CNT) {
569 ep_ctx = in_endpoint_ctx(NRF_USBD_EPIN(8));
570 __ASSERT_NO_MSG(ep_ctx);
571
572 if (ep_ctx->cfg.en) {
573 nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
574 }
575 }
576
577 for (i = 0; i < CFG_EPOUT_CNT; i++) {
578 ep_ctx = out_endpoint_ctx(i);
579 __ASSERT_NO_MSG(ep_ctx);
580
581 if (ep_ctx->cfg.en) {
582 nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
583 }
584 }
585
586 if (CFG_EP_ISOOUT_CNT) {
587 ep_ctx = out_endpoint_ctx(NRF_USBD_EPOUT(8));
588 __ASSERT_NO_MSG(ep_ctx);
589
590 if (ep_ctx->cfg.en) {
591 nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
592 }
593 }
594 }
595
596 /**
597 * @brief Reset endpoint state.
598 *
599 * Resets the internal logic state for a given endpoint.
600 *
601 * @param[in] ep_ctx Endpoint structure control block
602 */
603 static void ep_ctx_reset(struct nrf_usbd_ep_ctx *ep_ctx)
604 {
605 ep_ctx->buf.data = ep_ctx->buf.block.data;
606 ep_ctx->buf.curr = ep_ctx->buf.data;
607 ep_ctx->buf.len = 0U;
608
609 /* Abort ongoing write operation. */
610 if (ep_ctx->write_in_progress) {
611 nrf_usbd_common_ep_abort(ep_addr_to_nrfx(ep_ctx->cfg.addr));
612 }
613
614 ep_ctx->read_complete = true;
615 ep_ctx->read_pending = false;
616 ep_ctx->write_in_progress = false;
617 ep_ctx->trans_zlp = false;
618 }
619
620 /**
621 * @brief Initialize all endpoint structures.
622 *
623 * Endpoint buffers are allocated during the first call of this function.
624 * This function may also be called again on every USB reset event
625 * to reinitialize the state of all endpoints.
626 */
627 static int eps_ctx_init(void)
628 {
629 struct nrf_usbd_ep_ctx *ep_ctx;
630 uint32_t i;
631
632 for (i = 0U; i < CFG_EPIN_CNT; i++) {
633 ep_ctx = in_endpoint_ctx(i);
634 __ASSERT_NO_MSG(ep_ctx);
635 ep_ctx_reset(ep_ctx);
636 }
637
638 for (i = 0U; i < CFG_EPOUT_CNT; i++) {
639 ep_ctx = out_endpoint_ctx(i);
640 __ASSERT_NO_MSG(ep_ctx);
641
642 if (!ep_ctx->buf.block.data) {
643 ep_ctx->buf.block.data = ep_out_bufs[i];
644 }
645
646 ep_ctx_reset(ep_ctx);
647 }
648
649 if (CFG_EP_ISOIN_CNT) {
650 ep_ctx = in_endpoint_ctx(NRF_USBD_EPIN(8));
651 __ASSERT_NO_MSG(ep_ctx);
652 ep_ctx_reset(ep_ctx);
653 }
654
655 if (CFG_EP_ISOOUT_CNT) {
656 BUILD_ASSERT(CFG_EP_ISOOUT_CNT <= 1);
657
658 ep_ctx = out_endpoint_ctx(NRF_USBD_EPOUT(8));
659 __ASSERT_NO_MSG(ep_ctx);
660
661 if (!ep_ctx->buf.block.data) {
662 ep_ctx->buf.block.data = ep_isoout_bufs[0];
663 }
664
665 ep_ctx_reset(ep_ctx);
666 }
667
668 return 0;
669 }
670
671 static inline void usbd_work_process_pwr_events(struct usbd_pwr_event *pwr_evt)
672 {
673 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
674 int err;
675
676 switch (pwr_evt->state) {
677 case USBD_ATTACHED:
678 if (!nrf_usbd_common_is_enabled()) {
679 LOG_DBG("USB detected");
680 nrf_usbd_common_enable();
681 err = hfxo_start(ctx);
682 __ASSERT_NO_MSG(err >= 0);
683 }
684
685 /* No callback here.
686 * Stack will be notified when the peripheral is ready.
687 */
688 break;
689
690 case USBD_POWERED:
691 usbd_enable_endpoints(ctx);
692 nrf_usbd_common_start(IS_ENABLED(CONFIG_USB_DEVICE_SOF));
693 ctx->ready = true;
694
695 LOG_DBG("USB Powered");
696
697 if (ctx->status_cb) {
698 ctx->status_cb(USB_DC_CONNECTED, NULL);
699 }
700 break;
701
702 case USBD_DETACHED:
703 ctx->ready = false;
704 nrf_usbd_common_disable();
705 err = hfxo_stop(ctx);
706 __ASSERT_NO_MSG(err >= 0);
707
708 LOG_DBG("USB Removed");
709
710 if (ctx->status_cb) {
711 ctx->status_cb(USB_DC_DISCONNECTED, NULL);
712 }
713 break;
714
715 case USBD_SUSPENDED:
716 if (dev_ready()) {
717 nrf_usbd_common_suspend();
718 LOG_DBG("USB Suspend state");
719
720 if (ctx->status_cb) {
721 ctx->status_cb(USB_DC_SUSPEND, NULL);
722 }
723 }
724 break;
725 case USBD_RESUMED:
726 if (ctx->status_cb && dev_ready()) {
727 LOG_DBG("USB resume");
728 ctx->status_cb(USB_DC_RESUME, NULL);
729 }
730 break;
731
732 default:
733 break;
734 }
735 }
736
737 static inline void usbd_work_process_setup(struct nrf_usbd_ep_ctx *ep_ctx)
738 {
739 __ASSERT_NO_MSG(ep_ctx);
740 __ASSERT(ep_ctx->cfg.type == USB_DC_EP_CONTROL,
741 "Invalid event on CTRL EP.");
742
743 struct usb_setup_packet *usbd_setup;
744
745 /* SETUP packets are handled by USBD hardware.
746 * For compatibility with the USB stack,
747 * SETUP packet must be reassembled.
748 */
749 usbd_setup = (struct usb_setup_packet *)ep_ctx->buf.data;
750 memset(usbd_setup, 0, sizeof(struct usb_setup_packet));
751 usbd_setup->bmRequestType = nrf_usbd_setup_bmrequesttype_get(NRF_USBD);
752 usbd_setup->bRequest = nrf_usbd_setup_brequest_get(NRF_USBD);
753 usbd_setup->wValue = nrf_usbd_setup_wvalue_get(NRF_USBD);
754 usbd_setup->wIndex = nrf_usbd_setup_windex_get(NRF_USBD);
755 usbd_setup->wLength = nrf_usbd_setup_wlength_get(NRF_USBD);
756 ep_ctx->buf.len = sizeof(struct usb_setup_packet);
757
758 /* Copy setup packet to driver internal structure */
759 memcpy(&usbd_ctx.setup, usbd_setup, sizeof(struct usb_setup_packet));
760
761 LOG_DBG("SETUP: bR:0x%02x bmRT:0x%02x wV:0x%04x wI:0x%04x wL:%d",
762 (uint32_t)usbd_setup->bRequest,
763 (uint32_t)usbd_setup->bmRequestType,
764 (uint32_t)usbd_setup->wValue,
765 (uint32_t)usbd_setup->wIndex,
766 (uint32_t)usbd_setup->wLength);
767
768 /* Inform the stack. */
769 ep_ctx->cfg.cb(ep_ctx->cfg.addr, USB_DC_EP_SETUP);
770
771 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
772
773 if (usb_reqtype_is_to_device(usbd_setup) && usbd_setup->wLength) {
774 ctx->ctrl_read_len = usbd_setup->wLength;
775 /* Allow data chunk on EP0 OUT */
776 nrf_usbd_common_setup_data_clear();
777 } else {
778 ctx->ctrl_read_len = 0U;
779 }
780 }
781
782 static inline void usbd_work_process_recvreq(struct nrf_usbd_ctx *ctx,
783 struct nrf_usbd_ep_ctx *ep_ctx)
784 {
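	/* Start the OUT transfer only when the host has requested data
	 * (read_pending) and the previously received data has been consumed
	 * by the stack (read_complete).
	 */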
785 if (!ep_ctx->read_pending) {
786 return;
787 }
788 if (!ep_ctx->read_complete) {
789 return;
790 }
791
792 ep_ctx->read_pending = false;
793 ep_ctx->read_complete = false;
794
795 k_mutex_lock(&ctx->drv_lock, K_FOREVER);
796 NRF_USBD_COMMON_TRANSFER_OUT(transfer, ep_ctx->buf.data,
797 ep_ctx->cfg.max_sz);
798 nrfx_err_t err = nrf_usbd_common_ep_transfer(
799 ep_addr_to_nrfx(ep_ctx->cfg.addr), &transfer);
800 if (err != NRFX_SUCCESS) {
801 LOG_ERR("nRF USBD transfer error (OUT): 0x%02x", err);
802 }
803 k_mutex_unlock(&ctx->drv_lock);
804 }
805
806
807 static inline void usbd_work_process_ep_events(struct usbd_ep_event *ep_evt)
808 {
809 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
810 struct nrf_usbd_ep_ctx *ep_ctx = ep_evt->ep;
811
812 __ASSERT_NO_MSG(ep_ctx);
813
814 switch (ep_evt->evt_type) {
815 case EP_EVT_SETUP_RECV:
816 usbd_work_process_setup(ep_ctx);
817 break;
818
819 case EP_EVT_RECV_REQ:
820 usbd_work_process_recvreq(ctx, ep_ctx);
821 break;
822
823 case EP_EVT_RECV_COMPLETE:
824 ep_ctx->cfg.cb(ep_ctx->cfg.addr,
825 USB_DC_EP_DATA_OUT);
826 break;
827
828 case EP_EVT_WRITE_COMPLETE:
829 if (ep_ctx->cfg.type == USB_DC_EP_CONTROL &&
830 !ep_ctx->trans_zlp) {
831 /* Trigger the hardware to perform
832 * status stage, but only if there is
833 * no ZLP required.
834 */
835 k_mutex_lock(&ctx->drv_lock, K_FOREVER);
836 nrf_usbd_common_setup_clear();
837 k_mutex_unlock(&ctx->drv_lock);
838 }
839 ep_ctx->cfg.cb(ep_ctx->cfg.addr,
840 USB_DC_EP_DATA_IN);
841 break;
842 default:
843 break;
844 }
845 }
846
847 static void usbd_event_transfer_ctrl(nrf_usbd_common_evt_t const *const p_event)
848 {
849 struct nrf_usbd_ep_ctx *ep_ctx =
850 endpoint_ctx(p_event->data.eptransfer.ep);
851
852 if (NRF_USBD_EPIN_CHECK(p_event->data.eptransfer.ep)) {
853 switch (p_event->data.eptransfer.status) {
854 case NRF_USBD_COMMON_EP_OK: {
855 struct usbd_event *ev = usbd_evt_alloc();
856
857 if (!ev) {
858 return;
859 }
860
861 ep_ctx->write_in_progress = false;
862 ev->evt_type = USBD_EVT_EP;
863 ev->evt.ep_evt.evt_type = EP_EVT_WRITE_COMPLETE;
864 ev->evt.ep_evt.ep = ep_ctx;
865
866 LOG_DBG("ctrl write complete");
867 usbd_evt_put(ev);
868 usbd_work_schedule();
869 }
870 break;
871
872 case NRF_USBD_COMMON_EP_ABORTED: {
873 LOG_DBG("Endpoint 0x%02x write aborted",
874 p_event->data.eptransfer.ep);
875 }
876 break;
877
878 default: {
879 LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
880 p_event->data.eptransfer.status,
881 p_event->data.eptransfer.ep);
882 }
883 break;
884 }
885 } else {
886 switch (p_event->data.eptransfer.status) {
887 case NRF_USBD_COMMON_EP_WAITING: {
888 struct usbd_event *ev = usbd_evt_alloc();
889
890 if (!ev) {
891 return;
892 }
893
894 LOG_DBG("ctrl read request");
895
896 ep_ctx->read_pending = true;
897 ev->evt_type = USBD_EVT_EP;
898 ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
899 ev->evt.ep_evt.ep = ep_ctx;
900
901 usbd_evt_put(ev);
902 usbd_work_schedule();
903 }
904 break;
905
906 case NRF_USBD_COMMON_EP_OK: {
907 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
908 struct usbd_event *ev = usbd_evt_alloc();
909
910 if (!ev) {
911 return;
912 }
913 nrf_usbd_common_ep_status_t err_code;
914
915 ev->evt_type = USBD_EVT_EP;
916 ev->evt.ep_evt.evt_type = EP_EVT_RECV_COMPLETE;
917 ev->evt.ep_evt.ep = ep_ctx;
918
919 err_code = nrf_usbd_common_ep_status_get(
920 p_event->data.eptransfer.ep, &ep_ctx->buf.len);
921
922 if (err_code != NRF_USBD_COMMON_EP_OK) {
923 LOG_ERR("_ep_status_get failed! Code: %d",
924 err_code);
925 __ASSERT_NO_MSG(0);
926 }
927 LOG_DBG("ctrl read done: %d", ep_ctx->buf.len);
928
929 if (ctx->ctrl_read_len > ep_ctx->buf.len) {
930 ctx->ctrl_read_len -= ep_ctx->buf.len;
931 /* Allow next data chunk on EP0 OUT */
932 nrf_usbd_common_setup_data_clear();
933 } else {
934 ctx->ctrl_read_len = 0U;
935 }
936
937 usbd_evt_put(ev);
938 usbd_work_schedule();
939 }
940 break;
941
942 default: {
943 LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
944 p_event->data.eptransfer.status,
945 p_event->data.eptransfer.ep);
946 }
947 break;
948 }
949 }
950 }
951
952 static void usbd_event_transfer_data(nrf_usbd_common_evt_t const *const p_event)
953 {
954 struct nrf_usbd_ep_ctx *ep_ctx =
955 endpoint_ctx(p_event->data.eptransfer.ep);
956
957 if (NRF_USBD_EPIN_CHECK(p_event->data.eptransfer.ep)) {
958 switch (p_event->data.eptransfer.status) {
959 case NRF_USBD_COMMON_EP_OK: {
960 struct usbd_event *ev = usbd_evt_alloc();
961
962 if (!ev) {
963 return;
964 }
965
966 LOG_DBG("write complete, ep 0x%02x",
967 (uint32_t)p_event->data.eptransfer.ep);
968
969 ep_ctx->write_in_progress = false;
970 ev->evt_type = USBD_EVT_EP;
971 ev->evt.ep_evt.evt_type = EP_EVT_WRITE_COMPLETE;
972 ev->evt.ep_evt.ep = ep_ctx;
973 usbd_evt_put(ev);
974 usbd_work_schedule();
975 }
976 break;
977
978 case NRF_USBD_COMMON_EP_ABORTED: {
979 LOG_DBG("Endpoint 0x%02x write aborted",
980 p_event->data.eptransfer.ep);
981 }
982 break;
983
984 default: {
985 LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
986 p_event->data.eptransfer.status,
987 p_event->data.eptransfer.ep);
988 }
989 break;
990 }
991
992 } else {
993 switch (p_event->data.eptransfer.status) {
994 case NRF_USBD_COMMON_EP_WAITING: {
995 struct usbd_event *ev = usbd_evt_alloc();
996
997 if (!ev) {
998 return;
999 }
1000
1001 LOG_DBG("read request, ep 0x%02x",
1002 (uint32_t)p_event->data.eptransfer.ep);
1003
1004 ep_ctx->read_pending = true;
1005 ev->evt_type = USBD_EVT_EP;
1006 ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
1007 ev->evt.ep_evt.ep = ep_ctx;
1008
1009 usbd_evt_put(ev);
1010 usbd_work_schedule();
1011 }
1012 break;
1013
1014 case NRF_USBD_COMMON_EP_OK: {
1015 struct usbd_event *ev = usbd_evt_alloc();
1016
1017 if (!ev) {
1018 return;
1019 }
1020
1021 ep_ctx->buf.len = nrf_usbd_ep_amount_get(NRF_USBD,
1022 p_event->data.eptransfer.ep);
1023
1024 LOG_DBG("read complete, ep 0x%02x, len %d",
1025 (uint32_t)p_event->data.eptransfer.ep,
1026 ep_ctx->buf.len);
1027
1028 ev->evt_type = USBD_EVT_EP;
1029 ev->evt.ep_evt.evt_type = EP_EVT_RECV_COMPLETE;
1030 ev->evt.ep_evt.ep = ep_ctx;
1031
1032 usbd_evt_put(ev);
1033 usbd_work_schedule();
1034 }
1035 break;
1036
1037 default: {
1038 LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
1039 p_event->data.eptransfer.status,
1040 p_event->data.eptransfer.ep);
1041 }
1042 break;
1043 }
1044 }
1045 }
1046
1047 /**
1048 * @brief nRFx USBD driver event handler function.
1049 */
1050 static void usbd_event_handler(nrf_usbd_common_evt_t const *const p_event)
1051 {
1052 struct usbd_event evt = {0};
1053 bool put_evt = false;
1054
1055 switch (p_event->type) {
1056 case NRF_USBD_COMMON_EVT_SUSPEND:
1057 LOG_DBG("SUSPEND state detected");
1058 evt.evt_type = USBD_EVT_POWER;
1059 evt.evt.pwr_evt.state = USBD_SUSPENDED;
1060 put_evt = true;
1061 break;
1062 case NRF_USBD_COMMON_EVT_RESUME:
1063 LOG_DBG("RESUMING from suspend");
1064 evt.evt_type = USBD_EVT_POWER;
1065 evt.evt.pwr_evt.state = USBD_RESUMED;
1066 put_evt = true;
1067 break;
1068 case NRF_USBD_COMMON_EVT_WUREQ:
1069 LOG_DBG("RemoteWU initiated");
1070 evt.evt_type = USBD_EVT_POWER;
1071 evt.evt.pwr_evt.state = USBD_RESUMED;
1072 put_evt = true;
1073 break;
1074 case NRF_USBD_COMMON_EVT_RESET:
1075 evt.evt_type = USBD_EVT_RESET;
1076 put_evt = true;
1077 break;
1078 case NRF_USBD_COMMON_EVT_SOF:
1079 if (IS_ENABLED(CONFIG_USB_DEVICE_SOF)) {
1080 evt.evt_type = USBD_EVT_SOF;
1081 put_evt = true;
1082 }
1083 break;
1084
1085 case NRF_USBD_COMMON_EVT_EPTRANSFER: {
1086 struct nrf_usbd_ep_ctx *ep_ctx;
1087
1088 ep_ctx = endpoint_ctx(p_event->data.eptransfer.ep);
1089 switch (ep_ctx->cfg.type) {
1090 case USB_DC_EP_CONTROL:
1091 usbd_event_transfer_ctrl(p_event);
1092 break;
1093 case USB_DC_EP_BULK:
1094 case USB_DC_EP_INTERRUPT:
1095 usbd_event_transfer_data(p_event);
1096 break;
1097 case USB_DC_EP_ISOCHRONOUS:
1098 usbd_event_transfer_data(p_event);
1099 break;
1100 default:
1101 break;
1102 }
1103 break;
1104 }
1105
1106 case NRF_USBD_COMMON_EVT_SETUP: {
1107 nrf_usbd_common_setup_t drv_setup;
1108
1109 nrf_usbd_common_setup_get(&drv_setup);
1110 if ((drv_setup.bRequest != USB_SREQ_SET_ADDRESS)
1111 || (USB_REQTYPE_GET_TYPE(drv_setup.bmRequestType)
1112 != USB_REQTYPE_TYPE_STANDARD)) {
1113 /* SetAddress is handled by USBD hardware.
1114 * No software action required.
1115 */
1116
1117 struct nrf_usbd_ep_ctx *ep_ctx =
1118 endpoint_ctx(NRF_USBD_EPOUT(0));
1119
1120 evt.evt_type = USBD_EVT_EP;
1121 evt.evt.ep_evt.ep = ep_ctx;
1122 evt.evt.ep_evt.evt_type = EP_EVT_SETUP_RECV;
1123 put_evt = true;
1124 }
1125 break;
1126 }
1127
1128 default:
1129 break;
1130 }
1131
1132 if (put_evt) {
1133 struct usbd_event *ev;
1134
1135 ev = usbd_evt_alloc();
1136 if (!ev) {
1137 return;
1138 }
1139 ev->evt_type = evt.evt_type;
1140 ev->evt = evt.evt;
1141 usbd_evt_put(ev);
1142 usbd_work_schedule();
1143 }
1144 }
1145
1146 static inline void usbd_reinit(void)
1147 {
1148 int ret;
1149 nrfx_err_t err;
1150
1151 nrfx_power_usbevt_disable();
1152 nrf_usbd_common_disable();
1153 nrf_usbd_common_uninit();
1154
1155 usbd_evt_flush();
1156
1157 ret = eps_ctx_init();
1158 __ASSERT_NO_MSG(ret == 0);
1159
1160 nrfx_power_usbevt_enable();
1161 err = nrf_usbd_common_init(usbd_event_handler);
1162
1163 if (err != NRFX_SUCCESS) {
1164 LOG_DBG("nRF USBD driver reinit failed. Code: %d", err);
1165 __ASSERT_NO_MSG(0);
1166 }
1167 }
1168
1169 /**
1170 * @brief Generate a fake receive request for the
1171 * ISO OUT endpoint.
1172 *
1173 * The ISO OUT endpoint does not generate an interrupt by itself; reading
1174 * from the ISO OUT endpoint is synchronized with the SOF frame. For more
1175 * details refer to the Nordic USBD specification.
1176 */
1177 static void usbd_sof_trigger_iso_read(void)
1178 {
1179 struct usbd_event *ev;
1180 struct nrf_usbd_ep_ctx *ep_ctx;
1181
1182 ep_ctx = endpoint_ctx(NRF_USBD_COMMON_EPOUT8);
1183 if (!ep_ctx) {
1184 LOG_ERR("There is no ISO ep");
1185 return;
1186 }
1187
1188 if (ep_ctx->cfg.en) {
1189 /* Generate a receive request
1190 * if the ISO OUT endpoint is enabled.
1191 */
1192 ep_ctx->read_pending = true;
1193 ep_ctx->read_complete = true;
1194 ev = usbd_evt_alloc();
1195 if (!ev) {
1196 LOG_ERR("Failed to alloc evt");
1197 return;
1198 }
1199 ev->evt_type = USBD_EVT_EP;
1200 ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
1201 ev->evt.ep_evt.ep = ep_ctx;
1202 usbd_evt_put(ev);
1203 usbd_work_schedule();
1204 } else {
1205 LOG_DBG("Endpoint is not enabled");
1206 }
1207 }
1208
1209 /* Work handler */
1210 static void usbd_work_handler(struct k_work *item)
1211 {
1212 struct nrf_usbd_ctx *ctx;
1213 struct usbd_event *ev;
1214
1215 ctx = CONTAINER_OF(item, struct nrf_usbd_ctx, usb_work);
1216
1217 while ((ev = usbd_evt_get()) != NULL) {
1218 if (!dev_ready() && ev->evt_type != USBD_EVT_POWER) {
1219 /* Drop non-power events when cable is detached. */
1220 usbd_evt_free(ev);
1221 continue;
1222 }
1223
1224 switch (ev->evt_type) {
1225 case USBD_EVT_EP:
1226 if (!ctx->attached) {
1227 LOG_ERR("not attached, EP 0x%02x event dropped",
1228 (uint32_t)ev->evt.ep_evt.ep->cfg.addr);
1229 }
1230 usbd_work_process_ep_events(&ev->evt.ep_evt);
1231 break;
1232 case USBD_EVT_POWER:
1233 usbd_work_process_pwr_events(&ev->evt.pwr_evt);
1234 break;
1235 case USBD_EVT_RESET:
1236 LOG_DBG("USBD reset event");
1237 k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1238 eps_ctx_init();
1239 k_mutex_unlock(&ctx->drv_lock);
1240
1241 if (ctx->status_cb) {
1242 ctx->status_cb(USB_DC_RESET, NULL);
1243 }
1244 break;
1245 case USBD_EVT_SOF:
1246 usbd_sof_trigger_iso_read();
1247
1248 if (ctx->status_cb) {
1249 ctx->status_cb(USB_DC_SOF, NULL);
1250 }
1251 break;
1252 case USBD_EVT_REINIT: {
1253 /*
1254 * Reinitialize the peripheral after queue
1255 * overflow.
1256 */
1257 LOG_ERR("USBD event queue full!");
1258 usbd_reinit();
1259 break;
1260 }
1261 default:
1262 LOG_ERR("Unknown USBD event: %"PRId16, ev->evt_type);
1263 break;
1264 }
1265 usbd_evt_free(ev);
1266 }
1267 }
1268
1269 int usb_dc_attach(void)
1270 {
1271 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1272 int ret;
1273
1274 if (ctx->attached) {
1275 return 0;
1276 }
1277
1278 k_mutex_init(&ctx->drv_lock);
1279 ctx->hfxo_mgr =
1280 z_nrf_clock_control_get_onoff(
1281 COND_CODE_1(NRF_CLOCK_HAS_HFCLK192M,
1282 (CLOCK_CONTROL_NRF_SUBSYS_HF192M),
1283 (CLOCK_CONTROL_NRF_SUBSYS_HF)));
1284
1285 IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
1286 nrfx_isr, nrf_usbd_common_irq_handler, 0);
1287
1288 nrfx_power_usbevt_enable();
1289
1290 ret = eps_ctx_init();
1291 if (ret == 0) {
1292 ctx->attached = true;
1293 }
1294
1295 if (!k_fifo_is_empty(&usbd_evt_fifo)) {
1296 usbd_work_schedule();
1297 }
1298
1299 if (nrfx_power_usbstatus_get() != NRFX_POWER_USB_STATE_DISCONNECTED) {
1300 /* USBDETECTED event is generated on cable attachment and
1301 * when cable is already attached during reset, but not when
1302 * the peripheral is re-enabled.
1303 * When USB-enabled bootloader is used, target application
1304 * will not receive this event and it needs to be generated
1305 * again here.
1306 */
1307 usb_dc_power_event_handler(NRFX_POWER_USB_EVT_DETECTED);
1308 }
1309
1310 return ret;
1311 }
1312
1313 int usb_dc_detach(void)
1314 {
1315 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1316
1317 k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1318
1319 usbd_evt_flush();
1320
1321 if (nrf_usbd_common_is_enabled()) {
1322 nrf_usbd_common_disable();
1323 }
1324
1325 (void)hfxo_stop(ctx);
1326 nrfx_power_usbevt_disable();
1327
1328 ctx->attached = false;
1329 k_mutex_unlock(&ctx->drv_lock);
1330
1331 return 0;
1332 }
1333
1334 int usb_dc_reset(void)
1335 {
1336 int ret;
1337
1338 if (!dev_attached() || !dev_ready()) {
1339 return -ENODEV;
1340 }
1341
1342 LOG_DBG("USBD Reset");
1343
1344 ret = usb_dc_detach();
1345 if (ret) {
1346 return ret;
1347 }
1348
1349 ret = usb_dc_attach();
1350 if (ret) {
1351 return ret;
1352 }
1353
1354 return 0;
1355 }
1356
1357 int usb_dc_set_address(const uint8_t addr)
1358 {
1359 struct nrf_usbd_ctx *ctx;
1360
1361 if (!dev_attached() || !dev_ready()) {
1362 return -ENODEV;
1363 }
1364
1365 /**
1366 * Nothing to do here. The USBD HW already takes care of initiating
1367 * STATUS stage. Just double check the address for sanity.
1368 */
1369 __ASSERT(addr == (uint8_t)NRF_USBD->USBADDR, "USB Address incorrect!");
1370
1371 ctx = get_usbd_ctx();
1372
1373 LOG_DBG("Address set to: %d", addr);
1374
1375 return 0;
1376 }
1377
1378
1379 int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data *const ep_cfg)
1380 {
1381 uint8_t ep_idx = NRF_USBD_EP_NR_GET(ep_cfg->ep_addr);
1382
1383 LOG_DBG("ep 0x%02x, mps %d, type %d", ep_cfg->ep_addr, ep_cfg->ep_mps,
1384 ep_cfg->ep_type);
1385
1386 if ((ep_cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) {
1387 LOG_ERR("invalid endpoint configuration");
1388 return -1;
1389 }
1390
1391 if (!NRF_USBD_EP_VALIDATE(ep_cfg->ep_addr)) {
1392 LOG_ERR("invalid endpoint index/address");
1393 return -1;
1394 }
1395
1396 if ((ep_cfg->ep_type == USB_DC_EP_ISOCHRONOUS) &&
1397 (!NRF_USBD_EPISO_CHECK(ep_cfg->ep_addr))) {
1398 LOG_WRN("invalid endpoint type");
1399 return -1;
1400 }
1401
1402 if ((ep_cfg->ep_type != USB_DC_EP_ISOCHRONOUS) &&
1403 (NRF_USBD_EPISO_CHECK(ep_cfg->ep_addr))) {
1404 LOG_WRN("iso endpoint can only be iso");
1405 return -1;
1406 }
1407
1408 return 0;
1409 }
1410
1411 int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const ep_cfg)
1412 {
1413 struct nrf_usbd_ep_ctx *ep_ctx;
1414
1415 if (!dev_attached()) {
1416 return -ENODEV;
1417 }
1418
1419 /**
1420 * TODO:
1421 * For ISO endpoints, application has to use EPIN/OUT 8
1422 * but right now there's no standard way of knowing the
1423 * ISOIN/ISOOUT endpoint number in advance to configure
1424 * accordingly. So either this needs to be chosen in the
1425 * menuconfig in application area or perhaps in device tree
1426 * at compile time or introduce a new API to read the endpoint
1427 * configuration at runtime before configuring them.
1428 */
1429 ep_ctx = endpoint_ctx(ep_cfg->ep_addr);
1430 if (!ep_ctx) {
1431 return -EINVAL;
1432 }
1433
1434 ep_ctx->cfg.addr = ep_cfg->ep_addr;
1435 ep_ctx->cfg.type = ep_cfg->ep_type;
1436 ep_ctx->cfg.max_sz = ep_cfg->ep_mps;
1437
1438 if (!NRF_USBD_EPISO_CHECK(ep_cfg->ep_addr)) {
1439 if ((ep_cfg->ep_mps & (ep_cfg->ep_mps - 1)) != 0U) {
1440 LOG_ERR("EP max packet size must be a power of 2");
1441 return -EINVAL;
1442 }
1443 }
1444
1445 nrf_usbd_common_ep_max_packet_size_set(ep_addr_to_nrfx(ep_cfg->ep_addr),
1446 ep_cfg->ep_mps);
1447
1448 return 0;
1449 }
1450
1451 int usb_dc_ep_set_stall(const uint8_t ep)
1452 {
1453 struct nrf_usbd_ep_ctx *ep_ctx;
1454
1455 if (!dev_attached() || !dev_ready()) {
1456 return -ENODEV;
1457 }
1458
1459 ep_ctx = endpoint_ctx(ep);
1460 if (!ep_ctx) {
1461 return -EINVAL;
1462 }
1463
1464 switch (ep_ctx->cfg.type) {
1465 case USB_DC_EP_CONTROL:
1466 nrf_usbd_common_setup_stall();
1467 break;
1468 case USB_DC_EP_BULK:
1469 case USB_DC_EP_INTERRUPT:
1470 nrf_usbd_common_ep_stall(ep_addr_to_nrfx(ep));
1471 break;
1472 case USB_DC_EP_ISOCHRONOUS:
1473 LOG_ERR("STALL unsupported on ISO endpoint");
1474 return -EINVAL;
1475 }
1476
1477 ep_ctx->buf.len = 0U;
1478 ep_ctx->buf.curr = ep_ctx->buf.data;
1479
1480 LOG_DBG("STALL on EP 0x%02x", ep);
1481
1482 return 0;
1483 }
1484
1485 int usb_dc_ep_clear_stall(const uint8_t ep)
1486 {
1487
1488 struct nrf_usbd_ep_ctx *ep_ctx;
1489
1490 if (!dev_attached() || !dev_ready()) {
1491 return -ENODEV;
1492 }
1493
1494 ep_ctx = endpoint_ctx(ep);
1495 if (!ep_ctx) {
1496 return -EINVAL;
1497 }
1498
1499 if (NRF_USBD_EPISO_CHECK(ep)) {
1500 /* ISO transactions do not support a handshake phase. */
1501 return -EINVAL;
1502 }
1503
1504 nrf_usbd_common_ep_dtoggle_clear(ep_addr_to_nrfx(ep));
1505 nrf_usbd_common_ep_stall_clear(ep_addr_to_nrfx(ep));
1506 LOG_DBG("Unstall on EP 0x%02x", ep);
1507
1508 return 0;
1509 }
1510
1511 int usb_dc_ep_halt(const uint8_t ep)
1512 {
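	/* On this controller halting an endpoint is implemented as a stall. */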
1513 return usb_dc_ep_set_stall(ep);
1514 }
1515
1516 int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled)
1517 {
1518 struct nrf_usbd_ep_ctx *ep_ctx;
1519
1520 if (!dev_attached() || !dev_ready()) {
1521 return -ENODEV;
1522 }
1523
1524 ep_ctx = endpoint_ctx(ep);
1525 if (!ep_ctx) {
1526 return -EINVAL;
1527 }
1528
1529 if (!stalled) {
1530 return -EINVAL;
1531 }
1532
1533 *stalled = (uint8_t) nrf_usbd_common_ep_stall_check(ep_addr_to_nrfx(ep));
1534
1535 return 0;
1536 }
1537
1538 int usb_dc_ep_enable(const uint8_t ep)
1539 {
1540 struct nrf_usbd_ep_ctx *ep_ctx;
1541
1542 if (!dev_attached()) {
1543 return -ENODEV;
1544 }
1545
1546 ep_ctx = endpoint_ctx(ep);
1547 if (!ep_ctx) {
1548 return -EINVAL;
1549 }
1550
1551 if (!NRF_USBD_EPISO_CHECK(ep)) {
1552 /* ISO transactions for full-speed device do not support
1553 * toggle sequencing and should only send DATA0 PID.
1554 */
1555 nrf_usbd_common_ep_dtoggle_clear(ep_addr_to_nrfx(ep));
1556 /** Endpoint is enabled on SetInterface request.
1557 * This should also clear EP's halt status.
1558 */
1559 nrf_usbd_common_ep_stall_clear(ep_addr_to_nrfx(ep));
1560 }
1561 if (ep_ctx->cfg.en) {
1562 return -EALREADY;
1563 }
1564
1565 LOG_DBG("EP enable: 0x%02x", ep);
1566
1567 ep_ctx->cfg.en = true;
1568
1569 /* Defer the endpoint enable if USBD is not ready yet. */
1570 if (dev_ready()) {
1571 nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep));
1572 }
1573
1574 return 0;
1575 }
1576
1577 int usb_dc_ep_disable(const uint8_t ep)
1578 {
1579 struct nrf_usbd_ep_ctx *ep_ctx;
1580
1581 ep_ctx = endpoint_ctx(ep);
1582 if (!ep_ctx) {
1583 return -EINVAL;
1584 }
1585
1586 if (!ep_ctx->cfg.en) {
1587 return -EALREADY;
1588 }
1589
1590 LOG_DBG("EP disable: 0x%02x", ep);
1591
1592 nrf_usbd_common_ep_disable(ep_addr_to_nrfx(ep));
1593 /* Clear write_in_progress as nrf_usbd_common_ep_disable()
1594 * terminates endpoint transaction.
1595 */
1596 ep_ctx->write_in_progress = false;
1597 ep_ctx_reset(ep_ctx);
1598 ep_ctx->cfg.en = false;
1599
1600 return 0;
1601 }
1602
1603 int usb_dc_ep_flush(const uint8_t ep)
1604 {
1605 struct nrf_usbd_ep_ctx *ep_ctx;
1606
1607 if (!dev_attached() || !dev_ready()) {
1608 return -ENODEV;
1609 }
1610
1611 ep_ctx = endpoint_ctx(ep);
1612 if (!ep_ctx) {
1613 return -EINVAL;
1614 }
1615
1616 ep_ctx->buf.len = 0U;
1617 ep_ctx->buf.curr = ep_ctx->buf.data;
1618
1619 nrf_usbd_common_transfer_out_drop(ep_addr_to_nrfx(ep));
1620
1621 return 0;
1622 }
1623
1624 int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data,
1625 const uint32_t data_len, uint32_t *const ret_bytes)
1626 {
1627 LOG_DBG("ep_write: ep 0x%02x, len %d", ep, data_len);
1628 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1629 struct nrf_usbd_ep_ctx *ep_ctx;
1630 int result = 0;
1631
1632 if (!dev_attached() || !dev_ready()) {
1633 return -ENODEV;
1634 }
1635
1636 if (NRF_USBD_EPOUT_CHECK(ep)) {
1637 return -EINVAL;
1638 }
1639
1640 ep_ctx = endpoint_ctx(ep);
1641 if (!ep_ctx) {
1642 return -EINVAL;
1643 }
1644
1645 if (!ep_ctx->cfg.en) {
1646 LOG_ERR("Endpoint 0x%02x is not enabled", ep);
1647 return -EINVAL;
1648 }
1649
1650 k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1651
1652 /* USBD driver does not allow scheduling multiple DMA transfers
1653 * for one EP at a time. Next USB transfer on this endpoint can be
1654 * triggered after the completion of previous one.
1655 */
1656 if (ep_ctx->write_in_progress) {
1657 k_mutex_unlock(&ctx->drv_lock);
1658 return -EAGAIN;
1659 }
1660
1661 /** Clear the ZLP flag if the current write is the ZLP. After the ZLP
1662 * is sent, the driver will perform the status stage.
1663 */
1664 if (!data_len && ep_ctx->trans_zlp) {
1665 ep_ctx->trans_zlp = false;
1666 }
1667
1668 /** When writing to a Control Endpoint there might be a need to transfer
1669 * a ZLP: if the Host asks for more data than the device can return and
1670 * the last packet is wMaxPacketSize long, the driver must send a ZLP.
1671 * For consistency with the Zephyr USB stack, sending the ZLP must be
1672 * issued from the stack level. Setting the trans_zlp flag blocks the
1673 * driver from starting the status stage before the required ZLP is sent.
1674 */
1675 if (ep_ctx->cfg.type == USB_DC_EP_CONTROL) {
1676 if (data_len && usbd_ctx.setup.wLength > data_len &&
1677 !(data_len % ep_ctx->cfg.max_sz)) {
1678 ep_ctx->trans_zlp = true;
1679 }
1680 }
1681
1682 /* Setup stage is handled by hardware.
1683 * Detect the setup stage initiated by the stack
1684 * and perform appropriate action.
1685 */
1686 if ((ep_ctx->cfg.type == USB_DC_EP_CONTROL)
1687 && (nrf_usbd_common_last_setup_dir_get() != ep)) {
1688 nrf_usbd_common_setup_clear();
1689 k_mutex_unlock(&ctx->drv_lock);
1690 return 0;
1691 }
1692
1693 ep_ctx->write_in_progress = true;
1694 NRF_USBD_COMMON_TRANSFER_IN(transfer, data, data_len, 0);
1695 nrfx_err_t err = nrf_usbd_common_ep_transfer(ep_addr_to_nrfx(ep), &transfer);
1696
1697 if (err != NRFX_SUCCESS) {
1698 ep_ctx->write_in_progress = false;
1699 if (ret_bytes) {
1700 *ret_bytes = 0;
1701 }
1702 result = -EIO;
1703 LOG_ERR("nRF USBD write error: %d", (uint32_t)err);
1704 } else {
1705 if (ret_bytes) {
1706 *ret_bytes = data_len;
1707 }
1708 }
1709
1710 k_mutex_unlock(&ctx->drv_lock);
1711 return result;
1712 }
1713
1714 int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len,
1715 uint32_t *read_bytes)
1716 {
1717 struct nrf_usbd_ep_ctx *ep_ctx;
1718 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1719 uint32_t bytes_to_copy;
1720
1721 if (!dev_attached() || !dev_ready()) {
1722 return -ENODEV;
1723 }
1724
1725 if (NRF_USBD_EPIN_CHECK(ep)) {
1726 return -EINVAL;
1727 }
1728
1729 if (!data && max_data_len) {
1730 return -EINVAL;
1731 }
1732
1733 ep_ctx = endpoint_ctx(ep);
1734 if (!ep_ctx) {
1735 return -EINVAL;
1736 }
1737
1738 if (!ep_ctx->cfg.en) {
1739 LOG_ERR("Endpoint 0x%02x is not enabled", ep);
1740 return -EINVAL;
1741 }
1742
1743 k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1744
1745 bytes_to_copy = MIN(max_data_len, ep_ctx->buf.len);
1746
1747 if (!data && !max_data_len) {
1748 if (read_bytes) {
1749 *read_bytes = ep_ctx->buf.len;
1750 }
1751 k_mutex_unlock(&ctx->drv_lock);
1752 return 0;
1753 }
1754
1755 memcpy(data, ep_ctx->buf.curr, bytes_to_copy);
1756
1757 ep_ctx->buf.curr += bytes_to_copy;
1758 ep_ctx->buf.len -= bytes_to_copy;
1759 if (read_bytes) {
1760 *read_bytes = bytes_to_copy;
1761 }
1762
1763 k_mutex_unlock(&ctx->drv_lock);
1764 return 0;
1765 }
1766
1767 int usb_dc_ep_read_continue(uint8_t ep)
1768 {
1769 struct nrf_usbd_ep_ctx *ep_ctx;
1770 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1771
1772 if (!dev_attached() || !dev_ready()) {
1773 return -ENODEV;
1774 }
1775
1776 if (NRF_USBD_EPIN_CHECK(ep)) {
1777 return -EINVAL;
1778 }
1779
1780 ep_ctx = endpoint_ctx(ep);
1781 if (!ep_ctx) {
1782 return -EINVAL;
1783 }
1784
1785 if (!ep_ctx->cfg.en) {
1786 LOG_ERR("Endpoint 0x%02x is not enabled", ep);
1787 return -EINVAL;
1788 }
1789
1790 k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1791 if (!ep_ctx->buf.len) {
1792 ep_ctx->buf.curr = ep_ctx->buf.data;
1793 ep_ctx->read_complete = true;
1794
1795 if (ep_ctx->read_pending) {
1796 struct usbd_event *ev = usbd_evt_alloc();
1797
1798 if (!ev) {
1799 k_mutex_unlock(&ctx->drv_lock);
1800 return -ENOMEM;
1801 }
1802
1803 ev->evt_type = USBD_EVT_EP;
1804 ev->evt.ep_evt.ep = ep_ctx;
1805 ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
1806 usbd_evt_put(ev);
1807 usbd_work_schedule();
1808 }
1809 }
1810 k_mutex_unlock(&ctx->drv_lock);
1811
1812 return 0;
1813 }
1814
1815 int usb_dc_ep_read(const uint8_t ep, uint8_t *const data,
1816 const uint32_t max_data_len, uint32_t *const read_bytes)
1817 {
1818 LOG_DBG("ep_read: ep 0x%02x, maxlen %d", ep, max_data_len);
1819 int ret;
1820
1821 ret = usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes);
1822 if (ret) {
1823 return ret;
1824 }
1825
1826 if (!data && !max_data_len) {
1827 return ret;
1828 }
1829
1830 ret = usb_dc_ep_read_continue(ep);
1831 return ret;
1832 }
1833
1834 int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb)
1835 {
1836 struct nrf_usbd_ep_ctx *ep_ctx;
1837
1838 if (!dev_attached()) {
1839 return -ENODEV;
1840 }
1841
1842 ep_ctx = endpoint_ctx(ep);
1843 if (!ep_ctx) {
1844 return -EINVAL;
1845 }
1846
1847 ep_ctx->cfg.cb = cb;
1848
1849 return 0;
1850 }
1851
1852 void usb_dc_set_status_callback(const usb_dc_status_callback cb)
1853 {
1854 get_usbd_ctx()->status_cb = cb;
1855 }
1856
1857 int usb_dc_ep_mps(const uint8_t ep)
1858 {
1859 struct nrf_usbd_ep_ctx *ep_ctx;
1860
1861 if (!dev_attached()) {
1862 return -ENODEV;
1863 }
1864
1865 ep_ctx = endpoint_ctx(ep);
1866 if (!ep_ctx) {
1867 return -EINVAL;
1868 }
1869
1870 return ep_ctx->cfg.max_sz;
1871 }
1872
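/**
 * @brief Request remote wakeup of the host.
 *
 * Returns -EAGAIN when the peripheral rejects the wakeup request, so the
 * caller may retry later.
 */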
1873 int usb_dc_wakeup_request(void)
1874 {
1875 bool res = nrf_usbd_common_wakeup_req();
1876
1877 if (!res) {
1878 return -EAGAIN;
1879 }
1880 return 0;
1881 }
1882
1883 static int usb_init(void)
1884 {
1885 struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1886 nrfx_err_t err;
1887
1888 #ifdef CONFIG_HAS_HW_NRF_USBREG
1889 /* Use CLOCK/POWER priority for compatibility with other series where
1890 * USB events are handled by CLOCK interrupt handler.
1891 */
1892 IRQ_CONNECT(USBREGULATOR_IRQn,
1893 DT_IRQ(DT_INST(0, nordic_nrf_clock), priority),
1894 nrfx_isr, nrfx_usbreg_irq_handler, 0);
1895 irq_enable(USBREGULATOR_IRQn);
1896 #endif
1897
1898 static const nrfx_power_config_t power_config = {
1899 .dcdcen = (DT_PROP(DT_INST(0, nordic_nrf5x_regulator), regulator_initial_mode)
1900 == NRF5X_REG_MODE_DCDC),
1901 #if NRFX_POWER_SUPPORTS_DCDCEN_VDDH
1902 .dcdcenhv = COND_CODE_1(CONFIG_SOC_SERIES_NRF52X,
1903 (DT_NODE_HAS_STATUS_OKAY(DT_INST(0, nordic_nrf52x_regulator_hv))),
1904 (DT_NODE_HAS_STATUS_OKAY(DT_INST(0, nordic_nrf53x_regulator_hv)))),
1905 #endif
1906 };
1907
1908 static const nrfx_power_usbevt_config_t usbevt_config = {
1909 .handler = usb_dc_power_event_handler
1910 };
1911
1912 err = nrf_usbd_common_init(usbd_event_handler);
1913 if (err != NRFX_SUCCESS) {
1914 LOG_DBG("nRF USBD driver init failed. Code: %d", (uint32_t)err);
1915 return -EIO;
1916 }
1917
1918 /* Ignore the return value, as NRFX_ERROR_ALREADY_INITIALIZED is not
1919 * a problem here.
1920 */
1921 (void)nrfx_power_init(&power_config);
1922 nrfx_power_usbevt_init(&usbevt_config);
1923
1924 k_work_queue_start(&usbd_work_queue,
1925 usbd_work_queue_stack,
1926 K_KERNEL_STACK_SIZEOF(usbd_work_queue_stack),
1927 CONFIG_SYSTEM_WORKQUEUE_PRIORITY, NULL);
1928
1929 k_thread_name_set(&usbd_work_queue.thread, "usbd_workq");
1930 k_work_init(&ctx->usb_work, usbd_work_handler);
1931
1932 return 0;
1933 }
1934
1935 SYS_INIT(usb_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
1936