1 /*
2  * Copyright (c) 2018, Nordic Semiconductor ASA
3  * Copyright (c) 2018 Sundar Subramaniyan <sundar.subramaniyan@gmail.com>
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 /**
9  * @file  usb_dc_nrfx.c
10  * @brief Nordic USB device controller driver
11  *
12  * The driver implements the interface between the USBD peripheral
13  * driver from the nrfx package and the operating system.
14  */
15 
16 #include <soc.h>
17 #include <string.h>
18 #include <stdio.h>
19 #include <kernel.h>
20 #include <drivers/usb/usb_dc.h>
21 #include <usb/usb_device.h>
22 #include <drivers/clock_control.h>
23 #include <drivers/clock_control/nrf_clock_control.h>
24 #include <nrfx_usbd.h>
25 #include <nrfx_power.h>
26 
27 
28 #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL
29 #include <logging/log.h>
30 LOG_MODULE_REGISTER(usb_nrfx);
31 
32 /* USB device controller access from devicetree */
33 #define DT_DRV_COMPAT nordic_nrf_usbd
34 
35 /**
36  * @brief nRF USBD peripheral states
37  */
38 enum usbd_periph_state {
39 	USBD_DETACHED,
40 	USBD_ATTACHED,
41 	USBD_POWERED,
42 	USBD_SUSPENDED,
43 	USBD_RESUMED,
44 	USBD_DEFAULT,
45 	USBD_ADDRESS_SET,
46 	USBD_CONFIGURED,
47 };
48 
49 /**
50  * @brief Endpoint event types.
51  */
52 enum usbd_ep_event_type {
53 	EP_EVT_SETUP_RECV,
54 	EP_EVT_RECV_REQ,
55 	EP_EVT_RECV_COMPLETE,
56 	EP_EVT_WRITE_COMPLETE,
57 };
58 
59 /**
60  * @brief USBD peripheral event types.
61  */
62 enum usbd_event_type {
63 	USBD_EVT_POWER,
64 	USBD_EVT_EP,
65 	USBD_EVT_RESET,
66 	USBD_EVT_SOF,
67 	USBD_EVT_REINIT
68 };
69 
70 /**
71  * @brief Endpoint configuration.
72  *
73  * @param cb      Endpoint callback.
74  * @param max_sz  Max packet size supported by endpoint.
75  * @param en      Enable/Disable flag.
76  * @param addr    Endpoint address.
77  * @param type    Endpoint transfer type.
78  */
79 struct nrf_usbd_ep_cfg {
80 	usb_dc_ep_callback cb;
81 	uint32_t max_sz;
82 	bool en;
83 	uint8_t addr;
84 	enum usb_dc_ep_transfer_type type;
85 
86 };
87 
88 struct usbd_mem_block {
89 	void *data;
90 };
91 
92 /**
93  * @brief Endpoint buffer
94  *
95  * @param len    Remaining length to be read/written.
96  * @param block  Mempool block, for freeing up buffer after use.
97  * @param data   Pointer to the data buffer for the endpoint.
98  * @param curr   Pointer to the current offset in the endpoint buffer.
99  */
100 struct nrf_usbd_ep_buf {
101 	uint32_t len;
102 	struct usbd_mem_block block;
103 	uint8_t *data;
104 	uint8_t *curr;
105 };
106 
107 /**
108  * @brief Endpoint context
109  *
110  * @param cfg			Endpoint configuration
111  * @param buf			Endpoint buffer
112  * @param read_complete		A flag indicating that DMA read operation
113  *				has been completed.
114  * @param read_pending		A flag indicating that the Host has requested
115  *				a data transfer.
116  * @param write_in_progress	A flag indicating that write operation has
117  *				been scheduled.
118  * @param trans_zlp		Flag required for Control IN Endpoint. It
119  *				indicates that ZLP is required to end data
120  *				stage of the control request.
121  */
122 struct nrf_usbd_ep_ctx {
123 	struct nrf_usbd_ep_cfg cfg;
124 	struct nrf_usbd_ep_buf buf;
125 	volatile bool read_complete;
126 	volatile bool read_pending;
127 	volatile bool write_in_progress;
128 	bool trans_zlp;
129 };
130 
131 /**
132  * @brief Endpoint event structure
133  *
134  * @param ep		Endpoint control block pointer
135  * @param evt_type	Event type
136  */
137 struct usbd_ep_event {
138 	struct nrf_usbd_ep_ctx *ep;
139 	enum usbd_ep_event_type evt_type;
140 };
141 
142 /**
143  * @brief Power event structure
144  *
145  * @param state		New USBD peripheral state.
146  */
147 struct usbd_pwr_event {
148 	enum usbd_periph_state state;
149 };
150 
151 /**
152  * @brief Endpoint USB event
153  *	  Used by ISR to send events to work handler
154  *
155  * @param node		Used by the kernel for FIFO management
156  * @param block		Mempool block pointer for freeing up after use
157  * @param evt		Event data field
158  * @param evt_type	Type of event that has occurred from the USBD peripheral
159  */
160 struct usbd_event {
161 	sys_snode_t node;
162 	struct usbd_mem_block block;
163 	union {
164 		struct usbd_ep_event ep_evt;
165 		struct usbd_pwr_event pwr_evt;
166 	} evt;
167 	enum usbd_event_type evt_type;
168 };
169 
170 /**
171  * @brief Fifo element slab
172  *	Used for allocating fifo elements to pass from ISR to work handler
173  * TODO: The number of FIFO elements is an arbitrary number now but it should
174  * be derived from the theoretical number of backlog events possible depending
175  * on the number of endpoints configured.
176  */
177 #define FIFO_ELEM_SZ            sizeof(struct usbd_event)
178 #define FIFO_ELEM_ALIGN         sizeof(unsigned int)
179 
180 K_MEM_SLAB_DEFINE(fifo_elem_slab, FIFO_ELEM_SZ,
181 		  CONFIG_USB_NRFX_EVT_QUEUE_SIZE, FIFO_ELEM_ALIGN);
182 
183 
184 /** Number of IN Endpoints configured (including control) */
185 #define CFG_EPIN_CNT (DT_INST_PROP(0, num_in_endpoints) +	\
186 		      DT_INST_PROP(0, num_bidir_endpoints))
187 
188 /** Number of OUT Endpoints configured (including control) */
189 #define CFG_EPOUT_CNT (DT_INST_PROP(0, num_out_endpoints) +	\
190 		       DT_INST_PROP(0, num_bidir_endpoints))
191 
192 /** Number of ISO IN Endpoints */
193 #define CFG_EP_ISOIN_CNT DT_INST_PROP(0, num_isoin_endpoints)
194 
195 /** Number of ISO OUT Endpoints */
196 #define CFG_EP_ISOOUT_CNT DT_INST_PROP(0, num_isoout_endpoints)
197 
198 /** ISO endpoint index */
199 #define EP_ISOIN_INDEX CFG_EPIN_CNT
200 #define EP_ISOOUT_INDEX (CFG_EPIN_CNT + CFG_EP_ISOIN_CNT + CFG_EPOUT_CNT)
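/* Layout of the ep_ctx[] array used by endpoint_ctx(): IN endpoints first,
 * then the ISO IN endpoint, then OUT endpoints, then the ISO OUT endpoint.
 */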
201 
202 #define EP_BUF_MAX_SZ		64UL
203 #define ISO_EP_BUF_MAX_SZ	1024UL
204 
205 /**
206  * @brief Output endpoint buffers
207  *	Used as buffers for the endpoints' data transfer
208  *	Maximum possible buffer size: 1536 bytes (8 EP * 64 B + 1 ISO EP * 1024 B)
209  */
210 static uint8_t ep_out_bufs[CFG_EPOUT_CNT][EP_BUF_MAX_SZ]
211 	       __aligned(sizeof(uint32_t));
212 static uint8_t ep_isoout_bufs[CFG_EP_ISOOUT_CNT][ISO_EP_BUF_MAX_SZ]
213 	       __aligned(sizeof(uint32_t));
214 
215 /** Total endpoints configured */
216 #define CFG_EP_CNT (CFG_EPIN_CNT + CFG_EP_ISOIN_CNT + \
217 		    CFG_EPOUT_CNT + CFG_EP_ISOOUT_CNT)
218 
219 /**
220  * @brief USBD control structure
221  *
222  * @param status_cb	Status callback for USB DC notifications
223  * @param setup		Setup packet for Control requests
224  * @param hfxo_cli	Onoff client used to control HFXO
225  * @param hfxo_mgr	Pointer to onoff manager associated with HFXO.
226  * @param clk_requested	Flag used to protect against double stop.
227  * @param attached	USBD Attached flag
228  * @param ready		USBD Ready flag set after pullup
229  * @param usb_work	USBD work item
230  * @param drv_lock	Mutex for thread-safe nrfx driver use
231  * @param ep_ctx	Endpoint contexts
232  * @param ctrl_read_len	State of control read operation (EP0).
233  */
234 struct nrf_usbd_ctx {
235 	usb_dc_status_callback status_cb;
236 	struct usb_setup_packet setup;
237 	struct onoff_client hfxo_cli;
238 	struct onoff_manager *hfxo_mgr;
239 	atomic_t clk_requested;
240 
241 	bool attached;
242 	bool ready;
243 
244 	struct k_work  usb_work;
245 	struct k_mutex drv_lock;
246 
247 	struct nrf_usbd_ep_ctx ep_ctx[CFG_EP_CNT];
248 
249 	uint16_t ctrl_read_len;
250 };
251 
252 
253 /* FIFO used for queuing up events from ISR. */
254 K_FIFO_DEFINE(usbd_evt_fifo);
255 
256 /* Work queue used for handling the ISR events (i.e. for notifying the USB
257  * device stack, for executing the endpoint callbacks, etc.) out of the ISR
258  * context.
259  * The system work queue cannot be used for this purpose, as applications may
260  * use it for scheduling USB transfers; this could lead to a deadlock in which
261  * the USB device stack is never notified about an event because a system work
262  * queue item is waiting for a USB transfer to finish.
263  */
264 static struct k_work_q usbd_work_queue;
265 static K_KERNEL_STACK_DEFINE(usbd_work_queue_stack,
266 			     CONFIG_USB_NRFX_WORK_QUEUE_STACK_SIZE);
267 
268 
269 static struct nrf_usbd_ctx usbd_ctx = {
270 	.attached = false,
271 	.ready = false,
272 };
273 
274 static inline struct nrf_usbd_ctx *get_usbd_ctx(void)
275 {
276 	return &usbd_ctx;
277 }
278 
279 static inline bool dev_attached(void)
280 {
281 	return get_usbd_ctx()->attached;
282 }
283 
284 static inline bool dev_ready(void)
285 {
286 	return get_usbd_ctx()->ready;
287 }
288 
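/* Zephyr endpoint addresses and nrfx_usbd_ep_t values use the same encoding
 * (direction bit plus endpoint number), so the conversions below are plain casts.
 */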
289 static inline nrfx_usbd_ep_t ep_addr_to_nrfx(uint8_t ep)
290 {
291 	return (nrfx_usbd_ep_t)ep;
292 }
293 
294 static inline uint8_t nrfx_addr_to_ep(nrfx_usbd_ep_t ep)
295 {
296 	return (uint8_t)ep;
297 }
298 
299 static inline bool ep_is_valid(const uint8_t ep)
300 {
301 	uint8_t ep_num = USB_EP_GET_IDX(ep);
302 
303 	if (NRF_USBD_EPIN_CHECK(ep)) {
304 		if (unlikely(ep_num == NRF_USBD_EPISO_FIRST)) {
305 			if (CFG_EP_ISOIN_CNT == 0) {
306 				return false;
307 			}
308 		} else {
309 			if (ep_num >= CFG_EPIN_CNT) {
310 				return false;
311 			}
312 		}
313 	} else {
314 		if (unlikely(ep_num == NRF_USBD_EPISO_FIRST)) {
315 			if (CFG_EP_ISOOUT_CNT == 0) {
316 				return false;
317 			}
318 		} else {
319 			if (ep_num >= CFG_EPOUT_CNT) {
320 				return false;
321 			}
322 		}
323 	}
324 
325 	return true;
326 }
327 
328 static struct nrf_usbd_ep_ctx *endpoint_ctx(const uint8_t ep)
329 {
330 	struct nrf_usbd_ctx *ctx;
331 	uint8_t ep_num;
332 
333 	if (!ep_is_valid(ep)) {
334 		return NULL;
335 	}
336 
337 	ctx = get_usbd_ctx();
338 	ep_num = NRF_USBD_EP_NR_GET(ep);
339 
340 	if (NRF_USBD_EPIN_CHECK(ep)) {
341 		if (unlikely(NRF_USBD_EPISO_CHECK(ep))) {
342 			return &ctx->ep_ctx[EP_ISOIN_INDEX];
343 		} else {
344 			return &ctx->ep_ctx[ep_num];
345 		}
346 	} else {
347 		if (unlikely(NRF_USBD_EPISO_CHECK(ep))) {
348 			return &ctx->ep_ctx[EP_ISOOUT_INDEX];
349 		} else {
350 			return &ctx->ep_ctx[CFG_EPIN_CNT +
351 					    CFG_EP_ISOIN_CNT +
352 					    ep_num];
353 		}
354 	}
355 
356 	return NULL;
357 }
358 
359 static struct nrf_usbd_ep_ctx *in_endpoint_ctx(const uint8_t ep)
360 {
361 	return endpoint_ctx(NRF_USBD_EPIN(ep));
362 }
363 
364 static struct nrf_usbd_ep_ctx *out_endpoint_ctx(const uint8_t ep)
365 {
366 	return endpoint_ctx(NRF_USBD_EPOUT(ep));
367 }
368 
369 /**
370  * @brief Schedule USBD event processing.
371  *
372  * Should be called after usbd_evt_put().
373  */
374 static inline void usbd_work_schedule(void)
375 {
376 	k_work_submit_to_queue(&usbd_work_queue, &get_usbd_ctx()->usb_work);
377 }
378 
379 /**
380  * @brief Free previously allocated USBD event.
381  *
382  * Should be called after usbd_evt_get().
383  *
384  * @param ev  Pointer to the USBD event structure.
385  */
386 static inline void usbd_evt_free(struct usbd_event *ev)
387 {
388 	k_mem_slab_free(&fifo_elem_slab, (void **)&ev->block.data);
389 }
390 
391 /**
392  * @brief Enqueue USBD event.
393  *
394  * @param ev  Pointer to the previously allocated and filled event structure.
395  */
396 static inline void usbd_evt_put(struct usbd_event *ev)
397 {
398 	k_fifo_put(&usbd_evt_fifo, ev);
399 }
400 
401 /**
402  * @brief Get next enqueued USBD event if present.
403  */
404 static inline struct usbd_event *usbd_evt_get(void)
405 {
406 	return k_fifo_get(&usbd_evt_fifo, K_NO_WAIT);
407 }
408 
409 /**
410  * @brief Drop all enqueued events.
411  */
412 static inline void usbd_evt_flush(void)
413 {
414 	struct usbd_event *ev;
415 
416 	do {
417 		ev = usbd_evt_get();
418 		if (ev) {
419 			usbd_evt_free(ev);
420 		}
421 	} while (ev != NULL);
422 }
423 
424 /**
425  * @brief Allocate USBD event.
426  *
427  * This function should be called prior to usbd_evt_put().
428  *
429  * @returns Pointer to the allocated event or NULL if there was no space left.
430  */
431 static inline struct usbd_event *usbd_evt_alloc(void)
432 {
433 	struct usbd_event *ev;
434 	struct usbd_mem_block block;
435 
436 	if (k_mem_slab_alloc(&fifo_elem_slab,
437 			     (void **)&block.data, K_NO_WAIT)) {
438 		LOG_ERR("USBD event allocation failed!");
439 
440 		/*
441 		 * Allocation may fail if the workqueue thread is starved or the
442 		 * event queue size is too small (CONFIG_USB_NRFX_EVT_QUEUE_SIZE).
443 		 * Wipe all events, free the space, and schedule
444 		 * reinitialization.
445 		 */
446 		usbd_evt_flush();
447 
448 		if (k_mem_slab_alloc(&fifo_elem_slab, (void **)&block.data, K_NO_WAIT)) {
449 			LOG_ERR("USBD event memory corrupted");
450 			__ASSERT_NO_MSG(0);
451 			return NULL;
452 		}
453 
454 		ev = (struct usbd_event *)block.data;
455 		ev->block = block;
456 		ev->evt_type = USBD_EVT_REINIT;
457 		usbd_evt_put(ev);
458 		usbd_work_schedule();
459 
460 		return NULL;
461 	}
462 
463 	ev = (struct usbd_event *)block.data;
464 	ev->block = block;
465 
466 	return ev;
467 }
468 
469 static void submit_dc_power_event(enum usbd_periph_state state)
470 {
471 	struct usbd_event *ev = usbd_evt_alloc();
472 
473 	if (!ev) {
474 		return;
475 	}
476 
477 	ev->evt_type = USBD_EVT_POWER;
478 	ev->evt.pwr_evt.state = state;
479 
480 	usbd_evt_put(ev);
481 
482 	if (usbd_ctx.attached) {
483 		usbd_work_schedule();
484 	}
485 }
486 
487 #if CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY
488 static void attached_evt_delay_handler(struct k_timer *timer)
489 {
490 	LOG_DBG("ATTACHED event delay done");
491 	submit_dc_power_event(USBD_ATTACHED);
492 }
493 
494 static K_TIMER_DEFINE(delay_timer, attached_evt_delay_handler, NULL);
495 #endif
496 
497 static void usb_dc_power_event_handler(nrfx_power_usb_evt_t event)
498 {
499 	enum usbd_periph_state new_state;
500 
501 	switch (event) {
502 	case NRFX_POWER_USB_EVT_DETECTED:
503 #if !CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY
504 		new_state = USBD_ATTACHED;
505 		break;
506 #else
507 		LOG_DBG("ATTACHED event delayed");
508 		k_timer_start(&delay_timer,
509 			      K_MSEC(CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY),
510 			      K_NO_WAIT);
511 		return;
512 #endif
513 	case NRFX_POWER_USB_EVT_READY:
514 		new_state = USBD_POWERED;
515 		break;
516 	case NRFX_POWER_USB_EVT_REMOVED:
517 		new_state = USBD_DETACHED;
518 		break;
519 	default:
520 		LOG_ERR("Unknown USB power event %d", event);
521 		return;
522 	}
523 
524 	submit_dc_power_event(new_state);
525 }
526 
527 /* Stop HFXO. The algorithm handles the case when the stop request comes
528  * before the clock has started; it is then stopped from the callback context.
529  */
530 static int hfxo_stop(struct nrf_usbd_ctx *ctx)
531 {
532 	if (atomic_cas(&ctx->clk_requested, 1, 0)) {
533 		return onoff_cancel_or_release(ctx->hfxo_mgr, &ctx->hfxo_cli);
534 	}
535 
536 	return 0;
537 }
538 
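/* Request the HFXO; the clk_requested flag guards against issuing a double
 * request or release.
 */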
539 static int hfxo_start(struct nrf_usbd_ctx *ctx)
540 {
541 	if (atomic_cas(&ctx->clk_requested, 0, 1)) {
542 		sys_notify_init_spinwait(&ctx->hfxo_cli.notify);
543 
544 		return onoff_request(ctx->hfxo_mgr, &ctx->hfxo_cli);
545 	}
546 
547 	return 0;
548 }
549 
550 static void usbd_enable_endpoints(struct nrf_usbd_ctx *ctx)
551 {
552 	struct nrf_usbd_ep_ctx *ep_ctx;
553 	int i;
554 
555 	for (i = 0; i < CFG_EPIN_CNT; i++) {
556 		ep_ctx = in_endpoint_ctx(i);
557 		__ASSERT_NO_MSG(ep_ctx);
558 
559 		if (ep_ctx->cfg.en) {
560 			nrfx_usbd_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
561 		}
562 	}
563 
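	/* ISO endpoints use the fixed endpoint number 8 on this peripheral. */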
564 	if (CFG_EP_ISOIN_CNT) {
565 		ep_ctx = in_endpoint_ctx(NRF_USBD_EPIN(8));
566 		__ASSERT_NO_MSG(ep_ctx);
567 
568 		if (ep_ctx->cfg.en) {
569 			nrfx_usbd_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
570 		}
571 	}
572 
573 	for (i = 0; i < CFG_EPOUT_CNT; i++) {
574 		ep_ctx = out_endpoint_ctx(i);
575 		__ASSERT_NO_MSG(ep_ctx);
576 
577 		if (ep_ctx->cfg.en) {
578 			nrfx_usbd_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
579 		}
580 	}
581 
582 	if (CFG_EP_ISOOUT_CNT) {
583 		ep_ctx = out_endpoint_ctx(NRF_USBD_EPOUT(8));
584 		__ASSERT_NO_MSG(ep_ctx);
585 
586 		if (ep_ctx->cfg.en) {
587 			nrfx_usbd_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
588 		}
589 	}
590 }
591 
592 /**
593  * @brief Reset endpoint state.
594  *
595  * Resets the internal logic state for a given endpoint.
596  *
597  * @param[in]  ep_ctx   Endpoint structure control block
598  */
599 static void ep_ctx_reset(struct nrf_usbd_ep_ctx *ep_ctx)
600 {
601 	ep_ctx->buf.data = ep_ctx->buf.block.data;
602 	ep_ctx->buf.curr = ep_ctx->buf.data;
603 	ep_ctx->buf.len  = 0U;
604 
605 	/* Abort ongoing write operation. */
606 	if (ep_ctx->write_in_progress) {
607 		nrfx_usbd_ep_abort(ep_addr_to_nrfx(ep_ctx->cfg.addr));
608 	}
609 
610 	ep_ctx->read_complete = true;
611 	ep_ctx->read_pending = false;
612 	ep_ctx->write_in_progress = false;
613 	ep_ctx->trans_zlp = false;
614 }
615 
616 /**
617  * @brief Initialize all endpoint structures.
618  *
619  * Endpoint buffers are allocated during the first call of this function.
620  * This function may also be called again on every USB reset event
621  * to reinitialize the state of all endpoints.
622  */
623 static int eps_ctx_init(void)
624 {
625 	struct nrf_usbd_ep_ctx *ep_ctx;
626 	uint32_t i;
627 
628 	for (i = 0U; i < CFG_EPIN_CNT; i++) {
629 		ep_ctx = in_endpoint_ctx(i);
630 		__ASSERT_NO_MSG(ep_ctx);
631 		ep_ctx_reset(ep_ctx);
632 	}
633 
634 	for (i = 0U; i < CFG_EPOUT_CNT; i++) {
635 		ep_ctx = out_endpoint_ctx(i);
636 		__ASSERT_NO_MSG(ep_ctx);
637 
638 		if (!ep_ctx->buf.block.data) {
639 			ep_ctx->buf.block.data = ep_out_bufs[i];
640 		}
641 
642 		ep_ctx_reset(ep_ctx);
643 	}
644 
645 	if (CFG_EP_ISOIN_CNT) {
646 		ep_ctx = in_endpoint_ctx(NRF_USBD_EPIN(8));
647 		__ASSERT_NO_MSG(ep_ctx);
648 		ep_ctx_reset(ep_ctx);
649 	}
650 
651 	if (CFG_EP_ISOOUT_CNT) {
652 		BUILD_ASSERT(CFG_EP_ISOOUT_CNT <= 1);
653 
654 		ep_ctx = out_endpoint_ctx(NRF_USBD_EPOUT(8));
655 		__ASSERT_NO_MSG(ep_ctx);
656 
657 		if (!ep_ctx->buf.block.data) {
658 			ep_ctx->buf.block.data = ep_isoout_bufs[0];
659 		}
660 
661 		ep_ctx_reset(ep_ctx);
662 	}
663 
664 	return 0;
665 }
666 
667 static void eps_ctx_uninit(void)
668 {
669 	struct nrf_usbd_ep_ctx *ep_ctx;
670 	uint32_t i;
671 
672 	for (i = 0U; i < CFG_EPIN_CNT; i++) {
673 		ep_ctx = in_endpoint_ctx(i);
674 		__ASSERT_NO_MSG(ep_ctx);
675 		memset(ep_ctx, 0, sizeof(*ep_ctx));
676 	}
677 
678 	for (i = 0U; i < CFG_EPOUT_CNT; i++) {
679 		ep_ctx = out_endpoint_ctx(i);
680 		__ASSERT_NO_MSG(ep_ctx);
681 		memset(ep_ctx, 0, sizeof(*ep_ctx));
682 	}
683 
684 	if (CFG_EP_ISOIN_CNT) {
685 		ep_ctx = in_endpoint_ctx(NRF_USBD_EPIN(8));
686 		__ASSERT_NO_MSG(ep_ctx);
687 		memset(ep_ctx, 0, sizeof(*ep_ctx));
688 	}
689 
690 	if (CFG_EP_ISOOUT_CNT) {
691 		ep_ctx = out_endpoint_ctx(NRF_USBD_EPOUT(8));
692 		__ASSERT_NO_MSG(ep_ctx);
693 		memset(ep_ctx, 0, sizeof(*ep_ctx));
694 	}
695 }
696 
697 static inline void usbd_work_process_pwr_events(struct usbd_pwr_event *pwr_evt)
698 {
699 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
700 	int err;
701 
702 	switch (pwr_evt->state) {
703 	case USBD_ATTACHED:
704 		if (!nrfx_usbd_is_enabled()) {
705 			LOG_DBG("USB detected");
706 			nrfx_usbd_enable();
707 			err = hfxo_start(ctx);
708 			__ASSERT_NO_MSG(err >= 0);
709 		}
710 
711 		/* No callback here.
712 		 * Stack will be notified when the peripheral is ready.
713 		 */
714 		break;
715 
716 	case USBD_POWERED:
717 		usbd_enable_endpoints(ctx);
718 		nrfx_usbd_start(true);
719 		ctx->ready = true;
720 
721 		LOG_DBG("USB Powered");
722 
723 		if (ctx->status_cb) {
724 			ctx->status_cb(USB_DC_CONNECTED, NULL);
725 		}
726 		break;
727 
728 	case USBD_DETACHED:
729 		ctx->ready = false;
730 		nrfx_usbd_disable();
731 		err = hfxo_stop(ctx);
732 		__ASSERT_NO_MSG(err >= 0);
733 
734 		LOG_DBG("USB Removed");
735 
736 		if (ctx->status_cb) {
737 			ctx->status_cb(USB_DC_DISCONNECTED, NULL);
738 		}
739 		break;
740 
741 	case USBD_SUSPENDED:
742 		if (dev_ready()) {
743 			nrfx_usbd_suspend();
744 			LOG_DBG("USB Suspend state");
745 
746 			if (ctx->status_cb) {
747 				ctx->status_cb(USB_DC_SUSPEND, NULL);
748 			}
749 		}
750 		break;
751 	case USBD_RESUMED:
752 		if (ctx->status_cb && dev_ready()) {
753 			LOG_DBG("USB resume");
754 			ctx->status_cb(USB_DC_RESUME, NULL);
755 		}
756 		break;
757 
758 	default:
759 		break;
760 	}
761 }
762 
763 static inline void usbd_work_process_setup(struct nrf_usbd_ep_ctx *ep_ctx)
764 {
765 	__ASSERT_NO_MSG(ep_ctx);
766 	__ASSERT(ep_ctx->cfg.type == USB_DC_EP_CONTROL,
767 		 "Invalid event on CTRL EP.");
768 
769 	struct usb_setup_packet *usbd_setup;
770 
771 	/* SETUP packets are handled by USBD hardware.
772 	 * For compatibility with the USB stack,
773 	 * the SETUP packet must be reassembled.
774 	 */
775 	usbd_setup = (struct usb_setup_packet *)ep_ctx->buf.data;
776 	memset(usbd_setup, 0, sizeof(struct usb_setup_packet));
777 	usbd_setup->bmRequestType = nrf_usbd_setup_bmrequesttype_get(NRF_USBD);
778 	usbd_setup->bRequest = nrf_usbd_setup_brequest_get(NRF_USBD);
779 	usbd_setup->wValue = nrf_usbd_setup_wvalue_get(NRF_USBD);
780 	usbd_setup->wIndex = nrf_usbd_setup_windex_get(NRF_USBD);
781 	usbd_setup->wLength = nrf_usbd_setup_wlength_get(NRF_USBD);
782 	ep_ctx->buf.len = sizeof(struct usb_setup_packet);
783 
784 	/* Copy setup packet to driver internal structure */
785 	memcpy(&usbd_ctx.setup, usbd_setup, sizeof(struct usb_setup_packet));
786 
787 	LOG_DBG("SETUP: bR:0x%02x bmRT:0x%02x wV:0x%04x wI:0x%04x wL:%d",
788 		(uint32_t)usbd_setup->bRequest,
789 		(uint32_t)usbd_setup->bmRequestType,
790 		(uint32_t)usbd_setup->wValue,
791 		(uint32_t)usbd_setup->wIndex,
792 		(uint32_t)usbd_setup->wLength);
793 
794 	/* Inform the stack. */
795 	ep_ctx->cfg.cb(ep_ctx->cfg.addr, USB_DC_EP_SETUP);
796 
797 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
798 
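	/* For an OUT data stage, remember how much data the Host intends to
	 * send so that subsequent EP0 OUT chunks can be accepted as they
	 * arrive (see usbd_event_transfer_ctrl()).
	 */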
799 	if (usb_reqtype_is_to_device(usbd_setup) && usbd_setup->wLength) {
800 		ctx->ctrl_read_len = usbd_setup->wLength;
801 		/* Allow data chunk on EP0 OUT */
802 		nrfx_usbd_setup_data_clear();
803 	} else {
804 		ctx->ctrl_read_len = 0U;
805 	}
806 }
807 
808 static inline void usbd_work_process_recvreq(struct nrf_usbd_ctx *ctx,
809 					     struct nrf_usbd_ep_ctx *ep_ctx)
810 {
811 	if (!ep_ctx->read_pending) {
812 		return;
813 	}
814 	if (!ep_ctx->read_complete) {
815 		return;
816 	}
817 
818 	ep_ctx->read_pending = false;
819 	ep_ctx->read_complete = false;
820 
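	/* Both the Host request and the previous read completion are in place,
	 * so start the OUT transfer into the endpoint buffer.
	 */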
821 	k_mutex_lock(&ctx->drv_lock, K_FOREVER);
822 	NRFX_USBD_TRANSFER_OUT(transfer, ep_ctx->buf.data,
823 			       ep_ctx->cfg.max_sz);
824 	nrfx_err_t err = nrfx_usbd_ep_transfer(
825 		ep_addr_to_nrfx(ep_ctx->cfg.addr), &transfer);
826 	if (err != NRFX_SUCCESS) {
827 		LOG_ERR("nRF USBD transfer error (OUT): 0x%02x", err);
828 	}
829 	k_mutex_unlock(&ctx->drv_lock);
830 }
831 
832 
833 static inline void usbd_work_process_ep_events(struct usbd_ep_event *ep_evt)
834 {
835 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
836 	struct nrf_usbd_ep_ctx *ep_ctx = ep_evt->ep;
837 
838 	__ASSERT_NO_MSG(ep_ctx);
839 
840 	switch (ep_evt->evt_type) {
841 	case EP_EVT_SETUP_RECV:
842 		usbd_work_process_setup(ep_ctx);
843 		break;
844 
845 	case EP_EVT_RECV_REQ:
846 		usbd_work_process_recvreq(ctx, ep_ctx);
847 		break;
848 
849 	case EP_EVT_RECV_COMPLETE:
850 		ep_ctx->cfg.cb(ep_ctx->cfg.addr,
851 			       USB_DC_EP_DATA_OUT);
852 		break;
853 
854 	case EP_EVT_WRITE_COMPLETE:
855 		if (ep_ctx->cfg.type == USB_DC_EP_CONTROL &&
856 		    !ep_ctx->trans_zlp) {
857 			/* Trigger the hardware to perform
858 			 * status stage, but only if there is
859 			 * no ZLP required.
860 			 */
861 			k_mutex_lock(&ctx->drv_lock, K_FOREVER);
862 			nrfx_usbd_setup_clear();
863 			k_mutex_unlock(&ctx->drv_lock);
864 		}
865 		ep_ctx->cfg.cb(ep_ctx->cfg.addr,
866 			       USB_DC_EP_DATA_IN);
867 		break;
868 	default:
869 		break;
870 	}
871 }
872 
873 static void usbd_event_transfer_ctrl(nrfx_usbd_evt_t const *const p_event)
874 {
875 	struct nrf_usbd_ep_ctx *ep_ctx =
876 		endpoint_ctx(p_event->data.eptransfer.ep);
877 
878 	if (NRF_USBD_EPIN_CHECK(p_event->data.eptransfer.ep)) {
879 		switch (p_event->data.eptransfer.status) {
880 		case NRFX_USBD_EP_OK: {
881 			struct usbd_event *ev = usbd_evt_alloc();
882 
883 			if (!ev) {
884 				return;
885 			}
886 
887 			ep_ctx->write_in_progress = false;
888 			ev->evt_type = USBD_EVT_EP;
889 			ev->evt.ep_evt.evt_type = EP_EVT_WRITE_COMPLETE;
890 			ev->evt.ep_evt.ep = ep_ctx;
891 
892 			LOG_DBG("ctrl write complete");
893 			usbd_evt_put(ev);
894 			usbd_work_schedule();
895 		}
896 		break;
897 
898 		case NRFX_USBD_EP_ABORTED: {
899 			LOG_DBG("Endpoint 0x%02x write aborted",
900 				p_event->data.eptransfer.ep);
901 		}
902 		break;
903 
904 		default: {
905 			LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
906 				p_event->data.eptransfer.status,
907 				p_event->data.eptransfer.ep);
908 		}
909 		break;
910 		}
911 	} else {
912 		switch (p_event->data.eptransfer.status) {
913 		case NRFX_USBD_EP_WAITING: {
914 			struct usbd_event *ev = usbd_evt_alloc();
915 
916 			if (!ev) {
917 				return;
918 			}
919 
920 			LOG_DBG("ctrl read request");
921 
922 			ep_ctx->read_pending = true;
923 			ev->evt_type = USBD_EVT_EP;
924 			ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
925 			ev->evt.ep_evt.ep = ep_ctx;
926 
927 			usbd_evt_put(ev);
928 			usbd_work_schedule();
929 		}
930 		break;
931 
932 		case NRFX_USBD_EP_OK: {
933 			struct nrf_usbd_ctx *ctx = get_usbd_ctx();
934 			struct usbd_event *ev = usbd_evt_alloc();
935 
936 			if (!ev) {
937 				return;
938 			}
939 			nrfx_usbd_ep_status_t err_code;
940 
941 			ev->evt_type = USBD_EVT_EP;
942 			ev->evt.ep_evt.evt_type = EP_EVT_RECV_COMPLETE;
943 			ev->evt.ep_evt.ep = ep_ctx;
944 
945 			err_code = nrfx_usbd_ep_status_get(
946 				p_event->data.eptransfer.ep, &ep_ctx->buf.len);
947 
948 			if (err_code != NRFX_USBD_EP_OK) {
949 				LOG_ERR("_ep_status_get failed! Code: %d",
950 					err_code);
951 				__ASSERT_NO_MSG(0);
952 			}
953 			LOG_DBG("ctrl read done: %d", ep_ctx->buf.len);
954 
955 			if (ctx->ctrl_read_len > ep_ctx->buf.len) {
956 				ctx->ctrl_read_len -= ep_ctx->buf.len;
957 				/* Allow next data chunk on EP0 OUT */
958 				nrfx_usbd_setup_data_clear();
959 			} else {
960 				ctx->ctrl_read_len = 0U;
961 			}
962 
963 			usbd_evt_put(ev);
964 			usbd_work_schedule();
965 		}
966 		break;
967 
968 		default: {
969 			LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
970 				p_event->data.eptransfer.status,
971 				p_event->data.eptransfer.ep);
972 		}
973 		break;
974 		}
975 	}
976 }
977 
978 static void usbd_event_transfer_data(nrfx_usbd_evt_t const *const p_event)
979 {
980 	struct nrf_usbd_ep_ctx *ep_ctx =
981 		endpoint_ctx(p_event->data.eptransfer.ep);
982 
983 	if (NRF_USBD_EPIN_CHECK(p_event->data.eptransfer.ep)) {
984 		switch (p_event->data.eptransfer.status) {
985 		case NRFX_USBD_EP_OK: {
986 			struct usbd_event *ev = usbd_evt_alloc();
987 
988 			if (!ev) {
989 				return;
990 			}
991 
992 			LOG_DBG("write complete, ep 0x%02x",
993 				(uint32_t)p_event->data.eptransfer.ep);
994 
995 			ep_ctx->write_in_progress = false;
996 			ev->evt_type = USBD_EVT_EP;
997 			ev->evt.ep_evt.evt_type = EP_EVT_WRITE_COMPLETE;
998 			ev->evt.ep_evt.ep = ep_ctx;
999 			usbd_evt_put(ev);
1000 			usbd_work_schedule();
1001 		}
1002 		break;
1003 
1004 		case NRFX_USBD_EP_ABORTED: {
1005 			LOG_DBG("Endpoint 0x%02x write aborted",
1006 				p_event->data.eptransfer.ep);
1007 		}
1008 		break;
1009 
1010 		default: {
1011 			LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
1012 				p_event->data.eptransfer.status,
1013 				p_event->data.eptransfer.ep);
1014 		}
1015 		break;
1016 		}
1017 
1018 	} else {
1019 		switch (p_event->data.eptransfer.status) {
1020 		case NRFX_USBD_EP_WAITING: {
1021 			struct usbd_event *ev = usbd_evt_alloc();
1022 
1023 			if (!ev) {
1024 				return;
1025 			}
1026 
1027 			LOG_DBG("read request, ep 0x%02x",
1028 				(uint32_t)p_event->data.eptransfer.ep);
1029 
1030 			ep_ctx->read_pending = true;
1031 			ev->evt_type = USBD_EVT_EP;
1032 			ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
1033 			ev->evt.ep_evt.ep = ep_ctx;
1034 
1035 			usbd_evt_put(ev);
1036 			usbd_work_schedule();
1037 		}
1038 		break;
1039 
1040 		case NRFX_USBD_EP_OK: {
1041 			struct usbd_event *ev = usbd_evt_alloc();
1042 
1043 			if (!ev) {
1044 				return;
1045 			}
1046 
1047 			ep_ctx->buf.len = nrf_usbd_ep_amount_get(NRF_USBD,
1048 				p_event->data.eptransfer.ep);
1049 
1050 			LOG_DBG("read complete, ep 0x%02x, len %d",
1051 				(uint32_t)p_event->data.eptransfer.ep,
1052 				ep_ctx->buf.len);
1053 
1054 			ev->evt_type = USBD_EVT_EP;
1055 			ev->evt.ep_evt.evt_type = EP_EVT_RECV_COMPLETE;
1056 			ev->evt.ep_evt.ep = ep_ctx;
1057 
1058 			usbd_evt_put(ev);
1059 			usbd_work_schedule();
1060 		}
1061 		break;
1062 
1063 		default: {
1064 			LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x",
1065 				p_event->data.eptransfer.status,
1066 				p_event->data.eptransfer.ep);
1067 		}
1068 		break;
1069 		}
1070 	}
1071 }
1072 
1073 /**
1074  * @brief nRFx USBD driver event handler function.
1075  */
1076 static void usbd_event_handler(nrfx_usbd_evt_t const *const p_event)
1077 {
1078 	struct nrf_usbd_ep_ctx *ep_ctx;
1079 	struct usbd_event evt = {0};
1080 	bool put_evt = false;
1081 
1082 	switch (p_event->type) {
1083 	case NRFX_USBD_EVT_SUSPEND:
1084 		LOG_DBG("SUSPEND state detected");
1085 		evt.evt_type = USBD_EVT_POWER;
1086 		evt.evt.pwr_evt.state = USBD_SUSPENDED;
1087 		put_evt = true;
1088 		break;
1089 	case NRFX_USBD_EVT_RESUME:
1090 		LOG_DBG("RESUMING from suspend");
1091 		evt.evt_type = USBD_EVT_POWER;
1092 		evt.evt.pwr_evt.state = USBD_RESUMED;
1093 		put_evt = true;
1094 		break;
1095 	case NRFX_USBD_EVT_WUREQ:
1096 		LOG_DBG("RemoteWU initiated");
1097 		evt.evt_type = USBD_EVT_POWER;
1098 		evt.evt.pwr_evt.state = USBD_RESUMED;
1099 		put_evt = true;
1100 		break;
1101 	case NRFX_USBD_EVT_RESET:
1102 		evt.evt_type = USBD_EVT_RESET;
1103 		put_evt = true;
1104 		break;
1105 	case NRFX_USBD_EVT_SOF:
1106 		if (IS_ENABLED(CONFIG_USB_DEVICE_SOF)) {
1107 			evt.evt_type = USBD_EVT_SOF;
1108 			put_evt = true;
1109 		}
1110 		break;
1111 
1112 	case NRFX_USBD_EVT_EPTRANSFER:
1113 		ep_ctx = endpoint_ctx(p_event->data.eptransfer.ep);
1114 		switch (ep_ctx->cfg.type) {
1115 		case USB_DC_EP_CONTROL:
1116 			usbd_event_transfer_ctrl(p_event);
1117 			break;
1118 		case USB_DC_EP_BULK:
1119 		case USB_DC_EP_INTERRUPT:
1120 			usbd_event_transfer_data(p_event);
1121 			break;
1122 		case USB_DC_EP_ISOCHRONOUS:
1123 			usbd_event_transfer_data(p_event);
1124 			break;
1125 		default:
1126 			break;
1127 		}
1128 		break;
1129 
1130 	case NRFX_USBD_EVT_SETUP: {
1131 		nrfx_usbd_setup_t drv_setup;
1132 
1133 		nrfx_usbd_setup_get(&drv_setup);
1134 		if ((drv_setup.bRequest != USB_SREQ_SET_ADDRESS)
1135 		    || (USB_REQTYPE_GET_TYPE(drv_setup.bmRequestType)
1136 			!= USB_REQTYPE_TYPE_STANDARD)) {
1137 			/* SetAddress is handled by USBD hardware and requires
1138 			 * no software action; other requests are passed on to the stack.
1139 			 */
1140 
1141 			struct nrf_usbd_ep_ctx *ep_ctx =
1142 				endpoint_ctx(NRF_USBD_EPOUT(0));
1143 
1144 			evt.evt_type = USBD_EVT_EP;
1145 			evt.evt.ep_evt.ep = ep_ctx;
1146 			evt.evt.ep_evt.evt_type = EP_EVT_SETUP_RECV;
1147 			put_evt = true;
1148 		}
1149 		break;
1150 	}
1151 
1152 	default:
1153 		break;
1154 	}
1155 
1156 	if (put_evt) {
1157 		struct usbd_event *ev;
1158 
1159 		ev = usbd_evt_alloc();
1160 		if (!ev) {
1161 			return;
1162 		}
1163 		ev->evt_type = evt.evt_type;
1164 		ev->evt = evt.evt;
1165 		usbd_evt_put(ev);
1166 		usbd_work_schedule();
1167 	}
1168 }
1169 
1170 static inline void usbd_reinit(void)
1171 {
1172 	int ret;
1173 	nrfx_err_t err;
1174 
1175 	nrfx_power_usbevt_disable();
1176 	nrfx_usbd_disable();
1177 	nrfx_usbd_uninit();
1178 
1179 	usbd_evt_flush();
1180 
1181 	ret = eps_ctx_init();
1182 	__ASSERT_NO_MSG(ret == 0);
1183 
1184 	nrfx_power_usbevt_enable();
1185 	err = nrfx_usbd_init(usbd_event_handler);
1186 
1187 	if (err != NRFX_SUCCESS) {
1188 		LOG_DBG("nRF USBD driver reinit failed. Code: %d", err);
1189 		__ASSERT_NO_MSG(0);
1190 	}
1191 }
1192 
1193 /**
1194  * @brief Function to generate a fake receive request for the
1195  * ISO OUT EP.
1196  *
1197  * The ISO OUT endpoint does not generate an IRQ by itself; reading
1198  * from the ISO OUT EP is synchronized with the SOF frame. For more
1199  * details refer to the Nordic USBD specification.
1200  */
1201 static void usbd_sof_trigger_iso_read(void)
1202 {
1203 	struct usbd_event *ev;
1204 	struct nrf_usbd_ep_ctx *ep_ctx;
1205 
1206 	ep_ctx = endpoint_ctx(NRFX_USBD_EPOUT8);
1207 	if (!ep_ctx) {
1208 		LOG_ERR("There is no ISO ep");
1209 		return;
1210 	}
1211 
1212 	if (ep_ctx->cfg.en) {
1213 		/* Dispatch a receive request
1214 		 * if the ISO OUT EP is enabled.
1215 		 */
1216 		ep_ctx->read_pending = true;
1217 		ep_ctx->read_complete = true;
1218 		ev = usbd_evt_alloc();
1219 		if (!ev) {
1220 			LOG_ERR("Failed to alloc evt");
1221 			return;
1222 		}
1223 		ev->evt_type = USBD_EVT_EP;
1224 		ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
1225 		ev->evt.ep_evt.ep = ep_ctx;
1226 		usbd_evt_put(ev);
1227 		usbd_work_schedule();
1228 	} else {
1229 		LOG_DBG("Endpoint is not enabled");
1230 	}
1231 }
1232 
1233 /* Work handler */
1234 static void usbd_work_handler(struct k_work *item)
1235 {
1236 	struct nrf_usbd_ctx *ctx;
1237 	struct usbd_event *ev;
1238 
1239 	ctx = CONTAINER_OF(item, struct nrf_usbd_ctx, usb_work);
1240 
1241 	while ((ev = usbd_evt_get()) != NULL) {
1242 		if (!dev_ready() && ev->evt_type != USBD_EVT_POWER) {
1243 			/* Drop non-power events when cable is detached. */
1244 			usbd_evt_free(ev);
1245 			continue;
1246 		}
1247 
1248 		switch (ev->evt_type) {
1249 		case USBD_EVT_EP:
1250 			if (!ctx->attached) {
1251 				LOG_ERR("not attached, EP 0x%02x event dropped",
1252 					(uint32_t)ev->evt.ep_evt.ep->cfg.addr);
1253 			}
1254 			usbd_work_process_ep_events(&ev->evt.ep_evt);
1255 			break;
1256 		case USBD_EVT_POWER:
1257 			usbd_work_process_pwr_events(&ev->evt.pwr_evt);
1258 			break;
1259 		case USBD_EVT_RESET:
1260 			LOG_DBG("USBD reset event");
1261 			k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1262 			eps_ctx_init();
1263 			k_mutex_unlock(&ctx->drv_lock);
1264 
1265 			if (ctx->status_cb) {
1266 				ctx->status_cb(USB_DC_RESET, NULL);
1267 			}
1268 			break;
1269 		case USBD_EVT_SOF:
1270 			usbd_sof_trigger_iso_read();
1271 
1272 			if (ctx->status_cb) {
1273 				ctx->status_cb(USB_DC_SOF, NULL);
1274 			}
1275 			break;
1276 		case USBD_EVT_REINIT: {
1277 				/*
1278 				 * Reinitialize the peripheral after queue
1279 				 * overflow.
1280 				 */
1281 				LOG_ERR("USBD event queue full!");
1282 				usbd_reinit();
1283 				break;
1284 			}
1285 		default:
1286 			LOG_ERR("Unknown USBD event: %"PRId16, ev->evt_type);
1287 			break;
1288 		}
1289 		usbd_evt_free(ev);
1290 	}
1291 }
1292 
1293 int usb_dc_attach(void)
1294 {
1295 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1296 	nrfx_err_t err;
1297 	int ret;
1298 
1299 	if (ctx->attached) {
1300 		return 0;
1301 	}
1302 
1303 	k_mutex_init(&ctx->drv_lock);
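	/* Obtain the on-off manager used to request the high-frequency clock
	 * (HFCLK192M on SoCs that provide it) while the device is attached.
	 */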
1304 	ctx->hfxo_mgr =
1305 		z_nrf_clock_control_get_onoff(
1306 			COND_CODE_1(NRF_CLOCK_HAS_HFCLK192M,
1307 				    (CLOCK_CONTROL_NRF_SUBSYS_HF192M),
1308 				    (CLOCK_CONTROL_NRF_SUBSYS_HF)));
1309 
1310 	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
1311 		    nrfx_isr, nrfx_usbd_irq_handler, 0);
1312 
1313 	err = nrfx_usbd_init(usbd_event_handler);
1314 
1315 	if (err != NRFX_SUCCESS) {
1316 		LOG_DBG("nRF USBD driver init failed. Code: %d", (uint32_t)err);
1317 		return -EIO;
1318 	}
1319 	nrfx_power_usbevt_enable();
1320 
1321 	ret = eps_ctx_init();
1322 	if (ret == 0) {
1323 		ctx->attached = true;
1324 	}
1325 
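	/* Process any events that were queued before the attach completed. */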
1326 	if (!k_fifo_is_empty(&usbd_evt_fifo)) {
1327 		usbd_work_schedule();
1328 	}
1329 
1330 	if (nrfx_power_usbstatus_get() != NRFX_POWER_USB_STATE_DISCONNECTED) {
1331 		/* The USBDETECTED event is generated on cable attachment and
1332 		 * when the cable is already attached during reset, but not when
1333 		 * the peripheral is re-enabled.
1334 		 * When a USB-enabled bootloader is used, the target application
1335 		 * will not receive this event, so it needs to be generated
1336 		 * again here.
1337 		 */
1338 		usb_dc_power_event_handler(NRFX_POWER_USB_EVT_DETECTED);
1339 	}
1340 
1341 	return ret;
1342 }
1343 
1344 int usb_dc_detach(void)
1345 {
1346 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1347 
1348 	k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1349 
1350 	usbd_evt_flush();
1351 	eps_ctx_uninit();
1352 
1353 	if (nrfx_usbd_is_enabled()) {
1354 		nrfx_usbd_disable();
1355 	}
1356 
1357 	if (nrfx_usbd_is_initialized()) {
1358 		nrfx_usbd_uninit();
1359 	}
1360 
1361 	(void)hfxo_stop(ctx);
1362 	nrfx_power_usbevt_disable();
1363 
1364 	ctx->attached = false;
1365 	k_mutex_unlock(&ctx->drv_lock);
1366 
1367 	return 0;
1368 }
1369 
1370 int usb_dc_reset(void)
1371 {
1372 	int ret;
1373 
1374 	if (!dev_attached() || !dev_ready()) {
1375 		return -ENODEV;
1376 	}
1377 
1378 	LOG_DBG("USBD Reset");
1379 
1380 	ret = usb_dc_detach();
1381 	if (ret) {
1382 		return ret;
1383 	}
1384 
1385 	ret = usb_dc_attach();
1386 	if (ret) {
1387 		return ret;
1388 	}
1389 
1390 	return 0;
1391 }
1392 
1393 int usb_dc_set_address(const uint8_t addr)
1394 {
1395 	struct nrf_usbd_ctx *ctx;
1396 
1397 	if (!dev_attached() || !dev_ready()) {
1398 		return -ENODEV;
1399 	}
1400 
1401 	/**
1402 	 * Nothing to do here. The USBD HW already takes care of initiating the
1403 	 * STATUS stage. Just double-check the address for sanity.
1404 	 */
1405 	__ASSERT(addr == (uint8_t)NRF_USBD->USBADDR, "USB Address incorrect!");
1406 
1407 	ctx = get_usbd_ctx();
1408 
1409 	LOG_DBG("Address set to: %d", addr);
1410 
1411 	return 0;
1412 }
1413 
1414 
1415 int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data *const ep_cfg)
1416 {
1417 	uint8_t ep_idx = NRF_USBD_EP_NR_GET(ep_cfg->ep_addr);
1418 
1419 	LOG_DBG("ep 0x%02x, mps %d, type %d", ep_cfg->ep_addr, ep_cfg->ep_mps,
1420 		ep_cfg->ep_type);
1421 
1422 	if ((ep_cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) {
1423 		LOG_ERR("invalid endpoint configuration");
1424 		return -1;
1425 	}
1426 
1427 	if (!NRF_USBD_EP_VALIDATE(ep_cfg->ep_addr)) {
1428 		LOG_ERR("invalid endpoint index/address");
1429 		return -1;
1430 	}
1431 
1432 	if ((ep_cfg->ep_type == USB_DC_EP_ISOCHRONOUS) &&
1433 	    (!NRF_USBD_EPISO_CHECK(ep_cfg->ep_addr))) {
1434 		LOG_WRN("invalid endpoint type");
1435 		return -1;
1436 	}
1437 
1438 	return 0;
1439 }
1440 
1441 int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const ep_cfg)
1442 {
1443 	struct nrf_usbd_ep_ctx *ep_ctx;
1444 
1445 	if (!dev_attached()) {
1446 		return -ENODEV;
1447 	}
1448 
1449 	/**
1450 	 * TODO:
1451 	 * For ISO endpoints, the application has to use EPIN/OUT 8,
1452 	 * but right now there is no standard way of knowing the
1453 	 * ISOIN/ISOOUT endpoint number in advance in order to configure
1454 	 * it accordingly. Either this needs to be chosen via Kconfig in
1455 	 * the application area or in the devicetree at compile time, or
1456 	 * a new API has to be introduced to read the endpoint
1457 	 * configuration at runtime before configuring the endpoints.
1458 	 */
1459 	ep_ctx = endpoint_ctx(ep_cfg->ep_addr);
1460 	if (!ep_ctx) {
1461 		return -EINVAL;
1462 	}
1463 
1464 	ep_ctx->cfg.addr = ep_cfg->ep_addr;
1465 	ep_ctx->cfg.type = ep_cfg->ep_type;
1466 	ep_ctx->cfg.max_sz = ep_cfg->ep_mps;
1467 
1468 	if (!NRF_USBD_EPISO_CHECK(ep_cfg->ep_addr)) {
1469 		if ((ep_cfg->ep_mps & (ep_cfg->ep_mps - 1)) != 0U) {
1470 			LOG_ERR("EP max packet size must be a power of 2");
1471 			return -EINVAL;
1472 		}
1473 	}
1474 
1475 	nrfx_usbd_ep_max_packet_size_set(ep_addr_to_nrfx(ep_cfg->ep_addr),
1476 					 ep_cfg->ep_mps);
1477 
1478 	return 0;
1479 }
1480 
1481 int usb_dc_ep_set_stall(const uint8_t ep)
1482 {
1483 	struct nrf_usbd_ep_ctx *ep_ctx;
1484 
1485 	if (!dev_attached() || !dev_ready()) {
1486 		return -ENODEV;
1487 	}
1488 
1489 	ep_ctx = endpoint_ctx(ep);
1490 	if (!ep_ctx) {
1491 		return -EINVAL;
1492 	}
1493 
1494 	switch (ep_ctx->cfg.type) {
1495 	case USB_DC_EP_CONTROL:
1496 		nrfx_usbd_setup_stall();
1497 		break;
1498 	case USB_DC_EP_BULK:
1499 	case USB_DC_EP_INTERRUPT:
1500 		nrfx_usbd_ep_stall(ep_addr_to_nrfx(ep));
1501 		break;
1502 	case USB_DC_EP_ISOCHRONOUS:
1503 		LOG_ERR("STALL unsupported on ISO endpoint");
1504 		return -EINVAL;
1505 	}
1506 
1507 	ep_ctx->buf.len = 0U;
1508 	ep_ctx->buf.curr = ep_ctx->buf.data;
1509 
1510 	LOG_DBG("STALL on EP 0x%02x", ep);
1511 
1512 	return 0;
1513 }
1514 
1515 int usb_dc_ep_clear_stall(const uint8_t ep)
1516 {
1517 
1518 	struct nrf_usbd_ep_ctx *ep_ctx;
1519 
1520 	if (!dev_attached() || !dev_ready()) {
1521 		return -ENODEV;
1522 	}
1523 
1524 	ep_ctx = endpoint_ctx(ep);
1525 	if (!ep_ctx) {
1526 		return -EINVAL;
1527 	}
1528 
1529 	if (NRF_USBD_EPISO_CHECK(ep)) {
1530 		/* ISO transactions do not support a handshake phase. */
1531 		return -EINVAL;
1532 	}
1533 
1534 	nrfx_usbd_ep_dtoggle_clear(ep_addr_to_nrfx(ep));
1535 	nrfx_usbd_ep_stall_clear(ep_addr_to_nrfx(ep));
1536 	LOG_DBG("Unstall on EP 0x%02x", ep);
1537 
1538 	return 0;
1539 }
1540 
1541 int usb_dc_ep_halt(const uint8_t ep)
1542 {
1543 	return usb_dc_ep_set_stall(ep);
1544 }
1545 
1546 int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled)
1547 {
1548 	struct nrf_usbd_ep_ctx *ep_ctx;
1549 
1550 	if (!dev_attached() || !dev_ready()) {
1551 		return -ENODEV;
1552 	}
1553 
1554 	ep_ctx = endpoint_ctx(ep);
1555 	if (!ep_ctx) {
1556 		return -EINVAL;
1557 	}
1558 
1559 	if (!stalled) {
1560 		return -EINVAL;
1561 	}
1562 
1563 	*stalled = (uint8_t) nrfx_usbd_ep_stall_check(ep_addr_to_nrfx(ep));
1564 
1565 	return 0;
1566 }
1567 
1568 int usb_dc_ep_enable(const uint8_t ep)
1569 {
1570 	struct nrf_usbd_ep_ctx *ep_ctx;
1571 
1572 	if (!dev_attached()) {
1573 		return -ENODEV;
1574 	}
1575 
1576 	ep_ctx = endpoint_ctx(ep);
1577 	if (!ep_ctx) {
1578 		return -EINVAL;
1579 	}
1580 
1581 	if (!NRF_USBD_EPISO_CHECK(ep)) {
1582 		/* ISO transactions for full-speed device do not support
1583 		 * toggle sequencing and should only send DATA0 PID.
1584 		 */
1585 		nrfx_usbd_ep_dtoggle_clear(ep_addr_to_nrfx(ep));
1586 		/** Endpoint is enabled on SetInterface request.
1587 		 * This should also clear EP's halt status.
1588 		 */
1589 		nrfx_usbd_ep_stall_clear(ep_addr_to_nrfx(ep));
1590 	}
1591 	if (ep_ctx->cfg.en) {
1592 		return -EALREADY;
1593 	}
1594 
1595 	LOG_DBG("EP enable: 0x%02x", ep);
1596 
1597 	ep_ctx->cfg.en = true;
1598 
1599 	/* Defer the endpoint enable if USBD is not ready yet. */
1600 	if (dev_ready()) {
1601 		nrfx_usbd_ep_enable(ep_addr_to_nrfx(ep));
1602 	}
1603 
1604 	return 0;
1605 }
1606 
1607 int usb_dc_ep_disable(const uint8_t ep)
1608 {
1609 	struct nrf_usbd_ep_ctx *ep_ctx;
1610 
1611 	if (!dev_attached() || !dev_ready()) {
1612 		return -ENODEV;
1613 	}
1614 
1615 	ep_ctx = endpoint_ctx(ep);
1616 	if (!ep_ctx) {
1617 		return -EINVAL;
1618 	}
1619 
1620 	if (!ep_ctx->cfg.en) {
1621 		return -EALREADY;
1622 	}
1623 
1624 	LOG_DBG("EP disable: 0x%02x", ep);
1625 
1626 	nrfx_usbd_ep_disable(ep_addr_to_nrfx(ep));
1627 	/* Clear write_in_progress as nrfx_usbd_ep_disable()
1628 	 * terminates endpoint transaction.
1629 	 */
1630 	ep_ctx->write_in_progress = false;
1631 	ep_ctx_reset(ep_ctx);
1632 	ep_ctx->cfg.en = false;
1633 
1634 	return 0;
1635 }
1636 
1637 int usb_dc_ep_flush(const uint8_t ep)
1638 {
1639 	struct nrf_usbd_ep_ctx *ep_ctx;
1640 
1641 	if (!dev_attached() || !dev_ready()) {
1642 		return -ENODEV;
1643 	}
1644 
1645 	ep_ctx = endpoint_ctx(ep);
1646 	if (!ep_ctx) {
1647 		return -EINVAL;
1648 	}
1649 
1650 	ep_ctx->buf.len = 0U;
1651 	ep_ctx->buf.curr = ep_ctx->buf.data;
1652 
1653 	nrfx_usbd_transfer_out_drop(ep_addr_to_nrfx(ep));
1654 
1655 	return 0;
1656 }
1657 
1658 int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data,
1659 		    const uint32_t data_len, uint32_t *const ret_bytes)
1660 {
1661 	LOG_DBG("ep_write: ep 0x%02x, len %d", ep, data_len);
1662 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1663 	struct nrf_usbd_ep_ctx *ep_ctx;
1664 	int result = 0;
1665 
1666 	if (!dev_attached() || !dev_ready()) {
1667 		return -ENODEV;
1668 	}
1669 
1670 	if (NRF_USBD_EPOUT_CHECK(ep)) {
1671 		return -EINVAL;
1672 	}
1673 
1674 	ep_ctx = endpoint_ctx(ep);
1675 	if (!ep_ctx) {
1676 		return -EINVAL;
1677 	}
1678 
1679 	if (!ep_ctx->cfg.en) {
1680 		LOG_ERR("Endpoint 0x%02x is not enabled", ep);
1681 		return -EINVAL;
1682 	}
1683 
1684 	k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1685 
1686 	/* The USBD driver does not allow scheduling multiple DMA transfers
1687 	 * for one EP at a time. The next USB transfer on this endpoint can
1688 	 * only be triggered after the previous one completes.
1689 	 */
1690 	if (ep_ctx->write_in_progress) {
1691 		k_mutex_unlock(&ctx->drv_lock);
1692 		return -EAGAIN;
1693 	}
1694 
1695 	/** Clear the ZLP flag if the current write is a ZLP. After the ZLP is
1696 	 * sent, the driver will perform the status stage.
1697 	 */
1698 	if (!data_len && ep_ctx->trans_zlp) {
1699 		ep_ctx->trans_zlp = false;
1700 	}
1701 
1702 	/** When writing to a Control Endpoint there might be a need to transfer
1703 	 * a ZLP: if the Host asks for more data than the device can return and
1704 	 * the last packet is wMaxPacketSize long, the driver must send a ZLP.
1705 	 * For consistency with the Zephyr USB stack, sending the ZLP must be
1706 	 * issued from the stack level. Setting the trans_zlp flag blocks the
1707 	 * driver from starting the status stage without the required ZLP.
1708 	 */
1709 	if (ep_ctx->cfg.type == USB_DC_EP_CONTROL) {
1710 		if (data_len && usbd_ctx.setup.wLength > data_len &&
1711 		    !(data_len % ep_ctx->cfg.max_sz)) {
1712 			ep_ctx->trans_zlp = true;
1713 		}
1714 	}
1715 
1716 	/* Setup stage is handled by hardware.
1717 	 * Detect the setup stage initiated by the stack
1718 	 * and perform appropriate action.
1719 	 */
1720 	if ((ep_ctx->cfg.type == USB_DC_EP_CONTROL)
1721 	    && (nrfx_usbd_last_setup_dir_get() != ep)) {
1722 		nrfx_usbd_setup_clear();
1723 		k_mutex_unlock(&ctx->drv_lock);
1724 		return 0;
1725 	}
1726 
1727 	ep_ctx->write_in_progress = true;
1728 	NRFX_USBD_TRANSFER_IN(transfer, data, data_len, 0);
1729 	nrfx_err_t err = nrfx_usbd_ep_transfer(ep_addr_to_nrfx(ep), &transfer);
1730 
1731 	if (err != NRFX_SUCCESS) {
1732 		ep_ctx->write_in_progress = false;
1733 		if (ret_bytes) {
1734 			*ret_bytes = 0;
1735 		}
1736 		result = -EIO;
1737 		LOG_ERR("nRF USBD write error: %d", (uint32_t)err);
1738 	} else {
1739 		if (ret_bytes) {
1740 			*ret_bytes = data_len;
1741 		}
1742 	}
1743 
1744 	k_mutex_unlock(&ctx->drv_lock);
1745 	return result;
1746 }
1747 
1748 int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len,
1749 			uint32_t *read_bytes)
1750 {
1751 	struct nrf_usbd_ep_ctx *ep_ctx;
1752 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1753 	uint32_t bytes_to_copy;
1754 
1755 	if (!dev_attached() || !dev_ready()) {
1756 		return -ENODEV;
1757 	}
1758 
1759 	if (NRF_USBD_EPIN_CHECK(ep)) {
1760 		return -EINVAL;
1761 	}
1762 
1763 	if (!data && max_data_len) {
1764 		return -EINVAL;
1765 	}
1766 
1767 	ep_ctx = endpoint_ctx(ep);
1768 	if (!ep_ctx) {
1769 		return -EINVAL;
1770 	}
1771 
1772 	if (!ep_ctx->cfg.en) {
1773 		LOG_ERR("Endpoint 0x%02x is not enabled", ep);
1774 		return -EINVAL;
1775 	}
1776 
1777 	k_mutex_lock(&ctx->drv_lock, K_FOREVER);
1778 
1779 	bytes_to_copy = MIN(max_data_len, ep_ctx->buf.len);
1780 
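	/* A NULL buffer with zero length is a query for the number of bytes
	 * available in the endpoint buffer.
	 */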
1781 	if (!data && !max_data_len) {
1782 		if (read_bytes) {
1783 			*read_bytes = ep_ctx->buf.len;
1784 		}
1785 		k_mutex_unlock(&ctx->drv_lock);
1786 		return 0;
1787 	}
1788 
1789 	memcpy(data, ep_ctx->buf.curr, bytes_to_copy);
1790 
1791 	ep_ctx->buf.curr += bytes_to_copy;
1792 	ep_ctx->buf.len -= bytes_to_copy;
1793 	if (read_bytes) {
1794 		*read_bytes = bytes_to_copy;
1795 	}
1796 
1797 	k_mutex_unlock(&ctx->drv_lock);
1798 	return 0;
1799 }
1800 
1801 int usb_dc_ep_read_continue(uint8_t ep)
1802 {
1803 	struct nrf_usbd_ep_ctx *ep_ctx;
1804 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1805 
1806 	if (!dev_attached() || !dev_ready()) {
1807 		return -ENODEV;
1808 	}
1809 
1810 	if (NRF_USBD_EPIN_CHECK(ep)) {
1811 		return -EINVAL;
1812 	}
1813 
1814 	ep_ctx = endpoint_ctx(ep);
1815 	if (!ep_ctx) {
1816 		return -EINVAL;
1817 	}
1818 
1819 	if (!ep_ctx->cfg.en) {
1820 		LOG_ERR("Endpoint 0x%02x is not enabled", ep);
1821 		return -EINVAL;
1822 	}
1823 
1824 	k_mutex_lock(&ctx->drv_lock, K_FOREVER);
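	/* All previously received data has been consumed: rearm the endpoint
	 * buffer and, if the Host has already requested more data, schedule
	 * the deferred receive request.
	 */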
1825 	if (!ep_ctx->buf.len) {
1826 		ep_ctx->buf.curr = ep_ctx->buf.data;
1827 		ep_ctx->read_complete = true;
1828 
1829 		if (ep_ctx->read_pending) {
1830 			struct usbd_event *ev = usbd_evt_alloc();
1831 
1832 			if (!ev) {
1833 				k_mutex_unlock(&ctx->drv_lock);
1834 				return -ENOMEM;
1835 			}
1836 
1837 			ev->evt_type = USBD_EVT_EP;
1838 			ev->evt.ep_evt.ep = ep_ctx;
1839 			ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
1840 			usbd_evt_put(ev);
1841 			usbd_work_schedule();
1842 		}
1843 	}
1844 	k_mutex_unlock(&ctx->drv_lock);
1845 
1846 	return 0;
1847 }
1848 
1849 int usb_dc_ep_read(const uint8_t ep, uint8_t *const data,
1850 		   const uint32_t max_data_len, uint32_t *const read_bytes)
1851 {
1852 	LOG_DBG("ep_read: ep 0x%02x, maxlen %d", ep, max_data_len);
1853 	int ret;
1854 
1855 	ret = usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes);
1856 	if (ret) {
1857 		return ret;
1858 	}
1859 
1860 	if (!data && !max_data_len) {
1861 		return ret;
1862 	}
1863 
1864 	ret = usb_dc_ep_read_continue(ep);
1865 	return ret;
1866 }
1867 
1868 int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb)
1869 {
1870 	struct nrf_usbd_ep_ctx *ep_ctx;
1871 
1872 	if (!dev_attached()) {
1873 		return -ENODEV;
1874 	}
1875 
1876 	ep_ctx = endpoint_ctx(ep);
1877 	if (!ep_ctx) {
1878 		return -EINVAL;
1879 	}
1880 
1881 	ep_ctx->cfg.cb = cb;
1882 
1883 	return 0;
1884 }
1885 
1886 void usb_dc_set_status_callback(const usb_dc_status_callback cb)
1887 {
1888 	get_usbd_ctx()->status_cb = cb;
1889 }
1890 
1891 int usb_dc_ep_mps(const uint8_t ep)
1892 {
1893 	struct nrf_usbd_ep_ctx *ep_ctx;
1894 
1895 	if (!dev_attached()) {
1896 		return -ENODEV;
1897 	}
1898 
1899 	ep_ctx = endpoint_ctx(ep);
1900 	if (!ep_ctx) {
1901 		return -EINVAL;
1902 	}
1903 
1904 	return ep_ctx->cfg.max_sz;
1905 }
1906 
1907 int usb_dc_wakeup_request(void)
1908 {
1909 	bool res = nrfx_usbd_wakeup_req();
1910 
1911 	if (!res) {
1912 		return -EAGAIN;
1913 	}
1914 	return 0;
1915 }
1916 
1917 static int usb_init(const struct device *arg)
1918 {
1919 	struct nrf_usbd_ctx *ctx = get_usbd_ctx();
1920 
1921 #ifdef CONFIG_HAS_HW_NRF_USBREG
1922 	/* Use CLOCK/POWER priority for compatibility with other series where
1923 	 * USB events are handled by CLOCK interrupt handler.
1924 	 */
1925 	IRQ_CONNECT(USBREGULATOR_IRQn,
1926 		    DT_IRQ(DT_INST(0, nordic_nrf_clock), priority),
1927 		    nrfx_isr, nrfx_usbreg_irq_handler, 0);
1928 	irq_enable(USBREGULATOR_IRQn);
1929 #endif
1930 
1931 	static const nrfx_power_config_t power_config = {
1932 		.dcdcen = IS_ENABLED(CONFIG_SOC_DCDC_NRF52X) ||
1933 			  IS_ENABLED(CONFIG_SOC_DCDC_NRF53X_APP),
1934 #if NRFX_POWER_SUPPORTS_DCDCEN_VDDH
1935 		.dcdcenhv = IS_ENABLED(CONFIG_SOC_DCDC_NRF53X_HV),
1936 #endif
1937 	};
1938 
1939 	static const nrfx_power_usbevt_config_t usbevt_config = {
1940 		.handler = usb_dc_power_event_handler
1941 	};
1942 
1943 	/* Ignore the return value, as NRFX_ERROR_ALREADY_INITIALIZED is not
1944 	 * a problem here.
1945 	 */
1946 	(void)nrfx_power_init(&power_config);
1947 	nrfx_power_usbevt_init(&usbevt_config);
1948 
1949 	k_work_queue_start(&usbd_work_queue,
1950 			   usbd_work_queue_stack,
1951 			   K_KERNEL_STACK_SIZEOF(usbd_work_queue_stack),
1952 			   CONFIG_SYSTEM_WORKQUEUE_PRIORITY, NULL);
1953 
1954 	k_work_init(&ctx->usb_work, usbd_work_handler);
1955 
1956 	return 0;
1957 }
1958 
1959 SYS_INIT(usb_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
1960