1 /*
2 * Copyright (c) 2023 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include "udc_common.h"
8 #include "udc_dwc2.h"
9
10 #include <string.h>
11 #include <stdio.h>
12
13 #include <zephyr/cache.h>
14 #include <zephyr/kernel.h>
15 #include <zephyr/devicetree.h>
16 #include <zephyr/sys/util.h>
17 #include <zephyr/sys/sys_io.h>
18 #include <zephyr/sys/byteorder.h>
19 #include <zephyr/drivers/usb/udc.h>
20 #include <zephyr/usb/usb_ch9.h>
21 #include <usb_dwc2_hw.h>
22
23 #include <zephyr/logging/log.h>
24 LOG_MODULE_REGISTER(udc_dwc2, CONFIG_UDC_DRIVER_LOG_LEVEL);
25 #include "udc_dwc2_vendor_quirks.h"
26
/* Events processed by the driver thread. The enumerator value is used
 * directly as the bit position posted to the drv_evt k_event.
 */
enum dwc2_drv_event_type {
	/* USB connection speed determined after bus reset */
	DWC2_DRV_EVT_ENUM_DONE,
	/* Trigger next transfer, must not be used for control OUT */
	DWC2_DRV_EVT_XFER,
	/* Setup packet received */
	DWC2_DRV_EVT_SETUP,
	/* Transaction on endpoint is finished */
	DWC2_DRV_EVT_EP_FINISHED,
	/* Remote Wakeup should be initiated */
	DWC2_DRV_EVT_REMOTE_WAKEUP,
	/* Core should enter hibernation */
	DWC2_DRV_EVT_ENTER_HIBERNATION,
	/* Core should exit hibernation due to bus reset */
	DWC2_DRV_EVT_HIBERNATION_EXIT_BUS_RESET,
	/* Core should exit hibernation due to host resume */
	DWC2_DRV_EVT_HIBERNATION_EXIT_HOST_RESUME,
};
45
/* Minimum RX FIFO size in 32-bit words considering the largest used OUT packet
 * of 512 bytes. The value must be adjusted according to the number of OUT
 * endpoints.
 */
#define UDC_DWC2_GRXFSIZ_FS_DEFAULT	(15U + 512U/4U)
/* Default Rx FIFO size in 32-bit words calculated to support High-Speed with:
 *   * 1 control endpoint in Completer/Buffer DMA mode: 13 locations
 *   * Global OUT NAK: 1 location
 *   * Space for 3 * 1024 packets: ((1024/4) + 1) * 3 = 774 locations
 * Driver adds 2 locations for each OUT endpoint to this value.
 */
#define UDC_DWC2_GRXFSIZ_HS_DEFAULT	(13 + 1 + 774)

/* TX FIFO0 depth in 32-bit words (used by control IN endpoint)
 * Try 2 * bMaxPacketSize0 to allow simultaneous operation with a fallback to
 * whatever is available when 2 * bMaxPacketSize0 is not possible.
 */
#define UDC_DWC2_FIFO0_DEPTH		(2 * 16U)

/* Get Data FIFO access register address; each endpoint FIFO window is
 * 0x1000 bytes apart. Arguments are fully parenthesized so that callers
 * may pass arbitrary expressions without operator precedence surprises.
 */
#define UDC_DWC2_EP_FIFO(base, idx)	((mem_addr_t)(base) + 0x1000 * ((idx) + 1))
67
/* Power saving strategy applied when the bus is suspended */
enum dwc2_suspend_type {
	/* Stay fully powered; no state save/restore is required on resume */
	DWC2_SUSPEND_NO_POWER_SAVING,
	/* Enter core hibernation; registers are backed up and restored */
	DWC2_SUSPEND_HIBERNATION,
};
72
/* Registers that have to be stored before Partial Power Down or Hibernation.
 * The layout mirrors the core register names; values are captured by
 * dwc2_backup_registers() and written back on hibernation exit.
 */
struct dwc2_reg_backup {
	uint32_t gotgctl;
	uint32_t gahbcfg;
	uint32_t gusbcfg;
	uint32_t gintmsk;
	uint32_t grxfsiz;
	uint32_t gnptxfsiz;
	uint32_t gi2cctl;
	uint32_t glpmcfg;
	uint32_t gdfifocfg;
	/* Two names for the same IN endpoint TX FIFO size registers */
	union {
		uint32_t dptxfsiz[15];
		uint32_t dieptxf[15];
	};
	uint32_t dcfg;
	uint32_t dctl;
	uint32_t diepmsk;
	uint32_t doepmsk;
	uint32_t daintmsk;
	/* Per-endpoint control, transfer size and DMA address registers */
	uint32_t diepctl[16];
	uint32_t dieptsiz[16];
	uint32_t diepdma[16];
	uint32_t doepctl[16];
	uint32_t doeptsiz[16];
	uint32_t doepdma[16];
	uint32_t pcgcctl;
};
101
/* Driver private data per instance */
struct udc_dwc2_data {
	struct k_thread thread_data;
	/* Main events the driver thread waits for */
	struct k_event drv_evt;
	/* Transfer triggers (IN on bits 0-15, OUT on bits 16-31) */
	struct k_event xfer_new;
	/* Finished transactions (IN on bits 0-15, OUT on bits 16-31) */
	struct k_event xfer_finished;
	/* Register snapshot used across hibernation entry/exit */
	struct dwc2_reg_backup backup;
	/* Cached GHWCFG1 value (per-endpoint direction configuration) */
	uint32_t ghwcfg1;
	uint32_t txf_set;
	/* Core limits on transfer size and packet count per programmed xfer */
	uint32_t max_xfersize;
	uint32_t max_pktcnt;
	/* Length programmed for the current IN transfer, per endpoint index */
	uint32_t tx_len[16];
	/* DOEPTSIZn value programmed for the current OUT transfer */
	uint32_t rx_siz[16];
	uint16_t dfifodepth;
	uint16_t rxfifo_depth;
	uint16_t max_txfifo_depth[16];
	/* Last seen (micro-)frame number, used for ISO Even/Odd scheduling */
	uint16_t sof_num;
	/* Configuration flags */
	unsigned int dynfifosizing : 1;
	unsigned int bufferdma : 1;
	unsigned int syncrst : 1;
	/* Defect workarounds */
	unsigned int wa_essregrestored : 1;
	/* Runtime state flags */
	unsigned int hibernated : 1;
	unsigned int enumdone : 1;
	unsigned int enumspd : 2;
	enum dwc2_suspend_type suspend_type;
	/* Number of endpoints including control endpoint */
	uint8_t numdeveps;
	/* Number of IN endpoints including control endpoint */
	uint8_t ineps;
	/* Number of OUT endpoints including control endpoint */
	uint8_t outeps;
	/* Raw copy of the last received SETUP packet */
	uint8_t setup[8];
};
141
#if defined(CONFIG_PINCTRL)
#include <zephyr/drivers/pinctrl.h>

/* Apply the default pinctrl state for the controller, if one is configured.
 *
 * Returns 0 on success (or when no pinctrl configuration exists),
 * otherwise the negative error code from pinctrl_apply_state().
 */
static int dwc2_init_pinctrl(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	const struct pinctrl_dev_config *const pcfg = config->pcfg;
	int ret = 0;

	/* Pinctrl configuration is optional for this driver */
	if (pcfg == NULL) {
		LOG_INF("Skip pinctrl configuration");
		return 0;
	}

	ret = pinctrl_apply_state(pcfg, PINCTRL_STATE_DEFAULT);
	if (ret) {
		LOG_ERR("Failed to apply default pinctrl state (%d)", ret);
	}

	LOG_DBG("Apply pinctrl");

	return ret;
}
#else
/* Stub used when CONFIG_PINCTRL is disabled; always succeeds */
static int dwc2_init_pinctrl(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}
#endif
173
dwc2_get_base(const struct device * dev)174 static inline struct usb_dwc2_reg *dwc2_get_base(const struct device *dev)
175 {
176 const struct udc_dwc2_config *const config = dev->config;
177
178 return config->base;
179 }
180
/* Busy-wait (up to 100 ms) until the given bit becomes set in a register.
 *
 * Bails out early when the vendor quirk reports the PHY clock is gated,
 * because the awaited bit can only be set while the PHY is clocked.
 * Logs an error on timeout; the caller is not informed of failure.
 */
static void dwc2_wait_for_bit(const struct device *dev,
			      mem_addr_t addr, uint32_t bit)
{
	k_timepoint_t timeout = sys_timepoint_calc(K_MSEC(100));

	/* This could potentially be converted to use proper synchronization
	 * primitives instead of busy looping, but the number of interrupt bits
	 * this function can be waiting for is rather high.
	 *
	 * Busy looping is most likely fine unless profiling shows otherwise.
	 */
	while (!(sys_read32(addr) & bit)) {
		if (dwc2_quirk_is_phy_clk_off(dev)) {
			/* No point in waiting, because the bit can only be set
			 * when the PHY is actively clocked.
			 */
			return;
		}

		if (sys_timepoint_expired(timeout)) {
			LOG_ERR("Timeout waiting for bit 0x%08X at 0x%08X",
				bit, (uint32_t)addr);
			return;
		}
	}
}
207
dwc2_in_completer_mode(const struct device * dev)208 static inline bool dwc2_in_completer_mode(const struct device *dev)
209 {
210 struct udc_dwc2_data *const priv = udc_get_private(dev);
211
212 return !IS_ENABLED(CONFIG_UDC_DWC2_DMA) || !priv->bufferdma;
213 }
214
dwc2_in_buffer_dma_mode(const struct device * dev)215 static inline bool dwc2_in_buffer_dma_mode(const struct device *dev)
216 {
217 struct udc_dwc2_data *const priv = udc_get_private(dev);
218
219 return IS_ENABLED(CONFIG_UDC_DWC2_DMA) && priv->bufferdma;
220 }
221
222 /* Get DOEPCTLn or DIEPCTLn register address */
dwc2_get_dxepctl_reg(const struct device * dev,const uint8_t ep)223 static mem_addr_t dwc2_get_dxepctl_reg(const struct device *dev, const uint8_t ep)
224 {
225 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
226 uint8_t ep_idx = USB_EP_GET_IDX(ep);
227
228 if (USB_EP_DIR_IS_OUT(ep)) {
229 return (mem_addr_t)&base->out_ep[ep_idx].doepctl;
230 } else {
231 return (mem_addr_t)&base->in_ep[ep_idx].diepctl;
232 }
233 }
234
235 /* Get available FIFO space in bytes */
dwc2_ftx_avail(const struct device * dev,const uint32_t idx)236 static uint32_t dwc2_ftx_avail(const struct device *dev, const uint32_t idx)
237 {
238 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
239 mem_addr_t reg = (mem_addr_t)&base->in_ep[idx].dtxfsts;
240 uint32_t dtxfsts;
241
242 dtxfsts = sys_read32(reg);
243
244 return usb_dwc2_get_dtxfsts_ineptxfspcavail(dtxfsts) * 4;
245 }
246
dwc2_get_iept_pktctn(const struct device * dev,const uint32_t idx)247 static uint32_t dwc2_get_iept_pktctn(const struct device *dev, const uint32_t idx)
248 {
249 struct udc_dwc2_data *const priv = udc_get_private(dev);
250
251 if (idx == 0) {
252 return usb_dwc2_get_dieptsiz0_pktcnt(UINT32_MAX);
253 } else {
254 return priv->max_pktcnt;
255 }
256 }
257
dwc2_get_iept_xfersize(const struct device * dev,const uint32_t idx)258 static uint32_t dwc2_get_iept_xfersize(const struct device *dev, const uint32_t idx)
259 {
260 struct udc_dwc2_data *const priv = udc_get_private(dev);
261
262 if (idx == 0) {
263 return usb_dwc2_get_dieptsiz0_xfersize(UINT32_MAX);
264 } else {
265 return priv->max_xfersize;
266 }
267 }
268
dwc2_get_oept_pktctn(const struct device * dev,const uint32_t idx)269 static uint32_t dwc2_get_oept_pktctn(const struct device *dev, const uint32_t idx)
270 {
271 struct udc_dwc2_data *const priv = udc_get_private(dev);
272
273 if (idx == 0) {
274 return usb_dwc2_get_doeptsiz0_pktcnt(UINT32_MAX);
275 } else {
276 return priv->max_pktcnt;
277 }
278 }
279
dwc2_get_oept_xfersize(const struct device * dev,const uint32_t idx)280 static uint32_t dwc2_get_oept_xfersize(const struct device *dev, const uint32_t idx)
281 {
282 struct udc_dwc2_data *const priv = udc_get_private(dev);
283
284 if (idx == 0) {
285 return usb_dwc2_get_doeptsiz0_xfersize(UINT32_MAX);
286 } else {
287 return priv->max_xfersize;
288 }
289 }
290
/* Flush the shared RX FIFO and wait for the core to finish.
 *
 * NOTE(review): the completion poll has no timeout; if the core never
 * clears RXFFLSH this loops forever. The AHB is presumably idle when
 * this is called — confirm call sites before changing.
 */
static void dwc2_flush_rx_fifo(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;

	sys_write32(USB_DWC2_GRSTCTL_RXFFLSH, grstctl_reg);
	/* Bit self-clears when the flush is complete */
	while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_RXFFLSH) {
	}
}
300
/* Flush TX FIFO number fnum and wait for the core to finish.
 *
 * NOTE(review): as with the RX flush, the completion poll is unbounded.
 */
static void dwc2_flush_tx_fifo(const struct device *dev, const uint8_t fnum)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;
	uint32_t grstctl;

	/* Select the FIFO to flush and trigger the flush in one write */
	grstctl = usb_dwc2_set_grstctl_txfnum(fnum) | USB_DWC2_GRSTCTL_TXFFLSH;

	sys_write32(grstctl, grstctl_reg);
	/* Bit self-clears when the flush is complete */
	while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_TXFFLSH) {
	}
}
313
314 /* Return TX FIFOi depth in 32-bit words (i = f_idx + 1) */
dwc2_get_txfdep(const struct device * dev,const uint32_t f_idx)315 static uint32_t dwc2_get_txfdep(const struct device *dev, const uint32_t f_idx)
316 {
317 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
318 uint32_t dieptxf;
319
320 dieptxf = sys_read32((mem_addr_t)&base->dieptxf[f_idx]);
321
322 return usb_dwc2_get_dieptxf_inepntxfdep(dieptxf);
323 }
324
325 /* Return TX FIFOi address (i = f_idx + 1) */
dwc2_get_txfaddr(const struct device * dev,const uint32_t f_idx)326 static uint32_t dwc2_get_txfaddr(const struct device *dev, const uint32_t f_idx)
327 {
328 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
329 uint32_t dieptxf;
330
331 dieptxf = sys_read32((mem_addr_t)&base->dieptxf[f_idx]);
332
333 return usb_dwc2_get_dieptxf_inepntxfstaddr(dieptxf);
334 }
335
336 /* Set TX FIFOi address and depth (i = f_idx + 1) */
dwc2_set_txf(const struct device * dev,const uint32_t f_idx,const uint32_t dep,const uint32_t addr)337 static void dwc2_set_txf(const struct device *dev, const uint32_t f_idx,
338 const uint32_t dep, const uint32_t addr)
339 {
340 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
341 uint32_t dieptxf;
342
343 dieptxf = usb_dwc2_set_dieptxf_inepntxfdep(dep) |
344 usb_dwc2_set_dieptxf_inepntxfstaddr(addr);
345
346 sys_write32(dieptxf, (mem_addr_t)&base->dieptxf[f_idx]);
347 }
348
349 /* Enable/disable endpoint interrupt */
dwc2_set_epint(const struct device * dev,struct udc_ep_config * const cfg,const bool enabled)350 static void dwc2_set_epint(const struct device *dev,
351 struct udc_ep_config *const cfg, const bool enabled)
352 {
353 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
354 mem_addr_t reg = (mem_addr_t)&base->daintmsk;
355 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
356 uint32_t epmsk;
357
358 if (USB_EP_DIR_IS_IN(cfg->addr)) {
359 epmsk = USB_DWC2_DAINT_INEPINT(ep_idx);
360 } else {
361 epmsk = USB_DWC2_DAINT_OUTEPINT(ep_idx);
362 }
363
364 if (enabled) {
365 sys_set_bits(reg, epmsk);
366 } else {
367 sys_clear_bits(reg, epmsk);
368 }
369 }
370
dwc2_ep_is_periodic(struct udc_ep_config * const cfg)371 static bool dwc2_ep_is_periodic(struct udc_ep_config *const cfg)
372 {
373 switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) {
374 case USB_EP_TYPE_INTERRUPT:
375 __fallthrough;
376 case USB_EP_TYPE_ISO:
377 return true;
378 default:
379 return false;
380 }
381 }
382
dwc2_ep_is_iso(struct udc_ep_config * const cfg)383 static bool dwc2_ep_is_iso(struct udc_ep_config *const cfg)
384 {
385 return (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) == USB_EP_TYPE_ISO;
386 }
387
/* Allocate a control OUT buffer of at least `length` bytes, queue it on
 * endpoint 0 OUT and trigger the driver thread to arm the transfer.
 *
 * Returns 0 on success, -ENOMEM if no buffer could be allocated.
 */
static int dwc2_ctrl_feed_dout(const struct device *dev, const size_t length)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
	struct net_buf *buf;
	size_t alloc_len = length;

	if (dwc2_in_buffer_dma_mode(dev)) {
		/* Control OUT buffers must be multiple of bMaxPacketSize0 */
		alloc_len = ROUND_UP(length, 64);
	}

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, alloc_len);
	if (buf == NULL) {
		return -ENOMEM;
	}

	udc_buf_put(ep_cfg, buf);
	/* BIT(16) is the OUT endpoint 0 trigger (OUT uses bits 16-31) */
	k_event_post(&priv->xfer_new, BIT(16));
	k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));

	return 0;
}
411
dwc2_ensure_setup_ready(const struct device * dev)412 static void dwc2_ensure_setup_ready(const struct device *dev)
413 {
414 if (dwc2_in_completer_mode(dev)) {
415 /* In Completer mode EP0 can always receive SETUP data */
416 return;
417 }
418
419 if (!udc_buf_peek(dev, USB_CONTROL_EP_OUT)) {
420 dwc2_ctrl_feed_dout(dev, 8);
421 }
422 }
423
/* Validate that a buffer satisfies the Buffer DMA constraints:
 * 32-bit alignment, and no multi-packet transfer when Max Packet Size
 * is not a multiple of 4 (the core cannot pad in that case).
 */
static bool dwc2_dma_buffer_ok_to_use(const struct device *dev, void *buf,
				      uint32_t xfersize, uint16_t mps)
{
	ARG_UNUSED(dev);

	if (!IS_ALIGNED(buf, 4)) {
		LOG_ERR("Buffer not aligned");
		return false;
	}

	if (unlikely(mps % 4) && (xfersize > USB_MPS_EP_SIZE(mps))) {
		LOG_ERR("Padding not supported");
		return false;
	}

	return true;
}
442
/* Can be called from ISR context.
 *
 * Program an IN transfer for the endpoint and, in Completer mode, push
 * the data into the endpoint TX FIFO. In Buffer DMA mode the core fetches
 * the data itself from buf->data.
 *
 * Returns 0 on success, -EAGAIN when the TX FIFO cannot currently hold
 * the data, -ENOTSUP when the buffer violates DMA constraints, -ENOENT
 * when the endpoint is not active.
 */
static int dwc2_tx_fifo_write(const struct device *dev,
			      struct udc_ep_config *const cfg, struct net_buf *const buf)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);

	mem_addr_t dieptsiz_reg = (mem_addr_t)&base->in_ep[ep_idx].dieptsiz;
	/* TODO: use dwc2_get_dxepctl_reg() */
	mem_addr_t diepctl_reg = (mem_addr_t)&base->in_ep[ep_idx].diepctl;
	mem_addr_t diepint_reg = (mem_addr_t)&base->in_ep[ep_idx].diepint;

	uint32_t diepctl;
	uint32_t max_xfersize, max_pktcnt, pktcnt;
	const uint32_t addnl = USB_MPS_ADDITIONAL_TRANSACTIONS(cfg->mps);
	const size_t d = sizeof(uint32_t);
	unsigned int key;
	uint32_t len;
	const bool is_periodic = dwc2_ep_is_periodic(cfg);
	const bool is_iso = dwc2_ep_is_iso(cfg);

	if (is_iso) {
		/* Isochronous transfers can only be programmed one
		 * (micro-)frame at a time.
		 */
		len = MIN(buf->len, USB_MPS_TO_TPL(cfg->mps));
	} else {
		/* DMA automatically handles packet split. In completer mode,
		 * the value is sanitized below.
		 */
		len = buf->len;
	}

	if (dwc2_in_completer_mode(dev)) {
		uint32_t spcavail = dwc2_ftx_avail(dev, ep_idx);
		uint32_t spcperpkt = ROUND_UP(udc_mps_ep_size(cfg), 4);
		uint32_t max_pkts, max_transfer;

		/* Maximum number of packets that can fit in TxFIFO */
		max_pkts = spcavail / spcperpkt;

		/* We can transfer up to max_pkts MPS packets and a short one */
		max_transfer = (max_pkts * udc_mps_ep_size(cfg)) +
			       (spcavail % spcperpkt);

		/* If there is enough space for the transfer, there's no need
		 * to check any additional conditions. If the transfer is larger
		 * than TxFIFO then TxFIFO must be able to hold at least one
		 * packet (for periodic transfers at least the number of packets
		 * per microframe).
		 */
		if ((len > max_transfer) && ((1 + addnl) > max_pkts)) {
			LOG_ERR("ep 0x%02x FIFO space is too low, %u (%u)",
				cfg->addr, spcavail, len);
			return -EAGAIN;
		}

		len = MIN(len, max_transfer);
	}

	if (len != 0U) {
		max_pktcnt = dwc2_get_iept_pktctn(dev, ep_idx);
		max_xfersize = dwc2_get_iept_xfersize(dev, ep_idx);

		if (len > max_xfersize) {
			/*
			 * Avoid short packets if the transfer size cannot be
			 * handled in one set.
			 */
			len = ROUND_DOWN(max_xfersize, USB_MPS_TO_TPL(cfg->mps));
		}

		/*
		 * Determine the number of packets for the current transfer;
		 * if the pktcnt is too large, truncate the actual transfer length.
		 */
		pktcnt = DIV_ROUND_UP(len, udc_mps_ep_size(cfg));
		if (pktcnt > max_pktcnt) {
			pktcnt = ROUND_DOWN(max_pktcnt, (1 + addnl));
			len = pktcnt * udc_mps_ep_size(cfg);
		}
	} else {
		/* ZLP */
		pktcnt = 1U;
	}

	LOG_DBG("Prepare ep 0x%02x xfer len %u pktcnt %u addnl %u",
		cfg->addr, len, pktcnt, addnl);
	/* Remember how much was programmed; used when the xfer completes */
	priv->tx_len[ep_idx] = len;

	/* Lock and write to endpoint FIFO */
	key = irq_lock();

	/* Set number of packets and transfer size */
	sys_write32((is_periodic ? usb_dwc2_set_dieptsizn_mc(1 + addnl) : 0) |
		    usb_dwc2_set_dieptsizn_pktcnt(pktcnt) |
		    usb_dwc2_set_dieptsizn_xfersize(len), dieptsiz_reg);

	if (dwc2_in_buffer_dma_mode(dev)) {
		if (!dwc2_dma_buffer_ok_to_use(dev, buf->data, len, cfg->mps)) {
			/* Cannot continue unless buffer is bounced. Device will
			 * cease to function. Is fatal error appropriate here?
			 */
			irq_unlock(key);
			return -ENOTSUP;
		}

		sys_write32((uint32_t)buf->data,
			    (mem_addr_t)&base->in_ep[ep_idx].diepdma);

		/* Make the data visible to the DMA engine */
		sys_cache_data_flush_range(buf->data, len);
	}

	diepctl = sys_read32(diepctl_reg);
	if (!(diepctl & USB_DWC2_DEPCTL_USBACTEP)) {
		/* Do not attempt to write data on inactive endpoint, because
		 * no fifo is assigned to inactive endpoint and therefore it is
		 * possible that the write will corrupt other endpoint fifo.
		 */
		irq_unlock(key);
		return -ENOENT;
	}

	if (is_iso) {
		/* Queue transfer on next SOF. TODO: allow stack to explicitly
		 * specify on which (micro-)frame the data should be sent.
		 */
		if (priv->sof_num & 1) {
			diepctl |= USB_DWC2_DEPCTL_SETEVENFR;
		} else {
			diepctl |= USB_DWC2_DEPCTL_SETODDFR;
		}
	}

	/* Clear NAK and set endpoint enable */
	diepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK;
	sys_write32(diepctl, diepctl_reg);

	/* Clear IN Endpoint NAK Effective interrupt in case it was set */
	sys_write32(USB_DWC2_DIEPINT_INEPNAKEFF, diepint_reg);

	if (dwc2_in_completer_mode(dev)) {
		const uint8_t *src = buf->data;

		/* Push the payload to the FIFO one packet at a time; FIFO
		 * access is always full 32-bit words, assembled byte-wise to
		 * tolerate any src alignment and a non-multiple-of-4 tail.
		 */
		while (pktcnt > 0) {
			uint32_t pktlen = MIN(len, udc_mps_ep_size(cfg));

			for (uint32_t i = 0UL; i < pktlen; i += d) {
				uint32_t val = src[i];

				if (i + 1 < pktlen) {
					val |= ((uint32_t)src[i + 1UL]) << 8;
				}
				if (i + 2 < pktlen) {
					val |= ((uint32_t)src[i + 2UL]) << 16;
				}
				if (i + 3 < pktlen) {
					val |= ((uint32_t)src[i + 3UL]) << 24;
				}

				sys_write32(val, UDC_DWC2_EP_FIFO(base, ep_idx));
			}

			pktcnt--;
			src += pktlen;
			len -= pktlen;
		}
	}

	irq_unlock(key);

	return 0;
}
617
/* Read up to `size` bytes from the endpoint RX FIFO into buf.
 *
 * Copies at most the buffer tailroom; any remaining FIFO words beyond
 * what fits in buf (or all of them when buf is NULL) are read and
 * discarded so the FIFO stays consistent. Always returns 0.
 */
static inline int dwc2_read_fifo(const struct device *dev, const uint8_t ep,
				 struct net_buf *const buf, const size_t size)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	size_t len = buf ? MIN(size, net_buf_tailroom(buf)) : 0;
	const size_t d = sizeof(uint32_t);

	/* FIFO access is always in 32-bit words */

	for (uint32_t n = 0; n < (len / d); n++) {
		net_buf_add_le32(buf, sys_read32(UDC_DWC2_EP_FIFO(base, ep)));
	}

	if (len % d) {
		uint8_t r[4];

		/* Get the remaining */
		sys_put_le32(sys_read32(UDC_DWC2_EP_FIFO(base, ep)), r);
		for (uint32_t i = 0U; i < (len % d); i++) {
			net_buf_add_u8(buf, r[i]);
		}
	}

	if (unlikely(size > len)) {
		/* Drain words that did not fit into the buffer */
		for (uint32_t n = 0; n < DIV_ROUND_UP(size - len, d); n++) {
			(void)sys_read32(UDC_DWC2_EP_FIFO(base, ep));
		}
	}

	return 0;
}
649
/* Can be called from ISR and we call it only when there is a buffer in the queue.
 *
 * Program the OUT endpoint to receive into buf: compute transfer size and
 * packet count within the core limits, store the programmed DOEPTSIZ value
 * for completion handling, set the DMA address in Buffer DMA mode, then
 * clear NAK and enable the endpoint.
 */
static void dwc2_prep_rx(const struct device *dev, struct net_buf *buf,
			 struct udc_ep_config *const cfg)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t doeptsiz_reg = (mem_addr_t)&base->out_ep[ep_idx].doeptsiz;
	mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, ep_idx);
	uint32_t max_xfersize, max_pktcnt;
	const uint32_t addnl = USB_MPS_ADDITIONAL_TRANSACTIONS(cfg->mps);
	uint32_t pktcnt;
	uint32_t doeptsiz;
	uint32_t doepctl;
	uint32_t xfersize;

	max_xfersize = dwc2_get_oept_xfersize(dev, ep_idx);
	max_pktcnt = dwc2_get_oept_pktctn(dev, ep_idx);

	/* Clear NAK and set endpoint enable */
	doepctl = sys_read32(doepctl_reg);
	doepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK;

	if (dwc2_ep_is_iso(cfg)) {
		/* ISO is received one (micro-)frame at a time */
		xfersize = USB_MPS_TO_TPL(cfg->mps);
		pktcnt = 1 + addnl;

		if (xfersize > net_buf_tailroom(buf)) {
			LOG_ERR("ISO RX buffer too small");
			return;
		}

		/* Set the Even/Odd (micro-)frame appropriately */
		if (priv->sof_num & 1) {
			doepctl |= USB_DWC2_DEPCTL_SETEVENFR;
		} else {
			doepctl |= USB_DWC2_DEPCTL_SETODDFR;
		}
	} else {
		xfersize = net_buf_tailroom(buf);

		/* Do as many packets in a single transfer as possible */
		if (xfersize > max_xfersize) {
			xfersize = ROUND_DOWN(max_xfersize, USB_MPS_TO_TPL(cfg->mps));
		}

		pktcnt = DIV_ROUND_UP(xfersize, USB_MPS_EP_SIZE(cfg->mps));
	}

	if (pktcnt > max_pktcnt) {
		pktcnt = ROUND_DOWN(max_pktcnt, (1 + addnl));
		xfersize = pktcnt * udc_mps_ep_size(cfg);
	}

	doeptsiz = usb_dwc2_set_doeptsizn_pktcnt(pktcnt) |
		   usb_dwc2_set_doeptsizn_xfersize(xfersize);
	if (cfg->addr == USB_CONTROL_EP_OUT) {
		/* Allow up to 3 back-to-back SETUP packets on EP0 */
		doeptsiz |= (3 << USB_DWC2_DOEPTSIZ0_SUPCNT_POS);
	}

	/* Remember the programmed value; used when the xfer completes */
	priv->rx_siz[ep_idx] = doeptsiz;
	sys_write32(doeptsiz, doeptsiz_reg);

	if (dwc2_in_buffer_dma_mode(dev)) {
		void *data = net_buf_tail(buf);

		if (!dwc2_dma_buffer_ok_to_use(dev, data, xfersize, cfg->mps)) {
			/* Cannot continue unless buffer is bounced. Device will
			 * cease to function. Is fatal error appropriate here?
			 */
			return;
		}

		sys_write32((uint32_t)data,
			    (mem_addr_t)&base->out_ep[ep_idx].doepdma);

		/* Discard stale cache lines before DMA writes the buffer */
		sys_cache_data_invd_range(data, xfersize);
	}

	sys_write32(doepctl, doepctl_reg);

	LOG_INF("Prepare RX 0x%02x doeptsiz 0x%x", cfg->addr, doeptsiz);
}
733
/* Arm the hardware for the next queued transfer on the endpoint, if any.
 *
 * OUT endpoints are prepared to receive into the queued buffer. IN
 * endpoints get their data written/armed via dwc2_tx_fifo_write(); for
 * control IN in Buffer DMA mode a buffer for the next SETUP packet is
 * additionally fed. On TX arming failure the buffer is returned to the
 * stack with -ECONNREFUSED.
 */
static void dwc2_handle_xfer_next(const struct device *dev,
				  struct udc_ep_config *const cfg)
{
	struct net_buf *buf;

	buf = udc_buf_peek(dev, cfg->addr);
	if (buf == NULL) {
		/* Nothing queued for this endpoint */
		return;
	}

	if (USB_EP_DIR_IS_OUT(cfg->addr)) {
		dwc2_prep_rx(dev, buf, cfg);
	} else {
		int err = dwc2_tx_fifo_write(dev, cfg, buf);

		if (cfg->addr == USB_CONTROL_EP_IN) {
			/* Feed a buffer for the next setup packet after arming
			 * IN endpoint with the data. This is necessary both in
			 * IN Data Stage (Control Read Transfer) and IN Status
			 * Stage (Control Write Transfers and Control Transfers
			 * without Data Stage).
			 *
			 * The buffer must be fed here in Buffer DMA mode to
			 * allow receiving premature SETUP. This inevitably does
			 * automatically arm the buffer for OUT Status Stage.
			 *
			 * The buffer MUST NOT be fed here in Completer mode to
			 * avoid race condition where the next Control Write
			 * Transfer Data Stage is received into the buffer.
			 */
			if (dwc2_in_buffer_dma_mode(dev)) {
				dwc2_ctrl_feed_dout(dev, 8);
			}
		}

		if (err) {
			LOG_ERR("Failed to start write to TX FIFO, ep 0x%02x (err: %d)",
				cfg->addr, err);

			buf = udc_buf_get(dev, cfg->addr);
			if (udc_submit_ep_event(dev, buf, -ECONNREFUSED)) {
				LOG_ERR("Failed to submit endpoint event");
			}

			return;
		}
	}

	udc_ep_set_busy(dev, cfg->addr, true);
}
784
/* Process a received SETUP packet (driver thread context).
 *
 * Drops any stale endpoint 0 buffers and state, copies the stored SETUP
 * bytes into a fresh buffer, advances the control transfer state machine
 * and feeds/submits the next stage accordingly.
 */
static int dwc2_handle_evt_setup(const struct device *dev)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct net_buf *buf;
	int err;

	/* In Completer mode SETUP data is received without preparing endpoint 0
	 * transfer beforehand. In Buffer DMA the SETUP can be copied to any EP0
	 * OUT buffer. If there is any buffer queued, it is obsolete now.
	 */
	k_event_clear(&priv->xfer_finished, BIT(0) | BIT(16));

	buf = udc_buf_get_all(dev, USB_CONTROL_EP_OUT);
	if (buf) {
		net_buf_unref(buf);
	}

	buf = udc_buf_get_all(dev, USB_CONTROL_EP_IN);
	if (buf) {
		net_buf_unref(buf);
	}

	udc_ep_set_busy(dev, USB_CONTROL_EP_OUT, false);
	udc_ep_set_busy(dev, USB_CONTROL_EP_IN, false);

	/* Allocate buffer and copy received SETUP for processing */
	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, 8);
	if (buf == NULL) {
		LOG_ERR("No buffer available for control ep");
		return -ENODATA;
	}

	net_buf_add_mem(buf, priv->setup, sizeof(priv->setup));
	udc_ep_buf_set_setup(buf);
	LOG_HEXDUMP_DBG(buf->data, buf->len, "setup");

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	/* We always allocate and feed buffer large enough for a setup packet. */

	if (udc_ctrl_stage_is_data_out(dev)) {
		/* Allocate and feed buffer for data OUT stage */
		LOG_DBG("s:%p|feed for -out-", buf);

		err = dwc2_ctrl_feed_dout(dev, udc_data_stage_length(buf));
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}
	} else if (udc_ctrl_stage_is_data_in(dev)) {
		LOG_DBG("s:%p|feed for -in-status", buf);

		err = udc_ctrl_submit_s_in_status(dev);
	} else {
		LOG_DBG("s:%p|feed >setup", buf);

		err = udc_ctrl_submit_s_status(dev);
	}

	return err;
}
846
/* Handle completion of an OUT transaction (driver thread context).
 *
 * For control OUT the control transfer state machine is advanced and the
 * appropriate stage is submitted to the stack; for other endpoints the
 * buffer is simply handed to the stack.
 */
static inline int dwc2_handle_evt_dout(const struct device *dev,
				       struct udc_ep_config *const cfg)
{
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int err = 0;

	buf = udc_buf_get(dev, cfg->addr);
	if (buf == NULL) {
		LOG_ERR("No buffer queued for ep 0x%02x", cfg->addr);
		return -ENODATA;
	}

	udc_ep_set_busy(dev, cfg->addr, false);

	if (cfg->addr == USB_CONTROL_EP_OUT) {
		if (udc_ctrl_stage_is_status_out(dev)) {
			/* s-in-status finished */
			LOG_DBG("dout:%p| status, feed >s", buf);

			/* Status stage finished, notify upper layer */
			udc_ctrl_submit_status(dev, buf);

			/* Buffer DMA needs a buffer armed for the next SETUP */
			if (dwc2_in_buffer_dma_mode(dev)) {
				dwc2_ctrl_feed_dout(dev, 8);
			}
		} else {
			LOG_DBG("dout:%p| data, feed >s", buf);
		}

		/* Update to next stage of control transfer */
		udc_ctrl_update_stage(dev, buf);

		if (udc_ctrl_stage_is_status_in(dev)) {
			err = udc_ctrl_submit_s_out_status(dev, buf);
		}

		if (data->stage == CTRL_PIPE_STAGE_ERROR) {
			/* Allow receiving next SETUP. USB stack won't queue any
			 * buffer because it has no clue about this transfer.
			 */
			dwc2_ensure_setup_ready(dev);
		}
	} else {
		err = udc_submit_ep_event(dev, buf, 0);
	}

	return err;
}
896
/* Handle completion of an IN transaction (driver thread context).
 *
 * Continues a partially-sent transfer (or a pending ZLP) by re-arming the
 * TX FIFO; otherwise completes the buffer. For control IN the control
 * transfer state machine is advanced and the OUT status stage is fed in
 * Completer mode.
 */
static int dwc2_handle_evt_din(const struct device *dev,
			       struct udc_ep_config *const cfg)
{
	struct net_buf *buf;

	buf = udc_buf_peek(dev, cfg->addr);
	if (buf == NULL) {
		LOG_ERR("No buffer for ep 0x%02x", cfg->addr);
		udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
		return -ENOBUFS;
	}

	if (buf->len) {
		/* Looks like we failed to continue in ISR, retry */
		return dwc2_tx_fifo_write(dev, cfg, buf);
	}

	if (cfg->addr == USB_CONTROL_EP_IN && udc_ep_buf_has_zlp(buf)) {
		/* Terminate the data stage with a zero-length packet */
		udc_ep_buf_clear_zlp(buf);
		return dwc2_tx_fifo_write(dev, cfg, buf);
	}

	buf = udc_buf_get(dev, cfg->addr);
	udc_ep_set_busy(dev, cfg->addr, false);

	if (cfg->addr == USB_CONTROL_EP_IN) {
		if (udc_ctrl_stage_is_status_in(dev) ||
		    udc_ctrl_stage_is_no_data(dev)) {
			/* Status stage finished, notify upper layer */
			udc_ctrl_submit_status(dev, buf);
		}

		/* Update to next stage of control transfer */
		udc_ctrl_update_stage(dev, buf);

		if (udc_ctrl_stage_is_status_out(dev)) {
			if (dwc2_in_completer_mode(dev)) {
				/* Allow OUT status stage */
				dwc2_ctrl_feed_dout(dev, 8);
			}

			/* IN transfer finished, release buffer. */
			net_buf_unref(buf);
		}

		return 0;
	}

	return udc_submit_ep_event(dev, buf, 0);
}
947
/* Snapshot all core registers that must survive Partial Power Down or
 * Hibernation into priv->backup.
 *
 * For endpoint control registers the current data PID is additionally
 * encoded as a SETD0PID/SETD1PID request so that restoring the value
 * re-establishes the same PID.
 */
static void dwc2_backup_registers(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct dwc2_reg_backup *backup = &priv->backup;

	backup->gotgctl = sys_read32((mem_addr_t)&base->gotgctl);
	backup->gahbcfg = sys_read32((mem_addr_t)&base->gahbcfg);
	backup->gusbcfg = sys_read32((mem_addr_t)&base->gusbcfg);
	backup->gintmsk = sys_read32((mem_addr_t)&base->gintmsk);
	backup->grxfsiz = sys_read32((mem_addr_t)&base->grxfsiz);
	backup->gnptxfsiz = sys_read32((mem_addr_t)&base->gnptxfsiz);
	backup->gi2cctl = sys_read32((mem_addr_t)&base->gi2cctl);
	backup->glpmcfg = sys_read32((mem_addr_t)&base->glpmcfg);
	backup->gdfifocfg = sys_read32((mem_addr_t)&base->gdfifocfg);

	/* TX FIFO sizes for IN endpoints 1..ineps-1 (FIFO0 is in gnptxfsiz) */
	for (uint8_t i = 1U; i < priv->ineps; i++) {
		backup->dieptxf[i - 1] = sys_read32((mem_addr_t)&base->dieptxf[i - 1]);
	}

	backup->dcfg = sys_read32((mem_addr_t)&base->dcfg);
	backup->dctl = sys_read32((mem_addr_t)&base->dctl);
	backup->diepmsk = sys_read32((mem_addr_t)&base->diepmsk);
	backup->doepmsk = sys_read32((mem_addr_t)&base->doepmsk);
	backup->daintmsk = sys_read32((mem_addr_t)&base->daintmsk);

	for (uint8_t i = 0U; i < 16; i++) {
		/* Only back up directions the endpoint is configured for */
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_IN || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			backup->diepctl[i] = sys_read32((mem_addr_t)&base->in_ep[i].diepctl);
			/* Encode current DPID as a set-PID request for restore */
			if (backup->diepctl[i] & USB_DWC2_DEPCTL_DPID) {
				backup->diepctl[i] |= USB_DWC2_DEPCTL_SETD1PID;
			} else {
				backup->diepctl[i] |= USB_DWC2_DEPCTL_SETD0PID;
			}
			backup->dieptsiz[i] = sys_read32((mem_addr_t)&base->in_ep[i].dieptsiz);
			backup->diepdma[i] = sys_read32((mem_addr_t)&base->in_ep[i].diepdma);
		}

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			backup->doepctl[i] = sys_read32((mem_addr_t)&base->out_ep[i].doepctl);
			/* Encode current DPID as a set-PID request for restore */
			if (backup->doepctl[i] & USB_DWC2_DEPCTL_DPID) {
				backup->doepctl[i] |= USB_DWC2_DEPCTL_SETD1PID;
			} else {
				backup->doepctl[i] |= USB_DWC2_DEPCTL_SETD0PID;
			}
			backup->doeptsiz[i] = sys_read32((mem_addr_t)&base->out_ep[i].doeptsiz);
			backup->doepdma[i] = sys_read32((mem_addr_t)&base->out_ep[i].doepdma);
		}
	}

	backup->pcgcctl = sys_read32((mem_addr_t)&base->pcgcctl);
}
1003
/* Restore the minimal set of core registers required to exit hibernation.
 *
 * Implements the "restore essential registers" step of the DWC2 hibernation
 * exit flow using the state captured earlier by dwc2_backup_registers().
 *
 * @param dev       Controller device
 * @param rwup      true when exiting due to Remote Wakeup signalling
 * @param bus_reset true when exiting due to a USB bus reset
 */
static void dwc2_restore_essential_registers(const struct device *dev,
					     bool rwup, bool bus_reset)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct dwc2_reg_backup *backup = &priv->backup;
	uint32_t pcgcctl = backup->pcgcctl & USB_DWC2_PCGCCTL_RESTOREVALUE_MASK;

	if (usb_dwc2_get_pcgcctl_p2hd_dev_enum_spd(pcgcctl) ==
	    USB_DWC2_PCGCCTL_P2HD_DEV_ENUM_SPD_HS) {
		/* NOTE(review): undocumented PCGCCTL bit set for High-Speed
		 * enumeration — confirm its meaning against the DWC2 databook.
		 */
		pcgcctl |= BIT(17);
	}

	sys_write32(backup->glpmcfg, (mem_addr_t)&base->glpmcfg);
	sys_write32(backup->gi2cctl, (mem_addr_t)&base->gi2cctl);
	sys_write32(pcgcctl, (mem_addr_t)&base->pcgcctl);

	/* Re-enable the global interrupt while restoring AHB configuration */
	sys_write32(backup->gahbcfg | USB_DWC2_GAHBCFG_GLBINTRMASK,
		    (mem_addr_t)&base->gahbcfg);

	/* Clear all pending interrupts; unmask only Restore Done */
	sys_write32(0xFFFFFFFFUL, (mem_addr_t)&base->gintsts);
	sys_write32(USB_DWC2_GINTSTS_RSTRDONEINT, (mem_addr_t)&base->gintmsk);

	sys_write32(backup->gusbcfg, (mem_addr_t)&base->gusbcfg);
	sys_write32(backup->dcfg, (mem_addr_t)&base->dcfg);

	if (bus_reset) {
		/* NOTE(review): DCFG was already written just above; this
		 * repeated write looks redundant — verify against the
		 * hibernation exit sequence in the programming guide before
		 * removing it.
		 */
		sys_write32(backup->dcfg, (mem_addr_t)&base->dcfg);
	}

	if (!rwup) {
		pcgcctl |= USB_DWC2_PCGCCTL_RESTOREMODE | USB_DWC2_PCGCCTL_RSTPDWNMODULE;
	}
	sys_write32(pcgcctl, (mem_addr_t)&base->pcgcctl);
	k_busy_wait(1);

	/* Tell the core the essential register restore is complete */
	pcgcctl |= USB_DWC2_PCGCCTL_ESSREGRESTORED;
	sys_write32(pcgcctl, (mem_addr_t)&base->pcgcctl);

	/* Note: in Remote Wakeup case 15 ms max signaling time starts now */

	/* Wait for Restore Done Interrupt */
	dwc2_wait_for_bit(dev, (mem_addr_t)&base->gintsts, USB_DWC2_GINTSTS_RSTRDONEINT);

	if (priv->wa_essregrestored) {
		/* Workaround for cores older than rev 5.00a (see init):
		 * EssRegRestored must be cleared manually by software.
		 */
		pcgcctl &= ~USB_DWC2_PCGCCTL_ESSREGRESTORED;
		sys_write32(pcgcctl, (mem_addr_t)&base->pcgcctl);
		k_busy_wait(1);
	}

	if (!bus_reset) {
		sys_write32(0xFFFFFFFFUL, (mem_addr_t)&base->gintsts);
	}
}
1059
/* Restore the remaining device-mode registers after the essential restore
 * step has completed.
 *
 * @param dev  Controller device
 * @param rwup true when exiting hibernation due to Remote Wakeup; DCTL is
 *             skipped in that case because remote wakeup signalling is
 *             programmed separately in dwc2_exit_hibernation().
 */
static void dwc2_restore_device_registers(const struct device *dev, bool rwup)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct dwc2_reg_backup *backup = &priv->backup;

	/* Global core registers */
	sys_write32(backup->gotgctl, (mem_addr_t)&base->gotgctl);
	sys_write32(backup->gahbcfg, (mem_addr_t)&base->gahbcfg);
	sys_write32(backup->gusbcfg, (mem_addr_t)&base->gusbcfg);
	sys_write32(backup->gintmsk, (mem_addr_t)&base->gintmsk);
	sys_write32(backup->grxfsiz, (mem_addr_t)&base->grxfsiz);
	sys_write32(backup->gnptxfsiz, (mem_addr_t)&base->gnptxfsiz);
	sys_write32(backup->gdfifocfg, (mem_addr_t)&base->gdfifocfg);

	/* Dedicated IN endpoint TxFIFO size registers (DIEPTXF1..n) */
	for (uint8_t i = 1U; i < priv->ineps; i++) {
		sys_write32(backup->dieptxf[i - 1], (mem_addr_t)&base->dieptxf[i - 1]);
	}

	if (!rwup) {
		sys_write32(backup->dctl, (mem_addr_t)&base->dctl);
	}

	/* Device interrupt masks */
	sys_write32(backup->diepmsk, (mem_addr_t)&base->diepmsk);
	sys_write32(backup->doepmsk, (mem_addr_t)&base->doepmsk);
	sys_write32(backup->daintmsk, (mem_addr_t)&base->daintmsk);

	/* Per-endpoint registers, only for directions the hardware implements
	 * (GHWCFG1 encodes each endpoint as IN, OUT or bidirectional)
	 */
	for (uint8_t i = 0U; i < 16; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_IN || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			sys_write32(backup->dieptsiz[i], (mem_addr_t)&base->in_ep[i].dieptsiz);
			sys_write32(backup->diepdma[i], (mem_addr_t)&base->in_ep[i].diepdma);
			sys_write32(backup->diepctl[i], (mem_addr_t)&base->in_ep[i].diepctl);
		}

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			sys_write32(backup->doeptsiz[i], (mem_addr_t)&base->out_ep[i].doeptsiz);
			sys_write32(backup->doepdma[i], (mem_addr_t)&base->out_ep[i].doepdma);
			sys_write32(backup->doepctl[i], (mem_addr_t)&base->out_ep[i].doepctl);
		}
	}
}
1103
/* Put the DWC2 core into hibernation (controller powered down, PMU keeps
 * the wakeup logic alive).
 *
 * Backs up all restorable registers, stops the PHY clock, arms the PMU
 * wakeup interrupt sources and finally removes power from the controller.
 * Each step is separated by a short busy wait as required by the power-down
 * sequence.
 */
static void dwc2_enter_hibernation(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gpwrdn_reg = (mem_addr_t)&base->gpwrdn;
	mem_addr_t pcgcctl_reg = (mem_addr_t)&base->pcgcctl;

	/* Snapshot registers so dwc2_exit_hibernation() can restore them */
	dwc2_backup_registers(dev);

	/* This code currently only supports UTMI+. UTMI+ runs at either 30 or
	 * 60 MHz and therefore 1 us busy waits have sufficiently large margin.
	 */

	/* Enable PMU Logic */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PMUACTV);
	k_busy_wait(1);

	/* Stop PHY clock */
	sys_set_bits(pcgcctl_reg, USB_DWC2_PCGCCTL_STOPPCLK);
	k_busy_wait(1);

	/* Enable PMU interrupt */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PMUINTSEL);
	k_busy_wait(1);

	/* Unmask PMU interrupt bits (wakeup sources: line state change,
	 * reset detected, disconnect detected, status change)
	 */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_LINESTAGECHANGEMSK |
				 USB_DWC2_GPWRDN_RESETDETMSK |
				 USB_DWC2_GPWRDN_DISCONNECTDETECTMSK |
				 USB_DWC2_GPWRDN_STSCHNGINTMSK);
	k_busy_wait(1);

	/* Enable power clamps */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNCLMP);
	k_busy_wait(1);

	/* Switch off power to the controller */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNSWTCH);

	/* Give vendor code a chance to finish platform-specific power down */
	(void)dwc2_quirk_post_hibernation_entry(dev);

	/* Mark that the core is hibernated */
	priv->hibernated = 1;
	LOG_DBG("Hibernated");
}
1150
/* Bring the DWC2 core out of hibernation and restore its state.
 *
 * Reverses dwc2_enter_hibernation(): powers the controller back up,
 * sequences it out of reset and power clamping, restores the essential
 * registers, then either signals Power-On Programming Done (bus reset /
 * host resume case) or drives Remote Wakeup signalling.
 *
 * @param dev       Controller device
 * @param rwup      true when the exit is caused by Remote Wakeup
 * @param bus_reset true when the exit is caused by a USB bus reset
 */
static void dwc2_exit_hibernation(const struct device *dev,
				  bool rwup, bool bus_reset)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gpwrdn_reg = (mem_addr_t)&base->gpwrdn;
	mem_addr_t pcgcctl_reg = (mem_addr_t)&base->pcgcctl;

	/* Give vendor code a chance to do platform-specific power up first */
	(void)dwc2_quirk_pre_hibernation_exit(dev);

	/* Switch on power to the controller */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNSWTCH);
	k_busy_wait(1);

	/* Reset the controller */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNRST_N);
	k_busy_wait(1);

	/* Enable restore from PMU */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_RESTORE);
	k_busy_wait(1);

	/* Disable power clamps */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNCLMP);

	if (rwup) {
		/* Cores with synchronous reset (GHWCFG3 RSTTYPE, see init)
		 * need only a short delay here; others need 50 us.
		 */
		if (priv->syncrst) {
			k_busy_wait(1);
		} else {
			k_busy_wait(50);
		}
	}

	/* Remove reset to the controller */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNRST_N);
	k_busy_wait(1);

	/* Disable PMU interrupt */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PMUINTSEL);

	dwc2_restore_essential_registers(dev, rwup, bus_reset);

	/* Disable restore from PMU */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_RESTORE);
	k_busy_wait(1);

	if (!rwup) {
		/* Clear reset to power down module */
		sys_clear_bits(pcgcctl_reg, USB_DWC2_PCGCCTL_RSTPDWNMODULE);
	}

	/* Restore GUSBCFG, DCFG and DCTL */
	sys_write32(priv->backup.gusbcfg, (mem_addr_t)&base->gusbcfg);
	sys_write32(priv->backup.dcfg, (mem_addr_t)&base->dcfg);
	sys_write32(priv->backup.dctl, (mem_addr_t)&base->dctl);

	/* Disable PMU */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PMUACTV);
	if (!rwup) {
		/* Signal that power-on programming is finished */
		k_busy_wait(5);
		sys_set_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_PWRONPRGDONE);
	} else {
		/* Start Remote Wakeup signalling on the bus */
		k_busy_wait(1);
		sys_write32(USB_DWC2_DCTL_RMTWKUPSIG | priv->backup.dctl,
			    (mem_addr_t)&base->dctl);
	}

	k_msleep(1);
	sys_write32(0xFFFFFFFFUL, (mem_addr_t)&base->gintsts);
}
1222
cancel_hibernation_request(struct udc_dwc2_data * const priv)1223 static void cancel_hibernation_request(struct udc_dwc2_data *const priv)
1224 {
1225 k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_ENTER_HIBERNATION));
1226 }
1227
request_hibernation(struct udc_dwc2_data * const priv)1228 static void request_hibernation(struct udc_dwc2_data *const priv)
1229 {
1230 if (priv->suspend_type == DWC2_SUSPEND_HIBERNATION) {
1231 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_ENTER_HIBERNATION));
1232 }
1233 }
1234
dwc2_unset_unused_fifo(const struct device * dev)1235 static void dwc2_unset_unused_fifo(const struct device *dev)
1236 {
1237 struct udc_dwc2_data *const priv = udc_get_private(dev);
1238 struct udc_ep_config *tmp;
1239
1240 for (uint8_t i = priv->ineps - 1U; i > 0; i--) {
1241 tmp = udc_get_ep_cfg(dev, i | USB_EP_DIR_IN);
1242
1243 if (tmp->stat.enabled && (priv->txf_set & BIT(i))) {
1244 return;
1245 }
1246
1247 if (!tmp->stat.enabled && (priv->txf_set & BIT(i))) {
1248 priv->txf_set &= ~BIT(i);
1249 }
1250 }
1251 }
1252
/*
 * In dedicated FIFO mode there are ineps - 1 dedicated TxFIFO size registers,
 * e.g. DIEPTXF1, DIEPTXF2, ... DIEPTXF4. When dynfifosizing is enabled,
 * the size register is mutable. The offset of the DIEPTXF1 register is 0.
 */
/* Assign (and, with dynamic FIFO sizing, allocate) a dedicated TxFIFO to an
 * IN endpoint and program the FIFO number into the endpoint control value.
 *
 * @param dev     Controller device
 * @param cfg     IN endpoint configuration
 * @param diepctl In/out: DIEPCTL value updated with the TxFIFO number
 *
 * @return 0 on success, -EIO when FIFOs above this endpoint are still
 *         allocated (allocation can only grow upwards), -ENOMEM when the
 *         requested FIFO does not fit into hardware limits or the SPRAM.
 */
static int dwc2_set_dedicated_fifo(const struct device *dev,
				   struct udc_ep_config *const cfg,
				   uint32_t *const diepctl)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	const uint32_t addnl = USB_MPS_ADDITIONAL_TRANSACTIONS(cfg->mps);
	uint32_t reqdep;
	uint32_t txfaddr;
	uint32_t txfdep;
	uint32_t tmp;

	/* Keep everything but FIFO number */
	tmp = *diepctl & ~USB_DWC2_DEPCTL_TXFNUM_MASK;

	/* Required depth in 32-bit words, scaled by per-microframe
	 * transaction count for high-bandwidth endpoints
	 */
	reqdep = DIV_ROUND_UP(udc_mps_ep_size(cfg), 4U);
	if (dwc2_in_buffer_dma_mode(dev)) {
		/* In DMA mode, TxFIFO capable of holding 2 packets is enough */
		reqdep *= MIN(2, (1 + addnl));
	} else {
		reqdep *= (1 + addnl);
	}

	if (priv->dynfifosizing) {
		/* Try to reclaim FIFOs of disabled endpoints above this one */
		if (priv->txf_set & ~BIT_MASK(ep_idx)) {
			dwc2_unset_unused_fifo(dev);
		}

		if (priv->txf_set & ~BIT_MASK(ep_idx)) {
			LOG_WRN("Some of the FIFOs higher than %u are set, %lx",
				ep_idx, priv->txf_set & ~BIT_MASK(ep_idx));
			return -EIO;
		}

		if ((ep_idx - 1) != 0U) {
			/* Place right after the previous endpoint's FIFO */
			txfaddr = dwc2_get_txfdep(dev, ep_idx - 2) +
				  dwc2_get_txfaddr(dev, ep_idx - 2);
		} else {
			/* First dedicated FIFO starts after RxFIFO and FIFO0 */
			txfaddr = priv->rxfifo_depth +
				MIN(UDC_DWC2_FIFO0_DEPTH, priv->max_txfifo_depth[0]);
		}

		/* Make sure to not set TxFIFO greater than hardware allows */
		txfdep = reqdep;
		if (txfdep > priv->max_txfifo_depth[ep_idx]) {
			return -ENOMEM;
		}

		/* Do not allocate TxFIFO outside the SPRAM */
		if (txfaddr + txfdep > priv->dfifodepth) {
			return -ENOMEM;
		}

		/* Set FIFO depth (32-bit words) and address */
		dwc2_set_txf(dev, ep_idx - 1, txfdep, txfaddr);
	} else {
		/* Static sizing: reuse the FIFO layout the hardware has */
		txfdep = dwc2_get_txfdep(dev, ep_idx - 1);
		txfaddr = dwc2_get_txfaddr(dev, ep_idx - 1);

		if (reqdep > txfdep) {
			return -ENOMEM;
		}

		LOG_DBG("Reuse FIFO%u addr 0x%08x depth %u", ep_idx, txfaddr, txfdep);
	}

	/* Assign FIFO to the IN endpoint */
	*diepctl = tmp | usb_dwc2_set_depctl_txfnum(ep_idx);
	priv->txf_set |= BIT(ep_idx);
	dwc2_flush_tx_fifo(dev, ep_idx);

	LOG_INF("Set FIFO%u (ep 0x%02x) addr 0x%04x depth %u size %u",
		ep_idx, cfg->addr, txfaddr, txfdep, dwc2_ftx_avail(dev, ep_idx));

	return 0;
}
1334
dwc2_ep_control_enable(const struct device * dev,struct udc_ep_config * const cfg)1335 static int dwc2_ep_control_enable(const struct device *dev,
1336 struct udc_ep_config *const cfg)
1337 {
1338 mem_addr_t dxepctl0_reg;
1339 uint32_t dxepctl0;
1340
1341 dxepctl0_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
1342 dxepctl0 = sys_read32(dxepctl0_reg);
1343
1344 dxepctl0 &= ~USB_DWC2_DEPCTL0_MPS_MASK;
1345 switch (cfg->mps) {
1346 case 8:
1347 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_8 << USB_DWC2_DEPCTL_MPS_POS;
1348 break;
1349 case 16:
1350 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_16 << USB_DWC2_DEPCTL_MPS_POS;
1351 break;
1352 case 32:
1353 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_32 << USB_DWC2_DEPCTL_MPS_POS;
1354 break;
1355 case 64:
1356 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_64 << USB_DWC2_DEPCTL_MPS_POS;
1357 break;
1358 default:
1359 return -EINVAL;
1360 }
1361
1362 dxepctl0 |= USB_DWC2_DEPCTL_USBACTEP;
1363
1364 if (cfg->addr == USB_CONTROL_EP_OUT) {
1365 dwc2_flush_rx_fifo(dev);
1366 } else {
1367 dwc2_flush_tx_fifo(dev, 0);
1368 }
1369
1370 sys_write32(dxepctl0, dxepctl0_reg);
1371 dwc2_set_epint(dev, cfg, true);
1372
1373 return 0;
1374 }
1375
/* Activate (enable) an endpoint so the host can access it.
 *
 * For endpoint 0 this delegates to dwc2_ep_control_enable(). For other
 * endpoints it programs the maximum packet size, the endpoint type and the
 * initial data PID, assigns a dedicated TxFIFO for IN endpoints, marks the
 * endpoint active and enables its interrupts.
 *
 * @return 0 on success, -EINVAL on invalid type or missing IN endpoint
 *         resources, or an error from dwc2_set_dedicated_fifo()
 */
static int udc_dwc2_ep_activate(const struct device *dev,
				struct udc_ep_config *const cfg)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t dxepctl_reg;
	uint32_t dxepctl;

	LOG_DBG("Enable ep 0x%02x", cfg->addr);

	if (ep_idx == 0U) {
		return dwc2_ep_control_enable(dev, cfg);
	}

	if (USB_EP_DIR_IS_OUT(cfg->addr)) {
		/* TODO: use dwc2_get_dxepctl_reg() */
		dxepctl_reg = (mem_addr_t)&base->out_ep[ep_idx].doepctl;
	} else {
		/* Hardware implements only ineps IN endpoints (0..ineps-1) */
		if (priv->ineps > 0U && ep_idx > (priv->ineps - 1U)) {
			LOG_ERR("No resources available for ep 0x%02x", cfg->addr);
			return -EINVAL;
		}

		dxepctl_reg = (mem_addr_t)&base->in_ep[ep_idx].diepctl;
	}

	dxepctl = sys_read32(dxepctl_reg);
	/* Set max packet size */
	dxepctl &= ~USB_DWC2_DEPCTL_MPS_MASK;
	dxepctl |= usb_dwc2_set_depctl_mps(udc_mps_ep_size(cfg));

	/* Set endpoint type */
	dxepctl &= ~USB_DWC2_DEPCTL_EPTYPE_MASK;

	switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) {
	case USB_EP_TYPE_BULK:
		dxepctl |= USB_DWC2_DEPCTL_EPTYPE_BULK <<
			   USB_DWC2_DEPCTL_EPTYPE_POS;
		/* Start the data toggle at DATA0 */
		dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
		break;
	case USB_EP_TYPE_INTERRUPT:
		dxepctl |= USB_DWC2_DEPCTL_EPTYPE_INTERRUPT <<
			   USB_DWC2_DEPCTL_EPTYPE_POS;
		dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
		break;
	case USB_EP_TYPE_ISO:
		/* Isochronous endpoints do not use the data toggle */
		dxepctl |= USB_DWC2_DEPCTL_EPTYPE_ISO <<
			   USB_DWC2_DEPCTL_EPTYPE_POS;
		break;
	default:
		return -EINVAL;
	}

	if (USB_EP_DIR_IS_IN(cfg->addr) && udc_mps_ep_size(cfg) != 0U) {
		int ret = dwc2_set_dedicated_fifo(dev, cfg, &dxepctl);

		if (ret) {
			return ret;
		}
	}

	dxepctl |= USB_DWC2_DEPCTL_USBACTEP;

	/* Enable endpoint interrupts */
	dwc2_set_epint(dev, cfg, true);
	sys_write32(dxepctl, dxepctl_reg);

	for (uint8_t i = 1U; i < priv->ineps; i++) {
		LOG_DBG("DIEPTXF%u %08x DIEPCTL%u %08x",
			i, sys_read32((mem_addr_t)&base->dieptxf[i - 1U]), i, dxepctl);
	}

	return 0;
}
1451
/* Detach the dedicated TxFIFO from an IN endpoint.
 *
 * Clears the TxFIFO number in the endpoint control value and, when dynamic
 * FIFO sizing is in use, zeroes the FIFO size register. If FIFOs of higher
 * endpoints are still allocated, the size register and the assignment bit
 * are left untouched because the FIFO space cannot be reclaimed yet.
 */
static int dwc2_unset_dedicated_fifo(const struct device *dev,
				     struct udc_ep_config *const cfg,
				     uint32_t *const diepctl)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	const uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	const unsigned long higher_txf = priv->txf_set & ~BIT_MASK(ep_idx);

	/* Remove the FIFO number from the endpoint control value */
	*diepctl &= ~USB_DWC2_DEPCTL_TXFNUM_MASK;

	if (priv->dynfifosizing) {
		if (higher_txf) {
			LOG_WRN("Some of the FIFOs higher than %u are set, %lx",
				ep_idx, higher_txf);
			return 0;
		}

		dwc2_set_txf(dev, ep_idx - 1, 0, 0);
	}

	priv->txf_set &= ~BIT(ep_idx);

	return 0;
}
1476
1477 /* Disabled IN endpoint means that device will send NAK (isochronous: ZLP) after
1478 * receiving IN token from host even if there is packet available in TxFIFO.
1479 * Disabled OUT endpoint means that device will NAK (isochronous: discard data)
1480 * incoming OUT data (or HS PING) even if there is space available in RxFIFO.
1481 *
1482 * Set stall parameter to true if caller wants to send STALL instead of NAK.
1483 */
/* Force an endpoint into a non-responsive (NAK or STALL) state.
 *
 * See the comment above this function for the exact bus-level semantics.
 *
 * @param dev   Controller device
 * @param cfg   Endpoint configuration
 * @param stall true to respond with STALL instead of NAK
 */
static void udc_dwc2_ep_disable(const struct device *dev,
				struct udc_ep_config *const cfg, bool stall)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t dxepctl_reg;
	uint32_t dxepctl;
	const bool is_iso = dwc2_ep_is_iso(cfg);

	dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
	dxepctl = sys_read32(dxepctl_reg);

	if (!is_iso && (dxepctl & USB_DWC2_DEPCTL_NAKSTS)) {
		/* Endpoint already sends forced NAKs. STALL if necessary. */
		if (stall) {
			dxepctl |= USB_DWC2_DEPCTL_STALL;
			sys_write32(dxepctl, dxepctl_reg);
		}

		return;
	}

	/* FIXME: This function needs to be changed to not synchronously wait
	 * for the events to happen because the actions here are racing against
	 * the USB host packets. It is possible that the IN token or OUT DATA
	 * gets sent shortly before this function disables the endpoint. If this
	 * happens, the XferCompl would be set and driver will incorrectly think
	 * that either:
	 * * never queued transfer finished, OR
	 * * transfer queued in Incomplete ISO IN handler finished (before it
	 *   really does and then it'll "double"-finish when it actually
	 *   finishes)
	 *
	 * For the time being XferCompl is cleared as a workaround.
	 */

	if (USB_EP_DIR_IS_OUT(cfg->addr)) {
		mem_addr_t dctl_reg, gintsts_reg, doepint_reg;
		uint32_t dctl;

		dctl_reg = (mem_addr_t)&base->dctl;
		gintsts_reg = (mem_addr_t)&base->gintsts;
		doepint_reg = (mem_addr_t)&base->out_ep[ep_idx].doepint;

		dctl = sys_read32(dctl_reg);

		/* All OUT endpoints share a single Global OUT NAK state */
		if (sys_read32(gintsts_reg) & USB_DWC2_GINTSTS_GOUTNAKEFF) {
			LOG_ERR("GOUTNAKEFF already active");
		} else {
			dctl |= USB_DWC2_DCTL_SGOUTNAK;
			sys_write32(dctl, dctl_reg);
			dctl &= ~USB_DWC2_DCTL_SGOUTNAK;
		}

		/* Wait until Global OUT NAK takes effect on the bus */
		dwc2_wait_for_bit(dev, gintsts_reg, USB_DWC2_GINTSTS_GOUTNAKEFF);

		/* The application cannot disable control OUT endpoint 0. */
		if (ep_idx != 0) {
			dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPDIS;
		}

		if (stall) {
			/* For OUT endpoints STALL is set instead of SNAK */
			dxepctl |= USB_DWC2_DEPCTL_STALL;
		} else {
			dxepctl |= USB_DWC2_DEPCTL_SNAK;
		}
		sys_write32(dxepctl, dxepctl_reg);

		if (ep_idx != 0) {
			dwc2_wait_for_bit(dev, doepint_reg, USB_DWC2_DOEPINT_EPDISBLD);
		}

		/* Clear Endpoint Disabled interrupt (XferCompl too, as the
		 * workaround described in the FIXME above)
		 */
		sys_write32(USB_DWC2_DOEPINT_EPDISBLD | USB_DWC2_DOEPINT_XFERCOMPL, doepint_reg);

		/* Leave Global OUT NAK state */
		dctl |= USB_DWC2_DCTL_CGOUTNAK;
		sys_write32(dctl, dctl_reg);
	} else {
		mem_addr_t diepint_reg;

		diepint_reg = (mem_addr_t)&base->in_ep[ep_idx].diepint;

		/* First force the IN endpoint into the NAK state... */
		dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_SNAK;
		if (stall) {
			/* For IN endpoints STALL is set in addition to SNAK */
			dxepctl |= USB_DWC2_DEPCTL_STALL;
		}
		sys_write32(dxepctl, dxepctl_reg);

		dwc2_wait_for_bit(dev, diepint_reg, USB_DWC2_DIEPINT_INEPNAKEFF);

		/* ...then disable it */
		dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPDIS;
		sys_write32(dxepctl, dxepctl_reg);

		dwc2_wait_for_bit(dev, diepint_reg, USB_DWC2_DIEPINT_EPDISBLD);

		/* Clear Endpoint Disabled interrupt (XferCompl too, as the
		 * workaround described in the FIXME above)
		 */
		sys_write32(USB_DWC2_DIEPINT_EPDISBLD | USB_DWC2_DIEPINT_XFERCOMPL, diepint_reg);

		/* TODO: Read DIEPTSIZn here? Programming Guide suggest it to
		 * let application know how many bytes of interrupted transfer
		 * were transferred to the host.
		 */

		/* Drop any data left in the endpoint's TxFIFO */
		dwc2_flush_tx_fifo(dev, usb_dwc2_get_depctl_txfnum(dxepctl));
	}

	udc_ep_set_busy(dev, cfg->addr, false);
}
1593
1594 /* Deactivated endpoint means that there will be a bus timeout when the host
1595 * tries to access the endpoint.
1596 */
/* Deactivate an endpoint (see the comment above for bus-level semantics).
 *
 * Disables the endpoint if it was active, clears USBActEP, releases the
 * dedicated TxFIFO of non-control IN endpoints and masks the endpoint
 * interrupts. For control OUT the pending buffer is also released.
 */
static int udc_dwc2_ep_deactivate(const struct device *dev,
				  struct udc_ep_config *const cfg)
{
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t dxepctl_reg;
	uint32_t dxepctl;

	dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
	dxepctl = sys_read32(dxepctl_reg);

	if (dxepctl & USB_DWC2_DEPCTL_USBACTEP) {
		LOG_DBG("Disable ep 0x%02x DxEPCTL%u %x",
			cfg->addr, ep_idx, dxepctl);

		/* Quiesce the endpoint first, then mark it inactive */
		udc_dwc2_ep_disable(dev, cfg, false);

		dxepctl = sys_read32(dxepctl_reg);
		dxepctl &= ~USB_DWC2_DEPCTL_USBACTEP;
	} else {
		LOG_WRN("ep 0x%02x is not active DxEPCTL%u %x",
			cfg->addr, ep_idx, dxepctl);
	}

	if (USB_EP_DIR_IS_IN(cfg->addr) && udc_mps_ep_size(cfg) != 0U &&
	    ep_idx != 0U) {
		dwc2_unset_dedicated_fifo(dev, cfg, &dxepctl);
	}

	sys_write32(dxepctl, dxepctl_reg);
	dwc2_set_epint(dev, cfg, false);

	if (cfg->addr == USB_CONTROL_EP_OUT) {
		struct net_buf *buf = udc_buf_get_all(dev, cfg->addr);

		/* Release the buffer allocated in dwc2_ctrl_feed_dout() */
		if (buf) {
			net_buf_unref(buf);
		}
	}

	return 0;
}
1639
udc_dwc2_ep_set_halt(const struct device * dev,struct udc_ep_config * const cfg)1640 static int udc_dwc2_ep_set_halt(const struct device *dev,
1641 struct udc_ep_config *const cfg)
1642 {
1643 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
1644
1645 udc_dwc2_ep_disable(dev, cfg, true);
1646
1647 LOG_DBG("Set halt ep 0x%02x", cfg->addr);
1648 if (ep_idx != 0) {
1649 cfg->stat.halted = true;
1650 } else {
1651 /* Data/Status stage is STALLed, allow receiving next SETUP */
1652 dwc2_ensure_setup_ready(dev);
1653 }
1654
1655 return 0;
1656 }
1657
udc_dwc2_ep_clear_halt(const struct device * dev,struct udc_ep_config * const cfg)1658 static int udc_dwc2_ep_clear_halt(const struct device *dev,
1659 struct udc_ep_config *const cfg)
1660 {
1661 struct udc_dwc2_data *const priv = udc_get_private(dev);
1662 mem_addr_t dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
1663 uint32_t dxepctl;
1664
1665 dxepctl = sys_read32(dxepctl_reg);
1666 dxepctl &= ~USB_DWC2_DEPCTL_STALL;
1667 dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
1668 sys_write32(dxepctl, dxepctl_reg);
1669
1670 LOG_DBG("Clear halt ep 0x%02x", cfg->addr);
1671 cfg->stat.halted = false;
1672
1673 /* Resume queued transfers if any */
1674 if (udc_buf_peek(dev, cfg->addr)) {
1675 uint32_t ep_bit;
1676
1677 if (USB_EP_DIR_IS_IN(cfg->addr)) {
1678 ep_bit = BIT(USB_EP_GET_IDX(cfg->addr));
1679 } else {
1680 ep_bit = BIT(16 + USB_EP_GET_IDX(cfg->addr));
1681 }
1682
1683 k_event_post(&priv->xfer_new, ep_bit);
1684 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));
1685 }
1686
1687 return 0;
1688 }
1689
udc_dwc2_ep_enqueue(const struct device * dev,struct udc_ep_config * const cfg,struct net_buf * const buf)1690 static int udc_dwc2_ep_enqueue(const struct device *dev,
1691 struct udc_ep_config *const cfg,
1692 struct net_buf *const buf)
1693 {
1694 struct udc_dwc2_data *const priv = udc_get_private(dev);
1695
1696 LOG_DBG("%p enqueue %x %p", dev, cfg->addr, buf);
1697 udc_buf_put(cfg, buf);
1698
1699 if (!cfg->stat.halted) {
1700 uint32_t ep_bit;
1701
1702 if (USB_EP_DIR_IS_IN(cfg->addr)) {
1703 ep_bit = BIT(USB_EP_GET_IDX(cfg->addr));
1704 } else {
1705 ep_bit = BIT(16 + USB_EP_GET_IDX(cfg->addr));
1706 }
1707
1708 k_event_post(&priv->xfer_new, ep_bit);
1709 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));
1710 }
1711
1712 return 0;
1713 }
1714
udc_dwc2_ep_dequeue(const struct device * dev,struct udc_ep_config * const cfg)1715 static int udc_dwc2_ep_dequeue(const struct device *dev,
1716 struct udc_ep_config *const cfg)
1717 {
1718 struct net_buf *buf;
1719
1720 udc_dwc2_ep_disable(dev, cfg, false);
1721
1722 buf = udc_buf_get_all(dev, cfg->addr);
1723 if (buf) {
1724 udc_submit_ep_event(dev, buf, -ECONNABORTED);
1725 }
1726
1727 udc_ep_set_busy(dev, cfg->addr, false);
1728
1729 LOG_DBG("dequeue ep 0x%02x", cfg->addr);
1730
1731 return 0;
1732 }
1733
udc_dwc2_set_address(const struct device * dev,const uint8_t addr)1734 static int udc_dwc2_set_address(const struct device *dev, const uint8_t addr)
1735 {
1736 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1737 mem_addr_t dcfg_reg = (mem_addr_t)&base->dcfg;
1738 uint32_t dcfg;
1739
1740 if (addr > (USB_DWC2_DCFG_DEVADDR_MASK >> USB_DWC2_DCFG_DEVADDR_POS)) {
1741 return -EINVAL;
1742 }
1743
1744 dcfg = sys_read32(dcfg_reg);
1745 dcfg &= ~USB_DWC2_DCFG_DEVADDR_MASK;
1746 dcfg |= usb_dwc2_set_dcfg_devaddr(addr);
1747 sys_write32(dcfg, dcfg_reg);
1748 LOG_DBG("Set new address %u for %p", addr, dev);
1749
1750 return 0;
1751 }
1752
udc_dwc2_test_mode(const struct device * dev,const uint8_t mode,const bool dryrun)1753 static int udc_dwc2_test_mode(const struct device *dev,
1754 const uint8_t mode, const bool dryrun)
1755 {
1756 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1757 mem_addr_t dctl_reg = (mem_addr_t)&base->dctl;
1758 uint32_t dctl;
1759
1760 if (mode == 0U || mode > USB_DWC2_DCTL_TSTCTL_TESTFE) {
1761 return -EINVAL;
1762 }
1763
1764 dctl = sys_read32(dctl_reg);
1765 if (usb_dwc2_get_dctl_tstctl(dctl) != USB_DWC2_DCTL_TSTCTL_DISABLED) {
1766 return -EALREADY;
1767 }
1768
1769 if (dryrun) {
1770 LOG_DBG("Test Mode %u supported", mode);
1771 return 0;
1772 }
1773
1774 dctl |= usb_dwc2_set_dctl_tstctl(mode);
1775 sys_write32(dctl, dctl_reg);
1776 LOG_DBG("Enable Test Mode %u", mode);
1777
1778 return 0;
1779 }
1780
udc_dwc2_host_wakeup(const struct device * dev)1781 static int udc_dwc2_host_wakeup(const struct device *dev)
1782 {
1783 struct udc_dwc2_data *const priv = udc_get_private(dev);
1784
1785 LOG_DBG("Remote wakeup from %p", dev);
1786
1787 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_REMOTE_WAKEUP));
1788
1789 return 0;
1790 }
1791
1792 /* Return actual USB device speed */
udc_dwc2_device_speed(const struct device * dev)1793 static enum udc_bus_speed udc_dwc2_device_speed(const struct device *dev)
1794 {
1795 struct udc_dwc2_data *const priv = udc_get_private(dev);
1796
1797 switch (priv->enumspd) {
1798 case USB_DWC2_DSTS_ENUMSPD_HS3060:
1799 return UDC_BUS_SPEED_HS;
1800 case USB_DWC2_DSTS_ENUMSPD_LS6:
1801 __ASSERT(false, "Low speed mode not supported");
1802 __fallthrough;
1803 case USB_DWC2_DSTS_ENUMSPD_FS48:
1804 __fallthrough;
1805 case USB_DWC2_DSTS_ENUMSPD_FS3060:
1806 __fallthrough;
1807 default:
1808 return UDC_BUS_SPEED_FS;
1809 }
1810 }
1811
/* Perform a Core Soft Reset of the DWC2 controller.
 *
 * Waits for the AHB master to go idle, asserts CSftRst and then polls until
 * either CSftRst self-clears or CSftRstDone is set (completion signalling
 * presumably differs between core revisions — the loop condition covers
 * both; confirm against the databook). Each poll step is a 1 us busy wait,
 * bounded by a 10 ms timeout.
 *
 * @return 0 on success, -EIO on timeout
 */
static int dwc2_core_soft_reset(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;
	const unsigned int csr_timeout_us = 10000UL;
	uint32_t cnt = 0UL;

	/* Check AHB master idle state */
	while (!(sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_AHBIDLE)) {
		k_busy_wait(1);

		if (++cnt > csr_timeout_us) {
			LOG_ERR("Wait for AHB idle timeout, GRSTCTL 0x%08x",
				sys_read32(grstctl_reg));
			return -EIO;
		}
	}

	/* Apply Core Soft Reset */
	sys_write32(USB_DWC2_GRSTCTL_CSFTRST, grstctl_reg);

	cnt = 0UL;
	do {
		if (++cnt > csr_timeout_us) {
			LOG_ERR("Wait for CSR done timeout, GRSTCTL 0x%08x",
				sys_read32(grstctl_reg));
			return -EIO;
		}

		k_busy_wait(1);
	} while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_CSFTRST &&
		 !(sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_CSFTRSTDONE));

	/* Clear CSftRst and, if set, the CSftRstDone completion flag */
	sys_clear_bits(grstctl_reg, USB_DWC2_GRSTCTL_CSFTRST | USB_DWC2_GRSTCTL_CSFTRSTDONE);

	return 0;
}
1849
udc_dwc2_init_controller(const struct device * dev)1850 static int udc_dwc2_init_controller(const struct device *dev)
1851 {
1852 const struct udc_dwc2_config *const config = dev->config;
1853 struct udc_dwc2_data *const priv = udc_get_private(dev);
1854 struct usb_dwc2_reg *const base = config->base;
1855 mem_addr_t grxfsiz_reg = (mem_addr_t)&base->grxfsiz;
1856 mem_addr_t gahbcfg_reg = (mem_addr_t)&base->gahbcfg;
1857 mem_addr_t gusbcfg_reg = (mem_addr_t)&base->gusbcfg;
1858 mem_addr_t dcfg_reg = (mem_addr_t)&base->dcfg;
1859 uint32_t gsnpsid;
1860 uint32_t dcfg;
1861 uint32_t gusbcfg;
1862 uint32_t gahbcfg;
1863 uint32_t ghwcfg2;
1864 uint32_t ghwcfg3;
1865 uint32_t ghwcfg4;
1866 uint32_t val;
1867 int ret;
1868 bool hs_phy;
1869
1870 ret = dwc2_core_soft_reset(dev);
1871 if (ret) {
1872 return ret;
1873 }
1874
1875 /* Enable RTL workarounds based on controller revision */
1876 gsnpsid = sys_read32((mem_addr_t)&base->gsnpsid);
1877 priv->wa_essregrestored = gsnpsid < USB_DWC2_GSNPSID_REV_5_00A;
1878
1879 priv->ghwcfg1 = sys_read32((mem_addr_t)&base->ghwcfg1);
1880 ghwcfg2 = sys_read32((mem_addr_t)&base->ghwcfg2);
1881 ghwcfg3 = sys_read32((mem_addr_t)&base->ghwcfg3);
1882 ghwcfg4 = sys_read32((mem_addr_t)&base->ghwcfg4);
1883
1884 if (!(ghwcfg4 & USB_DWC2_GHWCFG4_DEDFIFOMODE)) {
1885 LOG_ERR("Only dedicated TX FIFO mode is supported");
1886 return -ENOTSUP;
1887 }
1888
	/*
	 * Force device mode as we do not support role changes.
	 * Wait 25 ms for the change to take effect.
	 */
1893 gusbcfg = USB_DWC2_GUSBCFG_FORCEDEVMODE;
1894 sys_write32(gusbcfg, gusbcfg_reg);
1895 k_msleep(25);
1896
1897 /* Buffer DMA is always supported in Internal DMA mode.
1898 * TODO: check and support descriptor DMA if available
1899 */
1900 priv->bufferdma = (usb_dwc2_get_ghwcfg2_otgarch(ghwcfg2) ==
1901 USB_DWC2_GHWCFG2_OTGARCH_INTERNALDMA);
1902
1903 if (!IS_ENABLED(CONFIG_UDC_DWC2_DMA)) {
1904 priv->bufferdma = 0;
1905 } else if (priv->bufferdma) {
1906 LOG_WRN("Experimental DMA enabled");
1907 }
1908
1909 if (ghwcfg2 & USB_DWC2_GHWCFG2_DYNFIFOSIZING) {
1910 LOG_DBG("Dynamic FIFO Sizing is enabled");
1911 priv->dynfifosizing = true;
1912 }
1913
1914 if (IS_ENABLED(CONFIG_UDC_DWC2_HIBERNATION) &&
1915 ghwcfg4 & USB_DWC2_GHWCFG4_HIBERNATION) {
1916 LOG_INF("Hibernation enabled");
1917 priv->suspend_type = DWC2_SUSPEND_HIBERNATION;
1918 } else {
1919 priv->suspend_type = DWC2_SUSPEND_NO_POWER_SAVING;
1920 }
1921
1922 /* Get the number or endpoints and IN endpoints we can use later */
1923 priv->numdeveps = usb_dwc2_get_ghwcfg2_numdeveps(ghwcfg2) + 1U;
1924 priv->ineps = usb_dwc2_get_ghwcfg4_ineps(ghwcfg4) + 1U;
1925 LOG_DBG("Number of endpoints (NUMDEVEPS + 1) %u", priv->numdeveps);
1926 LOG_DBG("Number of IN endpoints (INEPS + 1) %u", priv->ineps);
1927
1928 LOG_DBG("Number of periodic IN endpoints (NUMDEVPERIOEPS) %u",
1929 usb_dwc2_get_ghwcfg4_numdevperioeps(ghwcfg4));
1930 LOG_DBG("Number of additional control endpoints (NUMCTLEPS) %u",
1931 usb_dwc2_get_ghwcfg4_numctleps(ghwcfg4));
1932
1933 LOG_DBG("OTG architecture (OTGARCH) %u, mode (OTGMODE) %u",
1934 usb_dwc2_get_ghwcfg2_otgarch(ghwcfg2),
1935 usb_dwc2_get_ghwcfg2_otgmode(ghwcfg2));
1936
1937 priv->dfifodepth = usb_dwc2_get_ghwcfg3_dfifodepth(ghwcfg3);
1938 LOG_DBG("DFIFO depth (DFIFODEPTH) %u bytes", priv->dfifodepth * 4);
1939
1940 priv->max_pktcnt = GHWCFG3_PKTCOUNT(usb_dwc2_get_ghwcfg3_pktsizewidth(ghwcfg3));
1941 priv->max_xfersize = GHWCFG3_XFERSIZE(usb_dwc2_get_ghwcfg3_xfersizewidth(ghwcfg3));
1942 LOG_DBG("Max packet count %u, Max transfer size %u",
1943 priv->max_pktcnt, priv->max_xfersize);
1944
1945 LOG_DBG("Vendor Control interface support enabled: %s",
1946 (ghwcfg3 & USB_DWC2_GHWCFG3_VNDCTLSUPT) ? "true" : "false");
1947
1948 LOG_DBG("PHY interface type: FSPHYTYPE %u, HSPHYTYPE %u, DATAWIDTH %u",
1949 usb_dwc2_get_ghwcfg2_fsphytype(ghwcfg2),
1950 usb_dwc2_get_ghwcfg2_hsphytype(ghwcfg2),
1951 usb_dwc2_get_ghwcfg4_phydatawidth(ghwcfg4));
1952
1953 LOG_DBG("LPM mode is %s",
1954 (ghwcfg3 & USB_DWC2_GHWCFG3_LPMMODE) ? "enabled" : "disabled");
1955
1956 if (ghwcfg3 & USB_DWC2_GHWCFG3_RSTTYPE) {
1957 priv->syncrst = 1;
1958 }
1959
1960 /* Configure AHB, select Completer or DMA mode */
1961 gahbcfg = sys_read32(gahbcfg_reg);
1962
1963 if (priv->bufferdma) {
1964 gahbcfg |= USB_DWC2_GAHBCFG_DMAEN;
1965 } else {
1966 gahbcfg &= ~USB_DWC2_GAHBCFG_DMAEN;
1967 }
1968
1969 sys_write32(gahbcfg, gahbcfg_reg);
1970
1971 dcfg = sys_read32(dcfg_reg);
1972
1973 dcfg &= ~USB_DWC2_DCFG_DESCDMA;
1974
1975 /* Configure PHY and device speed */
1976 dcfg &= ~USB_DWC2_DCFG_DEVSPD_MASK;
1977 switch (usb_dwc2_get_ghwcfg2_hsphytype(ghwcfg2)) {
1978 case USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUSULPI:
1979 __fallthrough;
1980 case USB_DWC2_GHWCFG2_HSPHYTYPE_ULPI:
1981 gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB20 |
1982 USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_ULPI;
1983 dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20
1984 << USB_DWC2_DCFG_DEVSPD_POS;
1985 hs_phy = true;
1986 break;
1987 case USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUS:
1988 gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB20 |
1989 USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_UTMI;
1990 dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20
1991 << USB_DWC2_DCFG_DEVSPD_POS;
1992 hs_phy = true;
1993 break;
1994 case USB_DWC2_GHWCFG2_HSPHYTYPE_NO_HS:
1995 __fallthrough;
1996 default:
1997 if (usb_dwc2_get_ghwcfg2_fsphytype(ghwcfg2) !=
1998 USB_DWC2_GHWCFG2_FSPHYTYPE_NO_FS) {
1999 gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB11;
2000 }
2001
2002 dcfg |= USB_DWC2_DCFG_DEVSPD_USBFS1148
2003 << USB_DWC2_DCFG_DEVSPD_POS;
2004 hs_phy = false;
2005 }
2006
2007 if (usb_dwc2_get_ghwcfg4_phydatawidth(ghwcfg4)) {
2008 gusbcfg |= USB_DWC2_GUSBCFG_PHYIF_16_BIT;
2009 }
2010
2011 /* Update PHY configuration */
2012 sys_write32(gusbcfg, gusbcfg_reg);
2013 sys_write32(dcfg, dcfg_reg);
2014
2015 priv->outeps = 0U;
2016 for (uint8_t i = 0U; i < priv->numdeveps; i++) {
2017 uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);
2018
2019 if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT ||
2020 epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
2021 mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, i);
2022
2023 sys_write32(USB_DWC2_DEPCTL_SNAK, doepctl_reg);
2024 priv->outeps++;
2025 }
2026 }
2027
2028 LOG_DBG("Number of OUT endpoints %u", priv->outeps);
2029
2030 /* Read and store all TxFIFO depths because Programmed FIFO Depths must
2031 * not exceed the power-on values.
2032 */
2033 val = sys_read32((mem_addr_t)&base->gnptxfsiz);
2034 priv->max_txfifo_depth[0] = usb_dwc2_get_gnptxfsiz_nptxfdep(val);
2035 for (uint8_t i = 1; i < priv->ineps; i++) {
2036 priv->max_txfifo_depth[i] = dwc2_get_txfdep(dev, i - 1);
2037 }
2038
2039 priv->rxfifo_depth = usb_dwc2_get_grxfsiz(sys_read32(grxfsiz_reg));
2040
2041 if (priv->dynfifosizing) {
2042 uint32_t gnptxfsiz;
2043 uint32_t default_depth;
2044
2045 /* TODO: For proper runtime FIFO sizing UDC driver would have to
2046 * have prior knowledge of the USB configurations. Only with the
2047 * prior knowledge, the driver will be able to fairly distribute
2048 * available resources. For the time being just use different
2049 * defaults based on maximum configured PHY speed, but this has
2050 * to be revised if e.g. thresholding support would be necessary
2051 * on some target.
2052 */
2053 if (hs_phy) {
2054 default_depth = UDC_DWC2_GRXFSIZ_HS_DEFAULT;
2055 } else {
2056 default_depth = UDC_DWC2_GRXFSIZ_FS_DEFAULT;
2057 }
2058 default_depth += priv->outeps * 2U;
2059
2060 /* Driver does not dynamically resize RxFIFO so there is no need
2061 * to store reset value. Read the reset value and make sure that
2062 * the programmed value is not greater than what driver sets.
2063 */
2064 priv->rxfifo_depth = MIN(priv->rxfifo_depth, default_depth);
2065 sys_write32(usb_dwc2_set_grxfsiz(priv->rxfifo_depth), grxfsiz_reg);
2066
2067 /* Set TxFIFO 0 depth */
2068 val = MIN(UDC_DWC2_FIFO0_DEPTH, priv->max_txfifo_depth[0]);
2069 gnptxfsiz = usb_dwc2_set_gnptxfsiz_nptxfdep(val) |
2070 usb_dwc2_set_gnptxfsiz_nptxfstaddr(priv->rxfifo_depth);
2071
2072 sys_write32(gnptxfsiz, (mem_addr_t)&base->gnptxfsiz);
2073 }
2074
2075 LOG_DBG("RX FIFO size %u bytes", priv->rxfifo_depth * 4);
2076 for (uint8_t i = 1U; i < priv->ineps; i++) {
2077 LOG_DBG("TX FIFO%u depth %u addr %u",
2078 i, priv->max_txfifo_depth[i], dwc2_get_txfaddr(dev, i));
2079 }
2080
2081 if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT,
2082 USB_EP_TYPE_CONTROL, 64, 0)) {
2083 LOG_ERR("Failed to enable control endpoint");
2084 return -EIO;
2085 }
2086
2087 if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN,
2088 USB_EP_TYPE_CONTROL, 64, 0)) {
2089 LOG_ERR("Failed to enable control endpoint");
2090 return -EIO;
2091 }
2092
2093 /* Unmask interrupts */
2094 sys_write32(USB_DWC2_GINTSTS_OEPINT | USB_DWC2_GINTSTS_IEPINT |
2095 USB_DWC2_GINTSTS_ENUMDONE | USB_DWC2_GINTSTS_USBRST |
2096 USB_DWC2_GINTSTS_WKUPINT | USB_DWC2_GINTSTS_USBSUSP |
2097 USB_DWC2_GINTSTS_INCOMPISOOUT | USB_DWC2_GINTSTS_INCOMPISOIN |
2098 USB_DWC2_GINTSTS_SOF,
2099 (mem_addr_t)&base->gintmsk);
2100
2101 return 0;
2102 }
2103
/* UDC API: enable the controller and connect to the bus.
 *
 * Runs the vendor pre-enable quirk (before any register access), initializes
 * the controller, runs the post-enable quirk, unmasks the global interrupt
 * and finally clears soft disconnect so the host can see the device.
 *
 * Returns 0 on success, negative errno from quirks or controller init.
 */
static int udc_dwc2_enable(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	int err;

	/* Vendor-specific preparation before any controller register access */
	err = dwc2_quirk_pre_enable(dev);
	if (err) {
		LOG_ERR("Quirk pre enable failed %d", err);
		return err;
	}

	err = udc_dwc2_init_controller(dev);
	if (err) {
		return err;
	}

	err = dwc2_quirk_post_enable(dev);
	if (err) {
		LOG_ERR("Quirk post enable failed %d", err);
		return err;
	}

	/* Enable global interrupt */
	sys_set_bits((mem_addr_t)&base->gahbcfg, USB_DWC2_GAHBCFG_GLBINTRMASK);
	config->irq_enable_func(dev);

	/* Disable soft disconnect */
	sys_clear_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_SFTDISCON);
	LOG_DBG("Enable device %p", base);

	return 0;
}
2137
/* UDC API: disconnect from the bus and disable the controller.
 *
 * Asserts soft disconnect first, tears down both control endpoints,
 * disables the interrupt line, exits hibernation if the core is hibernated,
 * masks the global interrupt and runs the vendor disable quirk.
 *
 * Returns 0 on success, -EIO if a control endpoint cannot be disabled,
 * or the quirk's error code.
 */
static int udc_dwc2_disable(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t dctl_reg = (mem_addr_t)&base->dctl;
	int err;

	/* Enable soft disconnect */
	sys_set_bits(dctl_reg, USB_DWC2_DCTL_SFTDISCON);
	LOG_DBG("Disable device %p", dev);

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) {
		LOG_DBG("Failed to disable control endpoint");
		return -EIO;
	}

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) {
		LOG_DBG("Failed to disable control endpoint");
		return -EIO;
	}

	config->irq_disable_func(dev);

	if (priv->hibernated) {
		/* Exit without remote wakeup signaling, as if bus reset */
		dwc2_exit_hibernation(dev, false, true);
		priv->hibernated = 0;
	}

	/* Mask the global interrupt */
	sys_clear_bits((mem_addr_t)&base->gahbcfg, USB_DWC2_GAHBCFG_GLBINTRMASK);

	err = dwc2_quirk_disable(dev);
	if (err) {
		LOG_ERR("Quirk disable failed %d", err);
		return err;
	}

	return 0;
}
2177
/* UDC API: one-time driver initialization.
 *
 * Runs the vendor init quirk and then applies the pin control
 * configuration. Returns 0 on success or a negative errno.
 */
static int udc_dwc2_init(const struct device *dev)
{
	int err = dwc2_quirk_init(dev);

	if (err != 0) {
		LOG_ERR("Quirk init failed %d", err);
		return err;
	}

	return dwc2_init_pinctrl(dev);
}
2190
/* UDC API: final shutdown of the driver instance.
 *
 * Only the vendor shutdown quirk needs to run; its error code (or 0)
 * is propagated to the caller.
 */
static int udc_dwc2_shutdown(const struct device *dev)
{
	const int err = dwc2_quirk_shutdown(dev);

	if (err != 0) {
		LOG_ERR("Quirk shutdown failed %d", err);
	}

	return err;
}
2203
dwc2_driver_preinit(const struct device * dev)2204 static int dwc2_driver_preinit(const struct device *dev)
2205 {
2206 const struct udc_dwc2_config *config = dev->config;
2207 struct udc_dwc2_data *const priv = udc_get_private(dev);
2208 struct udc_data *data = dev->data;
2209 uint16_t mps = 1023;
2210 uint32_t numdeveps;
2211 uint32_t ineps;
2212 int err;
2213
2214 k_mutex_init(&data->mutex);
2215
2216 k_event_init(&priv->drv_evt);
2217 k_event_init(&priv->xfer_new);
2218 k_event_init(&priv->xfer_finished);
2219
2220 data->caps.rwup = true;
2221 data->caps.addr_before_status = true;
2222 data->caps.mps0 = UDC_MPS0_64;
2223
2224 (void)dwc2_quirk_caps(dev);
2225 if (data->caps.hs) {
2226 mps = 1024;
2227 }
2228
2229 /*
2230 * At this point, we cannot or do not want to access the hardware
2231 * registers to get GHWCFGn values. For now, we will use devicetree to
2232 * get GHWCFGn values and use them to determine the number and type of
2233 * configured endpoints in the hardware. This can be considered a
2234 * workaround, and we may change the upper layer internals to avoid it
2235 * in the future.
2236 */
2237 ineps = usb_dwc2_get_ghwcfg4_ineps(config->ghwcfg4) + 1U;
2238 numdeveps = usb_dwc2_get_ghwcfg2_numdeveps(config->ghwcfg2) + 1U;
2239 LOG_DBG("Number of endpoints (NUMDEVEPS + 1) %u", numdeveps);
2240 LOG_DBG("Number of IN endpoints (INEPS + 1) %u", ineps);
2241
2242 for (uint32_t i = 0, n = 0; i < numdeveps; i++) {
2243 uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(config->ghwcfg1, i);
2244
2245 if (epdir != USB_DWC2_GHWCFG1_EPDIR_OUT &&
2246 epdir != USB_DWC2_GHWCFG1_EPDIR_BDIR) {
2247 continue;
2248 }
2249
2250 if (i == 0) {
2251 config->ep_cfg_out[n].caps.control = 1;
2252 config->ep_cfg_out[n].caps.mps = 64;
2253 } else {
2254 config->ep_cfg_out[n].caps.bulk = 1;
2255 config->ep_cfg_out[n].caps.interrupt = 1;
2256 config->ep_cfg_out[n].caps.iso = 1;
2257 config->ep_cfg_out[n].caps.high_bandwidth = data->caps.hs;
2258 config->ep_cfg_out[n].caps.mps = mps;
2259 }
2260
2261 config->ep_cfg_out[n].caps.out = 1;
2262 config->ep_cfg_out[n].addr = USB_EP_DIR_OUT | i;
2263
2264 LOG_DBG("Register ep 0x%02x (%u)", i, n);
2265 err = udc_register_ep(dev, &config->ep_cfg_out[n]);
2266 if (err != 0) {
2267 LOG_ERR("Failed to register endpoint");
2268 return err;
2269 }
2270
2271 n++;
2272 /* Also check the number of desired OUT endpoints in devicetree. */
2273 if (n >= config->num_out_eps) {
2274 break;
2275 }
2276 }
2277
2278 for (uint32_t i = 0, n = 0; i < numdeveps; i++) {
2279 uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(config->ghwcfg1, i);
2280
2281 if (epdir != USB_DWC2_GHWCFG1_EPDIR_IN &&
2282 epdir != USB_DWC2_GHWCFG1_EPDIR_BDIR) {
2283 continue;
2284 }
2285
2286 if (i == 0) {
2287 config->ep_cfg_in[n].caps.control = 1;
2288 config->ep_cfg_in[n].caps.mps = 64;
2289 } else {
2290 config->ep_cfg_in[n].caps.bulk = 1;
2291 config->ep_cfg_in[n].caps.interrupt = 1;
2292 config->ep_cfg_in[n].caps.iso = 1;
2293 config->ep_cfg_in[n].caps.high_bandwidth = data->caps.hs;
2294 config->ep_cfg_in[n].caps.mps = mps;
2295 }
2296
2297 config->ep_cfg_in[n].caps.in = 1;
2298 config->ep_cfg_in[n].addr = USB_EP_DIR_IN | i;
2299
2300 LOG_DBG("Register ep 0x%02x (%u)", USB_EP_DIR_IN | i, n);
2301 err = udc_register_ep(dev, &config->ep_cfg_in[n]);
2302 if (err != 0) {
2303 LOG_ERR("Failed to register endpoint");
2304 return err;
2305 }
2306
2307 n++;
2308 /* Also check the number of desired IN endpoints in devicetree. */
2309 if (n >= MIN(ineps, config->num_in_eps)) {
2310 break;
2311 }
2312 }
2313
2314 config->make_thread(dev);
2315
2316 return 0;
2317 }
2318
/* UDC API: lock the driver instance.
 * Disable preemption first, then take the internal UDC mutex.
 */
static void udc_dwc2_lock(const struct device *dev)
{
	k_sched_lock();
	udc_lock_internal(dev, K_FOREVER);
}
2324
/* UDC API: unlock the driver instance.
 * Release in the reverse order of udc_dwc2_lock(): mutex first, then
 * re-enable preemption.
 */
static void udc_dwc2_unlock(const struct device *dev)
{
	udc_unlock_internal(dev);
	k_sched_unlock();
}
2330
/* Handle USB Reset: NAK all OUT endpoints, unmask the endpoint interrupts
 * needed for transfer handling, clear the device address and require a new
 * speed enumeration.
 */
static void dwc2_on_bus_reset(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint32_t doepmsk;

	/* Set the NAK bit for all OUT endpoints */
	for (uint8_t i = 0U; i < priv->numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);
		mem_addr_t doepctl_reg;

		LOG_DBG("ep 0x%02x EPDIR %u", i, epdir);
		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT ||
		    epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			doepctl_reg = dwc2_get_dxepctl_reg(dev, i);
			sys_write32(USB_DWC2_DEPCTL_SNAK, doepctl_reg);
		}
	}

	/* Unmask SETUP and transfer complete interrupts for OUT endpoints;
	 * StsPhseRcvd must additionally be handled (cleared) in Buffer DMA
	 * mode, see dwc2_handle_oepint().
	 */
	doepmsk = USB_DWC2_DOEPINT_SETUP | USB_DWC2_DOEPINT_XFERCOMPL;
	if (dwc2_in_buffer_dma_mode(dev)) {
		doepmsk |= USB_DWC2_DOEPINT_STSPHSERCVD;
	}

	sys_write32(doepmsk, (mem_addr_t)&base->doepmsk);
	sys_set_bits((mem_addr_t)&base->diepmsk, USB_DWC2_DIEPINT_XFERCOMPL);

	/* Software has to handle RxFLvl interrupt only in Completer mode */
	if (dwc2_in_completer_mode(dev)) {
		sys_set_bits((mem_addr_t)&base->gintmsk,
			     USB_DWC2_GINTSTS_RXFLVL);
	}

	/* Clear device address during reset. */
	sys_clear_bits((mem_addr_t)&base->dcfg, USB_DWC2_DCFG_DEVADDR_MASK);

	/* Speed enumeration must happen after reset. */
	priv->enumdone = 0;
}
2370
/* Handle Enumeration Done: latch the enumerated speed from DSTS and let the
 * bottom-half thread finish the post-reset processing.
 */
static void dwc2_handle_enumdone(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint32_t dsts;

	dsts = sys_read32((mem_addr_t)&base->dsts);
	priv->enumspd = usb_dwc2_get_dsts_enumspd(dsts);
	priv->enumdone = 1;

	k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_ENUM_DONE));
}
2383
/* Read a SETUP packet from the RxFIFO into the driver's temporary buffer.
 *
 * @param dev  UDC device
 * @param ep   Endpoint number reported by GRXSTSP
 * @param size Byte count reported by GRXSTSP; 8 for a valid SETUP packet,
 *             any excess is drained and discarded
 *
 * @return Always 0
 */
static inline int dwc2_read_fifo_setup(const struct device *dev, uint8_t ep,
				       const size_t size)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	size_t offset;

	/* FIFO access is always in 32-bit words */

	if (size != 8) {
		/* size is size_t, print with %zu (was %d) */
		LOG_ERR("%zu bytes SETUP", size);
	}

	/*
	 * We store the setup packet temporarily in the driver's private data
	 * because there is always a race risk after the status stage OUT
	 * packet from the host and the new setup packet. This is fine in
	 * bottom-half processing because the events arrive in a queue and
	 * there will be a next net_buf for the setup packet.
	 */
	for (offset = 0; offset < MIN(size, 8); offset += 4) {
		sys_put_le32(sys_read32(UDC_DWC2_EP_FIFO(base, ep)),
			     &priv->setup[offset]);
	}

	/* On protocol error simply discard extra data */
	while (offset < size) {
		sys_read32(UDC_DWC2_EP_FIFO(base, ep));
		offset += 4;
	}

	return 0;
}
2417
dwc2_handle_rxflvl(const struct device * dev)2418 static inline void dwc2_handle_rxflvl(const struct device *dev)
2419 {
2420 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
2421 struct udc_ep_config *ep_cfg;
2422 struct net_buf *buf;
2423 uint32_t grxstsp;
2424 uint32_t pktsts;
2425 uint32_t bcnt;
2426 uint8_t ep;
2427
2428 grxstsp = sys_read32((mem_addr_t)&base->grxstsp);
2429 ep = usb_dwc2_get_grxstsp_epnum(grxstsp);
2430 bcnt = usb_dwc2_get_grxstsp_bcnt(grxstsp);
2431 pktsts = usb_dwc2_get_grxstsp_pktsts(grxstsp);
2432
2433 LOG_DBG("ep 0x%02x: pktsts %u, bcnt %u", ep, pktsts, bcnt);
2434
2435 switch (pktsts) {
2436 case USB_DWC2_GRXSTSR_PKTSTS_SETUP:
2437 dwc2_read_fifo_setup(dev, ep, bcnt);
2438 break;
2439 case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA:
2440 ep_cfg = udc_get_ep_cfg(dev, ep);
2441
2442 buf = udc_buf_peek(dev, ep_cfg->addr);
2443
2444 /* RxFIFO data must be retrieved even when buf is NULL */
2445 dwc2_read_fifo(dev, ep, buf, bcnt);
2446 break;
2447 case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA_DONE:
2448 LOG_DBG("RX pktsts DONE");
2449 break;
2450 case USB_DWC2_GRXSTSR_PKTSTS_SETUP_DONE:
2451 LOG_DBG("SETUP pktsts DONE");
2452 case USB_DWC2_GRXSTSR_PKTSTS_GLOBAL_OUT_NAK:
2453 LOG_DBG("Global OUT NAK");
2454 break;
2455 default:
2456 break;
2457 }
2458 }
2459
dwc2_handle_in_xfercompl(const struct device * dev,const uint8_t ep_idx)2460 static inline void dwc2_handle_in_xfercompl(const struct device *dev,
2461 const uint8_t ep_idx)
2462 {
2463 struct udc_dwc2_data *const priv = udc_get_private(dev);
2464 struct udc_ep_config *ep_cfg;
2465 struct net_buf *buf;
2466
2467 ep_cfg = udc_get_ep_cfg(dev, ep_idx | USB_EP_DIR_IN);
2468 buf = udc_buf_peek(dev, ep_cfg->addr);
2469 if (buf == NULL) {
2470 udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
2471 return;
2472 }
2473
2474 net_buf_pull(buf, priv->tx_len[ep_idx]);
2475 if (buf->len && dwc2_tx_fifo_write(dev, ep_cfg, buf) == 0) {
2476 return;
2477 }
2478
2479 k_event_post(&priv->xfer_finished, BIT(ep_idx));
2480 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_EP_FINISHED));
2481 }
2482
dwc2_handle_iepint(const struct device * dev)2483 static inline void dwc2_handle_iepint(const struct device *dev)
2484 {
2485 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
2486 const uint8_t n_max = 16;
2487 uint32_t diepmsk;
2488 uint32_t daint;
2489
2490 diepmsk = sys_read32((mem_addr_t)&base->diepmsk);
2491 daint = sys_read32((mem_addr_t)&base->daint);
2492
2493 for (uint8_t n = 0U; n < n_max; n++) {
2494 mem_addr_t diepint_reg = (mem_addr_t)&base->in_ep[n].diepint;
2495 uint32_t diepint;
2496 uint32_t status;
2497
2498 if (daint & USB_DWC2_DAINT_INEPINT(n)) {
2499 /* Read and clear interrupt status */
2500 diepint = sys_read32(diepint_reg);
2501 status = diepint & diepmsk;
2502 sys_write32(status, diepint_reg);
2503
2504 LOG_DBG("ep 0x%02x interrupt status: 0x%x",
2505 n | USB_EP_DIR_IN, status);
2506
2507 if (status & USB_DWC2_DIEPINT_XFERCOMPL) {
2508 dwc2_handle_in_xfercompl(dev, n);
2509 }
2510
2511 }
2512 }
2513
2514 /* Clear IEPINT interrupt */
2515 sys_write32(USB_DWC2_GINTSTS_IEPINT, (mem_addr_t)&base->gintsts);
2516 }
2517
/* Handle transfer complete on an OUT endpoint: compute the number of
 * received bytes, validate isochronous data by PID/packet count, and either
 * continue the transfer or mark it finished for the bottom-half thread.
 */
static inline void dwc2_handle_out_xfercompl(const struct device *dev,
					     const uint8_t ep_idx)
{
	struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, ep_idx);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	uint32_t bcnt;
	struct net_buf *buf;
	uint32_t doeptsiz;
	const bool is_iso = dwc2_ep_is_iso(ep_cfg);

	doeptsiz = sys_read32((mem_addr_t)&base->out_ep[ep_idx].doeptsiz);

	buf = udc_buf_peek(dev, ep_cfg->addr);
	if (!buf) {
		LOG_ERR("No buffer for ep 0x%02x", ep_cfg->addr);
		udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
		return;
	}

	/* The original transfer size value is necessary here because controller
	 * decreases the value for every byte stored.
	 */
	bcnt = usb_dwc2_get_doeptsizn_xfersize(priv->rx_siz[ep_idx]) -
	       usb_dwc2_get_doeptsizn_xfersize(doeptsiz);

	if (is_iso) {
		uint32_t pkts;
		bool valid;

		/* Number of packets the controller stored for this transfer */
		pkts = usb_dwc2_get_doeptsizn_pktcnt(priv->rx_siz[ep_idx]) -
		       usb_dwc2_get_doeptsizn_pktcnt(doeptsiz);
		/* The received Data PID must match the packet count, else
		 * the ISO data is not valid.
		 */
		switch (usb_dwc2_get_doeptsizn_rxdpid(doeptsiz)) {
		case USB_DWC2_DOEPTSIZN_RXDPID_DATA0:
			valid = (pkts == 1);
			break;
		case USB_DWC2_DOEPTSIZN_RXDPID_DATA1:
			valid = (pkts == 2);
			break;
		case USB_DWC2_DOEPTSIZN_RXDPID_DATA2:
			valid = (pkts == 3);
			break;
		case USB_DWC2_DOEPTSIZN_RXDPID_MDATA:
		default:
			valid = false;
			break;
		}

		if (!valid) {
			if (dwc2_in_completer_mode(dev)) {
				/* RxFlvl added data to net buf, rollback */
				net_buf_remove_mem(buf, bcnt);
			}
			/* Data is not valid, discard it */
			bcnt = 0;
		}
	}

	if (dwc2_in_buffer_dma_mode(dev) && bcnt) {
		/* DMA wrote behind the data cache, invalidate before reading */
		sys_cache_data_invd_range(net_buf_tail(buf), bcnt);
		net_buf_add(buf, bcnt);
	}

	/* A short (non-wMaxPacketSize-multiple) packet ends the transfer */
	if (!is_iso && bcnt && (bcnt % udc_mps_ep_size(ep_cfg)) == 0 &&
	    net_buf_tailroom(buf)) {
		dwc2_prep_rx(dev, buf, ep_cfg);
	} else {
		/* OUT endpoints use the upper 16 bits of the event bitmap */
		k_event_post(&priv->xfer_finished, BIT(16 + ep_idx));
		k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_EP_FINISHED));
	}
}
2589
/* Handle OUT endpoints interrupt: dispatch per-endpoint status for every OUT
 * endpoint flagged in DAINT (SETUP, status phase received, transfer
 * complete), then clear the global OEPINT bit.
 */
static inline void dwc2_handle_oepint(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	const uint8_t n_max = 16;
	uint32_t doepmsk;
	uint32_t daint;

	doepmsk = sys_read32((mem_addr_t)&base->doepmsk);
	daint = sys_read32((mem_addr_t)&base->daint);

	for (uint8_t n = 0U; n < n_max; n++) {
		mem_addr_t doepint_reg = (mem_addr_t)&base->out_ep[n].doepint;
		uint32_t doepint;
		uint32_t status;

		if (!(daint & USB_DWC2_DAINT_OUTEPINT(n))) {
			continue;
		}

		/* Read and clear interrupt status */
		doepint = sys_read32(doepint_reg);
		status = doepint & doepmsk;
		sys_write32(status, doepint_reg);

		LOG_DBG("ep 0x%02x interrupt status: 0x%x", n, status);

		/* StupPktRcvd is not enabled for interrupt, but must be checked
		 * when XferComp hits to determine if SETUP token was received.
		 */
		if (dwc2_in_buffer_dma_mode(dev) &&
		    (status & USB_DWC2_DOEPINT_XFERCOMPL) &&
		    (doepint & USB_DWC2_DOEPINT_STUPPKTRCVD)) {
			uint32_t addr;

			/* Consume XferCompl here; the SETUP event is raised
			 * below via the SETUP status bit.
			 */
			sys_write32(USB_DWC2_DOEPINT_STUPPKTRCVD, doepint_reg);
			status &= ~USB_DWC2_DOEPINT_XFERCOMPL;

			/* DMAAddr points past the memory location where the
			 * SETUP data was stored. Copy the received SETUP data
			 * to temporary location used also in Completer mode
			 * which allows common SETUP interrupt handling.
			 */
			addr = sys_read32((mem_addr_t)&base->out_ep[0].doepdma);
			sys_cache_data_invd_range((void *)(addr - 8), 8);
			memcpy(priv->setup, (void *)(addr - 8), sizeof(priv->setup));
		}

		if (status & USB_DWC2_DOEPINT_SETUP) {
			k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_SETUP));
		}

		if (status & USB_DWC2_DOEPINT_STSPHSERCVD) {
			/* Driver doesn't need any special handling, but it is
			 * mandatory that the bit is cleared in Buffer DMA mode.
			 * If the bit is not cleared (i.e. when this interrupt
			 * bit is masked), then SETUP interrupts will cease
			 * after first control transfer with data stage from
			 * device to host.
			 */
		}

		if (status & USB_DWC2_DOEPINT_XFERCOMPL) {
			dwc2_handle_out_xfercompl(dev, n);
		}
	}

	/* Clear OEPINT interrupt */
	sys_write32(USB_DWC2_GINTSTS_OEPINT, (mem_addr_t)&base->gintsts);
}
2660
/* In DWC2 otg context incomplete isochronous IN transfer means that the host
 * did not issue IN token to at least one isochronous endpoint and software
 * has to find on which endpoints the data is no longer valid and discard it.
 */
static void dwc2_handle_incompisoin(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gintsts_reg = (mem_addr_t)&base->gintsts;
	/* An enabled, active ISO IN endpoint matching this mask/value pair
	 * was not serviced by the host in this (micro)frame.
	 */
	const uint32_t mask =
		USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPTYPE_MASK |
		USB_DWC2_DEPCTL_USBACTEP;
	const uint32_t val =
		USB_DWC2_DEPCTL_EPENA |
		usb_dwc2_set_depctl_eptype(USB_DWC2_DEPCTL_EPTYPE_ISO) |
		USB_DWC2_DEPCTL_USBACTEP;

	for (uint8_t i = 1U; i < priv->numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_IN ||
		    epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			mem_addr_t diepctl_reg = dwc2_get_dxepctl_reg(dev, i | USB_EP_DIR_IN);
			uint32_t diepctl;

			diepctl = sys_read32(diepctl_reg);

			/* Check if host did not collect the ISO IN data */
			if ((diepctl & mask) == val) {
				struct udc_ep_config *cfg;
				struct net_buf *buf;

				cfg = udc_get_ep_cfg(dev, i | USB_EP_DIR_IN);
				__ASSERT_NO_MSG(cfg && cfg->stat.enabled &&
						dwc2_ep_is_iso(cfg));

				udc_dwc2_ep_disable(dev, cfg, false);

				buf = udc_buf_get(dev, cfg->addr);
				if (buf) {
					/* Data is no longer relevant */
					udc_submit_ep_event(dev, buf, 0);

					/* Try to queue next packet before SOF */
					dwc2_handle_xfer_next(dev, cfg);
				}
			}
		}
	}

	sys_write32(USB_DWC2_GINTSTS_INCOMPISOIN, gintsts_reg);
}
2714
2715 /* In DWC2 otg context incomplete isochronous OUT transfer means that the host
2716 * did not issue OUT token to at least one isochronous endpoint and software has
2717 * to find on which endpoint it didn't receive any data and let the stack know.
2718 */
static void dwc2_handle_incompisoout(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gintsts_reg = (mem_addr_t)&base->gintsts;
	/* An enabled, active ISO OUT endpoint matching this mask/value pair
	 * (including the expected data PID for the current frame parity)
	 * did not receive any data in this (micro)frame.
	 */
	const uint32_t mask =
		USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPTYPE_MASK |
		USB_DWC2_DEPCTL_DPID | USB_DWC2_DEPCTL_USBACTEP;
	const uint32_t val =
		USB_DWC2_DEPCTL_EPENA |
		usb_dwc2_set_depctl_eptype(USB_DWC2_DEPCTL_EPTYPE_ISO) |
		((priv->sof_num & 1) ? USB_DWC2_DEPCTL_DPID : 0) |
		USB_DWC2_DEPCTL_USBACTEP;

	for (uint8_t i = 1U; i < priv->numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT ||
		    epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, i);
			uint32_t doepctl;

			doepctl = sys_read32(doepctl_reg);

			/* Check if endpoint didn't receive ISO OUT data */
			if ((doepctl & mask) == val) {
				struct udc_ep_config *cfg;
				struct net_buf *buf;

				cfg = udc_get_ep_cfg(dev, i);
				__ASSERT_NO_MSG(cfg && cfg->stat.enabled &&
						dwc2_ep_is_iso(cfg));

				udc_dwc2_ep_disable(dev, cfg, false);

				buf = udc_buf_get(dev, cfg->addr);
				if (buf) {
					/* Let the stack know: zero-length event */
					udc_submit_ep_event(dev, buf, 0);
				}
			}
		}
	}

	sys_write32(USB_DWC2_GINTSTS_INCOMPISOOUT, gintsts_reg);
}
2765
/* Top-half interrupt handler.
 *
 * When the core is hibernated only the GPWRDN wakeup/reset sources are
 * examined and forwarded to the bottom-half thread. Otherwise GINTSTS is
 * polled until all unmasked sources are serviced; heavier work is deferred
 * to the driver thread via drv_evt.
 */
static void udc_dwc2_isr_handler(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gintsts_reg = (mem_addr_t)&base->gintsts;
	uint32_t int_status;
	uint32_t gintmsk;

	if (priv->hibernated) {
		uint32_t gpwrdn = sys_read32((mem_addr_t)&base->gpwrdn);
		bool reset, resume = false;

		/* Clear interrupts */
		sys_write32(gpwrdn, (mem_addr_t)&base->gpwrdn);

		if (gpwrdn & USB_DWC2_GPWRDN_LNSTSCHNG) {
			/* Line state DM=1/DP=0 indicates host resume signaling */
			resume = usb_dwc2_get_gpwrdn_linestate(gpwrdn) ==
				 USB_DWC2_GPWRDN_LINESTATE_DM1DP0;
		}

		reset = gpwrdn & USB_DWC2_GPWRDN_RESETDETECTED;

		/* Actual hibernation exit is done in the driver thread */
		if (resume) {
			k_event_post(&priv->drv_evt,
				     BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_HOST_RESUME));
		}

		if (reset) {
			k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_BUS_RESET));
		}

		(void)dwc2_quirk_irq_clear(dev);
		return;
	}

	gintmsk = sys_read32((mem_addr_t)&base->gintmsk);

	/* Read and handle interrupt status register */
	while ((int_status = sys_read32(gintsts_reg) & gintmsk)) {

		LOG_DBG("GINTSTS 0x%x", int_status);

		if (int_status & USB_DWC2_GINTSTS_SOF) {
			uint32_t dsts;

			/* Clear USB SOF interrupt. */
			sys_write32(USB_DWC2_GINTSTS_SOF, gintsts_reg);

			/* Track frame number for ISO DPID checks */
			dsts = sys_read32((mem_addr_t)&base->dsts);
			priv->sof_num = usb_dwc2_get_dsts_soffn(dsts);
			udc_submit_event(dev, UDC_EVT_SOF, 0);
		}

		if (int_status & USB_DWC2_GINTSTS_USBRST) {
			/* Clear and handle USB Reset interrupt. */
			sys_write32(USB_DWC2_GINTSTS_USBRST, gintsts_reg);
			dwc2_on_bus_reset(dev);
			LOG_DBG("USB Reset interrupt");

			cancel_hibernation_request(priv);
		}

		if (int_status & USB_DWC2_GINTSTS_ENUMDONE) {
			/* Clear and handle Enumeration Done interrupt. */
			sys_write32(USB_DWC2_GINTSTS_ENUMDONE, gintsts_reg);
			dwc2_handle_enumdone(dev);
			udc_submit_event(dev, UDC_EVT_RESET, 0);
		}

		if (int_status & USB_DWC2_GINTSTS_WKUPINT) {
			/* Clear Resume/Remote Wakeup Detected interrupt. */
			sys_write32(USB_DWC2_GINTSTS_WKUPINT, gintsts_reg);
			udc_set_suspended(dev, false);
			udc_submit_event(dev, UDC_EVT_RESUME, 0);

			cancel_hibernation_request(priv);
		}

		if (int_status & USB_DWC2_GINTSTS_IEPINT) {
			/* Handle IN Endpoints interrupt */
			dwc2_handle_iepint(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_RXFLVL) {
			/* Handle RxFIFO Non-Empty interrupt */
			dwc2_handle_rxflvl(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_OEPINT) {
			/* Handle OUT Endpoints interrupt */
			dwc2_handle_oepint(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_INCOMPISOIN) {
			dwc2_handle_incompisoin(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_INCOMPISOOUT) {
			dwc2_handle_incompisoout(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_USBSUSP) {
			/* Clear USB Suspend interrupt. */
			sys_write32(USB_DWC2_GINTSTS_USBSUSP, gintsts_reg);

			/* Notify the stack */
			udc_set_suspended(dev, true);
			udc_submit_event(dev, UDC_EVT_SUSPEND, 0);

			request_hibernation(priv);
		}
	}

	(void)dwc2_quirk_irq_clear(dev);
}
2882
/* Exit hibernation in the driver thread context.
 *
 * @param dev       UDC device
 * @param rwup      true when exiting due to device-initiated Remote Wakeup
 * @param bus_reset true when exiting due to a host-initiated bus reset
 */
static void dwc2_handle_hibernation_exit(const struct device *dev,
					 bool rwup, bool bus_reset)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);

	dwc2_exit_hibernation(dev, rwup, bus_reset);
	dwc2_restore_device_registers(dev, rwup);

	priv->hibernated = 0;
	if (!rwup) {
		LOG_DBG("Hibernation exit complete");
	}

	/* Let stack know we are no longer suspended */
	udc_set_suspended(dev, false);
	udc_submit_event(dev, UDC_EVT_RESUME, 0);

	if (rwup) {
		/* Resume has been driven for at least 1 ms now, do 1 ms more to
		 * have sufficient margin.
		 */
		k_msleep(1);

		sys_clear_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_RMTWKUPSIG);
		LOG_DBG("Hibernation exit on Remote Wakeup complete");
	}

	if (!bus_reset) {
		/* Resume any pending transfer handling */
		if (k_event_test(&priv->xfer_new, UINT32_MAX)) {
			k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));
		}

		if (k_event_test(&priv->xfer_finished, UINT32_MAX)) {
			k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_EP_FINISHED));
		}
	} else {
		/* Clear all pending transfers */
		k_event_clear(&priv->xfer_new, UINT32_MAX);
		k_event_clear(&priv->xfer_finished, UINT32_MAX);
		dwc2_on_bus_reset(dev);
	}
}
2930
pull_next_ep_from_bitmap(uint32_t * bitmap)2931 static uint8_t pull_next_ep_from_bitmap(uint32_t *bitmap)
2932 {
2933 unsigned int bit;
2934
2935 __ASSERT_NO_MSG(bitmap && *bitmap);
2936
2937 bit = find_lsb_set(*bitmap) - 1;
2938 *bitmap &= ~BIT(bit);
2939
2940 if (bit >= 16) {
2941 return USB_EP_DIR_OUT | (bit - 16);
2942 } else {
2943 return USB_EP_DIR_IN | bit;
2944 }
2945 }
2946
/* Bottom-half of the interrupt handling, running in a dedicated driver
 * thread under the internal UDC lock. Waits for events posted by the ISR
 * (and by the API calls) in priv->drv_evt and dispatches them. This is
 * also the only place where a new transfer can be fed to the hardware.
 */
static ALWAYS_INLINE void dwc2_thread_handler(void *const arg)
{
	const struct device *dev = (const struct device *)arg;
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	const struct udc_dwc2_config *const config = dev->config;
	struct udc_ep_config *ep_cfg;
	const uint32_t hibernation_exit_events = (BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_BUS_RESET) |
						  BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_HOST_RESUME));
	uint32_t prev;
	uint32_t evt;
	uint32_t eps;
	uint8_t ep;

	/* This is the bottom-half of the ISR handler and the place where
	 * a new transfer can be fed.
	 */
	evt = k_event_wait(&priv->drv_evt, UINT32_MAX, false, K_FOREVER);

	udc_lock_internal(dev, K_FOREVER);

	if (evt & BIT(DWC2_DRV_EVT_XFER)) {
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));

		if (!priv->hibernated) {
			LOG_DBG("New transfer(s) in the queue");
			/* Snapshot and clear the per-endpoint "new transfer"
			 * bitmap atomically with respect to the ISR.
			 */
			eps = k_event_test(&priv->xfer_new, UINT32_MAX);
			k_event_clear(&priv->xfer_new, eps);
		} else {
			/* Events will be handled after hibernation exit */
			eps = 0;
		}

		while (eps) {
			ep = pull_next_ep_from_bitmap(&eps);
			ep_cfg = udc_get_ep_cfg(dev, ep);

			if (!udc_ep_is_busy(dev, ep_cfg->addr)) {
				dwc2_handle_xfer_next(dev, ep_cfg);
			} else {
				LOG_DBG("ep 0x%02x busy", ep_cfg->addr);
			}
		}
	}

	if (evt & BIT(DWC2_DRV_EVT_EP_FINISHED)) {
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_EP_FINISHED));

		if (!priv->hibernated) {
			eps = k_event_test(&priv->xfer_finished, UINT32_MAX);
			k_event_clear(&priv->xfer_finished, eps);
		} else {
			/* Events will be handled after hibernation exit */
			eps = 0;
		}

		while (eps) {
			ep = pull_next_ep_from_bitmap(&eps);
			ep_cfg = udc_get_ep_cfg(dev, ep);

			/* Complete the finished transaction, then try to feed
			 * the next queued transfer on the same endpoint.
			 */
			if (USB_EP_DIR_IS_IN(ep)) {
				LOG_DBG("DIN event ep 0x%02x", ep);
				dwc2_handle_evt_din(dev, ep_cfg);
			} else {
				LOG_DBG("DOUT event ep 0x%02x", ep_cfg->addr);
				dwc2_handle_evt_dout(dev, ep_cfg);
			}

			if (!udc_ep_is_busy(dev, ep_cfg->addr)) {
				dwc2_handle_xfer_next(dev, ep_cfg);
			} else {
				LOG_DBG("ep 0x%02x busy", ep_cfg->addr);
			}
		}
	}

	if (evt & BIT(DWC2_DRV_EVT_SETUP)) {
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_SETUP));

		LOG_DBG("SETUP event");
		dwc2_handle_evt_setup(dev);
	}

	if (evt & BIT(DWC2_DRV_EVT_REMOTE_WAKEUP)) {
		/* Remote Wakeup supersedes a pending hibernation request, so
		 * clear both events together.
		 */
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_REMOTE_WAKEUP) |
					      BIT(DWC2_DRV_EVT_ENTER_HIBERNATION));

		if (priv->hibernated) {
			/* Exit hibernation with IRQs masked so the restore
			 * sequence is not interrupted.
			 */
			config->irq_disable_func(dev);

			dwc2_handle_hibernation_exit(dev, true, false);

			config->irq_enable_func(dev);
		} else {
			sys_set_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_RMTWKUPSIG);

			udc_set_suspended(dev, false);
			udc_submit_event(dev, UDC_EVT_RESUME, 0);

			/* Drive resume for 2 ms to have sufficient margin */
			k_msleep(2);

			sys_clear_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_RMTWKUPSIG);
		}
	}

	if (evt & BIT(DWC2_DRV_EVT_ENTER_HIBERNATION)) {
		config->irq_disable_func(dev);

		/* Re-read the event under masked IRQs: the ISR may have
		 * cancelled the request in the meantime.
		 */
		prev = k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_ENTER_HIBERNATION));

		/* Only enter hibernation if IRQ did not cancel the request */
		if (prev & BIT(DWC2_DRV_EVT_ENTER_HIBERNATION)) {
			dwc2_enter_hibernation(dev);
		}

		config->irq_enable_func(dev);
	}

	if (evt & hibernation_exit_events) {
		bool bus_reset;

		LOG_DBG("Hibernation exit event");
		config->irq_disable_func(dev);

		prev = k_event_clear(&priv->drv_evt, hibernation_exit_events);
		bus_reset = prev & BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_BUS_RESET);

		/* Skip if another event already brought the core out */
		if (priv->hibernated) {
			dwc2_handle_hibernation_exit(dev, false, bus_reset);
		}

		config->irq_enable_func(dev);
	}

	if (evt & BIT(DWC2_DRV_EVT_ENUM_DONE)) {
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_ENUM_DONE));

		/* Make sure control endpoint is ready for the SETUP stage */
		dwc2_ensure_setup_ready(dev);
	}

	udc_unlock_internal(dev);
}
3090
3091 static const struct udc_api udc_dwc2_api = {
3092 .lock = udc_dwc2_lock,
3093 .unlock = udc_dwc2_unlock,
3094 .device_speed = udc_dwc2_device_speed,
3095 .init = udc_dwc2_init,
3096 .enable = udc_dwc2_enable,
3097 .disable = udc_dwc2_disable,
3098 .shutdown = udc_dwc2_shutdown,
3099 .set_address = udc_dwc2_set_address,
3100 .test_mode = udc_dwc2_test_mode,
3101 .host_wakeup = udc_dwc2_host_wakeup,
3102 .ep_enable = udc_dwc2_ep_activate,
3103 .ep_disable = udc_dwc2_ep_deactivate,
3104 .ep_set_halt = udc_dwc2_ep_set_halt,
3105 .ep_clear_halt = udc_dwc2_ep_clear_halt,
3106 .ep_enqueue = udc_dwc2_ep_enqueue,
3107 .ep_dequeue = udc_dwc2_ep_dequeue,
3108 };
3109
#define DT_DRV_COMPAT snps_dwc2

/* Pointer to the vendor quirks structure when the instance node has a
 * second (vendor-specific) compatible, NULL otherwise.
 */
#define UDC_DWC2_VENDOR_QUIRK_GET(n)						\
	COND_CODE_1(DT_NODE_VENDOR_HAS_IDX(DT_DRV_INST(n), 1),			\
		    (&dwc2_vendor_quirks_##n),					\
		    (NULL))

/* Core register base address: either the sole reg entry or the one
 * named "core" when the node has multiple reg entries.
 */
#define UDC_DWC2_DT_INST_REG_ADDR(n)						\
	COND_CODE_1(DT_NUM_REGS(DT_DRV_INST(n)), (DT_INST_REG_ADDR(n)),		\
		    (DT_INST_REG_ADDR_BY_NAME(n, core)))

/* Define pinctrl state only when the instance provides a "default" state. */
#define UDC_DWC2_PINCTRL_DT_INST_DEFINE(n)					\
	COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default),			\
		    (PINCTRL_DT_INST_DEFINE(n)), ())

#define UDC_DWC2_PINCTRL_DT_INST_DEV_CONFIG_GET(n)				\
	COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default),			\
		    ((void *)PINCTRL_DT_INST_DEV_CONFIG_GET(n)), (NULL))

/* Interrupt flags: use the "type" IRQ cell when the devicetree binding
 * provides one, otherwise 0.
 */
#define UDC_DWC2_IRQ_FLAGS_TYPE0(n)	0
#define UDC_DWC2_IRQ_FLAGS_TYPE1(n)	DT_INST_IRQ(n, type)
#define DW_IRQ_FLAGS(n) \
	_CONCAT(UDC_DWC2_IRQ_FLAGS_TYPE, DT_INST_IRQ_HAS_CELL(n, type))(n)

/*
 * A UDC driver should always be implemented as a multi-instance
 * driver, even if your platform does not require it. This macro
 * instantiates, per devicetree node: the driver thread and its stack,
 * IRQ connect/enable/disable helpers, endpoint configuration arrays
 * sized from the num-{in,out}-eps properties, and the config, private
 * and UDC data structures for the device.
 */
#define UDC_DWC2_DEVICE_DEFINE(n)						\
	UDC_DWC2_PINCTRL_DT_INST_DEFINE(n);					\
										\
	K_THREAD_STACK_DEFINE(udc_dwc2_stack_##n, CONFIG_UDC_DWC2_STACK_SIZE);	\
										\
	static void udc_dwc2_thread_##n(void *dev, void *arg1, void *arg2)	\
	{									\
		while (true) {							\
			dwc2_thread_handler(dev);				\
		}								\
	}									\
										\
	static void udc_dwc2_make_thread_##n(const struct device *dev)		\
	{									\
		struct udc_dwc2_data *priv = udc_get_private(dev);		\
										\
		k_thread_create(&priv->thread_data,				\
				udc_dwc2_stack_##n,				\
				K_THREAD_STACK_SIZEOF(udc_dwc2_stack_##n),	\
				udc_dwc2_thread_##n,				\
				(void *)dev, NULL, NULL,			\
				K_PRIO_COOP(CONFIG_UDC_DWC2_THREAD_PRIORITY),	\
				K_ESSENTIAL,					\
				K_NO_WAIT);					\
		k_thread_name_set(&priv->thread_data, dev->name);		\
	}									\
										\
	static void udc_dwc2_irq_enable_func_##n(const struct device *dev)	\
	{									\
		IRQ_CONNECT(DT_INST_IRQN(n),					\
			    DT_INST_IRQ(n, priority),				\
			    udc_dwc2_isr_handler,				\
			    DEVICE_DT_INST_GET(n),				\
			    DW_IRQ_FLAGS(n));					\
										\
		irq_enable(DT_INST_IRQN(n));					\
	}									\
										\
	static void udc_dwc2_irq_disable_func_##n(const struct device *dev)	\
	{									\
		irq_disable(DT_INST_IRQN(n));					\
	}									\
										\
	static struct udc_ep_config ep_cfg_out[DT_INST_PROP(n, num_out_eps)];	\
	static struct udc_ep_config ep_cfg_in[DT_INST_PROP(n, num_in_eps)];	\
										\
	static const struct udc_dwc2_config udc_dwc2_config_##n = {		\
		.num_out_eps = DT_INST_PROP(n, num_out_eps),			\
		.num_in_eps = DT_INST_PROP(n, num_in_eps),			\
		.ep_cfg_in = ep_cfg_in,						\
		.ep_cfg_out = ep_cfg_out,					\
		.make_thread = udc_dwc2_make_thread_##n,			\
		.base = (struct usb_dwc2_reg *)UDC_DWC2_DT_INST_REG_ADDR(n),	\
		.pcfg = UDC_DWC2_PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
		.irq_enable_func = udc_dwc2_irq_enable_func_##n,		\
		.irq_disable_func = udc_dwc2_irq_disable_func_##n,		\
		.quirks = UDC_DWC2_VENDOR_QUIRK_GET(n),				\
		.ghwcfg1 = DT_INST_PROP(n, ghwcfg1),				\
		.ghwcfg2 = DT_INST_PROP(n, ghwcfg2),				\
		.ghwcfg4 = DT_INST_PROP(n, ghwcfg4),				\
	};									\
										\
	static struct udc_dwc2_data udc_priv_##n = {				\
	};									\
										\
	static struct udc_data udc_data_##n = {					\
		.mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex),		\
		.priv = &udc_priv_##n,						\
	};									\
										\
	DEVICE_DT_INST_DEFINE(n, dwc2_driver_preinit, NULL,			\
			      &udc_data_##n, &udc_dwc2_config_##n,		\
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,	\
			      &udc_dwc2_api);

/* Instantiate the driver for every enabled snps,dwc2 devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(UDC_DWC2_DEVICE_DEFINE)
3214