1 /*
2 * Copyright (c) 2023 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include "udc_common.h"
8 #include "udc_dwc2.h"
9
10 #include <string.h>
11 #include <stdio.h>
12
13 #include <zephyr/cache.h>
14 #include <zephyr/kernel.h>
15 #include <zephyr/devicetree.h>
16 #include <zephyr/sys/util.h>
17 #include <zephyr/sys/sys_io.h>
18 #include <zephyr/sys/byteorder.h>
19 #include <zephyr/drivers/usb/udc.h>
20 #include <zephyr/usb/usb_ch9.h>
21 #include <usb_dwc2_hw.h>
22
23 #include <zephyr/logging/log.h>
24 LOG_MODULE_REGISTER(udc_dwc2, CONFIG_UDC_DRIVER_LOG_LEVEL);
25 #include "udc_dwc2_vendor_quirks.h"
26
/* Events processed by the driver thread (signalled via the drv_evt k_event
 * in struct udc_dwc2_data).
 */
enum dwc2_drv_event_type {
	/* Trigger next transfer, must not be used for control OUT */
	DWC2_DRV_EVT_XFER,
	/* Setup packet received */
	DWC2_DRV_EVT_SETUP,
	/* Transaction on endpoint is finished */
	DWC2_DRV_EVT_EP_FINISHED,
	/* Remote Wakeup should be initiated */
	DWC2_DRV_EVT_REMOTE_WAKEUP,
	/* Core should enter hibernation */
	DWC2_DRV_EVT_ENTER_HIBERNATION,
	/* Core should exit hibernation due to bus reset */
	DWC2_DRV_EVT_HIBERNATION_EXIT_BUS_RESET,
	/* Core should exit hibernation due to host resume */
	DWC2_DRV_EVT_HIBERNATION_EXIT_HOST_RESUME,
};
43
/* Minimum RX FIFO size in 32-bit words considering the largest used OUT packet
 * of 512 bytes. The value must be adjusted according to the number of OUT
 * endpoints.
 */
#define UDC_DWC2_GRXFSIZ_FS_DEFAULT (15U + 512U/4U)
/* Default Rx FIFO size in 32-bit words calculated to support High-Speed with:
 * * 1 control endpoint in Completer/Buffer DMA mode: 13 locations
 * * Global OUT NAK: 1 location
 * * Space for 3 * 1024 packets: ((1024/4) + 1) * 3 = 774 locations
 * Driver adds 2 locations for each OUT endpoint to this value.
 */
#define UDC_DWC2_GRXFSIZ_HS_DEFAULT (13 + 1 + 774)

/* TX FIFO0 depth in 32-bit words (used by control IN endpoint)
 * Try 2 * bMaxPacketSize0 to allow simultaneous operation with a fallback to
 * whatever is available when 2 * bMaxPacketSize0 is not possible.
 */
#define UDC_DWC2_FIFO0_DEPTH (2 * 16U)

/* Get Data FIFO access register; each endpoint FIFO window is 0x1000 bytes
 * apart. Arguments are fully parenthesized so the macro is safe to use with
 * non-trivial expressions.
 */
#define UDC_DWC2_EP_FIFO(base, idx) ((mem_addr_t)(base) + 0x1000 * ((idx) + 1))
65
/* Power saving strategy while the bus is suspended */
enum dwc2_suspend_type {
	/* Keep the core running; no extra power saving */
	DWC2_SUSPEND_NO_POWER_SAVING,
	/* Power down the core; PMU logic detects wakeup/reset (see
	 * dwc2_enter_hibernation)
	 */
	DWC2_SUSPEND_HIBERNATION,
};
70
/* Registers that have to be stored before Partial Power Down or Hibernation.
 * The layout mirrors the register groups restored in
 * dwc2_restore_essential_registers() and dwc2_restore_device_registers().
 */
struct dwc2_reg_backup {
	/* Core global registers */
	uint32_t gotgctl;
	uint32_t gahbcfg;
	uint32_t gusbcfg;
	uint32_t gintmsk;
	uint32_t grxfsiz;
	uint32_t gnptxfsiz;
	uint32_t gi2cctl;
	uint32_t glpmcfg;
	uint32_t gdfifocfg;
	/* Device IN endpoint TxFIFO size registers (FIFO 1..15) */
	union {
		uint32_t dptxfsiz[15];
		uint32_t dieptxf[15];
	};
	/* Device mode registers */
	uint32_t dcfg;
	uint32_t dctl;
	uint32_t diepmsk;
	uint32_t doepmsk;
	uint32_t daintmsk;
	/* Per-endpoint device registers */
	uint32_t diepctl[16];
	uint32_t dieptsiz[16];
	uint32_t diepdma[16];
	uint32_t doepctl[16];
	uint32_t doeptsiz[16];
	uint32_t doepdma[16];
	/* Power and clock gating control */
	uint32_t pcgcctl;
};
99
/* Driver private data per instance */
struct udc_dwc2_data {
	struct k_thread thread_data;
	/* Main events the driver thread waits for */
	struct k_event drv_evt;
	/* Transfer triggers (IN on bits 0-15, OUT on bits 16-31) */
	struct k_event xfer_new;
	/* Finished transactions (IN on bits 0-15, OUT on bits 16-31) */
	struct k_event xfer_finished;
	/* Register snapshot preserved across hibernation */
	struct dwc2_reg_backup backup;
	/* Cached GHWCFG1 value (per-endpoint direction configuration) */
	uint32_t ghwcfg1;
	/* NOTE(review): presumably a bitmap of TxFIFO numbers in use —
	 * assignment not visible in this chunk, confirm
	 */
	uint32_t txf_set;
	/* Limits returned for non-control IN endpoints, see
	 * dwc2_get_iept_xfersize()/dwc2_get_iept_pktctn()
	 */
	uint32_t max_xfersize;
	uint32_t max_pktcnt;
	/* Length programmed for the in-progress IN transfer, per endpoint */
	uint32_t tx_len[16];
	/* DOEPTSIZ value programmed for the OUT transfer, per endpoint */
	uint32_t rx_siz[16];
	/* NOTE(review): FIFO depths in 32-bit words — set outside this
	 * chunk, confirm
	 */
	uint16_t dfifodepth;
	uint16_t rxfifo_depth;
	uint16_t max_txfifo_depth[16];
	/* SOF frame number; low bit selects Even/Odd (micro-)frame for ISO */
	uint16_t sof_num;
	/* Configuration flags */
	unsigned int dynfifosizing : 1;
	/* Buffer DMA mode (when clear, Completer mode FIFO access is used) */
	unsigned int bufferdma : 1;
	unsigned int syncrst : 1;
	/* Runtime state flags */
	/* Set while the core is in hibernation */
	unsigned int hibernated : 1;
	unsigned int enumdone : 1;
	/* NOTE(review): looks like the enumerated speed field — confirm */
	unsigned int enumspd : 2;
	enum dwc2_suspend_type suspend_type;
	/* Number of endpoints including control endpoint */
	uint8_t numdeveps;
	/* Number of IN endpoints including control endpoint */
	uint8_t ineps;
	/* Number of OUT endpoints including control endpoint */
	uint8_t outeps;
	/* Raw SETUP packet bytes, consumed by dwc2_handle_evt_setup() */
	uint8_t setup[8];
};
137
138 #if defined(CONFIG_PINCTRL)
139 #include <zephyr/drivers/pinctrl.h>
140
dwc2_init_pinctrl(const struct device * dev)141 static int dwc2_init_pinctrl(const struct device *dev)
142 {
143 const struct udc_dwc2_config *const config = dev->config;
144 const struct pinctrl_dev_config *const pcfg = config->pcfg;
145 int ret = 0;
146
147 if (pcfg == NULL) {
148 LOG_INF("Skip pinctrl configuration");
149 return 0;
150 }
151
152 ret = pinctrl_apply_state(pcfg, PINCTRL_STATE_DEFAULT);
153 if (ret) {
154 LOG_ERR("Failed to apply default pinctrl state (%d)", ret);
155 }
156
157 LOG_DBG("Apply pinctrl");
158
159 return ret;
160 }
161 #else
/* Stub used when pin control support is disabled; nothing to configure. */
static int dwc2_init_pinctrl(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}
168 #endif
169
dwc2_get_base(const struct device * dev)170 static inline struct usb_dwc2_reg *dwc2_get_base(const struct device *dev)
171 {
172 const struct udc_dwc2_config *const config = dev->config;
173
174 return config->base;
175 }
176
/* Busy-wait up to 100 ms for a register bit to become set.
 *
 * Gives up early when the PHY clock is stopped (the bit can only be set
 * while the PHY is actively clocked) and logs an error on timeout.
 */
static void dwc2_wait_for_bit(const struct device *dev,
			      mem_addr_t addr, uint32_t bit)
{
	const k_timepoint_t deadline = sys_timepoint_calc(K_MSEC(100));

	/* This could potentially be converted to use proper synchronization
	 * primitives instead of busy looping, but the number of interrupt bits
	 * this function can be waiting for is rather high.
	 *
	 * Busy looping is most likely fine unless profiling shows otherwise.
	 */
	for (;;) {
		if (sys_read32(addr) & bit) {
			return;
		}

		if (dwc2_quirk_is_phy_clk_off(dev)) {
			/* No point in waiting, because the bit can only be set
			 * when the PHY is actively clocked.
			 */
			return;
		}

		if (sys_timepoint_expired(deadline)) {
			LOG_ERR("Timeout waiting for bit 0x%08X at 0x%08X",
				bit, (uint32_t)addr);
			return;
		}
	}
}
203
204 /* Get DOEPCTLn or DIEPCTLn register address */
dwc2_get_dxepctl_reg(const struct device * dev,const uint8_t ep)205 static mem_addr_t dwc2_get_dxepctl_reg(const struct device *dev, const uint8_t ep)
206 {
207 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
208 uint8_t ep_idx = USB_EP_GET_IDX(ep);
209
210 if (USB_EP_DIR_IS_OUT(ep)) {
211 return (mem_addr_t)&base->out_ep[ep_idx].doepctl;
212 } else {
213 return (mem_addr_t)&base->in_ep[ep_idx].diepctl;
214 }
215 }
216
217 /* Get available FIFO space in bytes */
dwc2_ftx_avail(const struct device * dev,const uint32_t idx)218 static uint32_t dwc2_ftx_avail(const struct device *dev, const uint32_t idx)
219 {
220 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
221 mem_addr_t reg = (mem_addr_t)&base->in_ep[idx].dtxfsts;
222 uint32_t dtxfsts;
223
224 dtxfsts = sys_read32(reg);
225
226 return usb_dwc2_get_dtxfsts_ineptxfspcavail(dtxfsts) * 4;
227 }
228
dwc2_get_iept_pktctn(const struct device * dev,const uint32_t idx)229 static uint32_t dwc2_get_iept_pktctn(const struct device *dev, const uint32_t idx)
230 {
231 struct udc_dwc2_data *const priv = udc_get_private(dev);
232
233 if (idx == 0) {
234 return usb_dwc2_get_dieptsiz0_pktcnt(UINT32_MAX);
235 } else {
236 return priv->max_pktcnt;
237 }
238 }
239
dwc2_get_iept_xfersize(const struct device * dev,const uint32_t idx)240 static uint32_t dwc2_get_iept_xfersize(const struct device *dev, const uint32_t idx)
241 {
242 struct udc_dwc2_data *const priv = udc_get_private(dev);
243
244 if (idx == 0) {
245 return usb_dwc2_get_dieptsiz0_xfersize(UINT32_MAX);
246 } else {
247 return priv->max_xfersize;
248 }
249 }
250
dwc2_flush_rx_fifo(const struct device * dev)251 static void dwc2_flush_rx_fifo(const struct device *dev)
252 {
253 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
254 mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;
255
256 sys_write32(USB_DWC2_GRSTCTL_RXFFLSH, grstctl_reg);
257 while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_RXFFLSH) {
258 }
259 }
260
dwc2_flush_tx_fifo(const struct device * dev,const uint8_t fnum)261 static void dwc2_flush_tx_fifo(const struct device *dev, const uint8_t fnum)
262 {
263 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
264 mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;
265 uint32_t grstctl;
266
267 grstctl = usb_dwc2_set_grstctl_txfnum(fnum) | USB_DWC2_GRSTCTL_TXFFLSH;
268
269 sys_write32(grstctl, grstctl_reg);
270 while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_TXFFLSH) {
271 }
272 }
273
274 /* Return TX FIFOi depth in 32-bit words (i = f_idx + 1) */
dwc2_get_txfdep(const struct device * dev,const uint32_t f_idx)275 static uint32_t dwc2_get_txfdep(const struct device *dev, const uint32_t f_idx)
276 {
277 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
278 uint32_t dieptxf;
279
280 dieptxf = sys_read32((mem_addr_t)&base->dieptxf[f_idx]);
281
282 return usb_dwc2_get_dieptxf_inepntxfdep(dieptxf);
283 }
284
285 /* Return TX FIFOi address (i = f_idx + 1) */
dwc2_get_txfaddr(const struct device * dev,const uint32_t f_idx)286 static uint32_t dwc2_get_txfaddr(const struct device *dev, const uint32_t f_idx)
287 {
288 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
289 uint32_t dieptxf;
290
291 dieptxf = sys_read32((mem_addr_t)&base->dieptxf[f_idx]);
292
293 return usb_dwc2_get_dieptxf_inepntxfstaddr(dieptxf);
294 }
295
296 /* Set TX FIFOi address and depth (i = f_idx + 1) */
dwc2_set_txf(const struct device * dev,const uint32_t f_idx,const uint32_t dep,const uint32_t addr)297 static void dwc2_set_txf(const struct device *dev, const uint32_t f_idx,
298 const uint32_t dep, const uint32_t addr)
299 {
300 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
301 uint32_t dieptxf;
302
303 dieptxf = usb_dwc2_set_dieptxf_inepntxfdep(dep) |
304 usb_dwc2_set_dieptxf_inepntxfstaddr(addr);
305
306 sys_write32(dieptxf, (mem_addr_t)&base->dieptxf[f_idx]);
307 }
308
309 /* Enable/disable endpoint interrupt */
dwc2_set_epint(const struct device * dev,struct udc_ep_config * const cfg,const bool enabled)310 static void dwc2_set_epint(const struct device *dev,
311 struct udc_ep_config *const cfg, const bool enabled)
312 {
313 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
314 mem_addr_t reg = (mem_addr_t)&base->daintmsk;
315 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
316 uint32_t epmsk;
317
318 if (USB_EP_DIR_IS_IN(cfg->addr)) {
319 epmsk = USB_DWC2_DAINT_INEPINT(ep_idx);
320 } else {
321 epmsk = USB_DWC2_DAINT_OUTEPINT(ep_idx);
322 }
323
324 if (enabled) {
325 sys_set_bits(reg, epmsk);
326 } else {
327 sys_clear_bits(reg, epmsk);
328 }
329 }
330
dwc2_ep_is_periodic(struct udc_ep_config * const cfg)331 static bool dwc2_ep_is_periodic(struct udc_ep_config *const cfg)
332 {
333 switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) {
334 case USB_EP_TYPE_INTERRUPT:
335 __fallthrough;
336 case USB_EP_TYPE_ISO:
337 return true;
338 default:
339 return false;
340 }
341 }
342
/* True when the endpoint transfer type is isochronous */
static bool dwc2_ep_is_iso(struct udc_ep_config *const cfg)
{
	return (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) == USB_EP_TYPE_ISO;
}
347
dwc2_dma_buffer_ok_to_use(const struct device * dev,void * buf,uint32_t xfersize,uint16_t mps)348 static bool dwc2_dma_buffer_ok_to_use(const struct device *dev, void *buf,
349 uint32_t xfersize, uint16_t mps)
350 {
351 ARG_UNUSED(dev);
352
353 if (!IS_ALIGNED(buf, 4)) {
354 LOG_ERR("Buffer not aligned");
355 return false;
356 }
357
358 /* We can only do 1 packet if Max Packet Size is not multiple of 4 */
359 if (unlikely(mps % 4) && (xfersize > USB_MPS_EP_SIZE(mps))) {
360 LOG_ERR("Padding not supported");
361 return false;
362 }
363
364 return true;
365 }
366
/* Program an IN endpoint for the next transfer and, in Completer mode, write
 * the payload into the endpoint TxFIFO. Can be called from ISR context.
 *
 * Returns 0 on success, -EAGAIN when the TxFIFO cannot hold at least one
 * packet of a transfer larger than the FIFO, -ENOTSUP when the buffer cannot
 * be used with Buffer DMA, and -ENOENT when the endpoint is inactive.
 */
static int dwc2_tx_fifo_write(const struct device *dev,
			      struct udc_ep_config *const cfg, struct net_buf *const buf)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);

	mem_addr_t dieptsiz_reg = (mem_addr_t)&base->in_ep[ep_idx].dieptsiz;
	/* TODO: use dwc2_get_dxepctl_reg() */
	mem_addr_t diepctl_reg = (mem_addr_t)&base->in_ep[ep_idx].diepctl;
	mem_addr_t diepint_reg = (mem_addr_t)&base->in_ep[ep_idx].diepint;

	uint32_t diepctl;
	uint32_t max_xfersize, max_pktcnt, pktcnt;
	const uint32_t addnl = USB_MPS_ADDITIONAL_TRANSACTIONS(cfg->mps);
	/* FIFO access width in bytes */
	const size_t d = sizeof(uint32_t);
	unsigned int key;
	uint32_t len;
	const bool is_periodic = dwc2_ep_is_periodic(cfg);
	const bool is_iso = dwc2_ep_is_iso(cfg);

	if (is_iso) {
		/* Isochronous transfers can only be programmed one
		 * (micro-)frame at a time.
		 */
		len = MIN(buf->len, USB_MPS_TO_TPL(cfg->mps));
	} else {
		/* DMA automatically handles packet split. In completer mode,
		 * the value is sanitized below.
		 */
		len = buf->len;
	}

	if (!priv->bufferdma) {
		uint32_t spcavail = dwc2_ftx_avail(dev, ep_idx);
		uint32_t spcperpkt = ROUND_UP(udc_mps_ep_size(cfg), 4);
		uint32_t max_pkts, max_transfer;

		/* Maximum number of packets that can fit in TxFIFO */
		max_pkts = spcavail / spcperpkt;

		/* We can transfer up to max_pkts MPS packets and a short one */
		max_transfer = (max_pkts * udc_mps_ep_size(cfg)) +
			       (spcavail % spcperpkt);

		/* If there is enough space for the transfer, there's no need
		 * to check any additional conditions. If the transfer is larger
		 * than TxFIFO then TxFIFO must be able to hold at least one
		 * packet (for periodic transfers at least the number of packets
		 * per microframe).
		 */
		if ((len > max_transfer) && ((1 + addnl) > max_pkts)) {
			LOG_ERR("ep 0x%02x FIFO space is too low, %u (%u)",
				cfg->addr, spcavail, len);
			return -EAGAIN;
		}

		len = MIN(len, max_transfer);
	}

	if (len != 0U) {
		max_pktcnt = dwc2_get_iept_pktctn(dev, ep_idx);
		max_xfersize = dwc2_get_iept_xfersize(dev, ep_idx);

		if (len > max_xfersize) {
			/*
			 * Avoid short packets if the transfer size cannot be
			 * handled in one set.
			 */
			len = ROUND_DOWN(max_xfersize, USB_MPS_TO_TPL(cfg->mps));
		}

		/*
		 * Determine the number of packets for the current transfer;
		 * if the pktcnt is too large, truncate the actual transfer length.
		 */
		pktcnt = DIV_ROUND_UP(len, udc_mps_ep_size(cfg));
		if (pktcnt > max_pktcnt) {
			pktcnt = ROUND_DOWN(max_pktcnt, (1 + addnl));
			len = pktcnt * udc_mps_ep_size(cfg);
		}
	} else {
		/* ZLP */
		pktcnt = 1U;
	}

	LOG_DBG("Prepare ep 0x%02x xfer len %u pktcnt %u addnl %u",
		cfg->addr, len, pktcnt, addnl);
	priv->tx_len[ep_idx] = len;

	/* Lock and write to endpoint FIFO */
	key = irq_lock();

	/* Set number of packets and transfer size */
	sys_write32((is_periodic ? usb_dwc2_set_dieptsizn_mc(1 + addnl) : 0) |
		    usb_dwc2_set_dieptsizn_pktcnt(pktcnt) |
		    usb_dwc2_set_dieptsizn_xfersize(len), dieptsiz_reg);

	if (priv->bufferdma) {
		if (!dwc2_dma_buffer_ok_to_use(dev, buf->data, len, cfg->mps)) {
			/* Cannot continue unless buffer is bounced. Device will
			 * cease to function. Is fatal error appropriate here?
			 */
			irq_unlock(key);
			return -ENOTSUP;
		}

		sys_write32((uint32_t)buf->data,
			    (mem_addr_t)&base->in_ep[ep_idx].diepdma);

		sys_cache_data_flush_range(buf->data, len);
	}

	diepctl = sys_read32(diepctl_reg);
	if (!(diepctl & USB_DWC2_DEPCTL_USBACTEP)) {
		/* Do not attempt to write data on inactive endpoint, because
		 * no fifo is assigned to inactive endpoint and therefore it is
		 * possible that the write will corrupt other endpoint fifo.
		 */
		irq_unlock(key);
		return -ENOENT;
	}

	if (is_iso) {
		/* Queue transfer on next SOF. TODO: allow stack to explicitly
		 * specify on which (micro-)frame the data should be sent.
		 */
		if (priv->sof_num & 1) {
			diepctl |= USB_DWC2_DEPCTL_SETEVENFR;
		} else {
			diepctl |= USB_DWC2_DEPCTL_SETODDFR;
		}
	}

	/* Clear NAK and set endpoint enable */
	diepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK;
	sys_write32(diepctl, diepctl_reg);

	/* Clear IN Endpoint NAK Effective interrupt in case it was set */
	sys_write32(USB_DWC2_DIEPINT_INEPNAKEFF, diepint_reg);

	if (!priv->bufferdma) {
		const uint8_t *src = buf->data;

		while (pktcnt > 0) {
			uint32_t pktlen = MIN(len, udc_mps_ep_size(cfg));

			/* Pack up to four payload bytes into one little-endian
			 * word; the FIFO window only accepts 32-bit writes.
			 */
			for (uint32_t i = 0UL; i < pktlen; i += d) {
				uint32_t val = src[i];

				if (i + 1 < pktlen) {
					val |= ((uint32_t)src[i + 1UL]) << 8;
				}
				if (i + 2 < pktlen) {
					val |= ((uint32_t)src[i + 2UL]) << 16;
				}
				if (i + 3 < pktlen) {
					val |= ((uint32_t)src[i + 3UL]) << 24;
				}

				sys_write32(val, UDC_DWC2_EP_FIFO(base, ep_idx));
			}

			pktcnt--;
			src += pktlen;
			len -= pktlen;
		}
	}

	irq_unlock(key);

	return 0;
}
541
dwc2_read_fifo(const struct device * dev,const uint8_t ep,struct net_buf * const buf,const size_t size)542 static inline int dwc2_read_fifo(const struct device *dev, const uint8_t ep,
543 struct net_buf *const buf, const size_t size)
544 {
545 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
546 size_t len = buf ? MIN(size, net_buf_tailroom(buf)) : 0;
547 const size_t d = sizeof(uint32_t);
548
549 /* FIFO access is always in 32-bit words */
550
551 for (uint32_t n = 0; n < (len / d); n++) {
552 net_buf_add_le32(buf, sys_read32(UDC_DWC2_EP_FIFO(base, ep)));
553 }
554
555 if (len % d) {
556 uint8_t r[4];
557
558 /* Get the remaining */
559 sys_put_le32(sys_read32(UDC_DWC2_EP_FIFO(base, ep)), r);
560 for (uint32_t i = 0U; i < (len % d); i++) {
561 net_buf_add_u8(buf, r[i]);
562 }
563 }
564
565 if (unlikely(size > len)) {
566 for (uint32_t n = 0; n < DIV_ROUND_UP(size - len, d); n++) {
567 (void)sys_read32(UDC_DWC2_EP_FIFO(base, ep));
568 }
569 }
570
571 return 0;
572 }
573
574 /* Can be called from ISR and we call it only when there is a buffer in the queue */
dwc2_prep_rx(const struct device * dev,struct net_buf * buf,struct udc_ep_config * const cfg)575 static void dwc2_prep_rx(const struct device *dev, struct net_buf *buf,
576 struct udc_ep_config *const cfg)
577 {
578 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
579 struct udc_dwc2_data *const priv = udc_get_private(dev);
580 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
581 mem_addr_t doeptsiz_reg = (mem_addr_t)&base->out_ep[ep_idx].doeptsiz;
582 mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, ep_idx);
583 uint32_t pktcnt;
584 uint32_t doeptsiz;
585 uint32_t doepctl;
586 uint32_t xfersize;
587
588 /* Clear NAK and set endpoint enable */
589 doepctl = sys_read32(doepctl_reg);
590 doepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK;
591
592 if (dwc2_ep_is_iso(cfg)) {
593 xfersize = USB_MPS_TO_TPL(cfg->mps);
594 pktcnt = 1 + USB_MPS_ADDITIONAL_TRANSACTIONS(cfg->mps);
595
596 if (xfersize > net_buf_tailroom(buf)) {
597 LOG_ERR("ISO RX buffer too small");
598 return;
599 }
600
601 /* Set the Even/Odd (micro-)frame appropriately */
602 if (priv->sof_num & 1) {
603 doepctl |= USB_DWC2_DEPCTL_SETEVENFR;
604 } else {
605 doepctl |= USB_DWC2_DEPCTL_SETODDFR;
606 }
607 } else {
608 xfersize = net_buf_tailroom(buf);
609
610 /* Do as many packets in a single transfer as possible */
611 if (xfersize > priv->max_xfersize) {
612 xfersize = ROUND_DOWN(priv->max_xfersize, USB_MPS_TO_TPL(cfg->mps));
613 }
614
615 pktcnt = DIV_ROUND_UP(xfersize, USB_MPS_EP_SIZE(cfg->mps));
616 }
617
618 pktcnt = DIV_ROUND_UP(xfersize, udc_mps_ep_size(cfg));
619 doeptsiz = usb_dwc2_set_doeptsizn_pktcnt(pktcnt) |
620 usb_dwc2_set_doeptsizn_xfersize(xfersize);
621 if (cfg->addr == USB_CONTROL_EP_OUT) {
622 /* Use 1 to allow 8 byte long buffers for SETUP data */
623 doeptsiz |= (1 << USB_DWC2_DOEPTSIZ0_SUPCNT_POS);
624 }
625
626 priv->rx_siz[ep_idx] = doeptsiz;
627 sys_write32(doeptsiz, doeptsiz_reg);
628
629 if (priv->bufferdma) {
630 if (!dwc2_dma_buffer_ok_to_use(dev, buf->data, xfersize, cfg->mps)) {
631 /* Cannot continue unless buffer is bounced. Device will
632 * cease to function. Is fatal error appropriate here?
633 */
634 return;
635 }
636
637 sys_write32((uint32_t)buf->data,
638 (mem_addr_t)&base->out_ep[ep_idx].doepdma);
639
640 sys_cache_data_invd_range(buf->data, xfersize);
641 }
642
643 sys_write32(doepctl, doepctl_reg);
644
645 LOG_INF("Prepare RX 0x%02x doeptsiz 0x%x", cfg->addr, doeptsiz);
646 }
647
/* Start the next queued transfer on the endpoint, if any. On a TxFIFO write
 * failure the buffer is dequeued and reported upstream with -ECONNREFUSED.
 */
static void dwc2_handle_xfer_next(const struct device *dev,
				  struct udc_ep_config *const cfg)
{
	struct net_buf *const buf = udc_buf_peek(dev, cfg->addr);

	if (buf == NULL) {
		return;
	}

	if (USB_EP_DIR_IS_IN(cfg->addr)) {
		const int err = dwc2_tx_fifo_write(dev, cfg, buf);

		if (err != 0) {
			LOG_ERR("Failed to start write to TX FIFO, ep 0x%02x (err: %d)",
				cfg->addr, err);

			if (udc_submit_ep_event(dev, udc_buf_get(dev, cfg->addr),
						-ECONNREFUSED)) {
				LOG_ERR("Failed to submit endpoint event");
			}

			return;
		}
	} else {
		dwc2_prep_rx(dev, buf, cfg);
	}

	udc_ep_set_busy(dev, cfg->addr, true);
}
678
dwc2_ctrl_feed_dout(const struct device * dev,const size_t length)679 static int dwc2_ctrl_feed_dout(const struct device *dev, const size_t length)
680 {
681 struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
682 struct net_buf *buf;
683
684 buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length);
685 if (buf == NULL) {
686 return -ENOMEM;
687 }
688
689 udc_buf_put(ep_cfg, buf);
690 dwc2_prep_rx(dev, buf, ep_cfg);
691 LOG_DBG("feed buf %p", buf);
692
693 return 0;
694 }
695
/* Deliver a received SETUP packet (stored in priv->setup by the ISR) to the
 * UDC stack and pre-feed a control OUT buffer for the next stage.
 */
static int dwc2_handle_evt_setup(const struct device *dev)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct net_buf *buf;
	int err;

	buf = udc_buf_get(dev, USB_CONTROL_EP_OUT);
	if (buf == NULL) {
		LOG_ERR("No buffer queued for control ep");
		return -ENODATA;
	}

	/* Copy the raw 8-byte SETUP data into the queued buffer */
	net_buf_add_mem(buf, priv->setup, sizeof(priv->setup));
	udc_ep_buf_set_setup(buf);
	LOG_HEXDUMP_DBG(buf->data, buf->len, "setup");

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	/* We always allocate and feed buffer large enough for a setup packet. */

	if (udc_ctrl_stage_is_data_out(dev)) {
		/* Allocate and feed buffer for data OUT stage */
		LOG_DBG("s:%p|feed for -out-", buf);

		/* Allocate at least 8 bytes in case the host decides to send
		 * SETUP DATA instead of OUT DATA packet.
		 */
		err = dwc2_ctrl_feed_dout(dev, MAX(udc_data_stage_length(buf), 8));
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}
	} else if (udc_ctrl_stage_is_data_in(dev)) {
		LOG_DBG("s:%p|feed for -in-status", buf);

		err = dwc2_ctrl_feed_dout(dev, 8);
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}

		err = udc_ctrl_submit_s_in_status(dev);
	} else {
		LOG_DBG("s:%p|feed >setup", buf);

		err = dwc2_ctrl_feed_dout(dev, 8);
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}

		err = udc_ctrl_submit_s_status(dev);
	}

	return err;
}
750
/* Complete an OUT transfer: hand the finished buffer to the UDC stack and,
 * for the control endpoint, advance the control transfer state machine while
 * re-feeding a buffer for the next SETUP packet.
 */
static inline int dwc2_handle_evt_dout(const struct device *dev,
				       struct udc_ep_config *const cfg)
{
	struct net_buf *buf;
	int err = 0;

	buf = udc_buf_get(dev, cfg->addr);
	if (buf == NULL) {
		LOG_ERR("No buffer queued for ep 0x%02x", cfg->addr);
		return -ENODATA;
	}

	udc_ep_set_busy(dev, cfg->addr, false);

	if (cfg->addr == USB_CONTROL_EP_OUT) {
		if (udc_ctrl_stage_is_status_out(dev)) {
			/* s-in-status finished */
			LOG_DBG("dout:%p| status, feed >s", buf);

			/* Feed a buffer for the next setup packet */
			err = dwc2_ctrl_feed_dout(dev, 8);
			if (err == -ENOMEM) {
				err = udc_submit_ep_event(dev, buf, err);
			}

			/* Status stage finished, notify upper layer */
			udc_ctrl_submit_status(dev, buf);
		} else {
			/*
			 * For all other cases we feed with a buffer
			 * large enough for setup packet.
			 */
			LOG_DBG("dout:%p| data, feed >s", buf);

			err = dwc2_ctrl_feed_dout(dev, 8);
			if (err == -ENOMEM) {
				err = udc_submit_ep_event(dev, buf, err);
			}
		}

		/* Update to next stage of control transfer */
		udc_ctrl_update_stage(dev, buf);

		if (udc_ctrl_stage_is_status_in(dev)) {
			err = udc_ctrl_submit_s_out_status(dev, buf);
		}
	} else {
		err = udc_submit_ep_event(dev, buf, 0);
	}

	return err;
}
803
/* Complete an IN transfer. Retries Completer-mode writes that could not fit
 * into the TxFIFO in one go, appends the ZLP on control IN when requested,
 * and advances the control transfer state machine for the control endpoint.
 */
static int dwc2_handle_evt_din(const struct device *dev,
			       struct udc_ep_config *const cfg)
{
	struct net_buf *buf;

	buf = udc_buf_peek(dev, cfg->addr);
	if (buf == NULL) {
		LOG_ERR("No buffer for ep 0x%02x", cfg->addr);
		udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
		return -ENOBUFS;
	}

	if (buf->len) {
		/* Looks like we failed to continue in ISR, retry */
		return dwc2_tx_fifo_write(dev, cfg, buf);
	}

	if (cfg->addr == USB_CONTROL_EP_IN && udc_ep_buf_has_zlp(buf)) {
		udc_ep_buf_clear_zlp(buf);
		return dwc2_tx_fifo_write(dev, cfg, buf);
	}

	/* Transfer fully sent; dequeue the buffer */
	buf = udc_buf_get(dev, cfg->addr);
	udc_ep_set_busy(dev, cfg->addr, false);

	if (cfg->addr == USB_CONTROL_EP_IN) {
		if (udc_ctrl_stage_is_status_in(dev) ||
		    udc_ctrl_stage_is_no_data(dev)) {
			/* Status stage finished, notify upper layer */
			udc_ctrl_submit_status(dev, buf);
		}

		/* Update to next stage of control transfer */
		udc_ctrl_update_stage(dev, buf);

		if (udc_ctrl_stage_is_status_out(dev)) {
			/*
			 * IN transfer finished, release buffer,
			 * control OUT buffer should be already fed.
			 */
			net_buf_unref(buf);
		}

		return 0;
	}

	return udc_submit_ep_event(dev, buf, 0);
}
852
/* Snapshot all registers that must survive Partial Power Down/Hibernation
 * into priv->backup; counterpart of dwc2_restore_*_registers().
 */
static void dwc2_backup_registers(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct dwc2_reg_backup *backup = &priv->backup;

	/* Core global registers */
	backup->gotgctl = sys_read32((mem_addr_t)&base->gotgctl);
	backup->gahbcfg = sys_read32((mem_addr_t)&base->gahbcfg);
	backup->gusbcfg = sys_read32((mem_addr_t)&base->gusbcfg);
	backup->gintmsk = sys_read32((mem_addr_t)&base->gintmsk);
	backup->grxfsiz = sys_read32((mem_addr_t)&base->grxfsiz);
	backup->gnptxfsiz = sys_read32((mem_addr_t)&base->gnptxfsiz);
	backup->gi2cctl = sys_read32((mem_addr_t)&base->gi2cctl);
	backup->glpmcfg = sys_read32((mem_addr_t)&base->glpmcfg);
	backup->gdfifocfg = sys_read32((mem_addr_t)&base->gdfifocfg);

	/* TxFIFO size registers for FIFOs 1..ineps-1 */
	for (uint8_t i = 1U; i < priv->ineps; i++) {
		backup->dieptxf[i - 1] = sys_read32((mem_addr_t)&base->dieptxf[i - 1]);
	}

	/* Device mode registers */
	backup->dcfg = sys_read32((mem_addr_t)&base->dcfg);
	backup->dctl = sys_read32((mem_addr_t)&base->dctl);
	backup->diepmsk = sys_read32((mem_addr_t)&base->diepmsk);
	backup->doepmsk = sys_read32((mem_addr_t)&base->doepmsk);
	backup->daintmsk = sys_read32((mem_addr_t)&base->daintmsk);

	/* Per-endpoint registers: only the directions the hardware actually
	 * implements (per GHWCFG1). The current data PID is converted into
	 * the matching SETDxPID bit so restoring DxEPCTL re-establishes it.
	 */
	for (uint8_t i = 0U; i < 16; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_IN || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			backup->diepctl[i] = sys_read32((mem_addr_t)&base->in_ep[i].diepctl);
			if (backup->diepctl[i] & USB_DWC2_DEPCTL_DPID) {
				backup->diepctl[i] |= USB_DWC2_DEPCTL_SETD1PID;
			} else {
				backup->diepctl[i] |= USB_DWC2_DEPCTL_SETD0PID;
			}
			backup->dieptsiz[i] = sys_read32((mem_addr_t)&base->in_ep[i].dieptsiz);
			backup->diepdma[i] = sys_read32((mem_addr_t)&base->in_ep[i].diepdma);
		}

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			backup->doepctl[i] = sys_read32((mem_addr_t)&base->out_ep[i].doepctl);
			if (backup->doepctl[i] & USB_DWC2_DEPCTL_DPID) {
				backup->doepctl[i] |= USB_DWC2_DEPCTL_SETD1PID;
			} else {
				backup->doepctl[i] |= USB_DWC2_DEPCTL_SETD0PID;
			}
			backup->doeptsiz[i] = sys_read32((mem_addr_t)&base->out_ep[i].doeptsiz);
			backup->doepdma[i] = sys_read32((mem_addr_t)&base->out_ep[i].doepdma);
		}
	}

	backup->pcgcctl = sys_read32((mem_addr_t)&base->pcgcctl);
}
908
/* Restore the minimal register set needed to bring the core out of
 * hibernation. The exact write order matters here; do not reorder.
 *
 * rwup: exiting due to remote wakeup; bus_reset: exiting due to bus reset.
 */
static void dwc2_restore_essential_registers(const struct device *dev,
					     bool rwup, bool bus_reset)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct dwc2_reg_backup *backup = &priv->backup;
	uint32_t pcgcctl = backup->pcgcctl & USB_DWC2_PCGCCTL_RESTOREVALUE_MASK;

	sys_write32(backup->glpmcfg, (mem_addr_t)&base->glpmcfg);
	sys_write32(backup->gi2cctl, (mem_addr_t)&base->gi2cctl);
	sys_write32(pcgcctl, (mem_addr_t)&base->pcgcctl);

	sys_write32(backup->gahbcfg | USB_DWC2_GAHBCFG_GLBINTRMASK,
		    (mem_addr_t)&base->gahbcfg);

	/* Clear all pending interrupts and only unmask Restore Done */
	sys_write32(0xFFFFFFFFUL, (mem_addr_t)&base->gintsts);
	sys_write32(USB_DWC2_GINTSTS_RSTRDONEINT, (mem_addr_t)&base->gintmsk);

	sys_write32(backup->gusbcfg, (mem_addr_t)&base->gusbcfg);
	sys_write32(backup->dcfg, (mem_addr_t)&base->dcfg);

	if (bus_reset) {
		sys_write32(backup->dcfg, (mem_addr_t)&base->dcfg);
	}

	if (!rwup) {
		pcgcctl |= USB_DWC2_PCGCCTL_RESTOREMODE | USB_DWC2_PCGCCTL_RSTPDWNMODULE;
	}
	sys_write32(pcgcctl, (mem_addr_t)&base->pcgcctl);
	k_busy_wait(1);

	/* Tell the core the essential registers have been restored */
	pcgcctl |= USB_DWC2_PCGCCTL_ESSREGRESTORED;
	sys_write32(pcgcctl, (mem_addr_t)&base->pcgcctl);
}
944
/* Restore the remaining device-mode registers from the backup taken by
 * dwc2_backup_registers(). Called after the essential register restore.
 *
 * rwup: exiting due to remote wakeup (DCTL must not be rewritten then).
 */
static void dwc2_restore_device_registers(const struct device *dev, bool rwup)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct dwc2_reg_backup *backup = &priv->backup;

	/* Core global registers */
	sys_write32(backup->gotgctl, (mem_addr_t)&base->gotgctl);
	sys_write32(backup->gahbcfg, (mem_addr_t)&base->gahbcfg);
	sys_write32(backup->gusbcfg, (mem_addr_t)&base->gusbcfg);
	sys_write32(backup->gintmsk, (mem_addr_t)&base->gintmsk);
	sys_write32(backup->grxfsiz, (mem_addr_t)&base->grxfsiz);
	sys_write32(backup->gnptxfsiz, (mem_addr_t)&base->gnptxfsiz);
	sys_write32(backup->gdfifocfg, (mem_addr_t)&base->gdfifocfg);

	/* TxFIFO size registers for FIFOs 1..ineps-1 */
	for (uint8_t i = 1U; i < priv->ineps; i++) {
		sys_write32(backup->dieptxf[i - 1], (mem_addr_t)&base->dieptxf[i - 1]);
	}

	if (!rwup) {
		sys_write32(backup->dctl, (mem_addr_t)&base->dctl);
	}

	sys_write32(backup->diepmsk, (mem_addr_t)&base->diepmsk);
	sys_write32(backup->doepmsk, (mem_addr_t)&base->doepmsk);
	sys_write32(backup->daintmsk, (mem_addr_t)&base->daintmsk);

	/* Per-endpoint registers for the directions present per GHWCFG1;
	 * DxEPCTL is written last so TSIZ/DMA are valid when it takes effect.
	 */
	for (uint8_t i = 0U; i < 16; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_IN || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			sys_write32(backup->dieptsiz[i], (mem_addr_t)&base->in_ep[i].dieptsiz);
			sys_write32(backup->diepdma[i], (mem_addr_t)&base->in_ep[i].diepdma);
			sys_write32(backup->diepctl[i], (mem_addr_t)&base->in_ep[i].diepctl);
		}

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			sys_write32(backup->doeptsiz[i], (mem_addr_t)&base->out_ep[i].doeptsiz);
			sys_write32(backup->doepdma[i], (mem_addr_t)&base->out_ep[i].doepdma);
			sys_write32(backup->doepctl[i], (mem_addr_t)&base->out_ep[i].doepctl);
		}
	}
}
988
dwc2_enter_hibernation(const struct device * dev)989 static void dwc2_enter_hibernation(const struct device *dev)
990 {
991 const struct udc_dwc2_config *const config = dev->config;
992 struct usb_dwc2_reg *const base = config->base;
993 struct udc_dwc2_data *const priv = udc_get_private(dev);
994 mem_addr_t gpwrdn_reg = (mem_addr_t)&base->gpwrdn;
995 mem_addr_t pcgcctl_reg = (mem_addr_t)&base->pcgcctl;
996
997 dwc2_backup_registers(dev);
998
999 /* This code currently only supports UTMI+. UTMI+ runs at either 30 or
1000 * 60 MHz and therefore 1 us busy waits have sufficiently large margin.
1001 */
1002
1003 /* Enable PMU Logic */
1004 sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PMUACTV);
1005 k_busy_wait(1);
1006
1007 /* Stop PHY clock */
1008 sys_set_bits(pcgcctl_reg, USB_DWC2_PCGCCTL_STOPPCLK);
1009 k_busy_wait(1);
1010
1011 /* Enable PMU interrupt */
1012 sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PMUINTSEL);
1013 k_busy_wait(1);
1014
1015 /* Unmask PMU interrupt bits */
1016 sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_LINESTAGECHANGEMSK |
1017 USB_DWC2_GPWRDN_RESETDETMSK |
1018 USB_DWC2_GPWRDN_DISCONNECTDETECTMSK |
1019 USB_DWC2_GPWRDN_STSCHNGINTMSK);
1020 k_busy_wait(1);
1021
1022 /* Enable power clamps */
1023 sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNCLMP);
1024 k_busy_wait(1);
1025
1026 /* Switch off power to the controller */
1027 sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNSWTCH);
1028
1029 (void)dwc2_quirk_post_hibernation_entry(dev);
1030
1031 /* Mark that the core is hibernated */
1032 priv->hibernated = 1;
1033 LOG_DBG("Hibernated");
1034 }
1035
/* Bring the DWC2 core out of hibernation.
 *
 * @param dev       Controller device
 * @param rwup      true when exiting due to device-initiated Remote Wakeup
 * @param bus_reset true when exiting due to a bus reset detected by the PMU
 *
 * The power-up, restore and resume/reset signaling sequence follows the
 * vendor hibernation exit flow; the exact ordering and busy waits are
 * required by the hardware and must not be rearranged.
 */
static void dwc2_exit_hibernation(const struct device *dev,
				  bool rwup, bool bus_reset)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gpwrdn_reg = (mem_addr_t)&base->gpwrdn;
	mem_addr_t pcgcctl_reg = (mem_addr_t)&base->pcgcctl;

	(void)dwc2_quirk_pre_hibernation_exit(dev);

	/* Switch on power to the controller */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNSWTCH);
	k_busy_wait(1);

	/* Reset the controller */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNRST_N);
	k_busy_wait(1);

	/* Enable restore from PMU */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_RESTORE);
	k_busy_wait(1);

	/* Disable power clamps */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNCLMP);

	if (rwup) {
		/* Synchronous reset variants need only 1 us here */
		if (priv->syncrst) {
			k_busy_wait(1);
		} else {
			k_busy_wait(50);
		}
	}

	/* Remove reset to the controller */
	sys_set_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PWRDNRST_N);
	k_busy_wait(1);

	/* Disable PMU interrupt */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PMUINTSEL);

	dwc2_restore_essential_registers(dev, rwup, bus_reset);

	/* Note: in Remote Wakeup case 15 ms max signaling time starts now */

	/* Wait for Restore Done Interrupt */
	dwc2_wait_for_bit(dev, (mem_addr_t)&base->gintsts, USB_DWC2_GINTSTS_RSTRDONEINT);
	if (!bus_reset) {
		sys_write32(0xFFFFFFFFUL, (mem_addr_t)&base->gintsts);
	}

	/* Disable restore from PMU */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_RESTORE);
	k_busy_wait(1);

	if (!rwup) {
		/* Clear reset to power down module */
		sys_clear_bits(pcgcctl_reg, USB_DWC2_PCGCCTL_RSTPDWNMODULE);
	}

	/* Restore GUSBCFG, DCFG and DCTL */
	sys_write32(priv->backup.gusbcfg, (mem_addr_t)&base->gusbcfg);
	sys_write32(priv->backup.dcfg, (mem_addr_t)&base->dcfg);
	sys_write32(priv->backup.dctl, (mem_addr_t)&base->dctl);

	/* Disable PMU */
	sys_clear_bits(gpwrdn_reg, USB_DWC2_GPWRDN_PMUACTV);
	if (!rwup) {
		k_busy_wait(5);
		sys_set_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_PWRONPRGDONE);
	} else {
		k_busy_wait(1);
		/* Start Remote Wakeup signaling on the bus */
		sys_write32(USB_DWC2_DCTL_RMTWKUPSIG | priv->backup.dctl,
			    (mem_addr_t)&base->dctl);
	}

	k_msleep(1);
	sys_write32(0xFFFFFFFFUL, (mem_addr_t)&base->gintsts);
}
1115
cancel_hibernation_request(struct udc_dwc2_data * const priv)1116 static void cancel_hibernation_request(struct udc_dwc2_data *const priv)
1117 {
1118 k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_ENTER_HIBERNATION));
1119 }
1120
request_hibernation(struct udc_dwc2_data * const priv)1121 static void request_hibernation(struct udc_dwc2_data *const priv)
1122 {
1123 if (priv->suspend_type == DWC2_SUSPEND_HIBERNATION) {
1124 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_ENTER_HIBERNATION));
1125 }
1126 }
1127
dwc2_unset_unused_fifo(const struct device * dev)1128 static void dwc2_unset_unused_fifo(const struct device *dev)
1129 {
1130 struct udc_dwc2_data *const priv = udc_get_private(dev);
1131 struct udc_ep_config *tmp;
1132
1133 for (uint8_t i = priv->ineps - 1U; i > 0; i--) {
1134 tmp = udc_get_ep_cfg(dev, i | USB_EP_DIR_IN);
1135
1136 if (tmp->stat.enabled && (priv->txf_set & BIT(i))) {
1137 return;
1138 }
1139
1140 if (!tmp->stat.enabled && (priv->txf_set & BIT(i))) {
1141 priv->txf_set &= ~BIT(i);
1142 }
1143 }
1144 }
1145
1146 /*
1147 * In dedicated FIFO mode there are i (i = 1 ... ineps - 1) FIFO size registers,
1148 * e.g. DIEPTXF1, DIEPTXF2, ... DIEPTXF4. When dynfifosizing is enabled,
1149 * the size register is mutable. The offset of DIEPTXF1 registers is 0.
1150 */
/* Allocate (or validate) a dedicated TxFIFO for a non-control IN endpoint and
 * select it in the endpoint's DIEPCTL value.
 *
 * @param dev     Controller device
 * @param cfg     Endpoint configuration (IN endpoint, index >= 1)
 * @param diepctl In/out DIEPCTL shadow; TxFNum field is updated on success
 *
 * @return 0 on success, -EIO when higher FIFOs block reallocation,
 *         -ENOMEM when the FIFO does not fit into SPRAM or hardware limits.
 */
static int dwc2_set_dedicated_fifo(const struct device *dev,
				   struct udc_ep_config *const cfg,
				   uint32_t *const diepctl)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	const uint32_t addnl = USB_MPS_ADDITIONAL_TRANSACTIONS(cfg->mps);
	uint32_t reqdep;
	uint32_t txfaddr;
	uint32_t txfdep;
	uint32_t tmp;

	/* Keep everything but FIFO number */
	tmp = *diepctl & ~USB_DWC2_DEPCTL_TXFNUM_MASK;

	/* Required depth in 32-bit words for one max-size packet */
	reqdep = DIV_ROUND_UP(udc_mps_ep_size(cfg), 4U);
	if (priv->bufferdma) {
		/* In DMA mode, TxFIFO capable of holding 2 packets is enough */
		reqdep *= MIN(2, (1 + addnl));
	} else {
		reqdep *= (1 + addnl);
	}

	if (priv->dynfifosizing) {
		if (priv->txf_set & ~BIT_MASK(ep_idx)) {
			dwc2_unset_unused_fifo(dev);
		}

		/* FIFOs are allocated contiguously; cannot (re)allocate this
		 * one while any higher-numbered FIFO is still assigned.
		 */
		if (priv->txf_set & ~BIT_MASK(ep_idx)) {
			LOG_WRN("Some of the FIFOs higher than %u are set, %lx",
				ep_idx, priv->txf_set & ~BIT_MASK(ep_idx));
			return -EIO;
		}

		if ((ep_idx - 1) != 0U) {
			/* Place right after the previous dedicated FIFO */
			txfaddr = dwc2_get_txfdep(dev, ep_idx - 2) +
				  dwc2_get_txfaddr(dev, ep_idx - 2);
		} else {
			/* First dedicated FIFO starts after RxFIFO and FIFO0 */
			txfaddr = priv->rxfifo_depth +
				MIN(UDC_DWC2_FIFO0_DEPTH, priv->max_txfifo_depth[0]);
		}

		/* Make sure to not set TxFIFO greater than hardware allows */
		txfdep = reqdep;
		if (txfdep > priv->max_txfifo_depth[ep_idx]) {
			return -ENOMEM;
		}

		/* Do not allocate TxFIFO outside the SPRAM */
		if (txfaddr + txfdep > priv->dfifodepth) {
			return -ENOMEM;
		}

		/* Set FIFO depth (32-bit words) and address */
		dwc2_set_txf(dev, ep_idx - 1, txfdep, txfaddr);
	} else {
		/* Static sizing: reuse the power-on FIFO layout */
		txfdep = dwc2_get_txfdep(dev, ep_idx - 1);
		txfaddr = dwc2_get_txfaddr(dev, ep_idx - 1);

		if (reqdep > txfdep) {
			return -ENOMEM;
		}

		LOG_DBG("Reuse FIFO%u addr 0x%08x depth %u", ep_idx, txfaddr, txfdep);
	}

	/* Assign FIFO to the IN endpoint */
	*diepctl = tmp | usb_dwc2_set_depctl_txfnum(ep_idx);
	priv->txf_set |= BIT(ep_idx);
	dwc2_flush_tx_fifo(dev, ep_idx);

	LOG_INF("Set FIFO%u (ep 0x%02x) addr 0x%04x depth %u size %u",
		ep_idx, cfg->addr, txfaddr, txfdep, dwc2_ftx_avail(dev, ep_idx));

	return 0;
}
1227
dwc2_ep_control_enable(const struct device * dev,struct udc_ep_config * const cfg)1228 static int dwc2_ep_control_enable(const struct device *dev,
1229 struct udc_ep_config *const cfg)
1230 {
1231 mem_addr_t dxepctl0_reg;
1232 uint32_t dxepctl0;
1233
1234 dxepctl0_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
1235 dxepctl0 = sys_read32(dxepctl0_reg);
1236
1237 dxepctl0 &= ~USB_DWC2_DEPCTL0_MPS_MASK;
1238 switch (cfg->mps) {
1239 case 8:
1240 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_8 << USB_DWC2_DEPCTL_MPS_POS;
1241 break;
1242 case 16:
1243 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_16 << USB_DWC2_DEPCTL_MPS_POS;
1244 break;
1245 case 32:
1246 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_32 << USB_DWC2_DEPCTL_MPS_POS;
1247 break;
1248 case 64:
1249 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_64 << USB_DWC2_DEPCTL_MPS_POS;
1250 break;
1251 default:
1252 return -EINVAL;
1253 }
1254
1255 dxepctl0 |= USB_DWC2_DEPCTL_USBACTEP;
1256
1257 if (cfg->addr == USB_CONTROL_EP_OUT) {
1258 int ret;
1259
1260 dwc2_flush_rx_fifo(dev);
1261 ret = dwc2_ctrl_feed_dout(dev, 8);
1262 if (ret) {
1263 return ret;
1264 }
1265 } else {
1266 dwc2_flush_tx_fifo(dev, 0);
1267 }
1268
1269 sys_write32(dxepctl0, dxepctl0_reg);
1270 dwc2_set_epint(dev, cfg, true);
1271
1272 return 0;
1273 }
1274
udc_dwc2_ep_activate(const struct device * dev,struct udc_ep_config * const cfg)1275 static int udc_dwc2_ep_activate(const struct device *dev,
1276 struct udc_ep_config *const cfg)
1277 {
1278 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1279 struct udc_dwc2_data *const priv = udc_get_private(dev);
1280 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
1281 mem_addr_t dxepctl_reg;
1282 uint32_t dxepctl;
1283
1284 LOG_DBG("Enable ep 0x%02x", cfg->addr);
1285
1286 if (ep_idx == 0U) {
1287 return dwc2_ep_control_enable(dev, cfg);
1288 }
1289
1290 if (USB_EP_DIR_IS_OUT(cfg->addr)) {
1291 /* TODO: use dwc2_get_dxepctl_reg() */
1292 dxepctl_reg = (mem_addr_t)&base->out_ep[ep_idx].doepctl;
1293 } else {
1294 if (priv->ineps > 0U && ep_idx > (priv->ineps - 1U)) {
1295 LOG_ERR("No resources available for ep 0x%02x", cfg->addr);
1296 return -EINVAL;
1297 }
1298
1299 dxepctl_reg = (mem_addr_t)&base->in_ep[ep_idx].diepctl;
1300 }
1301
1302 dxepctl = sys_read32(dxepctl_reg);
1303 /* Set max packet size */
1304 dxepctl &= ~USB_DWC2_DEPCTL_MPS_MASK;
1305 dxepctl |= usb_dwc2_set_depctl_mps(udc_mps_ep_size(cfg));
1306
1307 /* Set endpoint type */
1308 dxepctl &= ~USB_DWC2_DEPCTL_EPTYPE_MASK;
1309
1310 switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) {
1311 case USB_EP_TYPE_BULK:
1312 dxepctl |= USB_DWC2_DEPCTL_EPTYPE_BULK <<
1313 USB_DWC2_DEPCTL_EPTYPE_POS;
1314 dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
1315 break;
1316 case USB_EP_TYPE_INTERRUPT:
1317 dxepctl |= USB_DWC2_DEPCTL_EPTYPE_INTERRUPT <<
1318 USB_DWC2_DEPCTL_EPTYPE_POS;
1319 dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
1320 break;
1321 case USB_EP_TYPE_ISO:
1322 dxepctl |= USB_DWC2_DEPCTL_EPTYPE_ISO <<
1323 USB_DWC2_DEPCTL_EPTYPE_POS;
1324 break;
1325 default:
1326 return -EINVAL;
1327 }
1328
1329 if (USB_EP_DIR_IS_IN(cfg->addr) && udc_mps_ep_size(cfg) != 0U) {
1330 int ret = dwc2_set_dedicated_fifo(dev, cfg, &dxepctl);
1331
1332 if (ret) {
1333 return ret;
1334 }
1335 }
1336
1337 dxepctl |= USB_DWC2_DEPCTL_USBACTEP;
1338
1339 /* Enable endpoint interrupts */
1340 dwc2_set_epint(dev, cfg, true);
1341 sys_write32(dxepctl, dxepctl_reg);
1342
1343 for (uint8_t i = 1U; i < priv->ineps; i++) {
1344 LOG_DBG("DIEPTXF%u %08x DIEPCTL%u %08x",
1345 i, sys_read32((mem_addr_t)&base->dieptxf[i - 1U]), i, dxepctl);
1346 }
1347
1348 return 0;
1349 }
1350
/* Release the dedicated TxFIFO assignment of an IN endpoint and clear the
 * TxFNum field in the DIEPCTL shadow value. With dynamic FIFO sizing the
 * FIFO size register is only zeroed when no higher-numbered FIFO is still
 * allocated (FIFOs must stay contiguous).
 *
 * @return always 0 (kept for call-site symmetry with dwc2_set_dedicated_fifo)
 */
static int dwc2_unset_dedicated_fifo(const struct device *dev,
				     struct udc_ep_config *const cfg,
				     uint32_t *const diepctl)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);

	/* Clear FIFO number field */
	*diepctl &= ~USB_DWC2_DEPCTL_TXFNUM_MASK;

	if (priv->dynfifosizing) {
		if (priv->txf_set & ~BIT_MASK(ep_idx)) {
			LOG_WRN("Some of the FIFOs higher than %u are set, %lx",
				ep_idx, priv->txf_set & ~BIT_MASK(ep_idx));
			return 0;
		}

		dwc2_set_txf(dev, ep_idx - 1, 0, 0);
	}

	priv->txf_set &= ~BIT(ep_idx);

	return 0;
}
1375
/* Disabled IN endpoint means that device will send NAK (isochronous: ZLP) after
 * receiving IN token from host even if there is packet available in TxFIFO.
 * Disabled OUT endpoint means that device will NAK (isochronous: discard data)
 * incoming OUT data (or HS PING) even if there is space available in RxFIFO.
 *
 * Set stall parameter to true if caller wants to send STALL instead of NAK.
 */
/* Disable an endpoint so it NAKs (or STALLs when requested) further traffic.
 *
 * OUT endpoints require the Global OUT NAK handshake before EPDIS; IN
 * endpoints require the IN endpoint NAK handshake. The handshake/wait
 * ordering follows the core's endpoint disable flow and must be preserved.
 */
static void udc_dwc2_ep_disable(const struct device *dev,
				struct udc_ep_config *const cfg, bool stall)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t dxepctl_reg;
	uint32_t dxepctl;
	const bool is_iso = dwc2_ep_is_iso(cfg);

	dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
	dxepctl = sys_read32(dxepctl_reg);

	if (!is_iso && (dxepctl & USB_DWC2_DEPCTL_NAKSTS)) {
		/* Endpoint already sends forced NAKs. STALL if necessary. */
		if (stall) {
			dxepctl |= USB_DWC2_DEPCTL_STALL;
			sys_write32(dxepctl, dxepctl_reg);
		}

		return;
	}

	/* FIXME: This function needs to be changed to not synchronously wait
	 * for the events to happen because the actions here are racing against
	 * the USB host packets. It is possible that the IN token or OUT DATA
	 * gets sent shortly before this function disables the endpoint. If this
	 * happens, the XferCompl would be set and driver will incorrectly think
	 * that either:
	 * * never queued transfer finished, OR
	 * * transfer queued in incompiso handler finished (before it really
	 *   does and then it'll "double"-finish when it actually finishes)
	 *
	 * For the time being XferCompl is cleared as a workaround.
	 */

	if (USB_EP_DIR_IS_OUT(cfg->addr)) {
		mem_addr_t dctl_reg, gintsts_reg, doepint_reg;
		uint32_t dctl;

		dctl_reg = (mem_addr_t)&base->dctl;
		gintsts_reg = (mem_addr_t)&base->gintsts;
		doepint_reg = (mem_addr_t)&base->out_ep[ep_idx].doepint;

		dctl = sys_read32(dctl_reg);

		if (sys_read32(gintsts_reg) & USB_DWC2_GINTSTS_GOUTNAKEFF) {
			LOG_ERR("GOUTNAKEFF already active");
		} else {
			/* Request Global OUT NAK; keep the local copy clean
			 * so later DCTL writes do not re-trigger it.
			 */
			dctl |= USB_DWC2_DCTL_SGOUTNAK;
			sys_write32(dctl, dctl_reg);
			dctl &= ~USB_DWC2_DCTL_SGOUTNAK;
		}

		dwc2_wait_for_bit(dev, gintsts_reg, USB_DWC2_GINTSTS_GOUTNAKEFF);

		/* The application cannot disable control OUT endpoint 0. */
		if (ep_idx != 0) {
			dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPDIS;
		}

		if (stall) {
			/* For OUT endpoints STALL is set instead of SNAK */
			dxepctl |= USB_DWC2_DEPCTL_STALL;
		} else {
			dxepctl |= USB_DWC2_DEPCTL_SNAK;
		}
		sys_write32(dxepctl, dxepctl_reg);

		if (ep_idx != 0) {
			dwc2_wait_for_bit(dev, doepint_reg, USB_DWC2_DOEPINT_EPDISBLD);
		}

		/* Clear Endpoint Disabled interrupt */
		sys_write32(USB_DWC2_DOEPINT_EPDISBLD | USB_DWC2_DOEPINT_XFERCOMPL, doepint_reg);

		/* Release Global OUT NAK */
		dctl |= USB_DWC2_DCTL_CGOUTNAK;
		sys_write32(dctl, dctl_reg);
	} else {
		mem_addr_t diepint_reg;

		diepint_reg = (mem_addr_t)&base->in_ep[ep_idx].diepint;

		dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_SNAK;
		if (stall) {
			/* For IN endpoints STALL is set in addition to SNAK */
			dxepctl |= USB_DWC2_DEPCTL_STALL;
		}
		sys_write32(dxepctl, dxepctl_reg);

		dwc2_wait_for_bit(dev, diepint_reg, USB_DWC2_DIEPINT_INEPNAKEFF);

		dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPDIS;
		sys_write32(dxepctl, dxepctl_reg);

		dwc2_wait_for_bit(dev, diepint_reg, USB_DWC2_DIEPINT_EPDISBLD);

		/* Clear Endpoint Disabled interrupt */
		sys_write32(USB_DWC2_DIEPINT_EPDISBLD | USB_DWC2_DIEPINT_XFERCOMPL, diepint_reg);

		/* TODO: Read DIEPTSIZn here? Programming Guide suggest it to
		 * let application know how many bytes of interrupted transfer
		 * were transferred to the host.
		 */

		dwc2_flush_tx_fifo(dev, usb_dwc2_get_depctl_txfnum(dxepctl));
	}

	udc_ep_set_busy(dev, cfg->addr, false);
}
1492
/* Deactivated endpoint means that there will be a bus timeout when the host
 * tries to access the endpoint.
 */
udc_dwc2_ep_deactivate(const struct device * dev,struct udc_ep_config * const cfg)1496 static int udc_dwc2_ep_deactivate(const struct device *dev,
1497 struct udc_ep_config *const cfg)
1498 {
1499 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
1500 mem_addr_t dxepctl_reg;
1501 uint32_t dxepctl;
1502
1503 dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
1504 dxepctl = sys_read32(dxepctl_reg);
1505
1506 if (dxepctl & USB_DWC2_DEPCTL_USBACTEP) {
1507 LOG_DBG("Disable ep 0x%02x DxEPCTL%u %x",
1508 cfg->addr, ep_idx, dxepctl);
1509
1510 udc_dwc2_ep_disable(dev, cfg, false);
1511
1512 dxepctl = sys_read32(dxepctl_reg);
1513 dxepctl &= ~USB_DWC2_DEPCTL_USBACTEP;
1514 } else {
1515 LOG_WRN("ep 0x%02x is not active DxEPCTL%u %x",
1516 cfg->addr, ep_idx, dxepctl);
1517 }
1518
1519 if (USB_EP_DIR_IS_IN(cfg->addr) && udc_mps_ep_size(cfg) != 0U &&
1520 ep_idx != 0U) {
1521 dwc2_unset_dedicated_fifo(dev, cfg, &dxepctl);
1522 }
1523
1524 sys_write32(dxepctl, dxepctl_reg);
1525 dwc2_set_epint(dev, cfg, false);
1526
1527 if (cfg->addr == USB_CONTROL_EP_OUT) {
1528 struct net_buf *buf = udc_buf_get_all(dev, cfg->addr);
1529
1530 /* Release the buffer allocated in dwc2_ctrl_feed_dout() */
1531 if (buf) {
1532 net_buf_unref(buf);
1533 }
1534 }
1535
1536 return 0;
1537 }
1538
udc_dwc2_ep_set_halt(const struct device * dev,struct udc_ep_config * const cfg)1539 static int udc_dwc2_ep_set_halt(const struct device *dev,
1540 struct udc_ep_config *const cfg)
1541 {
1542 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
1543
1544 udc_dwc2_ep_disable(dev, cfg, true);
1545
1546 LOG_DBG("Set halt ep 0x%02x", cfg->addr);
1547 if (ep_idx != 0) {
1548 cfg->stat.halted = true;
1549 }
1550
1551 return 0;
1552 }
1553
udc_dwc2_ep_clear_halt(const struct device * dev,struct udc_ep_config * const cfg)1554 static int udc_dwc2_ep_clear_halt(const struct device *dev,
1555 struct udc_ep_config *const cfg)
1556 {
1557 struct udc_dwc2_data *const priv = udc_get_private(dev);
1558 mem_addr_t dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
1559 uint32_t dxepctl;
1560
1561 dxepctl = sys_read32(dxepctl_reg);
1562 dxepctl &= ~USB_DWC2_DEPCTL_STALL;
1563 dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
1564 sys_write32(dxepctl, dxepctl_reg);
1565
1566 LOG_DBG("Clear halt ep 0x%02x", cfg->addr);
1567 cfg->stat.halted = false;
1568
1569 /* Resume queued transfers if any */
1570 if (udc_buf_peek(dev, cfg->addr)) {
1571 uint32_t ep_bit;
1572
1573 if (USB_EP_DIR_IS_IN(cfg->addr)) {
1574 ep_bit = BIT(USB_EP_GET_IDX(cfg->addr));
1575 } else {
1576 ep_bit = BIT(16 + USB_EP_GET_IDX(cfg->addr));
1577 }
1578
1579 k_event_post(&priv->xfer_new, ep_bit);
1580 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));
1581 }
1582
1583 return 0;
1584 }
1585
udc_dwc2_ep_enqueue(const struct device * dev,struct udc_ep_config * const cfg,struct net_buf * const buf)1586 static int udc_dwc2_ep_enqueue(const struct device *dev,
1587 struct udc_ep_config *const cfg,
1588 struct net_buf *const buf)
1589 {
1590 struct udc_dwc2_data *const priv = udc_get_private(dev);
1591
1592 LOG_DBG("%p enqueue %x %p", dev, cfg->addr, buf);
1593 udc_buf_put(cfg, buf);
1594
1595 if (!cfg->stat.halted) {
1596 uint32_t ep_bit;
1597
1598 if (USB_EP_DIR_IS_IN(cfg->addr)) {
1599 ep_bit = BIT(USB_EP_GET_IDX(cfg->addr));
1600 } else {
1601 ep_bit = BIT(16 + USB_EP_GET_IDX(cfg->addr));
1602 }
1603
1604 k_event_post(&priv->xfer_new, ep_bit);
1605 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));
1606 }
1607
1608 return 0;
1609 }
1610
udc_dwc2_ep_dequeue(const struct device * dev,struct udc_ep_config * const cfg)1611 static int udc_dwc2_ep_dequeue(const struct device *dev,
1612 struct udc_ep_config *const cfg)
1613 {
1614 struct net_buf *buf;
1615
1616 udc_dwc2_ep_disable(dev, cfg, false);
1617
1618 buf = udc_buf_get_all(dev, cfg->addr);
1619 if (buf) {
1620 udc_submit_ep_event(dev, buf, -ECONNABORTED);
1621 }
1622
1623 udc_ep_set_busy(dev, cfg->addr, false);
1624
1625 LOG_DBG("dequeue ep 0x%02x", cfg->addr);
1626
1627 return 0;
1628 }
1629
udc_dwc2_set_address(const struct device * dev,const uint8_t addr)1630 static int udc_dwc2_set_address(const struct device *dev, const uint8_t addr)
1631 {
1632 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1633 mem_addr_t dcfg_reg = (mem_addr_t)&base->dcfg;
1634 uint32_t dcfg;
1635
1636 if (addr > (USB_DWC2_DCFG_DEVADDR_MASK >> USB_DWC2_DCFG_DEVADDR_POS)) {
1637 return -EINVAL;
1638 }
1639
1640 dcfg = sys_read32(dcfg_reg);
1641 dcfg &= ~USB_DWC2_DCFG_DEVADDR_MASK;
1642 dcfg |= usb_dwc2_set_dcfg_devaddr(addr);
1643 sys_write32(dcfg, dcfg_reg);
1644 LOG_DBG("Set new address %u for %p", addr, dev);
1645
1646 return 0;
1647 }
1648
udc_dwc2_test_mode(const struct device * dev,const uint8_t mode,const bool dryrun)1649 static int udc_dwc2_test_mode(const struct device *dev,
1650 const uint8_t mode, const bool dryrun)
1651 {
1652 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1653 mem_addr_t dctl_reg = (mem_addr_t)&base->dctl;
1654 uint32_t dctl;
1655
1656 if (mode == 0U || mode > USB_DWC2_DCTL_TSTCTL_TESTFE) {
1657 return -EINVAL;
1658 }
1659
1660 dctl = sys_read32(dctl_reg);
1661 if (usb_dwc2_get_dctl_tstctl(dctl) != USB_DWC2_DCTL_TSTCTL_DISABLED) {
1662 return -EALREADY;
1663 }
1664
1665 if (dryrun) {
1666 LOG_DBG("Test Mode %u supported", mode);
1667 return 0;
1668 }
1669
1670 dctl |= usb_dwc2_set_dctl_tstctl(mode);
1671 sys_write32(dctl, dctl_reg);
1672 LOG_DBG("Enable Test Mode %u", mode);
1673
1674 return 0;
1675 }
1676
udc_dwc2_host_wakeup(const struct device * dev)1677 static int udc_dwc2_host_wakeup(const struct device *dev)
1678 {
1679 struct udc_dwc2_data *const priv = udc_get_private(dev);
1680
1681 LOG_DBG("Remote wakeup from %p", dev);
1682
1683 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_REMOTE_WAKEUP));
1684
1685 return 0;
1686 }
1687
1688 /* Return actual USB device speed */
udc_dwc2_device_speed(const struct device * dev)1689 static enum udc_bus_speed udc_dwc2_device_speed(const struct device *dev)
1690 {
1691 struct udc_dwc2_data *const priv = udc_get_private(dev);
1692
1693 switch (priv->enumspd) {
1694 case USB_DWC2_DSTS_ENUMSPD_HS3060:
1695 return UDC_BUS_SPEED_HS;
1696 case USB_DWC2_DSTS_ENUMSPD_LS6:
1697 __ASSERT(false, "Low speed mode not supported");
1698 __fallthrough;
1699 case USB_DWC2_DSTS_ENUMSPD_FS48:
1700 __fallthrough;
1701 case USB_DWC2_DSTS_ENUMSPD_FS3060:
1702 __fallthrough;
1703 default:
1704 return UDC_BUS_SPEED_FS;
1705 }
1706 }
1707
dwc2_core_soft_reset(const struct device * dev)1708 static int dwc2_core_soft_reset(const struct device *dev)
1709 {
1710 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1711 mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;
1712 const unsigned int csr_timeout_us = 10000UL;
1713 uint32_t cnt = 0UL;
1714
1715 /* Check AHB master idle state */
1716 while (!(sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_AHBIDLE)) {
1717 k_busy_wait(1);
1718
1719 if (++cnt > csr_timeout_us) {
1720 LOG_ERR("Wait for AHB idle timeout, GRSTCTL 0x%08x",
1721 sys_read32(grstctl_reg));
1722 return -EIO;
1723 }
1724 }
1725
1726 /* Apply Core Soft Reset */
1727 sys_write32(USB_DWC2_GRSTCTL_CSFTRST, grstctl_reg);
1728
1729 cnt = 0UL;
1730 do {
1731 if (++cnt > csr_timeout_us) {
1732 LOG_ERR("Wait for CSR done timeout, GRSTCTL 0x%08x",
1733 sys_read32(grstctl_reg));
1734 return -EIO;
1735 }
1736
1737 k_busy_wait(1);
1738 } while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_CSFTRST &&
1739 !(sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_CSFTRSTDONE));
1740
1741 sys_clear_bits(grstctl_reg, USB_DWC2_GRSTCTL_CSFTRST | USB_DWC2_GRSTCTL_CSFTRSTDONE);
1742
1743 return 0;
1744 }
1745
udc_dwc2_init_controller(const struct device * dev)1746 static int udc_dwc2_init_controller(const struct device *dev)
1747 {
1748 const struct udc_dwc2_config *const config = dev->config;
1749 struct udc_dwc2_data *const priv = udc_get_private(dev);
1750 struct usb_dwc2_reg *const base = config->base;
1751 mem_addr_t grxfsiz_reg = (mem_addr_t)&base->grxfsiz;
1752 mem_addr_t gahbcfg_reg = (mem_addr_t)&base->gahbcfg;
1753 mem_addr_t gusbcfg_reg = (mem_addr_t)&base->gusbcfg;
1754 mem_addr_t dcfg_reg = (mem_addr_t)&base->dcfg;
1755 uint32_t dcfg;
1756 uint32_t gusbcfg;
1757 uint32_t gahbcfg;
1758 uint32_t ghwcfg2;
1759 uint32_t ghwcfg3;
1760 uint32_t ghwcfg4;
1761 uint32_t val;
1762 int ret;
1763 bool hs_phy;
1764
1765 ret = dwc2_core_soft_reset(dev);
1766 if (ret) {
1767 return ret;
1768 }
1769
1770 priv->ghwcfg1 = sys_read32((mem_addr_t)&base->ghwcfg1);
1771 ghwcfg2 = sys_read32((mem_addr_t)&base->ghwcfg2);
1772 ghwcfg3 = sys_read32((mem_addr_t)&base->ghwcfg3);
1773 ghwcfg4 = sys_read32((mem_addr_t)&base->ghwcfg4);
1774
1775 if (!(ghwcfg4 & USB_DWC2_GHWCFG4_DEDFIFOMODE)) {
1776 LOG_ERR("Only dedicated TX FIFO mode is supported");
1777 return -ENOTSUP;
1778 }
1779
1780 /*
1781 * Force device mode as we do no support role changes.
1782 * Wait 25ms for the change to take effect.
1783 */
1784 gusbcfg = USB_DWC2_GUSBCFG_FORCEDEVMODE;
1785 sys_write32(gusbcfg, gusbcfg_reg);
1786 k_msleep(25);
1787
1788 /* Buffer DMA is always supported in Internal DMA mode.
1789 * TODO: check and support descriptor DMA if available
1790 */
1791 priv->bufferdma = (usb_dwc2_get_ghwcfg2_otgarch(ghwcfg2) ==
1792 USB_DWC2_GHWCFG2_OTGARCH_INTERNALDMA);
1793
1794 if (!IS_ENABLED(CONFIG_UDC_DWC2_DMA)) {
1795 priv->bufferdma = 0;
1796 } else if (priv->bufferdma) {
1797 LOG_WRN("Experimental DMA enabled");
1798 }
1799
1800 if (ghwcfg2 & USB_DWC2_GHWCFG2_DYNFIFOSIZING) {
1801 LOG_DBG("Dynamic FIFO Sizing is enabled");
1802 priv->dynfifosizing = true;
1803 }
1804
1805 if (IS_ENABLED(CONFIG_UDC_DWC2_HIBERNATION) &&
1806 ghwcfg4 & USB_DWC2_GHWCFG4_HIBERNATION) {
1807 LOG_INF("Hibernation enabled");
1808 priv->suspend_type = DWC2_SUSPEND_HIBERNATION;
1809 } else {
1810 priv->suspend_type = DWC2_SUSPEND_NO_POWER_SAVING;
1811 }
1812
1813 /* Get the number or endpoints and IN endpoints we can use later */
1814 priv->numdeveps = usb_dwc2_get_ghwcfg2_numdeveps(ghwcfg2) + 1U;
1815 priv->ineps = usb_dwc2_get_ghwcfg4_ineps(ghwcfg4) + 1U;
1816 LOG_DBG("Number of endpoints (NUMDEVEPS + 1) %u", priv->numdeveps);
1817 LOG_DBG("Number of IN endpoints (INEPS + 1) %u", priv->ineps);
1818
1819 LOG_DBG("Number of periodic IN endpoints (NUMDEVPERIOEPS) %u",
1820 usb_dwc2_get_ghwcfg4_numdevperioeps(ghwcfg4));
1821 LOG_DBG("Number of additional control endpoints (NUMCTLEPS) %u",
1822 usb_dwc2_get_ghwcfg4_numctleps(ghwcfg4));
1823
1824 LOG_DBG("OTG architecture (OTGARCH) %u, mode (OTGMODE) %u",
1825 usb_dwc2_get_ghwcfg2_otgarch(ghwcfg2),
1826 usb_dwc2_get_ghwcfg2_otgmode(ghwcfg2));
1827
1828 priv->dfifodepth = usb_dwc2_get_ghwcfg3_dfifodepth(ghwcfg3);
1829 LOG_DBG("DFIFO depth (DFIFODEPTH) %u bytes", priv->dfifodepth * 4);
1830
1831 priv->max_pktcnt = GHWCFG3_PKTCOUNT(usb_dwc2_get_ghwcfg3_pktsizewidth(ghwcfg3));
1832 priv->max_xfersize = GHWCFG3_XFERSIZE(usb_dwc2_get_ghwcfg3_xfersizewidth(ghwcfg3));
1833 LOG_DBG("Max packet count %u, Max transfer size %u",
1834 priv->max_pktcnt, priv->max_xfersize);
1835
1836 LOG_DBG("Vendor Control interface support enabled: %s",
1837 (ghwcfg3 & USB_DWC2_GHWCFG3_VNDCTLSUPT) ? "true" : "false");
1838
1839 LOG_DBG("PHY interface type: FSPHYTYPE %u, HSPHYTYPE %u, DATAWIDTH %u",
1840 usb_dwc2_get_ghwcfg2_fsphytype(ghwcfg2),
1841 usb_dwc2_get_ghwcfg2_hsphytype(ghwcfg2),
1842 usb_dwc2_get_ghwcfg4_phydatawidth(ghwcfg4));
1843
1844 LOG_DBG("LPM mode is %s",
1845 (ghwcfg3 & USB_DWC2_GHWCFG3_LPMMODE) ? "enabled" : "disabled");
1846
1847 if (ghwcfg3 & USB_DWC2_GHWCFG3_RSTTYPE) {
1848 priv->syncrst = 1;
1849 }
1850
1851 /* Configure AHB, select Completer or DMA mode */
1852 gahbcfg = sys_read32(gahbcfg_reg);
1853
1854 if (priv->bufferdma) {
1855 gahbcfg |= USB_DWC2_GAHBCFG_DMAEN;
1856 } else {
1857 gahbcfg &= ~USB_DWC2_GAHBCFG_DMAEN;
1858 }
1859
1860 sys_write32(gahbcfg, gahbcfg_reg);
1861
1862 dcfg = sys_read32(dcfg_reg);
1863
1864 dcfg &= ~USB_DWC2_DCFG_DESCDMA;
1865
1866 /* Configure PHY and device speed */
1867 dcfg &= ~USB_DWC2_DCFG_DEVSPD_MASK;
1868 switch (usb_dwc2_get_ghwcfg2_hsphytype(ghwcfg2)) {
1869 case USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUSULPI:
1870 __fallthrough;
1871 case USB_DWC2_GHWCFG2_HSPHYTYPE_ULPI:
1872 gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB20 |
1873 USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_ULPI;
1874 dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20
1875 << USB_DWC2_DCFG_DEVSPD_POS;
1876 hs_phy = true;
1877 break;
1878 case USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUS:
1879 gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB20 |
1880 USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_UTMI;
1881 dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20
1882 << USB_DWC2_DCFG_DEVSPD_POS;
1883 hs_phy = true;
1884 break;
1885 case USB_DWC2_GHWCFG2_HSPHYTYPE_NO_HS:
1886 __fallthrough;
1887 default:
1888 if (usb_dwc2_get_ghwcfg2_fsphytype(ghwcfg2) !=
1889 USB_DWC2_GHWCFG2_FSPHYTYPE_NO_FS) {
1890 gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB11;
1891 }
1892
1893 dcfg |= USB_DWC2_DCFG_DEVSPD_USBFS1148
1894 << USB_DWC2_DCFG_DEVSPD_POS;
1895 hs_phy = false;
1896 }
1897
1898 if (usb_dwc2_get_ghwcfg4_phydatawidth(ghwcfg4)) {
1899 gusbcfg |= USB_DWC2_GUSBCFG_PHYIF_16_BIT;
1900 }
1901
1902 /* Update PHY configuration */
1903 sys_write32(gusbcfg, gusbcfg_reg);
1904 sys_write32(dcfg, dcfg_reg);
1905
1906 priv->outeps = 0U;
1907 for (uint8_t i = 0U; i < priv->numdeveps; i++) {
1908 uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);
1909
1910 if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT ||
1911 epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
1912 mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, i);
1913
1914 sys_write32(USB_DWC2_DEPCTL_SNAK, doepctl_reg);
1915 priv->outeps++;
1916 }
1917 }
1918
1919 LOG_DBG("Number of OUT endpoints %u", priv->outeps);
1920
1921 /* Read and store all TxFIFO depths because Programmed FIFO Depths must
1922 * not exceed the power-on values.
1923 */
1924 val = sys_read32((mem_addr_t)&base->gnptxfsiz);
1925 priv->max_txfifo_depth[0] = usb_dwc2_get_gnptxfsiz_nptxfdep(val);
1926 for (uint8_t i = 1; i < priv->ineps; i++) {
1927 priv->max_txfifo_depth[i] = dwc2_get_txfdep(dev, i - 1);
1928 }
1929
1930 priv->rxfifo_depth = usb_dwc2_get_grxfsiz(sys_read32(grxfsiz_reg));
1931
1932 if (priv->dynfifosizing) {
1933 uint32_t gnptxfsiz;
1934 uint32_t default_depth;
1935
1936 /* TODO: For proper runtime FIFO sizing UDC driver would have to
1937 * have prior knowledge of the USB configurations. Only with the
1938 * prior knowledge, the driver will be able to fairly distribute
1939 * available resources. For the time being just use different
1940 * defaults based on maximum configured PHY speed, but this has
1941 * to be revised if e.g. thresholding support would be necessary
1942 * on some target.
1943 */
1944 if (hs_phy) {
1945 default_depth = UDC_DWC2_GRXFSIZ_HS_DEFAULT;
1946 } else {
1947 default_depth = UDC_DWC2_GRXFSIZ_FS_DEFAULT;
1948 }
1949 default_depth += priv->outeps * 2U;
1950
1951 /* Driver does not dynamically resize RxFIFO so there is no need
1952 * to store reset value. Read the reset value and make sure that
1953 * the programmed value is not greater than what driver sets.
1954 */
1955 priv->rxfifo_depth = MIN(priv->rxfifo_depth, default_depth);
1956 sys_write32(usb_dwc2_set_grxfsiz(priv->rxfifo_depth), grxfsiz_reg);
1957
1958 /* Set TxFIFO 0 depth */
1959 val = MIN(UDC_DWC2_FIFO0_DEPTH, priv->max_txfifo_depth[0]);
1960 gnptxfsiz = usb_dwc2_set_gnptxfsiz_nptxfdep(val) |
1961 usb_dwc2_set_gnptxfsiz_nptxfstaddr(priv->rxfifo_depth);
1962
1963 sys_write32(gnptxfsiz, (mem_addr_t)&base->gnptxfsiz);
1964 }
1965
1966 LOG_DBG("RX FIFO size %u bytes", priv->rxfifo_depth * 4);
1967 for (uint8_t i = 1U; i < priv->ineps; i++) {
1968 LOG_DBG("TX FIFO%u depth %u addr %u",
1969 i, priv->max_txfifo_depth[i], dwc2_get_txfaddr(dev, i));
1970 }
1971
1972 if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT,
1973 USB_EP_TYPE_CONTROL, 64, 0)) {
1974 LOG_ERR("Failed to enable control endpoint");
1975 return -EIO;
1976 }
1977
1978 if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN,
1979 USB_EP_TYPE_CONTROL, 64, 0)) {
1980 LOG_ERR("Failed to enable control endpoint");
1981 return -EIO;
1982 }
1983
1984 /* Unmask interrupts */
1985 sys_write32(USB_DWC2_GINTSTS_OEPINT | USB_DWC2_GINTSTS_IEPINT |
1986 USB_DWC2_GINTSTS_ENUMDONE | USB_DWC2_GINTSTS_USBRST |
1987 USB_DWC2_GINTSTS_WKUPINT | USB_DWC2_GINTSTS_USBSUSP |
1988 USB_DWC2_GINTSTS_INCOMPISOOUT | USB_DWC2_GINTSTS_INCOMPISOIN |
1989 USB_DWC2_GINTSTS_SOF,
1990 (mem_addr_t)&base->gintmsk);
1991
1992 return 0;
1993 }
1994
/* UDC API enable hook: run vendor pre-enable quirk, program the controller,
 * run the post-enable quirk, then unmask the global interrupt and release
 * soft disconnect so the host can see the device.
 *
 * Returns 0 on success, a negative errno from the quirks or controller init
 * otherwise. The order of operations matters: interrupts are enabled only
 * after the controller is fully initialized.
 */
static int udc_dwc2_enable(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	int err;

	err = dwc2_quirk_pre_enable(dev);
	if (err) {
		LOG_ERR("Quirk pre enable failed %d", err);
		return err;
	}

	err = udc_dwc2_init_controller(dev);
	if (err) {
		return err;
	}

	err = dwc2_quirk_post_enable(dev);
	if (err) {
		LOG_ERR("Quirk post enable failed %d", err);
		return err;
	}

	/* Enable global interrupt */
	sys_set_bits((mem_addr_t)&base->gahbcfg, USB_DWC2_GAHBCFG_GLBINTRMASK);
	config->irq_enable_func(dev);

	/* Disable soft disconnect */
	sys_clear_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_SFTDISCON);
	LOG_DBG("Enable device %p", base);

	return 0;
}
2028
/* UDC API disable hook: assert soft disconnect first (so the host sees a
 * detach), tear down the control endpoints, mask interrupts, leave
 * hibernation if the core is still hibernated, and run the vendor disable
 * quirk last.
 *
 * Returns 0 on success, -EIO if a control endpoint cannot be disabled, or a
 * negative errno from the quirk.
 */
static int udc_dwc2_disable(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t dctl_reg = (mem_addr_t)&base->dctl;
	int err;

	/* Enable soft disconnect */
	sys_set_bits(dctl_reg, USB_DWC2_DCTL_SFTDISCON);
	LOG_DBG("Disable device %p", dev);

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) {
		LOG_DBG("Failed to disable control endpoint");
		return -EIO;
	}

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) {
		LOG_DBG("Failed to disable control endpoint");
		return -EIO;
	}

	config->irq_disable_func(dev);

	/* Core cannot be left hibernated across disable; exit without
	 * remote wakeup, treating it as a bus reset style exit.
	 */
	if (priv->hibernated) {
		dwc2_exit_hibernation(dev, false, true);
		priv->hibernated = 0;
	}

	/* Mask the global interrupt, mirror of the enable path */
	sys_clear_bits((mem_addr_t)&base->gahbcfg, USB_DWC2_GAHBCFG_GLBINTRMASK);

	err = dwc2_quirk_disable(dev);
	if (err) {
		LOG_ERR("Quirk disable failed %d", err);
		return err;
	}

	return 0;
}
2068
/* UDC API init hook: run the vendor init quirk, then apply the default
 * pinctrl state. Returns 0 on success or a negative errno.
 */
static int udc_dwc2_init(const struct device *dev)
{
	int err = dwc2_quirk_init(dev);

	if (err != 0) {
		LOG_ERR("Quirk init failed %d", err);
		return err;
	}

	return dwc2_init_pinctrl(dev);
}
2081
/* UDC API shutdown hook: only the vendor shutdown quirk needs to run.
 * Returns 0 on success or the quirk's negative errno.
 */
static int udc_dwc2_shutdown(const struct device *dev)
{
	const int err = dwc2_quirk_shutdown(dev);

	if (err != 0) {
		LOG_ERR("Quirk shutdown failed %d", err);
	}

	return err;
}
2094
/* Device preinit: set up synchronization primitives, advertise controller
 * capabilities, and register all endpoints described by the GHWCFGn values
 * taken from devicetree (hardware registers are not accessible yet at this
 * stage). Finally spawn the driver's bottom-half thread.
 *
 * Returns 0 on success or a negative errno from endpoint registration.
 */
static int dwc2_driver_preinit(const struct device *dev)
{
	const struct udc_dwc2_config *config = dev->config;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct udc_data *data = dev->data;
	/* Full-speed ISO maximum packet size; raised to 1024 for high-speed */
	uint16_t mps = 1023;
	uint32_t numdeveps;
	uint32_t ineps;
	int err;

	k_mutex_init(&data->mutex);

	k_event_init(&priv->drv_evt);
	k_event_init(&priv->xfer_new);
	k_event_init(&priv->xfer_finished);

	data->caps.rwup = true;
	data->caps.addr_before_status = true;
	data->caps.mps0 = UDC_MPS0_64;

	/* Quirk may set additional capabilities, e.g. high-speed support */
	(void)dwc2_quirk_caps(dev);
	if (data->caps.hs) {
		mps = 1024;
	}

	/*
	 * At this point, we cannot or do not want to access the hardware
	 * registers to get GHWCFGn values. For now, we will use devicetree to
	 * get GHWCFGn values and use them to determine the number and type of
	 * configured endpoints in the hardware. This can be considered a
	 * workaround, and we may change the upper layer internals to avoid it
	 * in the future.
	 */
	ineps = usb_dwc2_get_ghwcfg4_ineps(config->ghwcfg4) + 1U;
	numdeveps = usb_dwc2_get_ghwcfg2_numdeveps(config->ghwcfg2) + 1U;
	LOG_DBG("Number of endpoints (NUMDEVEPS + 1) %u", numdeveps);
	LOG_DBG("Number of IN endpoints (INEPS + 1) %u", ineps);

	/* Register OUT endpoints: i indexes hardware endpoints, n indexes the
	 * driver's ep_cfg_out array (only OUT-capable endpoints consume one).
	 */
	for (uint32_t i = 0, n = 0; i < numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(config->ghwcfg1, i);

		if (epdir != USB_DWC2_GHWCFG1_EPDIR_OUT &&
		    epdir != USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			continue;
		}

		if (i == 0) {
			config->ep_cfg_out[n].caps.control = 1;
			config->ep_cfg_out[n].caps.mps = 64;
		} else {
			config->ep_cfg_out[n].caps.bulk = 1;
			config->ep_cfg_out[n].caps.interrupt = 1;
			config->ep_cfg_out[n].caps.iso = 1;
			config->ep_cfg_out[n].caps.high_bandwidth = data->caps.hs;
			config->ep_cfg_out[n].caps.mps = mps;
		}

		config->ep_cfg_out[n].caps.out = 1;
		config->ep_cfg_out[n].addr = USB_EP_DIR_OUT | i;

		LOG_DBG("Register ep 0x%02x (%u)", i, n);
		err = udc_register_ep(dev, &config->ep_cfg_out[n]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}

		n++;
		/* Also check the number of desired OUT endpoints in devicetree. */
		if (n >= config->num_out_eps) {
			break;
		}
	}

	/* Register IN endpoints, mirroring the OUT loop above */
	for (uint32_t i = 0, n = 0; i < numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(config->ghwcfg1, i);

		if (epdir != USB_DWC2_GHWCFG1_EPDIR_IN &&
		    epdir != USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			continue;
		}

		if (i == 0) {
			config->ep_cfg_in[n].caps.control = 1;
			config->ep_cfg_in[n].caps.mps = 64;
		} else {
			config->ep_cfg_in[n].caps.bulk = 1;
			config->ep_cfg_in[n].caps.interrupt = 1;
			config->ep_cfg_in[n].caps.iso = 1;
			config->ep_cfg_in[n].caps.high_bandwidth = data->caps.hs;
			config->ep_cfg_in[n].caps.mps = mps;
		}

		config->ep_cfg_in[n].caps.in = 1;
		config->ep_cfg_in[n].addr = USB_EP_DIR_IN | i;

		LOG_DBG("Register ep 0x%02x (%u)", USB_EP_DIR_IN | i, n);
		err = udc_register_ep(dev, &config->ep_cfg_in[n]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}

		n++;
		/* Also check the number of desired IN endpoints in devicetree. */
		if (n >= MIN(ineps, config->num_in_eps)) {
			break;
		}
	}

	/* Spawn the bottom-half thread that services priv->drv_evt */
	config->make_thread(dev);

	return 0;
}
2209
/* UDC API lock hook: take the driver mutex, blocking indefinitely. */
static int udc_dwc2_lock(const struct device *dev)
{
	return udc_lock_internal(dev, K_FOREVER);
}
2214
/* UDC API unlock hook: release the driver mutex. */
static int udc_dwc2_unlock(const struct device *dev)
{
	return udc_unlock_internal(dev);
}
2219
/* Handle USB bus reset: NAK all OUT endpoints, unmask the endpoint
 * interrupts required for control transfers, clear the device address, and
 * require a new speed enumeration (ENUMDONE) before transfers resume.
 */
static void dwc2_on_bus_reset(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint32_t doepmsk;

	/* Set the NAK bit for all OUT endpoints */
	for (uint8_t i = 0U; i < priv->numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);
		mem_addr_t doepctl_reg;

		LOG_DBG("ep 0x%02x EPDIR %u", i, epdir);
		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT ||
		    epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			doepctl_reg = dwc2_get_dxepctl_reg(dev, i);
			sys_write32(USB_DWC2_DEPCTL_SNAK, doepctl_reg);
		}
	}

	/* Status-phase-received interrupt is only needed in Buffer DMA mode */
	doepmsk = USB_DWC2_DOEPINT_SETUP | USB_DWC2_DOEPINT_XFERCOMPL;
	if (priv->bufferdma) {
		doepmsk |= USB_DWC2_DOEPINT_STSPHSERCVD;
	}

	sys_write32(doepmsk, (mem_addr_t)&base->doepmsk);
	sys_set_bits((mem_addr_t)&base->diepmsk, USB_DWC2_DIEPINT_XFERCOMPL);

	/* Software has to handle RxFLvl interrupt only in Completer mode */
	if (!priv->bufferdma) {
		sys_set_bits((mem_addr_t)&base->gintmsk,
			     USB_DWC2_GINTSTS_RXFLVL);
	}

	/* Clear device address during reset. */
	sys_clear_bits((mem_addr_t)&base->dcfg, USB_DWC2_DCFG_DEVADDR_MASK);

	/* Speed enumeration must happen after reset. */
	priv->enumdone = 0;
}
2259
dwc2_handle_enumdone(const struct device * dev)2260 static void dwc2_handle_enumdone(const struct device *dev)
2261 {
2262 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
2263 struct udc_dwc2_data *const priv = udc_get_private(dev);
2264 uint32_t dsts;
2265
2266 dsts = sys_read32((mem_addr_t)&base->dsts);
2267 priv->enumspd = usb_dwc2_get_dsts_enumspd(dsts);
2268 priv->enumdone = 1;
2269 }
2270
dwc2_read_fifo_setup(const struct device * dev,uint8_t ep,const size_t size)2271 static inline int dwc2_read_fifo_setup(const struct device *dev, uint8_t ep,
2272 const size_t size)
2273 {
2274 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
2275 struct udc_dwc2_data *const priv = udc_get_private(dev);
2276 size_t offset;
2277
2278 /* FIFO access is always in 32-bit words */
2279
2280 if (size != 8) {
2281 LOG_ERR("%d bytes SETUP", size);
2282 }
2283
2284 /*
2285 * We store the setup packet temporarily in the driver's private data
2286 * because there is always a race risk after the status stage OUT
2287 * packet from the host and the new setup packet. This is fine in
2288 * bottom-half processing because the events arrive in a queue and
2289 * there will be a next net_buf for the setup packet.
2290 */
2291 for (offset = 0; offset < MIN(size, 8); offset += 4) {
2292 sys_put_le32(sys_read32(UDC_DWC2_EP_FIFO(base, ep)),
2293 &priv->setup[offset]);
2294 }
2295
2296 /* On protocol error simply discard extra data */
2297 while (offset < size) {
2298 sys_read32(UDC_DWC2_EP_FIFO(base, ep));
2299 offset += 4;
2300 }
2301
2302 return 0;
2303 }
2304
dwc2_handle_rxflvl(const struct device * dev)2305 static inline void dwc2_handle_rxflvl(const struct device *dev)
2306 {
2307 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
2308 struct udc_ep_config *ep_cfg;
2309 struct net_buf *buf;
2310 uint32_t grxstsp;
2311 uint32_t pktsts;
2312 uint32_t bcnt;
2313 uint8_t ep;
2314
2315 grxstsp = sys_read32((mem_addr_t)&base->grxstsp);
2316 ep = usb_dwc2_get_grxstsp_epnum(grxstsp);
2317 bcnt = usb_dwc2_get_grxstsp_bcnt(grxstsp);
2318 pktsts = usb_dwc2_get_grxstsp_pktsts(grxstsp);
2319
2320 LOG_DBG("ep 0x%02x: pktsts %u, bcnt %u", ep, pktsts, bcnt);
2321
2322 switch (pktsts) {
2323 case USB_DWC2_GRXSTSR_PKTSTS_SETUP:
2324 dwc2_read_fifo_setup(dev, ep, bcnt);
2325 break;
2326 case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA:
2327 ep_cfg = udc_get_ep_cfg(dev, ep);
2328
2329 buf = udc_buf_peek(dev, ep_cfg->addr);
2330
2331 /* RxFIFO data must be retrieved even when buf is NULL */
2332 dwc2_read_fifo(dev, ep, buf, bcnt);
2333 break;
2334 case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA_DONE:
2335 LOG_DBG("RX pktsts DONE");
2336 break;
2337 case USB_DWC2_GRXSTSR_PKTSTS_SETUP_DONE:
2338 LOG_DBG("SETUP pktsts DONE");
2339 case USB_DWC2_GRXSTSR_PKTSTS_GLOBAL_OUT_NAK:
2340 LOG_DBG("Global OUT NAK");
2341 break;
2342 default:
2343 break;
2344 }
2345 }
2346
/* Handle IN endpoint XferCompl: consume the bytes just transmitted from the
 * queued buffer, and either continue feeding the TxFIFO (multi-part
 * transfer) or notify the bottom-half thread that the endpoint finished.
 */
static inline void dwc2_handle_in_xfercompl(const struct device *dev,
					    const uint8_t ep_idx)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct udc_ep_config *ep_cfg;
	struct net_buf *buf;

	ep_cfg = udc_get_ep_cfg(dev, ep_idx | USB_EP_DIR_IN);
	buf = udc_buf_peek(dev, ep_cfg->addr);
	if (buf == NULL) {
		udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
		return;
	}

	/* tx_len[ep_idx] holds the amount programmed for the last round */
	net_buf_pull(buf, priv->tx_len[ep_idx]);
	/* Zero return means another chunk was queued; wait for next XferCompl */
	if (buf->len && dwc2_tx_fifo_write(dev, ep_cfg, buf) == 0) {
		return;
	}

	/* IN endpoints use bits 0..15 of the xfer_finished event */
	k_event_post(&priv->xfer_finished, BIT(ep_idx));
	k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_EP_FINISHED));
}
2369
dwc2_handle_iepint(const struct device * dev)2370 static inline void dwc2_handle_iepint(const struct device *dev)
2371 {
2372 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
2373 const uint8_t n_max = 16;
2374 uint32_t diepmsk;
2375 uint32_t daint;
2376
2377 diepmsk = sys_read32((mem_addr_t)&base->diepmsk);
2378 daint = sys_read32((mem_addr_t)&base->daint);
2379
2380 for (uint8_t n = 0U; n < n_max; n++) {
2381 mem_addr_t diepint_reg = (mem_addr_t)&base->in_ep[n].diepint;
2382 uint32_t diepint;
2383 uint32_t status;
2384
2385 if (daint & USB_DWC2_DAINT_INEPINT(n)) {
2386 /* Read and clear interrupt status */
2387 diepint = sys_read32(diepint_reg);
2388 status = diepint & diepmsk;
2389 sys_write32(status, diepint_reg);
2390
2391 LOG_DBG("ep 0x%02x interrupt status: 0x%x",
2392 n | USB_EP_DIR_IN, status);
2393
2394 if (status & USB_DWC2_DIEPINT_XFERCOMPL) {
2395 dwc2_handle_in_xfercompl(dev, n);
2396 }
2397
2398 }
2399 }
2400
2401 /* Clear IEPINT interrupt */
2402 sys_write32(USB_DWC2_GINTSTS_IEPINT, (mem_addr_t)&base->gintsts);
2403 }
2404
/* Handle OUT endpoint XferCompl: compute how many bytes arrived, validate
 * isochronous PID/packet-count consistency, commit DMA data to the net_buf,
 * and either re-arm the endpoint for more data or report completion to the
 * bottom-half thread.
 */
static inline void dwc2_handle_out_xfercompl(const struct device *dev,
					     const uint8_t ep_idx)
{
	struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, ep_idx);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	uint32_t bcnt;
	struct net_buf *buf;
	uint32_t doeptsiz;
	const bool is_iso = dwc2_ep_is_iso(ep_cfg);

	doeptsiz = sys_read32((mem_addr_t)&base->out_ep[ep_idx].doeptsiz);

	buf = udc_buf_peek(dev, ep_cfg->addr);
	if (!buf) {
		LOG_ERR("No buffer for ep 0x%02x", ep_cfg->addr);
		udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
		return;
	}

	/* The original transfer size value is necessary here because controller
	 * decreases the value for every byte stored.
	 */
	bcnt = usb_dwc2_get_doeptsizn_xfersize(priv->rx_siz[ep_idx]) -
	       usb_dwc2_get_doeptsizn_xfersize(doeptsiz);

	if (is_iso) {
		uint32_t pkts;
		bool valid;

		/* Packets actually received = programmed count - remaining */
		pkts = usb_dwc2_get_doeptsizn_pktcnt(priv->rx_siz[ep_idx]) -
		       usb_dwc2_get_doeptsizn_pktcnt(doeptsiz);
		/* Received PID must be consistent with the packet count:
		 * DATA0 ends a 1-packet, DATA1 a 2-packet, DATA2 a 3-packet
		 * (high bandwidth) transaction; MDATA alone is never final.
		 */
		switch (usb_dwc2_get_doeptsizn_rxdpid(doeptsiz)) {
		case USB_DWC2_DOEPTSIZN_RXDPID_DATA0:
			valid = (pkts == 1);
			break;
		case USB_DWC2_DOEPTSIZN_RXDPID_DATA1:
			valid = (pkts == 2);
			break;
		case USB_DWC2_DOEPTSIZN_RXDPID_DATA2:
			valid = (pkts == 3);
			break;
		case USB_DWC2_DOEPTSIZN_RXDPID_MDATA:
		default:
			valid = false;
			break;
		}

		if (!valid) {
			if (!priv->bufferdma) {
				/* RxFlvl added data to net buf, rollback */
				net_buf_remove_mem(buf, bcnt);
			}
			/* Data is not valid, discard it */
			bcnt = 0;
		}
	}

	/* In Buffer DMA mode the core wrote directly to buf->data; invalidate
	 * cache before the CPU reads it and account for the added length.
	 */
	if (priv->bufferdma && bcnt) {
		sys_cache_data_invd_range(buf->data, bcnt);
		net_buf_add(buf, bcnt);
	}

	/* A full-size (wMaxPacketSize multiple) reception with tailroom left
	 * means the transfer continues; otherwise signal completion.
	 */
	if (!is_iso && (bcnt % udc_mps_ep_size(ep_cfg)) == 0 &&
	    net_buf_tailroom(buf)) {
		dwc2_prep_rx(dev, buf, ep_cfg);
	} else {
		/* OUT endpoints use bits 16..31 of the xfer_finished event */
		k_event_post(&priv->xfer_finished, BIT(16 + ep_idx));
		k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_EP_FINISHED));
	}
}
2476
/* Handle the OUT Endpoints interrupt: walk all device OUT endpoints flagged
 * in DAINT, acknowledge their masked DOEPINT status, detect SETUP reception
 * (including the Buffer DMA special case), and dispatch XferCompl.
 */
static inline void dwc2_handle_oepint(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	const uint8_t n_max = 16;
	uint32_t doepmsk;
	uint32_t daint;

	doepmsk = sys_read32((mem_addr_t)&base->doepmsk);
	daint = sys_read32((mem_addr_t)&base->daint);

	for (uint8_t n = 0U; n < n_max; n++) {
		mem_addr_t doepint_reg = (mem_addr_t)&base->out_ep[n].doepint;
		uint32_t doepint;
		uint32_t status;

		if (!(daint & USB_DWC2_DAINT_OUTEPINT(n))) {
			continue;
		}

		/* Read and clear interrupt status */
		doepint = sys_read32(doepint_reg);
		status = doepint & doepmsk;
		sys_write32(status, doepint_reg);

		LOG_DBG("ep 0x%02x interrupt status: 0x%x", n, status);

		/* StupPktRcvd is not enabled for interrupt, but must be checked
		 * when XferComp hits to determine if SETUP token was received.
		 */
		if (priv->bufferdma && (status & USB_DWC2_DOEPINT_XFERCOMPL) &&
		    (doepint & USB_DWC2_DOEPINT_STUPPKTRCVD)) {
			uint32_t addr;

			sys_write32(USB_DWC2_DOEPINT_STUPPKTRCVD, doepint_reg);
			/* Suppress XferCompl handling; this completion was the
			 * SETUP packet, not regular OUT data.
			 */
			status &= ~USB_DWC2_DOEPINT_XFERCOMPL;

			/* DMAAddr points past the memory location where the
			 * SETUP data was stored. Copy the received SETUP data
			 * to temporary location used also in Completer mode
			 * which allows common SETUP interrupt handling.
			 */
			addr = sys_read32((mem_addr_t)&base->out_ep[0].doepdma);
			sys_cache_data_invd_range((void *)(addr - 8), 8);
			memcpy(priv->setup, (void *)(addr - 8), sizeof(priv->setup));
		}

		if (status & USB_DWC2_DOEPINT_SETUP) {
			k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_SETUP));
		}

		if (status & USB_DWC2_DOEPINT_STSPHSERCVD) {
			/* Driver doesn't need any special handling, but it is
			 * mandatory that the bit is cleared in Buffer DMA mode.
			 * If the bit is not cleared (i.e. when this interrupt
			 * bit is masked), then SETUP interrupts will cease
			 * after first control transfer with data stage from
			 * device to host.
			 */
		}

		if (status & USB_DWC2_DOEPINT_XFERCOMPL) {
			dwc2_handle_out_xfercompl(dev, n);
		}
	}

	/* Clear OEPINT interrupt */
	sys_write32(USB_DWC2_GINTSTS_OEPINT, (mem_addr_t)&base->gintsts);
}
2546
/* In DWC2 otg context an incomplete isochronous IN transfer means that the
 * host did not issue an IN token to at least one isochronous endpoint and
 * software has to find on which endpoints the data is no longer valid and
 * discard it.
 */
static void dwc2_handle_incompisoin(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gintsts_reg = (mem_addr_t)&base->gintsts;
	/* An ISO IN endpoint still enabled at this point was not serviced by
	 * the host in this (micro)frame.
	 */
	const uint32_t mask =
		USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPTYPE_MASK |
		USB_DWC2_DEPCTL_USBACTEP;
	const uint32_t val =
		USB_DWC2_DEPCTL_EPENA |
		usb_dwc2_set_depctl_eptype(USB_DWC2_DEPCTL_EPTYPE_ISO) |
		USB_DWC2_DEPCTL_USBACTEP;

	/* Endpoint 0 cannot be isochronous, start at 1 */
	for (uint8_t i = 1U; i < priv->numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_IN ||
		    epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			mem_addr_t diepctl_reg = dwc2_get_dxepctl_reg(dev, i | USB_EP_DIR_IN);
			uint32_t diepctl;

			diepctl = sys_read32(diepctl_reg);

			/* Check if host did not collect the ISO IN data */
			if ((diepctl & mask) == val) {
				struct udc_ep_config *cfg;
				struct net_buf *buf;

				cfg = udc_get_ep_cfg(dev, i | USB_EP_DIR_IN);
				__ASSERT_NO_MSG(cfg && cfg->stat.enabled &&
						dwc2_ep_is_iso(cfg));

				udc_dwc2_ep_disable(dev, cfg, false);

				buf = udc_buf_get(dev, cfg->addr);
				if (buf) {
					/* Data is no longer relevant */
					udc_submit_ep_event(dev, buf, 0);

					/* Try to queue next packet before SOF */
					dwc2_handle_xfer_next(dev, cfg);
				}
			}
		}
	}

	sys_write32(USB_DWC2_GINTSTS_INCOMPISOIN, gintsts_reg);
}
2600
2601 /* In DWC2 otg context incomplete isochronous OUT transfer means that the host
2602 * did not issue OUT token to at least one isochronous endpoint and software has
2603 * to find on which endpoint it didn't receive any data and let the stack know.
2604 */
static void dwc2_handle_incompisoout(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gintsts_reg = (mem_addr_t)&base->gintsts;
	/* An ISO OUT endpoint still enabled with the DPID expected for the
	 * current (micro)frame parity received no data from the host.
	 */
	const uint32_t mask =
		USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPTYPE_MASK |
		USB_DWC2_DEPCTL_DPID | USB_DWC2_DEPCTL_USBACTEP;
	const uint32_t val =
		USB_DWC2_DEPCTL_EPENA |
		usb_dwc2_set_depctl_eptype(USB_DWC2_DEPCTL_EPTYPE_ISO) |
		/* DPID tracks the frame number parity captured at SOF */
		((priv->sof_num & 1) ? USB_DWC2_DEPCTL_DPID : 0) |
		USB_DWC2_DEPCTL_USBACTEP;

	/* Endpoint 0 cannot be isochronous, start at 1 */
	for (uint8_t i = 1U; i < priv->numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT ||
		    epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, i);
			uint32_t doepctl;

			doepctl = sys_read32(doepctl_reg);

			/* Check if endpoint didn't receive ISO OUT data */
			if ((doepctl & mask) == val) {
				struct udc_ep_config *cfg;
				struct net_buf *buf;

				cfg = udc_get_ep_cfg(dev, i);
				__ASSERT_NO_MSG(cfg && cfg->stat.enabled &&
						dwc2_ep_is_iso(cfg));

				udc_dwc2_ep_disable(dev, cfg, false);

				/* Return the empty buffer to the stack */
				buf = udc_buf_get(dev, cfg->addr);
				if (buf) {
					udc_submit_ep_event(dev, buf, 0);
				}
			}
		}
	}

	sys_write32(USB_DWC2_GINTSTS_INCOMPISOOUT, gintsts_reg);
}
2651
/* Top-half interrupt handler. While hibernated only GPWRDN wakeup sources
 * are serviced (deferred to the thread via drv_evt); otherwise GINTSTS is
 * drained in a loop, with each cause either handled inline (short register
 * work) or posted to the bottom-half thread.
 */
static void udc_dwc2_isr_handler(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	mem_addr_t gintsts_reg = (mem_addr_t)&base->gintsts;
	uint32_t int_status;
	uint32_t gintmsk;

	if (priv->hibernated) {
		uint32_t gpwrdn = sys_read32((mem_addr_t)&base->gpwrdn);
		bool reset, resume = false;

		/* Clear interrupts */
		sys_write32(gpwrdn, (mem_addr_t)&base->gpwrdn);

		/* Line state change to DM=1/DP=0 indicates host resume */
		if (gpwrdn & USB_DWC2_GPWRDN_LNSTSCHNG) {
			resume = usb_dwc2_get_gpwrdn_linestate(gpwrdn) ==
				 USB_DWC2_GPWRDN_LINESTATE_DM1DP0;
		}

		reset = gpwrdn & USB_DWC2_GPWRDN_RESETDETECTED;

		/* Actual hibernation exit runs in thread context */
		if (resume) {
			k_event_post(&priv->drv_evt,
				     BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_HOST_RESUME));
		}

		if (reset) {
			k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_BUS_RESET));
		}

		(void)dwc2_quirk_irq_clear(dev);
		return;
	}

	gintmsk = sys_read32((mem_addr_t)&base->gintmsk);

	/* Read and handle interrupt status register */
	while ((int_status = sys_read32(gintsts_reg) & gintmsk)) {

		LOG_DBG("GINTSTS 0x%x", int_status);

		if (int_status & USB_DWC2_GINTSTS_SOF) {
			uint32_t dsts;

			/* Clear USB SOF interrupt. */
			sys_write32(USB_DWC2_GINTSTS_SOF, gintsts_reg);

			/* Latch frame number for ISO OUT DPID checks */
			dsts = sys_read32((mem_addr_t)&base->dsts);
			priv->sof_num = usb_dwc2_get_dsts_soffn(dsts);
			udc_submit_event(dev, UDC_EVT_SOF, 0);
		}

		if (int_status & USB_DWC2_GINTSTS_USBRST) {
			/* Clear and handle USB Reset interrupt. */
			sys_write32(USB_DWC2_GINTSTS_USBRST, gintsts_reg);
			dwc2_on_bus_reset(dev);
			LOG_DBG("USB Reset interrupt");

			/* Reset supersedes a pending hibernation entry */
			cancel_hibernation_request(priv);
		}

		if (int_status & USB_DWC2_GINTSTS_ENUMDONE) {
			/* Clear and handle Enumeration Done interrupt. */
			sys_write32(USB_DWC2_GINTSTS_ENUMDONE, gintsts_reg);
			dwc2_handle_enumdone(dev);
			udc_submit_event(dev, UDC_EVT_RESET, 0);
		}

		if (int_status & USB_DWC2_GINTSTS_WKUPINT) {
			/* Clear Resume/Remote Wakeup Detected interrupt. */
			sys_write32(USB_DWC2_GINTSTS_WKUPINT, gintsts_reg);
			udc_set_suspended(dev, false);
			udc_submit_event(dev, UDC_EVT_RESUME, 0);

			cancel_hibernation_request(priv);
		}

		if (int_status & USB_DWC2_GINTSTS_IEPINT) {
			/* Handle IN Endpoints interrupt */
			dwc2_handle_iepint(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_RXFLVL) {
			/* Handle RxFIFO Non-Empty interrupt */
			dwc2_handle_rxflvl(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_OEPINT) {
			/* Handle OUT Endpoints interrupt */
			dwc2_handle_oepint(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_INCOMPISOIN) {
			dwc2_handle_incompisoin(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_INCOMPISOOUT) {
			dwc2_handle_incompisoout(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_USBSUSP) {
			/* Clear USB Suspend interrupt. */
			sys_write32(USB_DWC2_GINTSTS_USBSUSP, gintsts_reg);

			/* Notify the stack */
			udc_set_suspended(dev, true);
			udc_submit_event(dev, UDC_EVT_SUSPEND, 0);

			/* Thread decides whether to actually hibernate */
			request_hibernation(priv);
		}
	}

	(void)dwc2_quirk_irq_clear(dev);
}
2768
dwc2_handle_hibernation_exit(const struct device * dev,bool rwup,bool bus_reset)2769 static void dwc2_handle_hibernation_exit(const struct device *dev,
2770 bool rwup, bool bus_reset)
2771 {
2772 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
2773 struct udc_dwc2_data *const priv = udc_get_private(dev);
2774
2775 dwc2_exit_hibernation(dev, rwup, bus_reset);
2776 dwc2_restore_device_registers(dev, rwup);
2777
2778 priv->hibernated = 0;
2779 if (!rwup) {
2780 LOG_DBG("Hibernation exit complete");
2781 }
2782
2783 /* Let stack know we are no longer suspended */
2784 udc_set_suspended(dev, false);
2785 udc_submit_event(dev, UDC_EVT_RESUME, 0);
2786
2787 if (rwup) {
2788 /* Resume has been driven for at least 1 ms now, do 1 ms more to
2789 * have sufficient margin.
2790 */
2791 k_msleep(1);
2792
2793 sys_clear_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_RMTWKUPSIG);
2794 }
2795
2796 if (rwup) {
2797 LOG_DBG("Hibernation exit on Remote Wakeup complete");
2798 }
2799
2800 if (bus_reset) {
2801 /* Clear all pending transfers */
2802 k_event_clear(&priv->xfer_new, UINT32_MAX);
2803 k_event_clear(&priv->xfer_finished, UINT32_MAX);
2804 dwc2_on_bus_reset(dev);
2805 } else {
2806 /* Resume any pending transfer handling */
2807 if (k_event_test(&priv->xfer_new, UINT32_MAX)) {
2808 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));
2809 }
2810
2811 if (k_event_test(&priv->xfer_finished, UINT32_MAX)) {
2812 k_event_post(&priv->drv_evt, BIT(DWC2_DRV_EVT_EP_FINISHED));
2813 }
2814 }
2815 }
2816
pull_next_ep_from_bitmap(uint32_t * bitmap)2817 static uint8_t pull_next_ep_from_bitmap(uint32_t *bitmap)
2818 {
2819 unsigned int bit;
2820
2821 __ASSERT_NO_MSG(bitmap && *bitmap);
2822
2823 bit = find_lsb_set(*bitmap) - 1;
2824 *bitmap &= ~BIT(bit);
2825
2826 if (bit >= 16) {
2827 return USB_EP_DIR_OUT | (bit - 16);
2828 } else {
2829 return USB_EP_DIR_IN | bit;
2830 }
2831 }
2832
/* Bottom-half worker, invoked repeatedly by the per-instance driver thread.
 * Waits for events posted by the ISR (and by the API), then processes them
 * under the driver lock: feeding new transfers, completing finished ones,
 * handling SETUP, remote wakeup and hibernation entry/exit.
 */
static ALWAYS_INLINE void dwc2_thread_handler(void *const arg)
{
	const struct device *dev = (const struct device *)arg;
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	const struct udc_dwc2_config *const config = dev->config;
	struct udc_ep_config *ep_cfg;
	const uint32_t hibernation_exit_events = (BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_BUS_RESET) |
						  BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_HOST_RESUME));
	uint32_t prev;
	uint32_t evt;
	uint32_t eps;
	uint8_t ep;

	/* This is the bottom-half of the ISR handler and the place where
	 * a new transfer can be fed.
	 */
	evt = k_event_wait(&priv->drv_evt, UINT32_MAX, false, K_FOREVER);

	udc_lock_internal(dev, K_FOREVER);

	if (evt & BIT(DWC2_DRV_EVT_XFER)) {
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_XFER));

		if (!priv->hibernated) {
			LOG_DBG("New transfer(s) in the queue");
			/* Snapshot-and-clear the per-endpoint bitmap */
			eps = k_event_test(&priv->xfer_new, UINT32_MAX);
			k_event_clear(&priv->xfer_new, eps);
		} else {
			/* Events will be handled after hibernation exit */
			eps = 0;
		}

		while (eps) {
			ep = pull_next_ep_from_bitmap(&eps);
			ep_cfg = udc_get_ep_cfg(dev, ep);

			if (!udc_ep_is_busy(dev, ep_cfg->addr)) {
				dwc2_handle_xfer_next(dev, ep_cfg);
			} else {
				LOG_DBG("ep 0x%02x busy", ep_cfg->addr);
			}
		}
	}

	if (evt & BIT(DWC2_DRV_EVT_EP_FINISHED)) {
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_EP_FINISHED));

		if (!priv->hibernated) {
			eps = k_event_test(&priv->xfer_finished, UINT32_MAX);
			k_event_clear(&priv->xfer_finished, eps);
		} else {
			/* Events will be handled after hibernation exit */
			eps = 0;
		}

		while (eps) {
			ep = pull_next_ep_from_bitmap(&eps);
			ep_cfg = udc_get_ep_cfg(dev, ep);

			if (USB_EP_DIR_IS_IN(ep)) {
				LOG_DBG("DIN event ep 0x%02x", ep);
				dwc2_handle_evt_din(dev, ep_cfg);
			} else {
				LOG_DBG("DOUT event ep 0x%02x", ep_cfg->addr);
				dwc2_handle_evt_dout(dev, ep_cfg);
			}

			/* A completed transfer may unblock the next queued one */
			if (!udc_ep_is_busy(dev, ep_cfg->addr)) {
				dwc2_handle_xfer_next(dev, ep_cfg);
			} else {
				LOG_DBG("ep 0x%02x busy", ep_cfg->addr);
			}
		}
	}

	if (evt & BIT(DWC2_DRV_EVT_SETUP)) {
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_SETUP));

		LOG_DBG("SETUP event");
		dwc2_handle_evt_setup(dev);
	}

	if (evt & BIT(DWC2_DRV_EVT_REMOTE_WAKEUP)) {
		/* Remote wakeup supersedes a pending hibernation entry */
		k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_REMOTE_WAKEUP) |
			      BIT(DWC2_DRV_EVT_ENTER_HIBERNATION));

		if (priv->hibernated) {
			config->irq_disable_func(dev);

			/* rwup=true: exit drives the resume signaling itself */
			dwc2_handle_hibernation_exit(dev, true, false);

			config->irq_enable_func(dev);
		} else {
			sys_set_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_RMTWKUPSIG);

			udc_set_suspended(dev, false);
			udc_submit_event(dev, UDC_EVT_RESUME, 0);

			/* Drive resume for 2 ms to have sufficient margin */
			k_msleep(2);

			sys_clear_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_RMTWKUPSIG);
		}
	}

	if (evt & BIT(DWC2_DRV_EVT_ENTER_HIBERNATION)) {
		config->irq_disable_func(dev);

		prev = k_event_clear(&priv->drv_evt, BIT(DWC2_DRV_EVT_ENTER_HIBERNATION));

		/* Only enter hibernation if IRQ did not cancel the request */
		if (prev & BIT(DWC2_DRV_EVT_ENTER_HIBERNATION)) {
			dwc2_enter_hibernation(dev);
		}

		config->irq_enable_func(dev);
	}

	if (evt & hibernation_exit_events) {
		bool bus_reset;

		LOG_DBG("Hibernation exit event");
		config->irq_disable_func(dev);

		prev = k_event_clear(&priv->drv_evt, hibernation_exit_events);
		bus_reset = prev & BIT(DWC2_DRV_EVT_HIBERNATION_EXIT_BUS_RESET);

		/* Guard against a racing exit already performed elsewhere */
		if (priv->hibernated) {
			dwc2_handle_hibernation_exit(dev, false, bus_reset);
		}

		config->irq_enable_func(dev);
	}

	udc_unlock_internal(dev);
}
2970
2971 static const struct udc_api udc_dwc2_api = {
2972 .lock = udc_dwc2_lock,
2973 .unlock = udc_dwc2_unlock,
2974 .device_speed = udc_dwc2_device_speed,
2975 .init = udc_dwc2_init,
2976 .enable = udc_dwc2_enable,
2977 .disable = udc_dwc2_disable,
2978 .shutdown = udc_dwc2_shutdown,
2979 .set_address = udc_dwc2_set_address,
2980 .test_mode = udc_dwc2_test_mode,
2981 .host_wakeup = udc_dwc2_host_wakeup,
2982 .ep_enable = udc_dwc2_ep_activate,
2983 .ep_disable = udc_dwc2_ep_deactivate,
2984 .ep_set_halt = udc_dwc2_ep_set_halt,
2985 .ep_clear_halt = udc_dwc2_ep_clear_halt,
2986 .ep_enqueue = udc_dwc2_ep_enqueue,
2987 .ep_dequeue = udc_dwc2_ep_dequeue,
2988 };
2989
/* Devicetree compatible string handled by this driver instance macro set */
#define DT_DRV_COMPAT snps_dwc2
2991
/* Resolve a pointer to the vendor quirks table for instance n, or NULL when
 * the devicetree node has no vendor at compatible index 1 (i.e. no
 * vendor-specific quirks apply to this core integration).
 */
#define UDC_DWC2_VENDOR_QUIRK_GET(n)					\
	COND_CODE_1(DT_NODE_VENDOR_HAS_IDX(DT_DRV_INST(n), 1),		\
		    (&dwc2_vendor_quirks_##n),				\
		    (NULL))
2996
/* Get the core register block address for instance n: use the sole unnamed
 * reg entry when the node has exactly one, otherwise the reg named "core".
 */
#define UDC_DWC2_DT_INST_REG_ADDR(n)					\
	COND_CODE_1(DT_NUM_REGS(DT_DRV_INST(n)), (DT_INST_REG_ADDR(n)),	\
		    (DT_INST_REG_ADDR_BY_NAME(n, core)))
3000
/* Define pinctrl configuration for instance n only when the node provides a
 * "default" pinctrl state; expand to nothing otherwise.
 */
#define UDC_DWC2_PINCTRL_DT_INST_DEFINE(n)				\
	COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default),		\
		    (PINCTRL_DT_INST_DEFINE(n)), ())
3004
/* Get the pinctrl device configuration for instance n, or NULL when the node
 * has no "default" pinctrl state (matches the conditional define above).
 */
#define UDC_DWC2_PINCTRL_DT_INST_DEV_CONFIG_GET(n)			\
	COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default),		\
		    ((void *)PINCTRL_DT_INST_DEV_CONFIG_GET(n)), (NULL))
3008
/* IRQ flags for IRQ_CONNECT(): pass the interrupt's "type" cell when the
 * devicetree binding provides one, otherwise 0. The _CONCAT selects the
 * TYPE0/TYPE1 variant based on DT_INST_IRQ_HAS_CELL(n, type).
 */
#define UDC_DWC2_IRQ_FLAGS_TYPE0(n)	0
#define UDC_DWC2_IRQ_FLAGS_TYPE1(n)	DT_INST_IRQ(n, type)
#define DW_IRQ_FLAGS(n) \
	_CONCAT(UDC_DWC2_IRQ_FLAGS_TYPE, DT_INST_IRQ_HAS_CELL(n, type))(n)
3013
3014 /*
3015 * A UDC driver should always be implemented as a multi-instance
3016 * driver, even if your platform does not require it.
3017 */
3018 #define UDC_DWC2_DEVICE_DEFINE(n) \
3019 UDC_DWC2_PINCTRL_DT_INST_DEFINE(n); \
3020 \
3021 K_THREAD_STACK_DEFINE(udc_dwc2_stack_##n, CONFIG_UDC_DWC2_STACK_SIZE); \
3022 \
3023 static void udc_dwc2_thread_##n(void *dev, void *arg1, void *arg2) \
3024 { \
3025 while (true) { \
3026 dwc2_thread_handler(dev); \
3027 } \
3028 } \
3029 \
3030 static void udc_dwc2_make_thread_##n(const struct device *dev) \
3031 { \
3032 struct udc_dwc2_data *priv = udc_get_private(dev); \
3033 \
3034 k_thread_create(&priv->thread_data, \
3035 udc_dwc2_stack_##n, \
3036 K_THREAD_STACK_SIZEOF(udc_dwc2_stack_##n), \
3037 udc_dwc2_thread_##n, \
3038 (void *)dev, NULL, NULL, \
3039 K_PRIO_COOP(CONFIG_UDC_DWC2_THREAD_PRIORITY), \
3040 K_ESSENTIAL, \
3041 K_NO_WAIT); \
3042 k_thread_name_set(&priv->thread_data, dev->name); \
3043 } \
3044 \
3045 static void udc_dwc2_irq_enable_func_##n(const struct device *dev) \
3046 { \
3047 IRQ_CONNECT(DT_INST_IRQN(n), \
3048 DT_INST_IRQ(n, priority), \
3049 udc_dwc2_isr_handler, \
3050 DEVICE_DT_INST_GET(n), \
3051 DW_IRQ_FLAGS(n)); \
3052 \
3053 irq_enable(DT_INST_IRQN(n)); \
3054 } \
3055 \
3056 static void udc_dwc2_irq_disable_func_##n(const struct device *dev) \
3057 { \
3058 irq_disable(DT_INST_IRQN(n)); \
3059 } \
3060 \
3061 static struct udc_ep_config ep_cfg_out[DT_INST_PROP(n, num_out_eps)]; \
3062 static struct udc_ep_config ep_cfg_in[DT_INST_PROP(n, num_in_eps)]; \
3063 \
3064 static const struct udc_dwc2_config udc_dwc2_config_##n = { \
3065 .num_out_eps = DT_INST_PROP(n, num_out_eps), \
3066 .num_in_eps = DT_INST_PROP(n, num_in_eps), \
3067 .ep_cfg_in = ep_cfg_in, \
3068 .ep_cfg_out = ep_cfg_out, \
3069 .make_thread = udc_dwc2_make_thread_##n, \
3070 .base = (struct usb_dwc2_reg *)UDC_DWC2_DT_INST_REG_ADDR(n), \
3071 .pcfg = UDC_DWC2_PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
3072 .irq_enable_func = udc_dwc2_irq_enable_func_##n, \
3073 .irq_disable_func = udc_dwc2_irq_disable_func_##n, \
3074 .quirks = UDC_DWC2_VENDOR_QUIRK_GET(n), \
3075 .ghwcfg1 = DT_INST_PROP(n, ghwcfg1), \
3076 .ghwcfg2 = DT_INST_PROP(n, ghwcfg2), \
3077 .ghwcfg4 = DT_INST_PROP(n, ghwcfg4), \
3078 }; \
3079 \
3080 static struct udc_dwc2_data udc_priv_##n = { \
3081 }; \
3082 \
3083 static struct udc_data udc_data_##n = { \
3084 .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex), \
3085 .priv = &udc_priv_##n, \
3086 }; \
3087 \
3088 DEVICE_DT_INST_DEFINE(n, dwc2_driver_preinit, NULL, \
3089 &udc_data_##n, &udc_dwc2_config_##n, \
3090 POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
3091 &udc_dwc2_api);
3092
/* Instantiate the driver for every enabled "snps,dwc2" devicetree node */
DT_INST_FOREACH_STATUS_OKAY(UDC_DWC2_DEVICE_DEFINE)
3094