1 /*
2 * Copyright (c) 2023 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include "udc_common.h"
8 #include "udc_dwc2.h"
9
10 #include <string.h>
11 #include <stdio.h>
12
13 #include <zephyr/cache.h>
14 #include <zephyr/kernel.h>
15 #include <zephyr/devicetree.h>
16 #include <zephyr/sys/util.h>
17 #include <zephyr/sys/sys_io.h>
18 #include <zephyr/sys/byteorder.h>
19 #include <zephyr/drivers/usb/udc.h>
20 #include <zephyr/usb/usb_ch9.h>
21 #include <usb_dwc2_hw.h>
22
23 #include <zephyr/logging/log.h>
24 LOG_MODULE_REGISTER(udc_dwc2, CONFIG_UDC_DRIVER_LOG_LEVEL);
25 #include "udc_dwc2_vendor_quirks.h"
26
/* Event types passed from the ISR (top half) to the driver thread. */
enum dwc2_drv_event_type {
	/* Trigger next transfer, must not be used for control OUT */
	DWC2_DRV_EVT_XFER,
	/* Setup packet received */
	DWC2_DRV_EVT_SETUP,
	/* OUT transaction for specific endpoint is finished */
	DWC2_DRV_EVT_DOUT,
	/* IN transaction for specific endpoint is finished */
	DWC2_DRV_EVT_DIN,
};
37
/* Event carried from the ISR to the driver thread via drv_msgq. */
struct dwc2_drv_event {
	const struct device *dev;
	enum dwc2_drv_event_type type;
	/* Byte count of the finished transaction (used for DOUT events) */
	uint32_t bcnt;
	/* Endpoint address the event applies to */
	uint8_t ep;
};

/* Message queue shared by the ISR (producer) and driver thread (consumer). */
K_MSGQ_DEFINE(drv_msgq, sizeof(struct dwc2_drv_event),
	      CONFIG_UDC_DWC2_MAX_QMESSAGES, sizeof(void *));
47
48
/* Minimum RX FIFO size in 32-bit words considering the largest used OUT packet
 * of 512 bytes. The value must be adjusted according to the number of OUT
 * endpoints.
 */
#define UDC_DWC2_GRXFSIZ_DEFAULT	(15U + 512U/4U)

/* TX FIFO0 depth in 32-bit words (used by control IN endpoint) */
#define UDC_DWC2_FIFO0_DEPTH		16U

/* Get Data FIFO access register for endpoint index idx.
 * Arguments are fully parenthesized so the macro expands safely when
 * called with non-trivial expressions (the cast previously bound only
 * to the first term of a compound base expression).
 */
#define UDC_DWC2_EP_FIFO(base, idx)	((mem_addr_t)(base) + 0x1000 * ((idx) + 1))
60
/* Driver private data per instance */
struct udc_dwc2_data {
	struct k_thread thread_data;
	/* Cached GHWCFG1, used to determine endpoint directions */
	uint32_t ghwcfg1;
	/* Speed enumerated by the controller (DSTS.ENUMSPD) */
	uint32_t enumspd;
	/* NOTE(review): not referenced in this part of the file — presumably
	 * a bitmap of claimed TX FIFOs; verify against the rest of the driver
	 */
	uint32_t txf_set;
	/* Maximum DIEPTSIZn transfer size for non-control IN endpoints */
	uint32_t max_xfersize;
	/* Maximum DIEPTSIZn packet count for non-control IN endpoints */
	uint32_t max_pktcnt;
	/* Length programmed into the current IN transfer, per endpoint index */
	uint32_t tx_len[16];
	uint16_t dfifodepth;
	uint16_t rxfifo_depth;
	uint16_t max_txfifo_depth[16];
	unsigned int dynfifosizing : 1;
	/* Controller operates in Buffer DMA mode (otherwise Completer mode) */
	unsigned int bufferdma : 1;
	/* Number of endpoints including control endpoint */
	uint8_t numdeveps;
	/* Number of IN endpoints including control endpoint */
	uint8_t ineps;
	/* Number of OUT endpoints including control endpoint */
	uint8_t outeps;
	/* Temporary storage for the most recently received SETUP packet */
	uint8_t setup[8];
};
83
84 #if defined(CONFIG_PINCTRL)
85 #include <zephyr/drivers/pinctrl.h>
86
dwc2_init_pinctrl(const struct device * dev)87 static int dwc2_init_pinctrl(const struct device *dev)
88 {
89 const struct udc_dwc2_config *const config = dev->config;
90 const struct pinctrl_dev_config *const pcfg = config->pcfg;
91 int ret = 0;
92
93 if (pcfg == NULL) {
94 LOG_INF("Skip pinctrl configuration");
95 return 0;
96 }
97
98 ret = pinctrl_apply_state(pcfg, PINCTRL_STATE_DEFAULT);
99 if (ret) {
100 LOG_ERR("Failed to apply default pinctrl state (%d)", ret);
101 }
102
103 LOG_DBG("Apply pinctrl");
104
105 return ret;
106 }
107 #else
/* Stub used when pinctrl support is not enabled in the build. */
static int dwc2_init_pinctrl(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}
114 #endif
115
/* Return the DWC2 register block base address for this instance. */
static inline struct usb_dwc2_reg *dwc2_get_base(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;

	return config->base;
}
122
123 /* Get DOEPCTLn or DIEPCTLn register address */
dwc2_get_dxepctl_reg(const struct device * dev,const uint8_t ep)124 static mem_addr_t dwc2_get_dxepctl_reg(const struct device *dev, const uint8_t ep)
125 {
126 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
127 uint8_t ep_idx = USB_EP_GET_IDX(ep);
128
129 if (USB_EP_DIR_IS_OUT(ep)) {
130 return (mem_addr_t)&base->out_ep[ep_idx].doepctl;
131 } else {
132 return (mem_addr_t)&base->in_ep[ep_idx].diepctl;
133 }
134 }
135
136 /* Get available FIFO space in bytes */
dwc2_ftx_avail(const struct device * dev,const uint32_t idx)137 static uint32_t dwc2_ftx_avail(const struct device *dev, const uint32_t idx)
138 {
139 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
140 mem_addr_t reg = (mem_addr_t)&base->in_ep[idx].dtxfsts;
141 uint32_t dtxfsts;
142
143 dtxfsts = sys_read32(reg);
144
145 return usb_dwc2_get_dtxfsts_ineptxfspcavail(dtxfsts) * 4;
146 }
147
dwc2_get_iept_pktctn(const struct device * dev,const uint32_t idx)148 static uint32_t dwc2_get_iept_pktctn(const struct device *dev, const uint32_t idx)
149 {
150 struct udc_dwc2_data *const priv = udc_get_private(dev);
151
152 if (idx == 0) {
153 return usb_dwc2_get_dieptsiz0_pktcnt(UINT32_MAX);
154 } else {
155 return priv->max_pktcnt;
156 }
157 }
158
dwc2_get_iept_xfersize(const struct device * dev,const uint32_t idx)159 static uint32_t dwc2_get_iept_xfersize(const struct device *dev, const uint32_t idx)
160 {
161 struct udc_dwc2_data *const priv = udc_get_private(dev);
162
163 if (idx == 0) {
164 return usb_dwc2_get_dieptsiz0_xfersize(UINT32_MAX);
165 } else {
166 return priv->max_xfersize;
167 }
168 }
169
/* Flush the shared RX FIFO. Busy-waits (without timeout) until the core
 * clears GRSTCTL.RXFFLSH to signal flush completion.
 */
static void dwc2_flush_rx_fifo(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;

	sys_write32(USB_DWC2_GRSTCTL_RXFFLSH, grstctl_reg);
	while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_RXFFLSH) {
	}
}
179
dwc2_flush_tx_fifo(const struct device * dev,const uint8_t idx)180 static void dwc2_flush_tx_fifo(const struct device *dev, const uint8_t idx)
181 {
182 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
183 mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;
184 /* TODO: use dwc2_get_dxepctl_reg() */
185 mem_addr_t diepctl_reg = (mem_addr_t)&base->in_ep[idx].diepctl;
186 uint32_t grstctl;
187 uint32_t fnum;
188
189 fnum = usb_dwc2_get_depctl_txfnum(sys_read32(diepctl_reg));
190 grstctl = usb_dwc2_set_grstctl_txfnum(fnum) | USB_DWC2_GRSTCTL_TXFFLSH;
191
192 sys_write32(grstctl, grstctl_reg);
193 while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_TXFFLSH) {
194 }
195 }
196
197 /* Return TX FIFOi depth in 32-bit words (i = f_idx + 1) */
dwc2_get_txfdep(const struct device * dev,const uint32_t f_idx)198 static uint32_t dwc2_get_txfdep(const struct device *dev, const uint32_t f_idx)
199 {
200 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
201 uint32_t dieptxf;
202
203 dieptxf = sys_read32((mem_addr_t)&base->dieptxf[f_idx]);
204
205 return usb_dwc2_get_dieptxf_inepntxfdep(dieptxf);
206 }
207
208 /* Return TX FIFOi address (i = f_idx + 1) */
dwc2_get_txfaddr(const struct device * dev,const uint32_t f_idx)209 static uint32_t dwc2_get_txfaddr(const struct device *dev, const uint32_t f_idx)
210 {
211 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
212 uint32_t dieptxf;
213
214 dieptxf = sys_read32((mem_addr_t)&base->dieptxf[f_idx]);
215
216 return usb_dwc2_get_dieptxf_inepntxfstaddr(dieptxf);
217 }
218
219 /* Set TX FIFOi address and depth (i = f_idx + 1) */
dwc2_set_txf(const struct device * dev,const uint32_t f_idx,const uint32_t dep,const uint32_t addr)220 static void dwc2_set_txf(const struct device *dev, const uint32_t f_idx,
221 const uint32_t dep, const uint32_t addr)
222 {
223 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
224 uint32_t dieptxf;
225
226 dieptxf = usb_dwc2_set_dieptxf_inepntxfdep(dep) |
227 usb_dwc2_set_dieptxf_inepntxfstaddr(addr);
228
229 sys_write32(dieptxf, (mem_addr_t)&base->dieptxf[f_idx]);
230 }
231
232 /* Enable/disable endpoint interrupt */
dwc2_set_epint(const struct device * dev,struct udc_ep_config * const cfg,const bool enabled)233 static void dwc2_set_epint(const struct device *dev,
234 struct udc_ep_config *const cfg, const bool enabled)
235 {
236 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
237 mem_addr_t reg = (mem_addr_t)&base->daintmsk;
238 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
239 uint32_t epmsk;
240
241 if (USB_EP_DIR_IS_IN(cfg->addr)) {
242 epmsk = USB_DWC2_DAINT_INEPINT(ep_idx);
243 } else {
244 epmsk = USB_DWC2_DAINT_OUTEPINT(ep_idx);
245 }
246
247 if (enabled) {
248 sys_set_bits(reg, epmsk);
249 } else {
250 sys_clear_bits(reg, epmsk);
251 }
252 }
253
/* Check whether buf can be handed to the controller in Buffer DMA mode:
 * the address must be 4-byte aligned, and multi-packet transfers are
 * rejected when MPS is not a multiple of 4 (see comment below).
 */
static bool dwc2_dma_buffer_ok_to_use(const struct device *dev, void *buf,
				      uint32_t xfersize, uint16_t mps)
{
	ARG_UNUSED(dev);

	if (!IS_ALIGNED(buf, 4)) {
		LOG_ERR("Buffer not aligned");
		return false;
	}

	/* If Max Packet Size is not a multiple of 4, packets after the first
	 * would need padding inside the buffer, which is not supported.
	 */
	if (unlikely(mps % 4) && (xfersize > mps)) {
		LOG_ERR("Padding not supported");
		return false;
	}

	return true;
}
272
/* Program an IN transfer for the endpoint and, in Completer mode, copy
 * the payload into the endpoint TX FIFO. Can be called from ISR context.
 *
 * Returns 0 on success, -EAGAIN if the TX FIFO does not have room for at
 * least one MPS-sized packet, -ENOTSUP if the buffer cannot be used in
 * Buffer DMA mode.
 */
static int dwc2_tx_fifo_write(const struct device *dev,
			      struct udc_ep_config *const cfg, struct net_buf *const buf)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);

	mem_addr_t dieptsiz_reg = (mem_addr_t)&base->in_ep[ep_idx].dieptsiz;
	/* TODO: use dwc2_get_dxepctl_reg() */
	mem_addr_t diepctl_reg = (mem_addr_t)&base->in_ep[ep_idx].diepctl;
	mem_addr_t diepint_reg = (mem_addr_t)&base->in_ep[ep_idx].diepint;

	uint32_t max_xfersize, max_pktcnt, pktcnt, spcavail;
	const size_t d = sizeof(uint32_t);
	unsigned int key;
	uint32_t len;

	spcavail = dwc2_ftx_avail(dev, ep_idx);
	/* Round down to multiple of endpoint MPS */
	spcavail -= spcavail % cfg->mps;
	/*
	 * Here, the available space should be equal to the FIFO space
	 * assigned/configured for that endpoint because we do not schedule another
	 * transfer until the previous one has not finished. For simplicity,
	 * we only check that the available space is not less than the endpoint
	 * MPS.
	 */
	if (spcavail < cfg->mps) {
		LOG_ERR("ep 0x%02x FIFO space is too low, %u (%u)",
			cfg->addr, spcavail, dwc2_ftx_avail(dev, ep_idx));
		return -EAGAIN;
	}

	len = MIN(buf->len, spcavail);

	if (len != 0U) {
		max_pktcnt = dwc2_get_iept_pktctn(dev, ep_idx);
		max_xfersize = dwc2_get_iept_xfersize(dev, ep_idx);

		if (len > max_xfersize) {
			/*
			 * Avoid short packets if the transfer size cannot be
			 * handled in one set.
			 */
			len = ROUND_DOWN(max_xfersize, cfg->mps);
		}

		/*
		 * Determine the number of packets for the current transfer;
		 * if the pktcnt is too large, truncate the actual transfer length.
		 */
		pktcnt = DIV_ROUND_UP(len, cfg->mps);
		if (pktcnt > max_pktcnt) {
			pktcnt = max_pktcnt;
			len = pktcnt * cfg->mps;
		}
	} else {
		/* ZLP */
		pktcnt = 1U;
	}

	LOG_DBG("Prepare ep 0x%02x xfer len %u pktcnt %u spcavail %u",
		cfg->addr, len, pktcnt, spcavail);
	/* Remember how much is in flight; consumed in dwc2_handle_in_xfercompl() */
	priv->tx_len[ep_idx] = len;

	/* Lock and write to endpoint FIFO */
	key = irq_lock();

	/* Set number of packets and transfer size */
	sys_write32((pktcnt << USB_DWC2_DEPTSIZN_PKTCNT_POS) | len, dieptsiz_reg);

	if (priv->bufferdma) {
		if (!dwc2_dma_buffer_ok_to_use(dev, buf->data, len, cfg->mps)) {
			/* Cannot continue unless buffer is bounced. Device will
			 * cease to function. Is fatal error appropriate here?
			 */
			irq_unlock(key);
			return -ENOTSUP;
		}

		sys_write32((uint32_t)buf->data,
			    (mem_addr_t)&base->in_ep[ep_idx].diepdma);

		/* Make the payload visible to the DMA engine */
		sys_cache_data_flush_range(buf->data, len);
	}

	/* Clear NAK and set endpoint enable */
	sys_set_bits(diepctl_reg, USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK);
	/* Clear IN Endpoint NAK Effective interrupt in case it was set */
	sys_write32(USB_DWC2_DIEPINT_INEPNAKEFF, diepint_reg);

	if (!priv->bufferdma) {
		/* FIFO access is always in 32-bit words; assemble each word
		 * from up to four payload bytes, little-endian.
		 */
		for (uint32_t i = 0UL; i < len; i += d) {
			uint32_t val = buf->data[i];

			if (i + 1 < len) {
				val |= ((uint32_t)buf->data[i + 1UL]) << 8;
			}
			if (i + 2 < len) {
				val |= ((uint32_t)buf->data[i + 2UL]) << 16;
			}
			if (i + 3 < len) {
				val |= ((uint32_t)buf->data[i + 3UL]) << 24;
			}

			sys_write32(val, UDC_DWC2_EP_FIFO(base, ep_idx));
		}
	}

	irq_unlock(key);

	return 0;
}
389
/* Read up to size bytes from the endpoint RX FIFO into buf (may be NULL)
 * and drain any bytes that do not fit, so the FIFO is always emptied of
 * the announced amount.
 */
static inline int dwc2_read_fifo(const struct device *dev, const uint8_t ep,
				 struct net_buf *const buf, const size_t size)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	size_t len = buf ? MIN(size, net_buf_tailroom(buf)) : 0;
	const size_t d = sizeof(uint32_t);

	/* FIFO access is always in 32-bit words */

	for (uint32_t n = 0; n < (len / d); n++) {
		net_buf_add_le32(buf, sys_read32(UDC_DWC2_EP_FIFO(base, ep)));
	}

	if (len % d) {
		uint8_t r[4];

		/* Get the remaining */
		sys_put_le32(sys_read32(UDC_DWC2_EP_FIFO(base, ep)), r);
		for (uint32_t i = 0U; i < (len % d); i++) {
			net_buf_add_u8(buf, r[i]);
		}
	}

	/* Discard words that do not fit into the buffer */
	if (unlikely(size > len)) {
		for (uint32_t n = 0; n < DIV_ROUND_UP(size - len, d); n++) {
			(void)sys_read32(UDC_DWC2_EP_FIFO(base, ep));
		}
	}

	return 0;
}
421
dwc2_rx_xfer_size(struct udc_dwc2_data * const priv,struct udc_ep_config * const cfg,struct net_buf * buf)422 static uint32_t dwc2_rx_xfer_size(struct udc_dwc2_data *const priv,
423 struct udc_ep_config *const cfg,
424 struct net_buf *buf)
425 {
426 uint32_t size;
427
428 if (priv->bufferdma) {
429 size = net_buf_tailroom(buf);
430
431 /* Do as many packets in a single DMA as possible */
432 if (size > priv->max_xfersize) {
433 size = ROUND_DOWN(priv->max_xfersize, cfg->mps);
434 }
435 } else {
436 /* Completer mode can always program Max Packet Size, RxFLvl
437 * interrupt will drop excessive data if necessary (i.e. buffer
438 * is too short).
439 */
440 size = cfg->mps;
441 }
442
443 return size;
444 }
445
/* Prepare an OUT endpoint to receive data into buf.
 * Can be called from ISR and we call it only when there is a buffer in the queue.
 * When ncnak is true, the endpoint is enabled without clearing NAK, so
 * reception does not start immediately.
 */
static void dwc2_prep_rx(const struct device *dev, struct net_buf *buf,
			 struct udc_ep_config *const cfg, const bool ncnak)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t doeptsiz_reg = (mem_addr_t)&base->out_ep[ep_idx].doeptsiz;
	mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, ep_idx);
	uint32_t doeptsiz;
	uint32_t xfersize;

	xfersize = dwc2_rx_xfer_size(priv, cfg, buf);

	doeptsiz = xfersize | usb_dwc2_set_deptsizn_pktcnt(DIV_ROUND_UP(xfersize, cfg->mps));
	if (cfg->addr == USB_CONTROL_EP_OUT) {
		/* Use 1 to allow 8 byte long buffers for SETUP data */
		doeptsiz |= (1 << USB_DWC2_DOEPTSIZ0_SUPCNT_POS);
	}

	sys_write32(doeptsiz, doeptsiz_reg);

	if (priv->bufferdma) {
		if (!dwc2_dma_buffer_ok_to_use(dev, buf->data, xfersize, cfg->mps)) {
			/* Cannot continue unless buffer is bounced. Device will
			 * cease to function. Is fatal error appropriate here?
			 * NOTE(review): the failure is not propagated to the
			 * caller — the endpoint is left not armed.
			 */
			return;
		}

		sys_write32((uint32_t)buf->data,
			    (mem_addr_t)&base->out_ep[ep_idx].doepdma);

		/* Invalidate so stale cache lines are not read after DMA */
		sys_cache_data_invd_range(buf->data, xfersize);
	}

	if (ncnak) {
		sys_set_bits(doepctl_reg, USB_DWC2_DEPCTL_EPENA);
	} else {
		sys_set_bits(doepctl_reg, USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK);
	}

	LOG_INF("Prepare RX 0x%02x doeptsiz 0x%x", cfg->addr, doeptsiz);
}
490
dwc2_handle_xfer_next(const struct device * dev,struct udc_ep_config * const cfg)491 static void dwc2_handle_xfer_next(const struct device *dev,
492 struct udc_ep_config *const cfg)
493 {
494 struct net_buf *buf;
495
496 buf = udc_buf_peek(dev, cfg->addr);
497 if (buf == NULL) {
498 return;
499 }
500
501 if (USB_EP_DIR_IS_OUT(cfg->addr)) {
502 dwc2_prep_rx(dev, buf, cfg, 0);
503 } else {
504 if (dwc2_tx_fifo_write(dev, cfg, buf)) {
505 LOG_ERR("Failed to start write to TX FIFO, ep 0x%02x",
506 cfg->addr);
507 }
508 }
509
510 udc_ep_set_busy(dev, cfg->addr, true);
511 }
512
dwc2_ctrl_feed_dout(const struct device * dev,const size_t length)513 static int dwc2_ctrl_feed_dout(const struct device *dev, const size_t length)
514 {
515 struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
516 struct net_buf *buf;
517
518 buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length);
519 if (buf == NULL) {
520 return -ENOMEM;
521 }
522
523 udc_buf_put(ep_cfg, buf);
524 dwc2_prep_rx(dev, buf, ep_cfg, 0);
525 LOG_DBG("feed buf %p", buf);
526
527 return 0;
528 }
529
/* Bottom-half handling of a SETUP packet: copy the temporarily stored
 * setup bytes into the queued control buffer, advance the control stage
 * state machine, and feed a buffer for the following stage.
 */
static int dwc2_handle_evt_setup(const struct device *dev)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct net_buf *buf;
	int err;

	buf = udc_buf_get(dev, USB_CONTROL_EP_OUT);
	if (buf == NULL) {
		LOG_ERR("No buffer queued for control ep");
		return -ENODATA;
	}

	net_buf_add_mem(buf, priv->setup, sizeof(priv->setup));
	udc_ep_buf_set_setup(buf);
	LOG_HEXDUMP_DBG(buf->data, buf->len, "setup");

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	/* We always allocate and feed buffer large enough for a setup packet. */

	if (udc_ctrl_stage_is_data_out(dev)) {
		/* Allocate and feed buffer for data OUT stage */
		LOG_DBG("s:%p|feed for -out-", buf);

		/* Allocate at least 8 bytes in case the host decides to send
		 * SETUP DATA instead of OUT DATA packet.
		 */
		err = dwc2_ctrl_feed_dout(dev, MAX(udc_data_stage_length(buf), 8));
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}
	} else if (udc_ctrl_stage_is_data_in(dev)) {
		LOG_DBG("s:%p|feed for -in-status", buf);

		/* NOTE(review): a non-ENOMEM feed error is overwritten by the
		 * submit call below.
		 */
		err = dwc2_ctrl_feed_dout(dev, 8);
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}

		err = udc_ctrl_submit_s_in_status(dev);
	} else {
		LOG_DBG("s:%p|feed >setup", buf);

		err = dwc2_ctrl_feed_dout(dev, 8);
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}

		err = udc_ctrl_submit_s_status(dev);
	}

	return err;
}
584
dwc2_handle_evt_dout(const struct device * dev,struct udc_ep_config * const cfg)585 static inline int dwc2_handle_evt_dout(const struct device *dev,
586 struct udc_ep_config *const cfg)
587 {
588 struct net_buf *buf;
589 int err = 0;
590
591 buf = udc_buf_get(dev, cfg->addr);
592 if (buf == NULL) {
593 LOG_ERR("No buffer queued for control ep");
594 return -ENODATA;
595 }
596
597 udc_ep_set_busy(dev, cfg->addr, false);
598
599 if (cfg->addr == USB_CONTROL_EP_OUT) {
600 if (udc_ctrl_stage_is_status_out(dev)) {
601 /* s-in-status finished */
602 LOG_DBG("dout:%p| status, feed >s", buf);
603
604 /* Feed a buffer for the next setup packet */
605 err = dwc2_ctrl_feed_dout(dev, 8);
606 if (err == -ENOMEM) {
607 err = udc_submit_ep_event(dev, buf, err);
608 }
609
610 /* Status stage finished, notify upper layer */
611 udc_ctrl_submit_status(dev, buf);
612 } else {
613 /*
614 * For all other cases we feed with a buffer
615 * large enough for setup packet.
616 */
617 LOG_DBG("dout:%p| data, feed >s", buf);
618
619 err = dwc2_ctrl_feed_dout(dev, 8);
620 if (err == -ENOMEM) {
621 err = udc_submit_ep_event(dev, buf, err);
622 }
623 }
624
625 /* Update to next stage of control transfer */
626 udc_ctrl_update_stage(dev, buf);
627
628 if (udc_ctrl_stage_is_status_in(dev)) {
629 err = udc_ctrl_submit_s_out_status(dev, buf);
630 }
631 } else {
632 err = udc_submit_ep_event(dev, buf, 0);
633 }
634
635 return err;
636 }
637
/* Bottom-half handling of a finished IN transaction: retry partially
 * written transfers, send a trailing ZLP on the control endpoint when
 * requested, then complete the buffer and advance the control stage
 * state machine where applicable.
 */
static int dwc2_handle_evt_din(const struct device *dev,
			       struct udc_ep_config *const cfg)
{
	struct net_buf *buf;

	buf = udc_buf_peek(dev, cfg->addr);
	if (buf == NULL) {
		LOG_ERR("No buffer for ep 0x%02x", cfg->addr);
		udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
		return -ENOBUFS;
	}

	if (buf->len) {
		/* Looks like we failed to continue in ISR, retry */
		return dwc2_tx_fifo_write(dev, cfg, buf);
	}

	if (cfg->addr == USB_CONTROL_EP_IN && udc_ep_buf_has_zlp(buf)) {
		/* Terminate the data stage with a zero-length packet */
		udc_ep_buf_clear_zlp(buf);
		return dwc2_tx_fifo_write(dev, cfg, buf);
	}

	buf = udc_buf_get(dev, cfg->addr);
	udc_ep_set_busy(dev, cfg->addr, false);

	if (cfg->addr == USB_CONTROL_EP_IN) {
		if (udc_ctrl_stage_is_status_in(dev) ||
		    udc_ctrl_stage_is_no_data(dev)) {
			/* Status stage finished, notify upper layer */
			udc_ctrl_submit_status(dev, buf);
		}

		/* Update to next stage of control transfer */
		udc_ctrl_update_stage(dev, buf);

		if (udc_ctrl_stage_is_status_out(dev)) {
			/*
			 * IN transfer finished, release buffer,
			 * control OUT buffer should be already fed.
			 */
			net_buf_unref(buf);
		}

		return 0;
	}

	return udc_submit_ep_event(dev, buf, 0);
}
686
/* Driver thread body: consume one event from the message queue, dispatch
 * it, then schedule the next queued transfer if the endpoint is idle.
 */
static ALWAYS_INLINE void dwc2_thread_handler(void *const arg)
{
	const struct device *dev = (const struct device *)arg;
	struct udc_ep_config *ep_cfg;
	struct dwc2_drv_event evt;

	/* This is the bottom-half of the ISR handler and the place where
	 * a new transfer can be fed.
	 */
	k_msgq_get(&drv_msgq, &evt, K_FOREVER);
	ep_cfg = udc_get_ep_cfg(dev, evt.ep);

	switch (evt.type) {
	case DWC2_DRV_EVT_XFER:
		LOG_DBG("New transfer in the queue");
		break;
	case DWC2_DRV_EVT_SETUP:
		LOG_DBG("SETUP event");
		dwc2_handle_evt_setup(dev);
		break;
	case DWC2_DRV_EVT_DOUT:
		LOG_DBG("DOUT event ep 0x%02x", ep_cfg->addr);
		dwc2_handle_evt_dout(dev, ep_cfg);
		break;
	case DWC2_DRV_EVT_DIN:
		LOG_DBG("DIN event");
		dwc2_handle_evt_din(dev, ep_cfg);
		break;
	}

	/* Control OUT is fed through dwc2_ctrl_feed_dout(), not here */
	if (ep_cfg->addr != USB_CONTROL_EP_OUT && !udc_ep_is_busy(dev, ep_cfg->addr)) {
		dwc2_handle_xfer_next(dev, ep_cfg);
	} else {
		LOG_DBG("ep 0x%02x busy", ep_cfg->addr);
	}
}
723
/* Handle USB Reset interrupt: NAK all OUT endpoints, unmask the endpoint
 * interrupts the driver relies on, and clear the device address.
 */
static void dwc2_on_bus_reset(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint32_t doepmsk;

	/* Set the NAK bit for all OUT endpoints */
	for (uint8_t i = 0U; i < priv->numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);
		mem_addr_t doepctl_reg;

		LOG_DBG("ep 0x%02x EPDIR %u", i, epdir);
		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT ||
		    epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			doepctl_reg = dwc2_get_dxepctl_reg(dev, i);
			sys_write32(USB_DWC2_DEPCTL_SNAK, doepctl_reg);
		}
	}

	/* Buffer DMA additionally needs XferCompl and StsPhseRcvd */
	doepmsk = USB_DWC2_DOEPINT_SETUP;
	if (priv->bufferdma) {
		doepmsk |= USB_DWC2_DOEPINT_XFERCOMPL |
			   USB_DWC2_DOEPINT_STSPHSERCVD;
	}

	sys_write32(doepmsk, (mem_addr_t)&base->doepmsk);
	sys_set_bits((mem_addr_t)&base->diepmsk, USB_DWC2_DIEPINT_XFERCOMPL);

	/* Software has to handle RxFLvl interrupt only in Completer mode */
	if (!priv->bufferdma) {
		sys_set_bits((mem_addr_t)&base->gintmsk,
			     USB_DWC2_GINTSTS_RXFLVL);
	}

	/* Clear device address during reset. */
	sys_clear_bits((mem_addr_t)&base->dcfg, USB_DWC2_DCFG_DEVADDR_MASK);
}
761
dwc2_handle_enumdone(const struct device * dev)762 static void dwc2_handle_enumdone(const struct device *dev)
763 {
764 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
765 struct udc_dwc2_data *const priv = udc_get_private(dev);
766 uint32_t dsts;
767
768 dsts = sys_read32((mem_addr_t)&base->dsts);
769 priv->enumspd = usb_dwc2_get_dsts_enumspd(dsts);
770 }
771
/* Read a SETUP packet from the RX FIFO into the driver's temporary
 * storage and discard any excess data announced by the core.
 */
static inline int dwc2_read_fifo_setup(const struct device *dev, uint8_t ep,
				       const size_t size)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	size_t offset;

	/* FIFO access is always in 32-bit words */

	if (size != 8) {
		/* Fix: size is size_t, %d caused a format/argument mismatch */
		LOG_ERR("%zu bytes SETUP", size);
	}

	/*
	 * We store the setup packet temporarily in the driver's private data
	 * because there is always a race risk after the status stage OUT
	 * packet from the host and the new setup packet. This is fine in
	 * bottom-half processing because the events arrive in a queue and
	 * there will be a next net_buf for the setup packet.
	 */
	for (offset = 0; offset < MIN(size, 8); offset += 4) {
		sys_put_le32(sys_read32(UDC_DWC2_EP_FIFO(base, ep)),
			     &priv->setup[offset]);
	}

	/* On protocol error simply discard extra data */
	while (offset < size) {
		(void)sys_read32(UDC_DWC2_EP_FIFO(base, ep));
		offset += 4;
	}

	return 0;
}
805
dwc2_handle_rxflvl(const struct device * dev)806 static inline void dwc2_handle_rxflvl(const struct device *dev)
807 {
808 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
809 struct udc_ep_config *ep_cfg;
810 struct dwc2_drv_event evt;
811 struct net_buf *buf;
812 uint32_t grxstsp;
813 uint32_t pktsts;
814
815 grxstsp = sys_read32((mem_addr_t)&base->grxstsp);
816 evt.ep = usb_dwc2_get_grxstsp_epnum(grxstsp);
817 evt.bcnt = usb_dwc2_get_grxstsp_bcnt(grxstsp);
818 pktsts = usb_dwc2_get_grxstsp_pktsts(grxstsp);
819
820 LOG_DBG("ep 0x%02x: pktsts %u, bcnt %u", evt.ep, pktsts, evt.bcnt);
821
822 switch (pktsts) {
823 case USB_DWC2_GRXSTSR_PKTSTS_SETUP:
824 dwc2_read_fifo_setup(dev, evt.ep, evt.bcnt);
825 break;
826 case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA:
827 evt.type = DWC2_DRV_EVT_DOUT;
828 ep_cfg = udc_get_ep_cfg(dev, evt.ep);
829
830 buf = udc_buf_peek(dev, ep_cfg->addr);
831
832 /* RxFIFO data must be retrieved even when buf is NULL */
833 dwc2_read_fifo(dev, evt.ep, buf, evt.bcnt);
834
835 if (buf == NULL) {
836 LOG_ERR("No buffer for ep 0x%02x", ep_cfg->addr);
837 udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
838 break;
839 }
840
841 if (net_buf_tailroom(buf) && evt.bcnt == ep_cfg->mps) {
842 dwc2_prep_rx(dev, buf, ep_cfg, 0);
843 } else {
844 k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
845 }
846
847 break;
848 case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA_DONE:
849 LOG_DBG("RX pktsts DONE");
850 break;
851 case USB_DWC2_GRXSTSR_PKTSTS_SETUP_DONE:
852 LOG_DBG("SETUP pktsts DONE");
853 case USB_DWC2_GRXSTSR_PKTSTS_GLOBAL_OUT_NAK:
854 LOG_DBG("Global OUT NAK");
855 break;
856 default:
857 break;
858 }
859 }
860
dwc2_handle_in_xfercompl(const struct device * dev,const uint8_t ep_idx)861 static inline void dwc2_handle_in_xfercompl(const struct device *dev,
862 const uint8_t ep_idx)
863 {
864 struct udc_dwc2_data *const priv = udc_get_private(dev);
865 struct udc_ep_config *ep_cfg;
866 struct dwc2_drv_event evt;
867 struct net_buf *buf;
868
869 ep_cfg = udc_get_ep_cfg(dev, ep_idx | USB_EP_DIR_IN);
870 buf = udc_buf_peek(dev, ep_cfg->addr);
871 if (buf == NULL) {
872 udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
873 return;
874 }
875
876 net_buf_pull(buf, priv->tx_len[ep_idx]);
877 if (buf->len && dwc2_tx_fifo_write(dev, ep_cfg, buf) == 0) {
878 return;
879 }
880
881 evt.dev = dev;
882 evt.ep = ep_cfg->addr;
883 evt.type = DWC2_DRV_EVT_DIN;
884 k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
885 }
886
/* Handle IEPINT: service each IN endpoint flagged in DAINT. Pending
 * interrupt bits are cleared by writing them back to DIEPINTn.
 */
static inline void dwc2_handle_iepint(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	const uint8_t n_max = 16;
	uint32_t diepmsk;
	uint32_t daint;

	diepmsk = sys_read32((mem_addr_t)&base->diepmsk);
	daint = sys_read32((mem_addr_t)&base->daint);

	for (uint8_t n = 0U; n < n_max; n++) {
		mem_addr_t diepint_reg = (mem_addr_t)&base->in_ep[n].diepint;
		uint32_t diepint;
		uint32_t status;

		if (daint & USB_DWC2_DAINT_INEPINT(n)) {
			/* Read and clear interrupt status */
			diepint = sys_read32(diepint_reg);
			status = diepint & diepmsk;
			sys_write32(status, diepint_reg);

			LOG_DBG("ep 0x%02x interrupt status: 0x%x",
				n | USB_EP_DIR_IN, status);

			if (status & USB_DWC2_DIEPINT_XFERCOMPL) {
				dwc2_handle_in_xfercompl(dev, n);
			}

		}
	}

	/* Clear IEPINT interrupt */
	sys_write32(USB_DWC2_GINTSTS_IEPINT, (mem_addr_t)&base->gintsts);
}
921
/* Handle XferCompl on an OUT endpoint: derive the received byte count
 * from the remaining DOEPTSIZ transfer size, append the data to the
 * buffer, then either re-arm the endpoint or queue a DOUT event.
 */
static inline void dwc2_handle_out_xfercompl(const struct device *dev,
					     const uint8_t ep_idx)
{
	struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, ep_idx);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct dwc2_drv_event evt;
	struct net_buf *buf;
	uint32_t doeptsiz;

	doeptsiz = sys_read32((mem_addr_t)&base->out_ep[ep_idx].doeptsiz);

	buf = udc_buf_peek(dev, ep_cfg->addr);
	if (!buf) {
		LOG_ERR("No buffer for ep 0x%02x", ep_cfg->addr);
		udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
		return;
	}

	evt.type = DWC2_DRV_EVT_DOUT;
	evt.ep = ep_cfg->addr;
	/* Assume udc buffer and endpoint config is the same as it was when
	 * transfer was scheduled in dwc2_prep_rx(). The original transfer size
	 * value is necessary here because controller decreases the value for
	 * every byte stored.
	 */
	evt.bcnt = dwc2_rx_xfer_size(priv, ep_cfg, buf) -
		   usb_dwc2_get_deptsizn_xfersize(doeptsiz);

	if (priv->bufferdma) {
		/* Invalidate cache over the range the DMA engine wrote */
		sys_cache_data_invd_range(buf->data, evt.bcnt);
	}

	net_buf_add(buf, evt.bcnt);

	if (((evt.bcnt % ep_cfg->mps) == 0) && net_buf_tailroom(buf)) {
		/* Full packet and room left: expect more data, re-arm */
		dwc2_prep_rx(dev, buf, ep_cfg, 0);
	} else {
		/* Short packet or buffer full: transfer complete */
		k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
	}
}
963
/* Handle OEPINT: service each OUT endpoint flagged in DAINT, including
 * SETUP packet reception in Buffer DMA mode.
 */
static inline void dwc2_handle_oepint(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	const uint8_t n_max = 16;
	uint32_t doepmsk;
	uint32_t daint;

	doepmsk = sys_read32((mem_addr_t)&base->doepmsk);
	daint = sys_read32((mem_addr_t)&base->daint);

	for (uint8_t n = 0U; n < n_max; n++) {
		mem_addr_t doepint_reg = (mem_addr_t)&base->out_ep[n].doepint;
		uint32_t doepint;
		uint32_t status;

		if (!(daint & USB_DWC2_DAINT_OUTEPINT(n))) {
			continue;
		}

		/* Read and clear interrupt status */
		doepint = sys_read32(doepint_reg);
		status = doepint & doepmsk;
		sys_write32(status, doepint_reg);

		LOG_DBG("ep 0x%02x interrupt status: 0x%x", n, status);

		/* StupPktRcvd is not enabled for interrupt, but must be checked
		 * when XferComp hits to determine if SETUP token was received.
		 */
		if (priv->bufferdma && (status & USB_DWC2_DOEPINT_XFERCOMPL) &&
		    (doepint & USB_DWC2_DOEPINT_STUPPKTRCVD)) {
			uint32_t addr;

			sys_write32(USB_DWC2_DOEPINT_STUPPKTRCVD, doepint_reg);
			status &= ~USB_DWC2_DOEPINT_XFERCOMPL;

			/* DMAAddr points past the memory location where the
			 * SETUP data was stored. Copy the received SETUP data
			 * to temporary location used also in Completer mode
			 * which allows common SETUP interrupt handling.
			 */
			addr = sys_read32((mem_addr_t)&base->out_ep[0].doepdma);
			sys_cache_data_invd_range((void *)(addr - 8), 8);
			memcpy(priv->setup, (void *)(addr - 8), sizeof(priv->setup));
		}

		if (status & USB_DWC2_DOEPINT_SETUP) {
			struct dwc2_drv_event evt = {
				.type = DWC2_DRV_EVT_SETUP,
				.ep = USB_CONTROL_EP_OUT,
				.bcnt = 8,
			};

			k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
		}

		if (status & USB_DWC2_DOEPINT_STSPHSERCVD) {
			/* Driver doesn't need any special handling, but it is
			 * mandatory that the bit is cleared in Buffer DMA mode.
			 * If the bit is not cleared (i.e. when this interrupt
			 * bit is masked), then SETUP interrupts will cease
			 * after first control transfer with data stage from
			 * device to host.
			 */
		}

		if (status & USB_DWC2_DOEPINT_XFERCOMPL) {
			dwc2_handle_out_xfercompl(dev, n);
		}
	}

	/* Clear OEPINT interrupt */
	sys_write32(USB_DWC2_GINTSTS_OEPINT, (mem_addr_t)&base->gintsts);
}
1039
/* Top-level interrupt service routine.
 *
 * Loops until no unmasked bits remain in GINTSTS, acknowledging each event
 * by writing its bit back to GINTSTS before handling it. Endpoint-level
 * events (IEPINT/OEPINT/RXFLVL) are cleared inside their dedicated handlers.
 */
static void udc_dwc2_isr_handler(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct usb_dwc2_reg *const base = config->base;
	mem_addr_t gintsts_reg = (mem_addr_t)&base->gintsts;
	uint32_t int_status;
	uint32_t gintmsk;

	gintmsk = sys_read32((mem_addr_t)&base->gintmsk);

	/* Read and handle interrupt status register */
	while ((int_status = sys_read32(gintsts_reg) & gintmsk)) {

		LOG_DBG("GINTSTS 0x%x", int_status);

		if (int_status & USB_DWC2_GINTSTS_SOF) {
			/* Clear USB SOF interrupt. */
			sys_write32(USB_DWC2_GINTSTS_SOF, gintsts_reg);
			udc_submit_event(dev, UDC_EVT_SOF, 0);
		}

		if (int_status & USB_DWC2_GINTSTS_USBRST) {
			/* Clear and handle USB Reset interrupt. */
			sys_write32(USB_DWC2_GINTSTS_USBRST, gintsts_reg);
			dwc2_on_bus_reset(dev);
			LOG_DBG("USB Reset interrupt");
		}

		if (int_status & USB_DWC2_GINTSTS_ENUMDONE) {
			/* Clear and handle Enumeration Done interrupt. */
			sys_write32(USB_DWC2_GINTSTS_ENUMDONE, gintsts_reg);
			dwc2_handle_enumdone(dev);
			udc_submit_event(dev, UDC_EVT_RESET, 0);
		}

		if (int_status & USB_DWC2_GINTSTS_USBSUSP) {
			/* Clear USB Suspend interrupt. */
			sys_write32(USB_DWC2_GINTSTS_USBSUSP, gintsts_reg);
			udc_set_suspended(dev, true);
			udc_submit_event(dev, UDC_EVT_SUSPEND, 0);
		}

		if (int_status & USB_DWC2_GINTSTS_WKUPINT) {
			/* Clear Resume/Remote Wakeup Detected interrupt. */
			sys_write32(USB_DWC2_GINTSTS_WKUPINT, gintsts_reg);
			udc_set_suspended(dev, false);
			udc_submit_event(dev, UDC_EVT_RESUME, 0);
		}

		if (int_status & USB_DWC2_GINTSTS_IEPINT) {
			/* Handle IN Endpoints interrupt */
			dwc2_handle_iepint(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_RXFLVL) {
			/* Handle RxFIFO Non-Empty interrupt */
			dwc2_handle_rxflvl(dev);
		}

		if (int_status & USB_DWC2_GINTSTS_OEPINT) {
			/* Handle OUT Endpoints interrupt */
			dwc2_handle_oepint(dev);
		}
	}

	/* Vendor-specific interrupt acknowledgment, no-op on most parts */
	(void)dwc2_quirk_irq_clear(dev);
}
1107
dwc2_unset_unused_fifo(const struct device * dev)1108 static void dwc2_unset_unused_fifo(const struct device *dev)
1109 {
1110 struct udc_dwc2_data *const priv = udc_get_private(dev);
1111 struct udc_ep_config *tmp;
1112
1113 for (uint8_t i = priv->ineps - 1U; i > 0; i--) {
1114 tmp = udc_get_ep_cfg(dev, i | USB_EP_DIR_IN);
1115
1116 if (tmp->stat.enabled && (priv->txf_set & BIT(i))) {
1117 return;
1118 }
1119
1120 if (!tmp->stat.enabled && (priv->txf_set & BIT(i))) {
1121 priv->txf_set &= ~BIT(i);
1122 }
1123 }
1124 }
1125
1126 /*
1127 * In dedicated FIFO mode there are i (i = 1 ... ineps - 1) FIFO size registers,
1128 * e.g. DIEPTXF1, DIEPTXF2, ... DIEPTXF4. When dynfifosizing is enabled,
1129 * the size register is mutable. The offset of DIEPTXF1 registers is 0.
1130 */
/* Assign a dedicated TxFIFO to a non-control IN endpoint and store the
 * FIFO number into the caller-provided DIEPCTL value.
 *
 * With dynamic FIFO sizing the FIFO is placed directly after the previous
 * one (or after RxFIFO + TxFIFO0 for FIFO 1) and sized to hold one max
 * packet. Otherwise the power-on FIFO layout is reused as-is.
 *
 * Returns 0 on success, -EIO if higher-numbered FIFOs block relocation,
 * -ENOMEM if the FIFO cannot hold a packet of cfg->mps bytes.
 */
static int dwc2_set_dedicated_fifo(const struct device *dev,
				   struct udc_ep_config *const cfg,
				   uint32_t *const diepctl)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	uint32_t txfaddr;
	uint32_t txfdep;
	uint32_t tmp;

	/* Keep everything but FIFO number */
	tmp = *diepctl & ~USB_DWC2_DEPCTL_TXFNUM_MASK;

	if (priv->dynfifosizing) {
		/* Try to release FIFOs of already disabled endpoints so the
		 * address space above this endpoint's FIFO becomes free.
		 */
		if (priv->txf_set & ~BIT_MASK(ep_idx)) {
			dwc2_unset_unused_fifo(dev);
		}

		if (priv->txf_set & ~BIT_MASK(ep_idx)) {
			LOG_WRN("Some of the FIFOs higher than %u are set, %lx",
				ep_idx, priv->txf_set & ~BIT_MASK(ep_idx));
			return -EIO;
		}

		/* FIFO n starts where FIFO n-1 ends; FIFO 1 starts right
		 * after the RxFIFO and TxFIFO0 regions.
		 */
		if ((ep_idx - 1) != 0U) {
			txfaddr = dwc2_get_txfdep(dev, ep_idx - 2) +
				  dwc2_get_txfaddr(dev, ep_idx - 2);
		} else {
			txfaddr = priv->rxfifo_depth +
				MAX(UDC_DWC2_FIFO0_DEPTH, priv->max_txfifo_depth[0]);
		}

		/* Make sure to not set TxFIFO greater than hardware allows */
		txfdep = DIV_ROUND_UP(cfg->mps, 4U);
		if (txfdep > priv->max_txfifo_depth[ep_idx]) {
			return -ENOMEM;
		}

		/* Set FIFO depth (32-bit words) and address */
		dwc2_set_txf(dev, ep_idx - 1, txfdep, txfaddr);
	} else {
		txfdep = dwc2_get_txfdep(dev, ep_idx - 1);
		txfaddr = dwc2_get_txfaddr(dev, ep_idx - 1);

		/* The fixed FIFO must be able to hold at least one max
		 * packet, i.e. txfdep * 4 bytes >= cfg->mps. (Fixes the
		 * previously inverted comparison, which rejected FIFOs
		 * with spare room and accepted FIFOs that were too small.)
		 */
		if (cfg->mps > txfdep * 4U) {
			return -ENOMEM;
		}

		LOG_DBG("Reuse FIFO%u addr 0x%08x depth %u", ep_idx, txfaddr, txfdep);
	}

	/* Assign FIFO to the IN endpoint */
	*diepctl = tmp | usb_dwc2_set_depctl_txfnum(ep_idx);
	priv->txf_set |= BIT(ep_idx);
	dwc2_flush_tx_fifo(dev, ep_idx);

	LOG_INF("Set FIFO%u (ep 0x%02x) addr 0x%04x depth %u size %u",
		ep_idx, cfg->addr, txfaddr, txfdep, dwc2_ftx_avail(dev, ep_idx));

	return 0;
}
1192
dwc2_ep_control_enable(const struct device * dev,struct udc_ep_config * const cfg)1193 static int dwc2_ep_control_enable(const struct device *dev,
1194 struct udc_ep_config *const cfg)
1195 {
1196 mem_addr_t dxepctl0_reg;
1197 uint32_t dxepctl0;
1198
1199 dxepctl0_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
1200 dxepctl0 = sys_read32(dxepctl0_reg);
1201
1202 dxepctl0 &= ~USB_DWC2_DEPCTL0_MPS_MASK;
1203 switch (cfg->mps) {
1204 case 8:
1205 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_8 << USB_DWC2_DEPCTL_MPS_POS;
1206 break;
1207 case 16:
1208 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_16 << USB_DWC2_DEPCTL_MPS_POS;
1209 break;
1210 case 32:
1211 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_32 << USB_DWC2_DEPCTL_MPS_POS;
1212 break;
1213 case 64:
1214 dxepctl0 |= USB_DWC2_DEPCTL0_MPS_64 << USB_DWC2_DEPCTL_MPS_POS;
1215 break;
1216 default:
1217 return -EINVAL;
1218 }
1219
1220 dxepctl0 |= USB_DWC2_DEPCTL_USBACTEP;
1221
1222 if (cfg->addr == USB_CONTROL_EP_OUT) {
1223 int ret;
1224
1225 dwc2_flush_rx_fifo(dev);
1226 ret = dwc2_ctrl_feed_dout(dev, 8);
1227 if (ret) {
1228 return ret;
1229 }
1230 } else {
1231 dwc2_flush_tx_fifo(dev, 0);
1232 }
1233
1234 sys_write32(dxepctl0, dxepctl0_reg);
1235 dwc2_set_epint(dev, cfg, true);
1236
1237 return 0;
1238 }
1239
/* Activate (enable) an endpoint so it responds to host tokens.
 *
 * Control endpoints are delegated to dwc2_ep_control_enable(). For other
 * endpoints this programs max packet size, endpoint type, data PID and,
 * for IN endpoints, assigns a dedicated TxFIFO before marking the
 * endpoint active.
 */
static int udc_dwc2_ep_activate(const struct device *dev,
				struct udc_ep_config *const cfg)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t dxepctl_reg;
	uint32_t dxepctl;

	LOG_DBG("Enable ep 0x%02x", cfg->addr);

	if (ep_idx == 0U) {
		return dwc2_ep_control_enable(dev, cfg);
	}

	if (USB_EP_DIR_IS_OUT(cfg->addr)) {
		/* TODO: use dwc2_get_dxepctl_reg() */
		dxepctl_reg = (mem_addr_t)&base->out_ep[ep_idx].doepctl;
	} else {
		/* The core may implement fewer IN endpoints than endpoint
		 * numbers; reject indices beyond what hardware provides.
		 */
		if (priv->ineps > 0U && ep_idx > (priv->ineps - 1U)) {
			LOG_ERR("No resources available for ep 0x%02x", cfg->addr);
			return -EINVAL;
		}

		dxepctl_reg = (mem_addr_t)&base->in_ep[ep_idx].diepctl;
	}

	/* MPS must fit in the DEPCTL MPS field */
	if (cfg->mps > usb_dwc2_get_depctl_mps(UINT16_MAX)) {
		return -EINVAL;
	}

	if (priv->bufferdma && (cfg->mps % 4)) {
		/* TODO: In Buffer DMA mode, DMA will insert padding bytes in
		 * between packets if endpoint Max Packet Size is not multiple
		 * of 4 (DWORD) and single transfer spans across multiple
		 * packets.
		 *
		 * In order to support such Max Packet Sizes, the driver would
		 * have to remove the padding in between the packets. Besides
		 * just driver shuffling the data, the buffers would have to be
		 * large enough to temporarily hold the paddings.
		 *
		 * For the time being just error out early.
		 */
		LOG_ERR("Driver requires MPS to be multiple of 4");
		return -EINVAL;
	}

	dxepctl = sys_read32(dxepctl_reg);
	/* Set max packet size */
	dxepctl &= ~USB_DWC2_DEPCTL_MPS_MASK;
	dxepctl |= cfg->mps << USB_DWC2_DEPCTL_MPS_POS;

	/* Set endpoint type */
	dxepctl &= ~USB_DWC2_DEPCTL_EPTYPE_MASK;

	switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) {
	case USB_EP_TYPE_BULK:
		dxepctl |= USB_DWC2_DEPCTL_EPTYPE_BULK <<
			   USB_DWC2_DEPCTL_EPTYPE_POS;
		/* Start with DATA0 PID */
		dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
		break;
	case USB_EP_TYPE_INTERRUPT:
		dxepctl |= USB_DWC2_DEPCTL_EPTYPE_INTERRUPT <<
			   USB_DWC2_DEPCTL_EPTYPE_POS;
		dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
		break;
	case USB_EP_TYPE_ISO:
		/* Isochronous endpoints do not use data toggle */
		dxepctl |= USB_DWC2_DEPCTL_EPTYPE_ISO <<
			   USB_DWC2_DEPCTL_EPTYPE_POS;
		break;
	default:
		return -EINVAL;
	}

	if (USB_EP_DIR_IS_IN(cfg->addr) && cfg->mps != 0U) {
		int ret = dwc2_set_dedicated_fifo(dev, cfg, &dxepctl);

		if (ret) {
			return ret;
		}
	}

	dxepctl |= USB_DWC2_DEPCTL_USBACTEP;

	/* Enable endpoint interrupts */
	dwc2_set_epint(dev, cfg, true);
	sys_write32(dxepctl, dxepctl_reg);

	for (uint8_t i = 1U; i < priv->ineps; i++) {
		LOG_DBG("DIEPTXF%u %08x DIEPCTL%u %08x",
			i, sys_read32((mem_addr_t)&base->dieptxf[i - 1U]), i, dxepctl);
	}

	return 0;
}
1336
/* Detach the dedicated TxFIFO from an IN endpoint.
 *
 * With dynamic FIFO sizing the FIFO size register is zeroed, but only if
 * no higher-numbered FIFO is still assigned (the address space must be
 * released top-down). Always returns 0; the warning path intentionally
 * keeps the assignment bit set so the FIFO can be reclaimed later.
 */
static int dwc2_unset_dedicated_fifo(const struct device *dev,
				     struct udc_ep_config *const cfg,
				     uint32_t *const diepctl)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	const uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	const unsigned long blocked = priv->txf_set & ~BIT_MASK(ep_idx);

	/* Clear FIFO number field */
	*diepctl &= ~USB_DWC2_DEPCTL_TXFNUM_MASK;

	if (priv->dynfifosizing) {
		if (blocked != 0UL) {
			LOG_WRN("Some of the FIFOs higher than %u are set, %lx",
				ep_idx, blocked);
			return 0;
		}

		dwc2_set_txf(dev, ep_idx - 1, 0, 0);
	}

	priv->txf_set &= ~BIT(ep_idx);

	return 0;
}
1361
/* Busy-wait (up to 100 ms) until the given bit reads back as set.
 *
 * Gives up early when the PHY clock is gated, because the bit can only be
 * set while the PHY is actively clocked. Logs an error on timeout.
 *
 * This could potentially be converted to use proper synchronization
 * primitives instead of busy looping, but the number of interrupt bits
 * this function can be waiting for is rather high.
 *
 * Busy looping is most likely fine unless profiling shows otherwise.
 */
static void dwc2_wait_for_bit(const struct device *dev,
			      mem_addr_t addr, uint32_t bit)
{
	const k_timepoint_t deadline = sys_timepoint_calc(K_MSEC(100));

	for (;;) {
		if (sys_read32(addr) & bit) {
			return;
		}

		if (dwc2_quirk_is_phy_clk_off(dev)) {
			/* No point in waiting, because the bit can only be set
			 * when the PHY is actively clocked.
			 */
			return;
		}

		if (sys_timepoint_expired(deadline)) {
			LOG_ERR("Timeout waiting for bit 0x%08X at 0x%08X",
				bit, (uint32_t)addr);
			return;
		}
	}
}
1388
1389 /* Disabled IN endpoint means that device will send NAK (isochronous: ZLP) after
1390 * receiving IN token from host even if there is packet available in TxFIFO.
1391 * Disabled OUT endpoint means that device will NAK (isochronous: discard data)
1392 * incoming OUT data (or HS PING) even if there is space available in RxFIFO.
1393 *
1394 * Set stall parameter to true if caller wants to send STALL instead of NAK.
1395 */
/* Disable an endpoint, optionally replacing NAK with STALL.
 *
 * Follows the DWC2 endpoint-disable handshake: set global OUT NAK (OUT) or
 * endpoint NAK (IN), wait for the effect, then set EPDIS and wait for the
 * Endpoint Disabled interrupt. Order of register writes and waits must be
 * preserved; see the comment block above for NAK/STALL semantics.
 */
static void udc_dwc2_ep_disable(const struct device *dev,
				struct udc_ep_config *const cfg, bool stall)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t dxepctl_reg;
	uint32_t dxepctl;

	dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
	dxepctl = sys_read32(dxepctl_reg);

	if (dxepctl & USB_DWC2_DEPCTL_NAKSTS) {
		/* Endpoint already sends forced NAKs. STALL if necessary. */
		if (stall) {
			dxepctl |= USB_DWC2_DEPCTL_STALL;
			sys_write32(dxepctl, dxepctl_reg);
		}

		return;
	}

	if (USB_EP_DIR_IS_OUT(cfg->addr)) {
		mem_addr_t dctl_reg, gintsts_reg, doepint_reg;
		uint32_t dctl;

		dctl_reg = (mem_addr_t)&base->dctl;
		gintsts_reg = (mem_addr_t)&base->gintsts;
		doepint_reg = (mem_addr_t)&base->out_ep[ep_idx].doepint;

		dctl = sys_read32(dctl_reg);

		/* Request Global OUT NAK; all OUT endpoints NAK until cleared */
		if (sys_read32(gintsts_reg) & USB_DWC2_GINTSTS_GOUTNAKEFF) {
			LOG_ERR("GOUTNAKEFF already active");
		} else {
			dctl |= USB_DWC2_DCTL_SGOUTNAK;
			sys_write32(dctl, dctl_reg);
			/* Keep the local copy clean; SGOUTNAK self-clears */
			dctl &= ~USB_DWC2_DCTL_SGOUTNAK;
		}

		dwc2_wait_for_bit(dev, gintsts_reg, USB_DWC2_GINTSTS_GOUTNAKEFF);

		/* The application cannot disable control OUT endpoint 0. */
		if (ep_idx != 0) {
			dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPDIS;
		}

		if (stall) {
			/* For OUT endpoints STALL is set instead of SNAK */
			dxepctl |= USB_DWC2_DEPCTL_STALL;
		} else {
			dxepctl |= USB_DWC2_DEPCTL_SNAK;
		}
		sys_write32(dxepctl, dxepctl_reg);

		if (ep_idx != 0) {
			dwc2_wait_for_bit(dev, doepint_reg, USB_DWC2_DOEPINT_EPDISBLD);
		}

		/* Clear Endpoint Disabled interrupt */
		sys_write32(USB_DWC2_DIEPINT_EPDISBLD, doepint_reg);

		/* Release Global OUT NAK so other OUT endpoints resume */
		dctl |= USB_DWC2_DCTL_CGOUTNAK;
		sys_write32(dctl, dctl_reg);
	} else {
		mem_addr_t diepint_reg;

		diepint_reg = (mem_addr_t)&base->in_ep[ep_idx].diepint;

		/* First force NAK on this IN endpoint and wait for effect */
		dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_SNAK;
		if (stall) {
			/* For IN endpoints STALL is set in addition to SNAK */
			dxepctl |= USB_DWC2_DEPCTL_STALL;
		}
		sys_write32(dxepctl, dxepctl_reg);

		dwc2_wait_for_bit(dev, diepint_reg, USB_DWC2_DIEPINT_INEPNAKEFF);

		/* Then disable the endpoint and wait for completion */
		dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPDIS;
		sys_write32(dxepctl, dxepctl_reg);

		dwc2_wait_for_bit(dev, diepint_reg, USB_DWC2_DIEPINT_EPDISBLD);

		/* Clear Endpoint Disabled interrupt */
		sys_write32(USB_DWC2_DIEPINT_EPDISBLD, diepint_reg);

		/* TODO: Read DIEPTSIZn here? Programming Guide suggest it to
		 * let application know how many bytes of interrupted transfer
		 * were transferred to the host.
		 */

		dwc2_flush_tx_fifo(dev, ep_idx);
	}

	udc_ep_set_busy(dev, cfg->addr, false);
}
1491
1492 /* Deactivated endpoint means that there will be a bus timeout when the host
1493 * tries to access the endpoint.
1494 */
static int udc_dwc2_ep_deactivate(const struct device *dev,
				  struct udc_ep_config *const cfg)
{
	uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
	mem_addr_t dxepctl_reg;
	uint32_t dxepctl;

	dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
	dxepctl = sys_read32(dxepctl_reg);

	if (dxepctl & USB_DWC2_DEPCTL_USBACTEP) {
		LOG_DBG("Disable ep 0x%02x DxEPCTL%u %x",
			cfg->addr, ep_idx, dxepctl);

		udc_dwc2_ep_disable(dev, cfg, false);

		/* Re-read after the disable sequence modified the register,
		 * then clear the active flag.
		 */
		dxepctl = sys_read32(dxepctl_reg);
		dxepctl &= ~USB_DWC2_DEPCTL_USBACTEP;
	} else {
		LOG_WRN("ep 0x%02x is not active DxEPCTL%u %x",
			cfg->addr, ep_idx, dxepctl);
	}

	/* Return the dedicated TxFIFO of a non-control IN endpoint */
	if (USB_EP_DIR_IS_IN(cfg->addr) && cfg->mps != 0U && ep_idx != 0U) {
		dwc2_unset_dedicated_fifo(dev, cfg, &dxepctl);
	}

	sys_write32(dxepctl, dxepctl_reg);
	dwc2_set_epint(dev, cfg, false);

	if (cfg->addr == USB_CONTROL_EP_OUT) {
		struct net_buf *buf = udc_buf_get_all(dev, cfg->addr);

		/* Release the buffer allocated in dwc2_ctrl_feed_dout() */
		if (buf) {
			net_buf_unref(buf);
		}
	}

	return 0;
}
1536
udc_dwc2_ep_set_halt(const struct device * dev,struct udc_ep_config * const cfg)1537 static int udc_dwc2_ep_set_halt(const struct device *dev,
1538 struct udc_ep_config *const cfg)
1539 {
1540 uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr);
1541
1542 udc_dwc2_ep_disable(dev, cfg, true);
1543
1544 LOG_DBG("Set halt ep 0x%02x", cfg->addr);
1545 if (ep_idx != 0) {
1546 cfg->stat.halted = true;
1547 }
1548
1549 return 0;
1550 }
1551
udc_dwc2_ep_clear_halt(const struct device * dev,struct udc_ep_config * const cfg)1552 static int udc_dwc2_ep_clear_halt(const struct device *dev,
1553 struct udc_ep_config *const cfg)
1554 {
1555 mem_addr_t dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr);
1556 uint32_t dxepctl;
1557 struct dwc2_drv_event evt = {
1558 .ep = cfg->addr,
1559 .type = DWC2_DRV_EVT_XFER,
1560 };
1561
1562 dxepctl = sys_read32(dxepctl_reg);
1563 dxepctl &= ~USB_DWC2_DEPCTL_STALL;
1564 dxepctl |= USB_DWC2_DEPCTL_SETD0PID;
1565 sys_write32(dxepctl, dxepctl_reg);
1566
1567 LOG_DBG("Clear halt ep 0x%02x", cfg->addr);
1568 cfg->stat.halted = false;
1569
1570 /* Resume queued transfers if any */
1571 if (udc_buf_peek(dev, cfg->addr)) {
1572 k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
1573 }
1574
1575 return 0;
1576 }
1577
udc_dwc2_ep_enqueue(const struct device * dev,struct udc_ep_config * const cfg,struct net_buf * const buf)1578 static int udc_dwc2_ep_enqueue(const struct device *dev,
1579 struct udc_ep_config *const cfg,
1580 struct net_buf *const buf)
1581 {
1582 struct dwc2_drv_event evt = {
1583 .ep = cfg->addr,
1584 .type = DWC2_DRV_EVT_XFER,
1585 };
1586
1587 LOG_DBG("%p enqueue %x %p", dev, cfg->addr, buf);
1588 udc_buf_put(cfg, buf);
1589
1590 if (!cfg->stat.halted) {
1591 k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
1592 }
1593
1594 return 0;
1595 }
1596
udc_dwc2_ep_dequeue(const struct device * dev,struct udc_ep_config * const cfg)1597 static int udc_dwc2_ep_dequeue(const struct device *dev,
1598 struct udc_ep_config *const cfg)
1599 {
1600 struct net_buf *buf;
1601
1602 udc_dwc2_ep_disable(dev, cfg, false);
1603
1604 buf = udc_buf_get_all(dev, cfg->addr);
1605 if (buf) {
1606 udc_submit_ep_event(dev, buf, -ECONNABORTED);
1607 }
1608
1609 udc_ep_set_busy(dev, cfg->addr, false);
1610
1611 LOG_DBG("dequeue ep 0x%02x", cfg->addr);
1612
1613 return 0;
1614 }
1615
udc_dwc2_set_address(const struct device * dev,const uint8_t addr)1616 static int udc_dwc2_set_address(const struct device *dev, const uint8_t addr)
1617 {
1618 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1619 mem_addr_t dcfg_reg = (mem_addr_t)&base->dcfg;
1620 uint32_t dcfg;
1621
1622 if (addr > (USB_DWC2_DCFG_DEVADDR_MASK >> USB_DWC2_DCFG_DEVADDR_POS)) {
1623 return -EINVAL;
1624 }
1625
1626 dcfg = sys_read32(dcfg_reg);
1627 dcfg &= ~USB_DWC2_DCFG_DEVADDR_MASK;
1628 dcfg |= usb_dwc2_set_dcfg_devaddr(addr);
1629 sys_write32(dcfg, dcfg_reg);
1630 LOG_DBG("Set new address %u for %p", addr, dev);
1631
1632 return 0;
1633 }
1634
udc_dwc2_test_mode(const struct device * dev,const uint8_t mode,const bool dryrun)1635 static int udc_dwc2_test_mode(const struct device *dev,
1636 const uint8_t mode, const bool dryrun)
1637 {
1638 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1639 mem_addr_t dctl_reg = (mem_addr_t)&base->dctl;
1640 uint32_t dctl;
1641
1642 if (mode == 0U || mode > USB_DWC2_DCTL_TSTCTL_TESTFE) {
1643 return -EINVAL;
1644 }
1645
1646 dctl = sys_read32(dctl_reg);
1647 if (usb_dwc2_get_dctl_tstctl(dctl) != USB_DWC2_DCTL_TSTCTL_DISABLED) {
1648 return -EALREADY;
1649 }
1650
1651 if (dryrun) {
1652 LOG_DBG("Test Mode %u supported", mode);
1653 return 0;
1654 }
1655
1656 dctl |= usb_dwc2_set_dctl_tstctl(mode);
1657 sys_write32(dctl, dctl_reg);
1658 LOG_DBG("Enable Test Mode %u", mode);
1659
1660 return 0;
1661 }
1662
udc_dwc2_host_wakeup(const struct device * dev)1663 static int udc_dwc2_host_wakeup(const struct device *dev)
1664 {
1665 LOG_DBG("Remote wakeup from %p", dev);
1666
1667 return -ENOTSUP;
1668 }
1669
1670 /* Return actual USB device speed */
udc_dwc2_device_speed(const struct device * dev)1671 static enum udc_bus_speed udc_dwc2_device_speed(const struct device *dev)
1672 {
1673 struct udc_dwc2_data *const priv = udc_get_private(dev);
1674
1675 switch (priv->enumspd) {
1676 case USB_DWC2_DSTS_ENUMSPD_HS3060:
1677 return UDC_BUS_SPEED_HS;
1678 case USB_DWC2_DSTS_ENUMSPD_LS6:
1679 __ASSERT(false, "Low speed mode not supported");
1680 __fallthrough;
1681 case USB_DWC2_DSTS_ENUMSPD_FS48:
1682 __fallthrough;
1683 case USB_DWC2_DSTS_ENUMSPD_FS3060:
1684 __fallthrough;
1685 default:
1686 return UDC_BUS_SPEED_FS;
1687 }
1688 }
1689
dwc2_core_soft_reset(const struct device * dev)1690 static int dwc2_core_soft_reset(const struct device *dev)
1691 {
1692 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1693 mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;
1694 const unsigned int csr_timeout_us = 10000UL;
1695 uint32_t cnt = 0UL;
1696
1697 /* Check AHB master idle state */
1698 while (!(sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_AHBIDLE)) {
1699 k_busy_wait(1);
1700
1701 if (++cnt > csr_timeout_us) {
1702 LOG_ERR("Wait for AHB idle timeout, GRSTCTL 0x%08x",
1703 sys_read32(grstctl_reg));
1704 return -EIO;
1705 }
1706 }
1707
1708 /* Apply Core Soft Reset */
1709 sys_write32(USB_DWC2_GRSTCTL_CSFTRST, grstctl_reg);
1710
1711 cnt = 0UL;
1712 do {
1713 if (++cnt > csr_timeout_us) {
1714 LOG_ERR("Wait for CSR done timeout, GRSTCTL 0x%08x",
1715 sys_read32(grstctl_reg));
1716 return -EIO;
1717 }
1718
1719 k_busy_wait(1);
1720 } while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_CSFTRST &&
1721 !(sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_CSFTRSTDONE));
1722
1723 sys_clear_bits(grstctl_reg, USB_DWC2_GRSTCTL_CSFTRST | USB_DWC2_GRSTCTL_CSFTRSTDONE);
1724
1725 return 0;
1726 }
1727
/* Bring the DWC2 core into a known device-mode configuration.
 *
 * Performs a core soft reset, reads the GHWCFGn capability registers,
 * forces device mode, selects Completer or Buffer DMA operation,
 * configures the PHY and speed, sizes the Rx/Tx FIFOs (when dynamic FIFO
 * sizing is available), enables both control endpoints and unmasks the
 * interrupts the driver handles. Returns 0 on success or a negative errno.
 */
static int udc_dwc2_init_controller(const struct device *dev)
{
	const struct udc_dwc2_config *const config = dev->config;
	struct udc_dwc2_data *const priv = udc_get_private(dev);
	struct usb_dwc2_reg *const base = config->base;
	mem_addr_t grxfsiz_reg = (mem_addr_t)&base->grxfsiz;
	mem_addr_t gahbcfg_reg = (mem_addr_t)&base->gahbcfg;
	mem_addr_t gusbcfg_reg = (mem_addr_t)&base->gusbcfg;
	mem_addr_t dcfg_reg = (mem_addr_t)&base->dcfg;
	uint32_t dcfg;
	uint32_t gusbcfg;
	uint32_t gahbcfg;
	uint32_t ghwcfg2;
	uint32_t ghwcfg3;
	uint32_t ghwcfg4;
	uint32_t val;
	int ret;

	ret = dwc2_core_soft_reset(dev);
	if (ret) {
		return ret;
	}

	/* Capability registers describing the synthesized core */
	priv->ghwcfg1 = sys_read32((mem_addr_t)&base->ghwcfg1);
	ghwcfg2 = sys_read32((mem_addr_t)&base->ghwcfg2);
	ghwcfg3 = sys_read32((mem_addr_t)&base->ghwcfg3);
	ghwcfg4 = sys_read32((mem_addr_t)&base->ghwcfg4);

	if (!(ghwcfg4 & USB_DWC2_GHWCFG4_DEDFIFOMODE)) {
		LOG_ERR("Only dedicated TX FIFO mode is supported");
		return -ENOTSUP;
	}

	/*
	 * Force device mode as we do no support role changes.
	 * Wait 25ms for the change to take effect.
	 */
	gusbcfg = USB_DWC2_GUSBCFG_FORCEDEVMODE;
	sys_write32(gusbcfg, gusbcfg_reg);
	k_msleep(25);

	/* Buffer DMA is always supported in Internal DMA mode.
	 * TODO: check and support descriptor DMA if available
	 */
	priv->bufferdma = (usb_dwc2_get_ghwcfg2_otgarch(ghwcfg2) ==
			   USB_DWC2_GHWCFG2_OTGARCH_INTERNALDMA);

	if (!IS_ENABLED(CONFIG_UDC_DWC2_DMA)) {
		priv->bufferdma = 0;
	} else if (priv->bufferdma) {
		LOG_WRN("Experimental DMA enabled");
	}

	if (ghwcfg2 & USB_DWC2_GHWCFG2_DYNFIFOSIZING) {
		LOG_DBG("Dynamic FIFO Sizing is enabled");
		priv->dynfifosizing = true;
	}

	/* Get the number or endpoints and IN endpoints we can use later */
	priv->numdeveps = usb_dwc2_get_ghwcfg2_numdeveps(ghwcfg2) + 1U;
	priv->ineps = usb_dwc2_get_ghwcfg4_ineps(ghwcfg4) + 1U;
	LOG_DBG("Number of endpoints (NUMDEVEPS + 1) %u", priv->numdeveps);
	LOG_DBG("Number of IN endpoints (INEPS + 1) %u", priv->ineps);

	LOG_DBG("Number of periodic IN endpoints (NUMDEVPERIOEPS) %u",
		usb_dwc2_get_ghwcfg4_numdevperioeps(ghwcfg4));
	LOG_DBG("Number of additional control endpoints (NUMCTLEPS) %u",
		usb_dwc2_get_ghwcfg4_numctleps(ghwcfg4));

	LOG_DBG("OTG architecture (OTGARCH) %u, mode (OTGMODE) %u",
		usb_dwc2_get_ghwcfg2_otgarch(ghwcfg2),
		usb_dwc2_get_ghwcfg2_otgmode(ghwcfg2));

	priv->dfifodepth = usb_dwc2_get_ghwcfg3_dfifodepth(ghwcfg3);
	LOG_DBG("DFIFO depth (DFIFODEPTH) %u bytes", priv->dfifodepth * 4);

	/* Limits on the DxEPTSIZ packet count and transfer size fields */
	priv->max_pktcnt = GHWCFG3_PKTCOUNT(usb_dwc2_get_ghwcfg3_pktsizewidth(ghwcfg3));
	priv->max_xfersize = GHWCFG3_XFERSIZE(usb_dwc2_get_ghwcfg3_xfersizewidth(ghwcfg3));
	LOG_DBG("Max packet count %u, Max transfer size %u",
		priv->max_pktcnt, priv->max_xfersize);

	LOG_DBG("Vendor Control interface support enabled: %s",
		(ghwcfg3 & USB_DWC2_GHWCFG3_VNDCTLSUPT) ? "true" : "false");

	LOG_DBG("PHY interface type: FSPHYTYPE %u, HSPHYTYPE %u, DATAWIDTH %u",
		usb_dwc2_get_ghwcfg2_fsphytype(ghwcfg2),
		usb_dwc2_get_ghwcfg2_hsphytype(ghwcfg2),
		usb_dwc2_get_ghwcfg4_phydatawidth(ghwcfg4));

	LOG_DBG("LPM mode is %s",
		(ghwcfg3 & USB_DWC2_GHWCFG3_LPMMODE) ? "enabled" : "disabled");

	/* Configure AHB, select Completer or DMA mode */
	gahbcfg = sys_read32(gahbcfg_reg);

	if (priv->bufferdma) {
		gahbcfg |= USB_DWC2_GAHBCFG_DMAEN;
	} else {
		gahbcfg &= ~USB_DWC2_GAHBCFG_DMAEN;
	}

	sys_write32(gahbcfg, gahbcfg_reg);

	dcfg = sys_read32(dcfg_reg);

	dcfg &= ~USB_DWC2_DCFG_DESCDMA;

	/* Configure PHY and device speed */
	dcfg &= ~USB_DWC2_DCFG_DEVSPD_MASK;
	switch (usb_dwc2_get_ghwcfg2_hsphytype(ghwcfg2)) {
	case USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUSULPI:
		__fallthrough;
	case USB_DWC2_GHWCFG2_HSPHYTYPE_ULPI:
		gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB20 |
			   USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_ULPI;
		dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20
			<< USB_DWC2_DCFG_DEVSPD_POS;
		break;
	case USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUS:
		gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB20 |
			   USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_UTMI;
		dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20
			<< USB_DWC2_DCFG_DEVSPD_POS;
		break;
	case USB_DWC2_GHWCFG2_HSPHYTYPE_NO_HS:
		__fallthrough;
	default:
		/* No high-speed PHY; fall back to the full-speed PHY if one
		 * is present.
		 */
		if (usb_dwc2_get_ghwcfg2_fsphytype(ghwcfg2) !=
		    USB_DWC2_GHWCFG2_FSPHYTYPE_NO_FS) {
			gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB11;
		}

		dcfg |= USB_DWC2_DCFG_DEVSPD_USBFS1148
			<< USB_DWC2_DCFG_DEVSPD_POS;
	}

	if (usb_dwc2_get_ghwcfg4_phydatawidth(ghwcfg4)) {
		gusbcfg |= USB_DWC2_GUSBCFG_PHYIF_16_BIT;
	}

	/* Update PHY configuration */
	sys_write32(gusbcfg, gusbcfg_reg);
	sys_write32(dcfg, dcfg_reg);

	/* Count OUT endpoints and force them to NAK until configured */
	priv->outeps = 0U;
	for (uint8_t i = 0U; i < priv->numdeveps; i++) {
		uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i);

		if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT ||
		    epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) {
			mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, i);

			sys_write32(USB_DWC2_DEPCTL_SNAK, doepctl_reg);
			priv->outeps++;
		}
	}

	LOG_DBG("Number of OUT endpoints %u", priv->outeps);

	/* Read and store all TxFIFO depths because Programmed FIFO Depths must
	 * not exceed the power-on values.
	 */
	val = sys_read32((mem_addr_t)&base->gnptxfsiz);
	priv->max_txfifo_depth[0] = usb_dwc2_get_gnptxfsiz_nptxfdep(val);
	for (uint8_t i = 1; i < priv->ineps; i++) {
		priv->max_txfifo_depth[i] = dwc2_get_txfdep(dev, i - 1);
	}

	priv->rxfifo_depth = usb_dwc2_get_grxfsiz(sys_read32(grxfsiz_reg));

	if (priv->dynfifosizing) {
		uint32_t gnptxfsiz;

		/* Driver does not dynamically resize RxFIFO so there is no need
		 * to store reset value. Read the reset value and make sure that
		 * the programmed value is not greater than what driver sets.
		 */
		priv->rxfifo_depth = MIN(priv->rxfifo_depth,
					 UDC_DWC2_GRXFSIZ_DEFAULT + priv->outeps * 2U);
		sys_write32(usb_dwc2_set_grxfsiz(priv->rxfifo_depth), grxfsiz_reg);

		/* Set TxFIFO 0 depth */
		val = MAX(UDC_DWC2_FIFO0_DEPTH, priv->max_txfifo_depth[0]);
		gnptxfsiz = usb_dwc2_set_gnptxfsiz_nptxfdep(val) |
			    usb_dwc2_set_gnptxfsiz_nptxfstaddr(priv->rxfifo_depth);

		sys_write32(gnptxfsiz, (mem_addr_t)&base->gnptxfsiz);
	}

	LOG_DBG("RX FIFO size %u bytes", priv->rxfifo_depth * 4);
	for (uint8_t i = 1U; i < priv->ineps; i++) {
		LOG_DBG("TX FIFO%u depth %u addr %u",
			i, priv->max_txfifo_depth[i], dwc2_get_txfaddr(dev, i));
	}

	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT,
				   USB_EP_TYPE_CONTROL, 64, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}

	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN,
				   USB_EP_TYPE_CONTROL, 64, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}

	/* Unmask interrupts */
	sys_write32(USB_DWC2_GINTSTS_OEPINT | USB_DWC2_GINTSTS_IEPINT |
		    USB_DWC2_GINTSTS_ENUMDONE | USB_DWC2_GINTSTS_USBRST |
		    USB_DWC2_GINTSTS_WKUPINT | USB_DWC2_GINTSTS_USBSUSP |
		    USB_DWC2_GINTSTS_SOF,
		    (mem_addr_t)&base->gintmsk);

	return 0;
}
1944
udc_dwc2_enable(const struct device * dev)1945 static int udc_dwc2_enable(const struct device *dev)
1946 {
1947 const struct udc_dwc2_config *const config = dev->config;
1948 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1949 int err;
1950
1951 err = dwc2_quirk_pre_enable(dev);
1952 if (err) {
1953 LOG_ERR("Quirk pre enable failed %d", err);
1954 return err;
1955 }
1956
1957 err = udc_dwc2_init_controller(dev);
1958 if (err) {
1959 return err;
1960 }
1961
1962 err = dwc2_quirk_post_enable(dev);
1963 if (err) {
1964 LOG_ERR("Quirk post enable failed %d", err);
1965 return err;
1966 }
1967
1968 /* Enable global interrupt */
1969 sys_set_bits((mem_addr_t)&base->gahbcfg, USB_DWC2_GAHBCFG_GLBINTRMASK);
1970 config->irq_enable_func(dev);
1971
1972 /* Disable soft disconnect */
1973 sys_clear_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_SFTDISCON);
1974 LOG_DBG("Enable device %p", base);
1975
1976 return 0;
1977 }
1978
udc_dwc2_disable(const struct device * dev)1979 static int udc_dwc2_disable(const struct device *dev)
1980 {
1981 const struct udc_dwc2_config *const config = dev->config;
1982 struct usb_dwc2_reg *const base = dwc2_get_base(dev);
1983 mem_addr_t dctl_reg = (mem_addr_t)&base->dctl;
1984 int err;
1985
1986 /* Enable soft disconnect */
1987 sys_set_bits(dctl_reg, USB_DWC2_DCTL_SFTDISCON);
1988 LOG_DBG("Disable device %p", dev);
1989
1990 if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) {
1991 LOG_DBG("Failed to disable control endpoint");
1992 return -EIO;
1993 }
1994
1995 if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) {
1996 LOG_DBG("Failed to disable control endpoint");
1997 return -EIO;
1998 }
1999
2000 config->irq_disable_func(dev);
2001 sys_clear_bits((mem_addr_t)&base->gahbcfg, USB_DWC2_GAHBCFG_GLBINTRMASK);
2002
2003 err = dwc2_quirk_disable(dev);
2004 if (err) {
2005 LOG_ERR("Quirk disable failed %d", err);
2006 return err;
2007 }
2008
2009 return 0;
2010 }
2011
/* Driver init hook: run the vendor init quirk, then set up pin control. */
static int udc_dwc2_init(const struct device *dev)
{
	int err = dwc2_quirk_init(dev);

	if (err != 0) {
		LOG_ERR("Quirk init failed %d", err);
		return err;
	}

	return dwc2_init_pinctrl(dev);
}
2024
/* UDC API: final shutdown; only vendor quirks have work to do here. */
static int udc_dwc2_shutdown(const struct device *dev)
{
	int ret = dwc2_quirk_shutdown(dev);

	if (ret != 0) {
		LOG_ERR("Quirk shutdown failed %d", ret);
		return ret;
	}

	return 0;
}
2037
dwc2_driver_preinit(const struct device * dev)2038 static int dwc2_driver_preinit(const struct device *dev)
2039 {
2040 const struct udc_dwc2_config *config = dev->config;
2041 struct udc_data *data = dev->data;
2042 uint16_t mps = 1023;
2043 uint32_t numdeveps;
2044 uint32_t ineps;
2045 int err;
2046
2047 k_mutex_init(&data->mutex);
2048
2049 data->caps.addr_before_status = true;
2050 data->caps.mps0 = UDC_MPS0_64;
2051
2052 (void)dwc2_quirk_caps(dev);
2053 if (data->caps.hs) {
2054 mps = 1024;
2055 }
2056
2057 /*
2058 * At this point, we cannot or do not want to access the hardware
2059 * registers to get GHWCFGn values. For now, we will use devicetree to
2060 * get GHWCFGn values and use them to determine the number and type of
2061 * configured endpoints in the hardware. This can be considered a
2062 * workaround, and we may change the upper layer internals to avoid it
2063 * in the future.
2064 */
2065 ineps = usb_dwc2_get_ghwcfg4_ineps(config->ghwcfg4) + 1U;
2066 numdeveps = usb_dwc2_get_ghwcfg2_numdeveps(config->ghwcfg2) + 1U;
2067 LOG_DBG("Number of endpoints (NUMDEVEPS + 1) %u", numdeveps);
2068 LOG_DBG("Number of IN endpoints (INEPS + 1) %u", ineps);
2069
2070 for (uint32_t i = 0, n = 0; i < numdeveps; i++) {
2071 uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(config->ghwcfg1, i);
2072
2073 if (epdir != USB_DWC2_GHWCFG1_EPDIR_OUT &&
2074 epdir != USB_DWC2_GHWCFG1_EPDIR_BDIR) {
2075 continue;
2076 }
2077
2078 if (i == 0) {
2079 config->ep_cfg_out[n].caps.control = 1;
2080 config->ep_cfg_out[n].caps.mps = 64;
2081 } else {
2082 config->ep_cfg_out[n].caps.bulk = 1;
2083 config->ep_cfg_out[n].caps.interrupt = 1;
2084 config->ep_cfg_out[n].caps.iso = 1;
2085 config->ep_cfg_out[n].caps.mps = mps;
2086 }
2087
2088 config->ep_cfg_out[n].caps.out = 1;
2089 config->ep_cfg_out[n].addr = USB_EP_DIR_OUT | i;
2090
2091 LOG_DBG("Register ep 0x%02x (%u)", i, n);
2092 err = udc_register_ep(dev, &config->ep_cfg_out[n]);
2093 if (err != 0) {
2094 LOG_ERR("Failed to register endpoint");
2095 return err;
2096 }
2097
2098 n++;
2099 /* Also check the number of desired OUT endpoints in devicetree. */
2100 if (n >= config->num_out_eps) {
2101 break;
2102 }
2103 }
2104
2105 for (uint32_t i = 0, n = 0; i < numdeveps; i++) {
2106 uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(config->ghwcfg1, i);
2107
2108 if (epdir != USB_DWC2_GHWCFG1_EPDIR_IN &&
2109 epdir != USB_DWC2_GHWCFG1_EPDIR_BDIR) {
2110 continue;
2111 }
2112
2113 if (i == 0) {
2114 config->ep_cfg_in[n].caps.control = 1;
2115 config->ep_cfg_in[n].caps.mps = 64;
2116 } else {
2117 config->ep_cfg_in[n].caps.bulk = 1;
2118 config->ep_cfg_in[n].caps.interrupt = 1;
2119 config->ep_cfg_in[n].caps.iso = 1;
2120 config->ep_cfg_in[n].caps.mps = mps;
2121 }
2122
2123 config->ep_cfg_in[n].caps.in = 1;
2124 config->ep_cfg_in[n].addr = USB_EP_DIR_IN | i;
2125
2126 LOG_DBG("Register ep 0x%02x (%u)", USB_EP_DIR_IN | i, n);
2127 err = udc_register_ep(dev, &config->ep_cfg_in[n]);
2128 if (err != 0) {
2129 LOG_ERR("Failed to register endpoint");
2130 return err;
2131 }
2132
2133 n++;
2134 /* Also check the number of desired IN endpoints in devicetree. */
2135 if (n >= MIN(ineps, config->num_in_eps)) {
2136 break;
2137 }
2138 }
2139
2140 config->make_thread(dev);
2141
2142 return 0;
2143 }
2144
udc_dwc2_lock(const struct device * dev)2145 static int udc_dwc2_lock(const struct device *dev)
2146 {
2147 return udc_lock_internal(dev, K_FOREVER);
2148 }
2149
/* UDC API: release exclusive access to the controller. */
static int udc_dwc2_unlock(const struct device *dev)
{
	int err = udc_unlock_internal(dev);

	return err;
}
2154
/* UDC driver API vtable registered with the common UDC layer. */
static const struct udc_api udc_dwc2_api = {
	/* Locking and controller state management */
	.lock = udc_dwc2_lock,
	.unlock = udc_dwc2_unlock,
	.device_speed = udc_dwc2_device_speed,
	.init = udc_dwc2_init,
	.enable = udc_dwc2_enable,
	.disable = udc_dwc2_disable,
	.shutdown = udc_dwc2_shutdown,
	.set_address = udc_dwc2_set_address,
	.test_mode = udc_dwc2_test_mode,
	.host_wakeup = udc_dwc2_host_wakeup,
	/* Endpoint management and transfers */
	.ep_enable = udc_dwc2_ep_activate,
	.ep_disable = udc_dwc2_ep_deactivate,
	.ep_set_halt = udc_dwc2_ep_set_halt,
	.ep_clear_halt = udc_dwc2_ep_clear_halt,
	.ep_enqueue = udc_dwc2_ep_enqueue,
	.ep_dequeue = udc_dwc2_ep_dequeue,
};
2173
#define DT_DRV_COMPAT snps_dwc2

/* Resolve the vendor quirks table for instance n, or NULL when the node has
 * no second (vendor) compatible entry.
 */
#define UDC_DWC2_VENDOR_QUIRK_GET(n)					\
	COND_CODE_1(DT_NODE_VENDOR_HAS_IDX(DT_DRV_INST(n), 1),		\
		    (&dwc2_vendor_quirks_##n),				\
		    (NULL))

/* Core register base address: either the sole reg entry or the one named
 * "core" when several reg entries are present.
 */
#define UDC_DWC2_DT_INST_REG_ADDR(n)					\
	COND_CODE_1(DT_NUM_REGS(DT_DRV_INST(n)), (DT_INST_REG_ADDR(n)),	\
		    (DT_INST_REG_ADDR_BY_NAME(n, core)))

/* Define pinctrl state only when the node provides a "default" state. */
#define UDC_DWC2_PINCTRL_DT_INST_DEFINE(n)				\
	COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default),		\
		    (PINCTRL_DT_INST_DEFINE(n)), ())

/* Pinctrl config pointer, or NULL when no "default" state exists. */
#define UDC_DWC2_PINCTRL_DT_INST_DEV_CONFIG_GET(n)			\
	COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default),		\
		    ((void *)PINCTRL_DT_INST_DEV_CONFIG_GET(n)), (NULL))

/* IRQ flags: use the "type" interrupt cell when the binding defines one,
 * otherwise 0.
 */
#define UDC_DWC2_IRQ_FLAGS_TYPE0(n)	0
#define UDC_DWC2_IRQ_FLAGS_TYPE1(n)	DT_INST_IRQ(n, type)
#define DW_IRQ_FLAGS(n) \
	_CONCAT(UDC_DWC2_IRQ_FLAGS_TYPE, DT_INST_IRQ_HAS_CELL(n, type))(n)
2197
/*
 * A UDC driver should always be implemented as a multi-instance
 * driver, even if your platform does not require it.
 *
 * Per devicetree instance, this defines: the driver thread and its stack,
 * IRQ enable/disable helpers, the endpoint configuration arrays, the
 * constant config, the private/common driver data, and the device itself.
 */
#define UDC_DWC2_DEVICE_DEFINE(n)					\
	UDC_DWC2_PINCTRL_DT_INST_DEFINE(n);				\
									\
	K_THREAD_STACK_DEFINE(udc_dwc2_stack_##n, CONFIG_UDC_DWC2_STACK_SIZE); \
									\
	static void udc_dwc2_thread_##n(void *dev, void *arg1, void *arg2) \
	{								\
		while (true) {						\
			dwc2_thread_handler(dev);			\
		}							\
	}								\
									\
	static void udc_dwc2_make_thread_##n(const struct device *dev)	\
	{								\
		struct udc_dwc2_data *priv = udc_get_private(dev);	\
									\
		k_thread_create(&priv->thread_data,			\
				udc_dwc2_stack_##n,			\
				K_THREAD_STACK_SIZEOF(udc_dwc2_stack_##n), \
				udc_dwc2_thread_##n,			\
				(void *)dev, NULL, NULL,		\
				K_PRIO_COOP(CONFIG_UDC_DWC2_THREAD_PRIORITY), \
				K_ESSENTIAL,				\
				K_NO_WAIT);				\
		k_thread_name_set(&priv->thread_data, dev->name);	\
	}								\
									\
	static void udc_dwc2_irq_enable_func_##n(const struct device *dev) \
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n),				\
			    DT_INST_IRQ(n, priority),			\
			    udc_dwc2_isr_handler,			\
			    DEVICE_DT_INST_GET(n),			\
			    DW_IRQ_FLAGS(n));				\
									\
		irq_enable(DT_INST_IRQN(n));				\
	}								\
									\
	static void udc_dwc2_irq_disable_func_##n(const struct device *dev) \
	{								\
		irq_disable(DT_INST_IRQN(n));				\
	}								\
									\
	static struct udc_ep_config ep_cfg_out[DT_INST_PROP(n, num_out_eps)]; \
	static struct udc_ep_config ep_cfg_in[DT_INST_PROP(n, num_in_eps)]; \
									\
	static const struct udc_dwc2_config udc_dwc2_config_##n = {	\
		.num_out_eps = DT_INST_PROP(n, num_out_eps),		\
		.num_in_eps = DT_INST_PROP(n, num_in_eps),		\
		.ep_cfg_in = ep_cfg_in,					\
		.ep_cfg_out = ep_cfg_out,				\
		.make_thread = udc_dwc2_make_thread_##n,		\
		.base = (struct usb_dwc2_reg *)UDC_DWC2_DT_INST_REG_ADDR(n), \
		.pcfg = UDC_DWC2_PINCTRL_DT_INST_DEV_CONFIG_GET(n),	\
		.irq_enable_func = udc_dwc2_irq_enable_func_##n,	\
		.irq_disable_func = udc_dwc2_irq_disable_func_##n,	\
		.quirks = UDC_DWC2_VENDOR_QUIRK_GET(n),			\
		.ghwcfg1 = DT_INST_PROP(n, ghwcfg1),			\
		.ghwcfg2 = DT_INST_PROP(n, ghwcfg2),			\
		.ghwcfg4 = DT_INST_PROP(n, ghwcfg4),			\
	};								\
									\
	static struct udc_dwc2_data udc_priv_##n = {			\
	};								\
									\
	static struct udc_data udc_data_##n = {				\
		.mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex),	\
		.priv = &udc_priv_##n,					\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n, dwc2_driver_preinit, NULL,		\
			      &udc_data_##n, &udc_dwc2_config_##n,	\
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
			      &udc_dwc2_api);

/* Instantiate the driver for every enabled snps,dwc2 devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(UDC_DWC2_DEVICE_DEFINE)
2278