1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * core.c - DesignWare USB3 DRD Controller Core file
4 *
5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
6 *
7 * Authors: Felipe Balbi <balbi@ti.com>,
8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 */
10
11 #include <linux/clk.h>
12 #include <linux/version.h>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/interrupt.h>
20 #include <linux/ioport.h>
21 #include <linux/io.h>
22 #include <linux/list.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/of.h>
26 #include <linux/of_graph.h>
27 #include <linux/acpi.h>
28 #include <linux/pinctrl/consumer.h>
29 #include <linux/reset.h>
30 #include <linux/bitfield.h>
31
32 #include <linux/usb/ch9.h>
33 #include <linux/usb/gadget.h>
34 #include <linux/usb/of.h>
35 #include <linux/usb/otg.h>
36
37 #include "core.h"
38 #include "gadget.h"
39 #include "io.h"
40
41 #include "debug.h"
42
43 #define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
44
45 /**
46 * dwc3_get_dr_mode - Validates and sets dr_mode
47 * @dwc: pointer to our context structure
48 */
49 static int dwc3_get_dr_mode(struct dwc3 *dwc)
50 {
51 enum usb_dr_mode mode;
52 struct device *dev = dwc->dev;
53 unsigned int hw_mode;
54
55 if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
56 dwc->dr_mode = USB_DR_MODE_OTG;
57
58 mode = dwc->dr_mode;
59 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
60
61 switch (hw_mode) {
62 case DWC3_GHWPARAMS0_MODE_GADGET:
63 if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
64 dev_err(dev,
65 "Controller does not support host mode.\n");
66 return -EINVAL;
67 }
68 mode = USB_DR_MODE_PERIPHERAL;
69 break;
70 case DWC3_GHWPARAMS0_MODE_HOST:
71 if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
72 dev_err(dev,
73 "Controller does not support device mode.\n");
74 return -EINVAL;
75 }
76 mode = USB_DR_MODE_HOST;
77 break;
78 default:
79 if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
80 mode = USB_DR_MODE_HOST;
81 else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
82 mode = USB_DR_MODE_PERIPHERAL;
83
84 /*
85 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
86 * mode. If the controller supports DRD but the dr_mode is not
87 * specified or set to OTG, then set the mode to peripheral.
88 */
89 if (mode == USB_DR_MODE_OTG && !dwc->edev &&
90 (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
91 !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
92 !DWC3_VER_IS_PRIOR(DWC3, 330A))
93 mode = USB_DR_MODE_PERIPHERAL;
94 }
95
96 if (mode != dwc->dr_mode) {
97 dev_warn(dev,
98 "Configuration mismatch. dr_mode forced to %s\n",
99 mode == USB_DR_MODE_HOST ? "host" : "gadget");
100
101 dwc->dr_mode = mode;
102 }
103
104 return 0;
105 }
106
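/**
 * dwc3_set_prtcap - set GCTL.PRTCAPDIR to the requested role
 * @dwc: pointer to our context structure
 * @mode: one of DWC3_GCTL_PRTCAP_{HOST,DEVICE,OTG}
 *
 * Also caches the new value in dwc->current_dr_role.
 */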
107 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
108 {
109 u32 reg;
110
111 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
112 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
113 reg |= DWC3_GCTL_PRTCAPDIR(mode);
114 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
115
116 dwc->current_dr_role = mode;
117 }
118
119 static void __dwc3_set_mode(struct work_struct *work)
120 {
121 struct dwc3 *dwc = work_to_dwc(work);
122 unsigned long flags;
123 int ret;
124 u32 reg;
125 u32 desired_dr_role;
126
127 mutex_lock(&dwc->mutex);
128 spin_lock_irqsave(&dwc->lock, flags);
129 desired_dr_role = dwc->desired_dr_role;
130 spin_unlock_irqrestore(&dwc->lock, flags);
131
132 pm_runtime_get_sync(dwc->dev);
133
134 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
135 dwc3_otg_update(dwc, 0);
136
137 if (!desired_dr_role)
138 goto out;
139
140 if (desired_dr_role == dwc->current_dr_role)
141 goto out;
142
143 if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
144 goto out;
145
146 switch (dwc->current_dr_role) {
147 case DWC3_GCTL_PRTCAP_HOST:
148 dwc3_host_exit(dwc);
149 break;
150 case DWC3_GCTL_PRTCAP_DEVICE:
151 dwc3_gadget_exit(dwc);
152 dwc3_event_buffers_cleanup(dwc);
153 break;
154 case DWC3_GCTL_PRTCAP_OTG:
155 dwc3_otg_exit(dwc);
156 spin_lock_irqsave(&dwc->lock, flags);
157 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
158 spin_unlock_irqrestore(&dwc->lock, flags);
159 dwc3_otg_update(dwc, 1);
160 break;
161 default:
162 break;
163 }
164
165 /*
166 * When current_dr_role is not set, there's no role switching.
167 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
168 */
169 if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
170 DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
171 desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
172 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
173 reg |= DWC3_GCTL_CORESOFTRESET;
174 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
175
176 /*
177 * Wait for the internal clocks to synchronize. DWC_usb31 and
178 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
179 * keep it consistent across different IPs, let's wait up to
180 * 100ms before clearing GCTL.CORESOFTRESET.
181 */
182 msleep(100);
183
184 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
185 reg &= ~DWC3_GCTL_CORESOFTRESET;
186 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
187 }
188
189 spin_lock_irqsave(&dwc->lock, flags);
190
191 dwc3_set_prtcap(dwc, desired_dr_role);
192
193 spin_unlock_irqrestore(&dwc->lock, flags);
194
195 switch (desired_dr_role) {
196 case DWC3_GCTL_PRTCAP_HOST:
197 ret = dwc3_host_init(dwc);
198 if (ret) {
199 dev_err(dwc->dev, "failed to initialize host\n");
200 } else {
201 if (dwc->usb2_phy)
202 otg_set_vbus(dwc->usb2_phy->otg, true);
203 phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
204 phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
205 if (dwc->dis_split_quirk) {
206 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
207 reg |= DWC3_GUCTL3_SPLITDISABLE;
208 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
209 }
210 }
211 break;
212 case DWC3_GCTL_PRTCAP_DEVICE:
213 dwc3_core_soft_reset(dwc);
214
215 dwc3_event_buffers_setup(dwc);
216
217 if (dwc->usb2_phy)
218 otg_set_vbus(dwc->usb2_phy->otg, false);
219 phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
220 phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
221
222 ret = dwc3_gadget_init(dwc);
223 if (ret)
224 dev_err(dwc->dev, "failed to initialize peripheral\n");
225 break;
226 case DWC3_GCTL_PRTCAP_OTG:
227 dwc3_otg_init(dwc);
228 dwc3_otg_update(dwc, 0);
229 break;
230 default:
231 break;
232 }
233
234 out:
235 pm_runtime_mark_last_busy(dwc->dev);
236 pm_runtime_put_autosuspend(dwc->dev);
237 mutex_unlock(&dwc->mutex);
238 }
239
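/**
 * dwc3_set_mode - schedule a dual-role (DRD) mode switch
 * @dwc: pointer to our context structure
 * @mode: DWC3_GCTL_PRTCAP_* role to switch to
 *
 * Only valid when dr_mode is OTG; records the desired role and queues
 * __dwc3_set_mode() on the freezable workqueue so the switch happens in
 * process context.
 */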
240 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
241 {
242 unsigned long flags;
243
244 if (dwc->dr_mode != USB_DR_MODE_OTG)
245 return;
246
247 spin_lock_irqsave(&dwc->lock, flags);
248 dwc->desired_dr_role = mode;
249 spin_unlock_irqrestore(&dwc->lock, flags);
250
251 queue_work(system_freezable_wq, &dwc->drd_work);
252 }
253
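/**
 * dwc3_core_fifo_space - query available FIFO space for an endpoint
 * @dep: the endpoint to query
 * @type: GDBGFIFOSPACE queue type selector (e.g. TX/RX FIFO)
 *
 * Selects the endpoint and queue type in GDBGFIFOSPACE and returns the
 * SPACE_AVAILABLE field read back from that register.
 */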
254 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
255 {
256 struct dwc3 *dwc = dep->dwc;
257 u32 reg;
258
259 dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
260 DWC3_GDBGFIFOSPACE_NUM(dep->number) |
261 DWC3_GDBGFIFOSPACE_TYPE(type));
262
263 reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);
264
265 return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
266 }
267
268 /**
269 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
270 * @dwc: pointer to our context structure
271 */
272 int dwc3_core_soft_reset(struct dwc3 *dwc)
273 {
274 u32 reg;
275 int retries = 1000;
276
277 /*
278 * We're resetting only the device side because, if we're in host mode,
279 * the xHCI driver will reset the host block. If dwc3 was configured for
280 * host-only mode or the current role is host, then we can return early.
281 */
282 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
283 return 0;
284
285 /*
286 * If the dr_mode is host and the dwc->current_dr_role is not the
287 * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
288 * isn't executed yet. Ensure the phy is ready before the controller
289 * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
290 * the phy.
291 *
292 * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
293 * is port index. If this is a multiport host, then we need to reset
294 * all active ports.
295 */
296 if (dwc->dr_mode == USB_DR_MODE_HOST) {
297 u32 usb3_port;
298 u32 usb2_port;
299
300 usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
301 usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
302 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
303
304 usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
305 usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
306 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
307
308 /* Small delay for phy reset assertion */
309 usleep_range(1000, 2000);
310
311 usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
312 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
313
314 usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
315 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
316
317 /* Wait for clock synchronization */
318 msleep(50);
319 return 0;
320 }
321
322 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
323 reg |= DWC3_DCTL_CSFTRST;
324 reg &= ~DWC3_DCTL_RUN_STOP;
325 dwc3_gadget_dctl_write_safe(dwc, reg);
326
327 /*
328 * For DWC_usb31 controllers 1.90a and later, the DCTL.CSFTRST bit
329 * is cleared only after all the clocks are synchronized. This can
330 * take a little more than 50ms. Poll every 20ms, up to 10 times,
331 * instead.
332 */
333 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
334 retries = 10;
335
336 do {
337 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
338 if (!(reg & DWC3_DCTL_CSFTRST))
339 goto done;
340
341 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
342 msleep(20);
343 else
344 udelay(1);
345 } while (--retries);
346
347 dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
348 return -ETIMEDOUT;
349
350 done:
351 /*
352 * For DWC_usb31 controllers 1.80a and prior, once the DCTL.CSFTRST bit
353 * is cleared, we must wait at least 50ms before accessing the PHY
354 * domain (synchronization delay).
355 */
356 if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
357 msleep(50);
358
359 return 0;
360 }
361
362 /*
363 * dwc3_frame_length_adjustment - Adjusts frame length if required
364 * @dwc: Pointer to our controller context structure
365 */
366 static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
367 {
368 u32 reg;
369 u32 dft;
370
371 if (DWC3_VER_IS_PRIOR(DWC3, 250A))
372 return;
373
374 if (dwc->fladj == 0)
375 return;
376
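/*
 * dwc->fladj is taken from the optional
 * "snps,quirk-frame-length-adjustment" property (for example <0x20>,
 * illustrative only) and is programmed into GFLADJ.GFLADJ_30MHZ below
 * with sideband selection enabled.
 */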
377 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
378 dft = reg & DWC3_GFLADJ_30MHZ_MASK;
379 if (dft != dwc->fladj) {
380 reg &= ~DWC3_GFLADJ_30MHZ_MASK;
381 reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
382 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
383 }
384 }
385
386 /**
387 * dwc3_ref_clk_period - Reference clock period configuration
388 * Default reference clock period depends on hardware
389 * configuration. For systems with reference clock that differs
390 * from the default, this will set clock period in DWC3_GUCTL
391 * register.
392 * @dwc: Pointer to our controller context structure
393 */
394 static void dwc3_ref_clk_period(struct dwc3 *dwc)
395 {
396 unsigned long period;
397 unsigned long fladj;
398 unsigned long decr;
399 unsigned long rate;
400 u32 reg;
401
402 if (dwc->ref_clk) {
403 rate = clk_get_rate(dwc->ref_clk);
404 if (!rate)
405 return;
406 period = NSEC_PER_SEC / rate;
407 } else if (dwc->ref_clk_per) {
408 period = dwc->ref_clk_per;
409 rate = NSEC_PER_SEC / period;
410 } else {
411 return;
412 }
413
414 reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
415 reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
416 reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
417 dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
418
419 if (DWC3_VER_IS_PRIOR(DWC3, 250A))
420 return;
421
422 /*
423 * The calculation below is
424 *
425 * 125000 * (NSEC_PER_SEC / (rate * period) - 1)
426 *
427 * but rearranged for fixed-point arithmetic. The division must be
428 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
429 * neither does rate * period).
430 *
431 * Note that rate * period ~= NSEC_PER_SECOND, minus the number of
432 * nanoseconds of error caused by the truncation which happened during
433 * the division when calculating rate or period (whichever one was
434 * derived from the other). We first calculate the relative error, then
435 * scale it to units of 8 ppm.
436 */
437 fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
438 fladj -= 125000;
439
440 /*
441 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
442 */
443 decr = 480000000 / rate;
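/*
 * Worked example (illustrative values only): with a 24 MHz ref clock,
 * period = 10^9 / 24000000 = 41 ns, so
 * fladj = 125000 * 10^9 / (24000000 * 41) - 125000 = 2032, and
 * decr  = 480000000 / 24000000 = 20, i.e. 240MHZDECR = 10, PLS1 = 0.
 */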
444
445 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
446 reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
447 & ~DWC3_GFLADJ_240MHZDECR
448 & ~DWC3_GFLADJ_240MHZDECR_PLS1;
449 reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
450 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
451 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
452
453 if (dwc->gfladj_refclk_lpm_sel)
454 reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
455
456 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
457 }
458
459 /**
460 * dwc3_free_one_event_buffer - Frees one event buffer
461 * @dwc: Pointer to our controller context structure
462 * @evt: Pointer to event buffer to be freed
463 */
464 static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
465 struct dwc3_event_buffer *evt)
466 {
467 dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
468 }
469
470 /**
471 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
472 * @dwc: Pointer to our controller context structure
473 * @length: size of the event buffer
474 *
475 * Returns a pointer to the allocated event buffer structure on success
476 * otherwise ERR_PTR(errno).
477 */
478 static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
479 unsigned int length)
480 {
481 struct dwc3_event_buffer *evt;
482
483 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
484 if (!evt)
485 return ERR_PTR(-ENOMEM);
486
487 evt->dwc = dwc;
488 evt->length = length;
489 evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
490 if (!evt->cache)
491 return ERR_PTR(-ENOMEM);
492
493 evt->buf = dma_alloc_coherent(dwc->sysdev, length,
494 &evt->dma, GFP_KERNEL);
495 if (!evt->buf)
496 return ERR_PTR(-ENOMEM);
497
498 return evt;
499 }
500
501 /**
502 * dwc3_free_event_buffers - frees all allocated event buffers
503 * @dwc: Pointer to our controller context structure
504 */
505 static void dwc3_free_event_buffers(struct dwc3 *dwc)
506 {
507 struct dwc3_event_buffer *evt;
508
509 evt = dwc->ev_buf;
510 if (evt)
511 dwc3_free_one_event_buffer(dwc, evt);
512 }
513
514 /**
515 * dwc3_alloc_event_buffers - Allocates the event buffer of size @length
516 * @dwc: pointer to our controller context structure
517 * @length: size of event buffer
518 *
519 * Returns 0 on success otherwise negative errno. In the error case, dwc
520 * may contain some buffers allocated but not all which were requested.
521 */
522 static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
523 {
524 struct dwc3_event_buffer *evt;
525
526 evt = dwc3_alloc_one_event_buffer(dwc, length);
527 if (IS_ERR(evt)) {
528 dev_err(dwc->dev, "can't allocate event buffer\n");
529 return PTR_ERR(evt);
530 }
531 dwc->ev_buf = evt;
532
533 return 0;
534 }
535
536 /**
537 * dwc3_event_buffers_setup - setup our allocated event buffers
538 * @dwc: pointer to our controller context structure
539 *
540 * Returns 0 on success otherwise negative errno.
541 */
542 int dwc3_event_buffers_setup(struct dwc3 *dwc)
543 {
544 struct dwc3_event_buffer *evt;
545
546 evt = dwc->ev_buf;
547 evt->lpos = 0;
548 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
549 lower_32_bits(evt->dma));
550 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
551 upper_32_bits(evt->dma));
552 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
553 DWC3_GEVNTSIZ_SIZE(evt->length));
554 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
555
556 return 0;
557 }
558
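/**
 * dwc3_event_buffers_cleanup - disable the event buffer
 * @dwc: pointer to our controller context structure
 *
 * Clears the event buffer address and size registers and sets the
 * interrupt mask so the controller stops writing events.
 */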
559 void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
560 {
561 struct dwc3_event_buffer *evt;
562
563 evt = dwc->ev_buf;
564
565 evt->lpos = 0;
566
567 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
568 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
569 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
570 | DWC3_GEVNTSIZ_SIZE(0));
571 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
572 }
573
574 static void dwc3_core_num_eps(struct dwc3 *dwc)
575 {
576 struct dwc3_hwparams *parms = &dwc->hwparams;
577
578 dwc->num_eps = DWC3_NUM_EPS(parms);
579 }
580
581 static void dwc3_cache_hwparams(struct dwc3 *dwc)
582 {
583 struct dwc3_hwparams *parms = &dwc->hwparams;
584
585 parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
586 parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
587 parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
588 parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
589 parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
590 parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
591 parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
592 parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
593 parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
594
595 if (DWC3_IP_IS(DWC32))
596 parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
597 }
598
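/*
 * Initialize the ULPI bus if the HS PHY interface is ULPI, or if it is
 * UTMI+ULPI and "snps,hsphy_interface" selects "ulpi".
 */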
599 static int dwc3_core_ulpi_init(struct dwc3 *dwc)
600 {
601 int intf;
602 int ret = 0;
603
604 intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
605
606 if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
607 (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
608 dwc->hsphy_interface &&
609 !strncmp(dwc->hsphy_interface, "ulpi", 4)))
610 ret = dwc3_ulpi_init(dwc);
611
612 return ret;
613 }
614
615 /**
616 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
617 * @dwc: Pointer to our controller context structure
618 *
619 * Returns 0 on success. The USB PHY interfaces are configured but not
620 * initialized. The PHY interfaces and the PHYs get initialized together with
621 * the core in dwc3_core_init.
622 */
623 static int dwc3_phy_setup(struct dwc3 *dwc)
624 {
625 unsigned int hw_mode;
626 u32 reg;
627
628 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
629
630 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
631
632 /*
633 * Make sure UX_EXIT_PX is cleared as that causes issues with some
634 * PHYs. Also, this bit is not supposed to be used in normal operation.
635 */
636 reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
637
638 /*
639 * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
640 * to '0' during coreConsultant configuration. So default value
641 * will be '0' when the core is reset. Application needs to set it
642 * to '1' after the core initialization is completed.
643 */
644 if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
645 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
646
647 /*
648 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
649 * power-on reset, and it can be set after core initialization, which is
650 * after device soft-reset during initialization.
651 */
652 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
653 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
654
655 if (dwc->u2ss_inp3_quirk)
656 reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
657
658 if (dwc->dis_rxdet_inp3_quirk)
659 reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;
660
661 if (dwc->req_p1p2p3_quirk)
662 reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;
663
664 if (dwc->del_p1p2p3_quirk)
665 reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;
666
667 if (dwc->del_phy_power_chg_quirk)
668 reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;
669
670 if (dwc->lfps_filter_quirk)
671 reg |= DWC3_GUSB3PIPECTL_LFPSFILT;
672
673 if (dwc->rx_detect_poll_quirk)
674 reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;
675
676 if (dwc->tx_de_emphasis_quirk)
677 reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
678
679 if (dwc->dis_u3_susphy_quirk)
680 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
681
682 if (dwc->dis_del_phy_power_chg_quirk)
683 reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
684
685 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
686
687 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
688
689 /* Select the HS PHY interface */
690 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
691 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
692 if (dwc->hsphy_interface &&
693 !strncmp(dwc->hsphy_interface, "utmi", 4)) {
694 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
695 break;
696 } else if (dwc->hsphy_interface &&
697 !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
698 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
699 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
700 } else {
701 /* Relying on default value. */
702 if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
703 break;
704 }
705 fallthrough;
706 case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
707 default:
708 break;
709 }
710
711 switch (dwc->hsphy_mode) {
712 case USBPHY_INTERFACE_MODE_UTMI:
713 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
714 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
715 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
716 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
717 break;
718 case USBPHY_INTERFACE_MODE_UTMIW:
719 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
720 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
721 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
722 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
723 break;
724 default:
725 break;
726 }
727
728 /*
729 * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
730 * '0' during coreConsultant configuration. So default value will
731 * be '0' when the core is reset. Application needs to set it to
732 * '1' after the core initialization is completed.
733 */
734 if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
735 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
736
737 /*
738 * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
739 * power-on reset, and it can be set after core initialization, which is
740 * after device soft-reset during initialization.
741 */
742 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
743 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
744
745 if (dwc->dis_u2_susphy_quirk)
746 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
747
748 if (dwc->dis_enblslpm_quirk)
749 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
750 else
751 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
752
753 if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
754 reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
755
756 /*
757 * Some ULPI USB PHYs do not support an internal VBUS supply; driving
758 * the CPEN pin requires setting the ULPI DRVVBUSEXTERNAL bit in the
759 * OTG_CTRL register. The controller then sets the USB2 PHY
760 * ULPIEXTVBUSDRV bit[17] of the GUSB2PHYCFG register to drive VBUS
761 * with an external supply.
762 */
763 if (dwc->ulpi_ext_vbus_drv)
764 reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;
765
766 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
767
768 return 0;
769 }
770
771 static int dwc3_phy_init(struct dwc3 *dwc)
772 {
773 int ret;
774
775 usb_phy_init(dwc->usb2_phy);
776 usb_phy_init(dwc->usb3_phy);
777
778 ret = phy_init(dwc->usb2_generic_phy);
779 if (ret < 0)
780 goto err_shutdown_usb3_phy;
781
782 ret = phy_init(dwc->usb3_generic_phy);
783 if (ret < 0)
784 goto err_exit_usb2_phy;
785
786 return 0;
787
788 err_exit_usb2_phy:
789 phy_exit(dwc->usb2_generic_phy);
790 err_shutdown_usb3_phy:
791 usb_phy_shutdown(dwc->usb3_phy);
792 usb_phy_shutdown(dwc->usb2_phy);
793
794 return ret;
795 }
796
797 static void dwc3_phy_exit(struct dwc3 *dwc)
798 {
799 phy_exit(dwc->usb3_generic_phy);
800 phy_exit(dwc->usb2_generic_phy);
801
802 usb_phy_shutdown(dwc->usb3_phy);
803 usb_phy_shutdown(dwc->usb2_phy);
804 }
805
806 static int dwc3_phy_power_on(struct dwc3 *dwc)
807 {
808 int ret;
809
810 usb_phy_set_suspend(dwc->usb2_phy, 0);
811 usb_phy_set_suspend(dwc->usb3_phy, 0);
812
813 ret = phy_power_on(dwc->usb2_generic_phy);
814 if (ret < 0)
815 goto err_suspend_usb3_phy;
816
817 ret = phy_power_on(dwc->usb3_generic_phy);
818 if (ret < 0)
819 goto err_power_off_usb2_phy;
820
821 return 0;
822
823 err_power_off_usb2_phy:
824 phy_power_off(dwc->usb2_generic_phy);
825 err_suspend_usb3_phy:
826 usb_phy_set_suspend(dwc->usb3_phy, 1);
827 usb_phy_set_suspend(dwc->usb2_phy, 1);
828
829 return ret;
830 }
831
832 static void dwc3_phy_power_off(struct dwc3 *dwc)
833 {
834 phy_power_off(dwc->usb3_generic_phy);
835 phy_power_off(dwc->usb2_generic_phy);
836
837 usb_phy_set_suspend(dwc->usb3_phy, 1);
838 usb_phy_set_suspend(dwc->usb2_phy, 1);
839 }
840
841 static int dwc3_clk_enable(struct dwc3 *dwc)
842 {
843 int ret;
844
845 ret = clk_prepare_enable(dwc->bus_clk);
846 if (ret)
847 return ret;
848
849 ret = clk_prepare_enable(dwc->ref_clk);
850 if (ret)
851 goto disable_bus_clk;
852
853 ret = clk_prepare_enable(dwc->susp_clk);
854 if (ret)
855 goto disable_ref_clk;
856
857 return 0;
858
859 disable_ref_clk:
860 clk_disable_unprepare(dwc->ref_clk);
861 disable_bus_clk:
862 clk_disable_unprepare(dwc->bus_clk);
863 return ret;
864 }
865
866 static void dwc3_clk_disable(struct dwc3 *dwc)
867 {
868 clk_disable_unprepare(dwc->susp_clk);
869 clk_disable_unprepare(dwc->ref_clk);
870 clk_disable_unprepare(dwc->bus_clk);
871 }
872
873 static void dwc3_core_exit(struct dwc3 *dwc)
874 {
875 dwc3_event_buffers_cleanup(dwc);
876 dwc3_phy_power_off(dwc);
877 dwc3_phy_exit(dwc);
878 dwc3_clk_disable(dwc);
879 reset_control_assert(dwc->reset);
880 }
881
882 static bool dwc3_core_is_valid(struct dwc3 *dwc)
883 {
884 u32 reg;
885
886 reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
887 dwc->ip = DWC3_GSNPS_ID(reg);
888
889 /* This should read as U3 followed by revision number */
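/*
 * For example (illustrative), GSNPSID == 0x5533290a identifies a
 * DWC_usb3 core ("U3" in ASCII) at revision 2.90a; DWC_usb31/DWC_usb32
 * parts report their revision via the separate VER_NUMBER register.
 */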
890 if (DWC3_IP_IS(DWC3)) {
891 dwc->revision = reg;
892 } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
893 dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
894 dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
895 } else {
896 return false;
897 }
898
899 return true;
900 }
901
902 static void dwc3_core_setup_global_control(struct dwc3 *dwc)
903 {
904 u32 reg;
905
906 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
907 reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
908
909 switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
910 case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
911 /**
912 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
913 * issue which would cause xHCI compliance tests to fail.
914 *
915 * Because of that we cannot enable clock gating on such
916 * configurations.
917 *
918 * Refers to:
919 *
920 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
921 * SOF/ITP Mode Used
922 */
923 if ((dwc->dr_mode == USB_DR_MODE_HOST ||
924 dwc->dr_mode == USB_DR_MODE_OTG) &&
925 DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
926 reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
927 else
928 reg &= ~DWC3_GCTL_DSBLCLKGTNG;
929 break;
930 case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
931 /*
932 * REVISIT Enabling this bit so that host-mode hibernation
933 * will work. Device-mode hibernation is not yet implemented.
934 */
935 reg |= DWC3_GCTL_GBLHIBERNATIONEN;
936 break;
937 default:
938 /* nothing */
939 break;
940 }
941
942 /* check if current dwc3 is on simulation board */
943 if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
944 dev_info(dwc->dev, "Running with FPGA optimizations\n");
945 dwc->is_fpga = true;
946 }
947
948 WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
949 "disable_scramble cannot be used on non-FPGA builds\n");
950
951 if (dwc->disable_scramble_quirk && dwc->is_fpga)
952 reg |= DWC3_GCTL_DISSCRAMBLE;
953 else
954 reg &= ~DWC3_GCTL_DISSCRAMBLE;
955
956 if (dwc->u2exit_lfps_quirk)
957 reg |= DWC3_GCTL_U2EXIT_LFPS;
958
959 /*
960 * WORKAROUND: DWC3 revisions <1.90a have a bug
961 * where the device can fail to connect at SuperSpeed
962 * and falls back to high-speed mode which causes
963 * the device to enter a Connect/Disconnect loop
964 */
965 if (DWC3_VER_IS_PRIOR(DWC3, 190A))
966 reg |= DWC3_GCTL_U2RSTECN;
967
968 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
969 }
970
971 static int dwc3_core_get_phy(struct dwc3 *dwc);
972 static int dwc3_core_ulpi_init(struct dwc3 *dwc);
973
974 /* set global incr burst type configuration registers */
975 static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
976 {
977 struct device *dev = dwc->dev;
978 /* incrx_mode : for INCR burst type. */
979 bool incrx_mode;
980 /* incrx_size : for size of INCRX burst. */
981 u32 incrx_size;
982 u32 *vals;
983 u32 cfg;
984 int ntype;
985 int ret;
986 int i;
987
988 cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
989
990 /*
991 * Handle property "snps,incr-burst-type-adjustment".
992 * Get the number of values in this property:
993 * result <= 0: the property is not present or not supported.
994 * result = 1: INCRx burst mode is supported.
995 * result > 1: undefined length burst mode is supported.
996 */
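/*
 * Illustrative device tree values:
 *   snps,incr-burst-type-adjustment = <1>;          -> fixed INCR1 bursts only
 *   snps,incr-burst-type-adjustment = <1 4 8 16>;   -> undefined length
 *                                                      bursts, up to INCR16
 */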
997 ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
998 if (ntype <= 0)
999 return;
1000
1001 vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
1002 if (!vals)
1003 return;
1004
1005 /* Get INCR burst type, and parse it */
1006 ret = device_property_read_u32_array(dev,
1007 "snps,incr-burst-type-adjustment", vals, ntype);
1008 if (ret) {
1009 kfree(vals);
1010 dev_err(dev, "Error reading property\n");
1011 return;
1012 }
1013
1014 incrx_size = *vals;
1015
1016 if (ntype > 1) {
1017 /* INCRX (undefined length) burst mode */
1018 incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
1019 for (i = 1; i < ntype; i++) {
1020 if (vals[i] > incrx_size)
1021 incrx_size = vals[i];
1022 }
1023 } else {
1024 /* INCRX burst mode */
1025 incrx_mode = INCRX_BURST_MODE;
1026 }
1027
1028 kfree(vals);
1029
1030 /* Enable Undefined Length INCR Burst and Enable INCRx Burst */
1031 cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
1032 if (incrx_mode)
1033 cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
1034 switch (incrx_size) {
1035 case 256:
1036 cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
1037 break;
1038 case 128:
1039 cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
1040 break;
1041 case 64:
1042 cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
1043 break;
1044 case 32:
1045 cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
1046 break;
1047 case 16:
1048 cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
1049 break;
1050 case 8:
1051 cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
1052 break;
1053 case 4:
1054 cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
1055 break;
1056 case 1:
1057 break;
1058 default:
1059 dev_err(dev, "Invalid property\n");
1060 break;
1061 }
1062
1063 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
1064 }
1065
1066 static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
1067 {
1068 u32 scale;
1069 u32 reg;
1070
1071 if (!dwc->susp_clk)
1072 return;
1073
1074 /*
1075 * The power down scale field specifies how many suspend_clk
1076 * periods fit into a 16KHz clock period. When performing
1077 * the division, round up the remainder.
1078 *
1079 * The power down scale value is calculated using the fastest
1080 * frequency of the suspend_clk. If it isn't fixed (but within
1081 * the accuracy requirement), the driver may not know the max
1082 * rate of the suspend_clk, so only update the power down scale
1083 * if the default is less than the calculated value from
1084 * clk_get_rate() or if the default is questionably high
1085 * (3x or more) to be within the requirement.
1086 */
1087 scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
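/*
 * For example (illustrative), a 32.768 kHz suspend clock gives
 * scale = DIV_ROUND_UP(32768, 16000) = 3.
 */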
1088 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1089 if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
1090 (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
1091 reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
1092 reg |= DWC3_GCTL_PWRDNSCALE(scale);
1093 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1094 }
1095 }
1096
1097 /**
1098 * dwc3_core_init - Low-level initialization of DWC3 Core
1099 * @dwc: Pointer to our controller context structure
1100 *
1101 * Returns 0 on success otherwise negative errno.
1102 */
1103 static int dwc3_core_init(struct dwc3 *dwc)
1104 {
1105 unsigned int hw_mode;
1106 u32 reg;
1107 int ret;
1108
1109 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
1110
1111 /*
1112 * Write Linux Version Code to our GUID register so it's easy to figure
1113 * out in which kernel version a bug was found.
1114 */
1115 dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
1116
1117 ret = dwc3_phy_setup(dwc);
1118 if (ret)
1119 return ret;
1120
1121 if (!dwc->ulpi_ready) {
1122 ret = dwc3_core_ulpi_init(dwc);
1123 if (ret) {
1124 if (ret == -ETIMEDOUT) {
1125 dwc3_core_soft_reset(dwc);
1126 ret = -EPROBE_DEFER;
1127 }
1128 return ret;
1129 }
1130 dwc->ulpi_ready = true;
1131 }
1132
1133 if (!dwc->phys_ready) {
1134 ret = dwc3_core_get_phy(dwc);
1135 if (ret)
1136 goto err_exit_ulpi;
1137 dwc->phys_ready = true;
1138 }
1139
1140 ret = dwc3_phy_init(dwc);
1141 if (ret)
1142 goto err_exit_ulpi;
1143
1144 ret = dwc3_core_soft_reset(dwc);
1145 if (ret)
1146 goto err_exit_phy;
1147
1148 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
1149 !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
1150 if (!dwc->dis_u3_susphy_quirk) {
1151 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1152 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1153 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1154 }
1155
1156 if (!dwc->dis_u2_susphy_quirk) {
1157 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1158 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1159 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1160 }
1161 }
1162
1163 dwc3_core_setup_global_control(dwc);
1164 dwc3_core_num_eps(dwc);
1165
1166 /* Set power down scale of suspend_clk */
1167 dwc3_set_power_down_clk_scale(dwc);
1168
1169 /* Adjust Frame Length */
1170 dwc3_frame_length_adjustment(dwc);
1171
1172 /* Adjust Reference Clock Period */
1173 dwc3_ref_clk_period(dwc);
1174
1175 dwc3_set_incr_burst_type(dwc);
1176
1177 ret = dwc3_phy_power_on(dwc);
1178 if (ret)
1179 goto err_exit_phy;
1180
1181 ret = dwc3_event_buffers_setup(dwc);
1182 if (ret) {
1183 dev_err(dwc->dev, "failed to setup event buffers\n");
1184 goto err_power_off_phy;
1185 }
1186
1187 /*
1188 * ENDXFER polling is available on version 3.10a and later of
1189 * the DWC_usb3 controller. It is NOT available in the
1190 * DWC_usb31 controller.
1191 */
1192 if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
1193 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
1194 reg |= DWC3_GUCTL2_RST_ACTBITLATER;
1195 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
1196 }
1197
1198 /*
1199 * When configured in HOST mode, after issuing U3/L2 exit controller
1200 * fails to send proper CRC checksum in the CRC5 field. Because of this
1201 * behaviour a Transaction Error is generated, resulting in reset and
1202 * re-enumeration of the attached usb device. All the termsel, xcvrsel,
1203 * opmode become 0 at the end of resume. Enabling bit 10 of GUCTL1
1204 * will correct this problem. This option is to support certain
1205 * legacy ULPI PHYs.
1206 */
1207 if (dwc->resume_hs_terminations) {
1208 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1209 reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
1210 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1211 }
1212
1213 if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
1214 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1215
1216 /*
1217 * Enable hardware control of sending remote wakeup
1218 * in HS when the device is in the L1 state.
1219 */
1220 if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
1221 reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
1222
1223 /*
1224 * Decouple USB 2.0 L1 & L2 events which will allow for
1225 * gadget driver to only receive U3/L2 suspend & wakeup
1226 * events and prevent the more frequent L1 LPM transitions
1227 * from interrupting the driver.
1228 */
1229 if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
1230 reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
1231
1232 if (dwc->dis_tx_ipgap_linecheck_quirk)
1233 reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
1234
1235 if (dwc->parkmode_disable_ss_quirk)
1236 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
1237
1238 if (dwc->parkmode_disable_hs_quirk)
1239 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;
1240
1241 if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
1242 (dwc->maximum_speed == USB_SPEED_HIGH ||
1243 dwc->maximum_speed == USB_SPEED_FULL))
1244 reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
1245
1246 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1247 }
1248
1249 /*
1250 * Must config both number of packets and max burst settings to enable
1251 * RX and/or TX threshold.
1252 */
1253 if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
1254 u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
1255 u8 rx_maxburst = dwc->rx_max_burst_prd;
1256 u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
1257 u8 tx_maxburst = dwc->tx_max_burst_prd;
1258
1259 if (rx_thr_num && rx_maxburst) {
1260 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1261 reg |= DWC31_RXTHRNUMPKTSEL_PRD;
1262
1263 reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
1264 reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
1265
1266 reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
1267 reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
1268
1269 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1270 }
1271
1272 if (tx_thr_num && tx_maxburst) {
1273 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1274 reg |= DWC31_TXTHRNUMPKTSEL_PRD;
1275
1276 reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
1277 reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
1278
1279 reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
1280 reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
1281
1282 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1283 }
1284 }
1285
1286 return 0;
1287
1288 err_power_off_phy:
1289 dwc3_phy_power_off(dwc);
1290 err_exit_phy:
1291 dwc3_phy_exit(dwc);
1292 err_exit_ulpi:
1293 dwc3_ulpi_exit(dwc);
1294
1295 return ret;
1296 }
1297
1298 static int dwc3_core_get_phy(struct dwc3 *dwc)
1299 {
1300 struct device *dev = dwc->dev;
1301 struct device_node *node = dev->of_node;
1302 int ret;
1303
1304 if (node) {
1305 dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
1306 dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
1307 } else {
1308 dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
1309 dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
1310 }
1311
1312 if (IS_ERR(dwc->usb2_phy)) {
1313 ret = PTR_ERR(dwc->usb2_phy);
1314 if (ret == -ENXIO || ret == -ENODEV)
1315 dwc->usb2_phy = NULL;
1316 else
1317 return dev_err_probe(dev, ret, "no usb2 phy configured\n");
1318 }
1319
1320 if (IS_ERR(dwc->usb3_phy)) {
1321 ret = PTR_ERR(dwc->usb3_phy);
1322 if (ret == -ENXIO || ret == -ENODEV)
1323 dwc->usb3_phy = NULL;
1324 else
1325 return dev_err_probe(dev, ret, "no usb3 phy configured\n");
1326 }
1327
1328 dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
1329 if (IS_ERR(dwc->usb2_generic_phy)) {
1330 ret = PTR_ERR(dwc->usb2_generic_phy);
1331 if (ret == -ENOSYS || ret == -ENODEV)
1332 dwc->usb2_generic_phy = NULL;
1333 else
1334 return dev_err_probe(dev, ret, "no usb2 phy configured\n");
1335 }
1336
1337 dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
1338 if (IS_ERR(dwc->usb3_generic_phy)) {
1339 ret = PTR_ERR(dwc->usb3_generic_phy);
1340 if (ret == -ENOSYS || ret == -ENODEV)
1341 dwc->usb3_generic_phy = NULL;
1342 else
1343 return dev_err_probe(dev, ret, "no usb3 phy configured\n");
1344 }
1345
1346 return 0;
1347 }
1348
1349 static int dwc3_core_init_mode(struct dwc3 *dwc)
1350 {
1351 struct device *dev = dwc->dev;
1352 int ret;
1353
1354 switch (dwc->dr_mode) {
1355 case USB_DR_MODE_PERIPHERAL:
1356 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
1357
1358 if (dwc->usb2_phy)
1359 otg_set_vbus(dwc->usb2_phy->otg, false);
1360 phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
1361 phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
1362
1363 ret = dwc3_gadget_init(dwc);
1364 if (ret)
1365 return dev_err_probe(dev, ret, "failed to initialize gadget\n");
1366 break;
1367 case USB_DR_MODE_HOST:
1368 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
1369
1370 if (dwc->usb2_phy)
1371 otg_set_vbus(dwc->usb2_phy->otg, true);
1372 phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
1373 phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
1374
1375 ret = dwc3_host_init(dwc);
1376 if (ret)
1377 return dev_err_probe(dev, ret, "failed to initialize host\n");
1378 break;
1379 case USB_DR_MODE_OTG:
1380 INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
1381 ret = dwc3_drd_init(dwc);
1382 if (ret)
1383 return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
1384 break;
1385 default:
1386 dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
1387 return -EINVAL;
1388 }
1389
1390 return 0;
1391 }
1392
1393 static void dwc3_core_exit_mode(struct dwc3 *dwc)
1394 {
1395 switch (dwc->dr_mode) {
1396 case USB_DR_MODE_PERIPHERAL:
1397 dwc3_gadget_exit(dwc);
1398 break;
1399 case USB_DR_MODE_HOST:
1400 dwc3_host_exit(dwc);
1401 break;
1402 case USB_DR_MODE_OTG:
1403 dwc3_drd_exit(dwc);
1404 break;
1405 default:
1406 /* do nothing */
1407 break;
1408 }
1409
1410 /* de-assert DRVVBUS for HOST and OTG mode */
1411 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
1412 }
1413
1414 static void dwc3_get_properties(struct dwc3 *dwc)
1415 {
1416 struct device *dev = dwc->dev;
1417 u8 lpm_nyet_threshold;
1418 u8 tx_de_emphasis;
1419 u8 hird_threshold;
1420 u8 rx_thr_num_pkt_prd = 0;
1421 u8 rx_max_burst_prd = 0;
1422 u8 tx_thr_num_pkt_prd = 0;
1423 u8 tx_max_burst_prd = 0;
1424 u8 tx_fifo_resize_max_num;
1425 const char *usb_psy_name;
1426 int ret;
1427
1428 /* default to highest possible threshold */
1429 lpm_nyet_threshold = 0xf;
1430
1431 /* default to -3.5dB de-emphasis */
1432 tx_de_emphasis = 1;
1433
1434 /*
1435 * default to assert utmi_sleep_n and use maximum allowed HIRD
1436 * threshold value of 0b1100
1437 */
1438 hird_threshold = 12;
1439
1440 /*
1441 * default to a TXFIFO size large enough to fit 6 max packets. This
1442 * allows for systems with larger bus latencies to have some headroom
1443 * for endpoints that have a large bMaxBurst value.
1444 */
1445 tx_fifo_resize_max_num = 6;
1446
1447 dwc->maximum_speed = usb_get_maximum_speed(dev);
1448 dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
1449 dwc->dr_mode = usb_get_dr_mode(dev);
1450 dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
1451
1452 dwc->sysdev_is_parent = device_property_read_bool(dev,
1453 "linux,sysdev_is_parent");
1454 if (dwc->sysdev_is_parent)
1455 dwc->sysdev = dwc->dev->parent;
1456 else
1457 dwc->sysdev = dwc->dev;
1458
1459 ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
1460 if (ret >= 0) {
1461 dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
1462 if (!dwc->usb_psy)
1463 dev_err(dev, "couldn't get usb power supply\n");
1464 }
1465
1466 dwc->has_lpm_erratum = device_property_read_bool(dev,
1467 "snps,has-lpm-erratum");
1468 device_property_read_u8(dev, "snps,lpm-nyet-threshold",
1469 &lpm_nyet_threshold);
1470 dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
1471 "snps,is-utmi-l1-suspend");
1472 device_property_read_u8(dev, "snps,hird-threshold",
1473 &hird_threshold);
1474 dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
1475 "snps,dis-start-transfer-quirk");
1476 dwc->usb3_lpm_capable = device_property_read_bool(dev,
1477 "snps,usb3_lpm_capable");
1478 dwc->usb2_lpm_disable = device_property_read_bool(dev,
1479 "snps,usb2-lpm-disable");
1480 dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
1481 "snps,usb2-gadget-lpm-disable");
1482 device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
1483 &rx_thr_num_pkt_prd);
1484 device_property_read_u8(dev, "snps,rx-max-burst-prd",
1485 &rx_max_burst_prd);
1486 device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
1487 &tx_thr_num_pkt_prd);
1488 device_property_read_u8(dev, "snps,tx-max-burst-prd",
1489 &tx_max_burst_prd);
1490 dwc->do_fifo_resize = device_property_read_bool(dev,
1491 "tx-fifo-resize");
1492 if (dwc->do_fifo_resize)
1493 device_property_read_u8(dev, "tx-fifo-max-num",
1494 &tx_fifo_resize_max_num);
1495
1496 dwc->disable_scramble_quirk = device_property_read_bool(dev,
1497 "snps,disable_scramble_quirk");
1498 dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
1499 "snps,u2exit_lfps_quirk");
1500 dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
1501 "snps,u2ss_inp3_quirk");
1502 dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
1503 "snps,req_p1p2p3_quirk");
1504 dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
1505 "snps,del_p1p2p3_quirk");
1506 dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
1507 "snps,del_phy_power_chg_quirk");
1508 dwc->lfps_filter_quirk = device_property_read_bool(dev,
1509 "snps,lfps_filter_quirk");
1510 dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
1511 "snps,rx_detect_poll_quirk");
1512 dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
1513 "snps,dis_u3_susphy_quirk");
1514 dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
1515 "snps,dis_u2_susphy_quirk");
1516 dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
1517 "snps,dis_enblslpm_quirk");
1518 dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
1519 "snps,dis-u1-entry-quirk");
1520 dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
1521 "snps,dis-u2-entry-quirk");
1522 dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
1523 "snps,dis_rxdet_inp3_quirk");
1524 dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
1525 "snps,dis-u2-freeclk-exists-quirk");
1526 dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
1527 "snps,dis-del-phy-power-chg-quirk");
1528 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
1529 "snps,dis-tx-ipgap-linecheck-quirk");
1530 dwc->resume_hs_terminations = device_property_read_bool(dev,
1531 "snps,resume-hs-terminations");
1532 dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
1533 "snps,ulpi-ext-vbus-drv");
1534 dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
1535 "snps,parkmode-disable-ss-quirk");
1536 dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
1537 "snps,parkmode-disable-hs-quirk");
1538 dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
1539 "snps,gfladj-refclk-lpm-sel-quirk");
1540
1541 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
1542 "snps,tx_de_emphasis_quirk");
1543 device_property_read_u8(dev, "snps,tx_de_emphasis",
1544 &tx_de_emphasis);
1545 device_property_read_string(dev, "snps,hsphy_interface",
1546 &dwc->hsphy_interface);
1547 device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
1548 &dwc->fladj);
1549 device_property_read_u32(dev, "snps,ref-clock-period-ns",
1550 &dwc->ref_clk_per);
1551
1552 dwc->dis_metastability_quirk = device_property_read_bool(dev,
1553 "snps,dis_metastability_quirk");
1554
1555 dwc->dis_split_quirk = device_property_read_bool(dev,
1556 "snps,dis-split-quirk");
1557
1558 dwc->lpm_nyet_threshold = lpm_nyet_threshold;
1559 dwc->tx_de_emphasis = tx_de_emphasis;
1560
1561 dwc->hird_threshold = hird_threshold;
1562
1563 dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
1564 dwc->rx_max_burst_prd = rx_max_burst_prd;
1565
1566 dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
1567 dwc->tx_max_burst_prd = tx_max_burst_prd;
1568
1569 dwc->imod_interval = 0;
1570
1571 dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
1572 }
1573
1574 /* check whether the core supports IMOD */
1575 bool dwc3_has_imod(struct dwc3 *dwc)
1576 {
1577 return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
1578 DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
1579 DWC3_IP_IS(DWC32);
1580 }
1581
1582 static void dwc3_check_params(struct dwc3 *dwc)
1583 {
1584 struct device *dev = dwc->dev;
1585 unsigned int hwparam_gen =
1586 DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
1587
1588 /* Check for proper value of imod_interval */
1589 if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
1590 dev_warn(dwc->dev, "Interrupt moderation not supported\n");
1591 dwc->imod_interval = 0;
1592 }
1593
1594 /*
1595 * Workaround for STAR 9000961433 which affects only version
1596 * 3.00a of the DWC_usb3 core. This prevents the controller
1597 * interrupt from being masked while handling events. IMOD
1598 * allows us to work around this issue. Enable it for the
1599 * affected version.
1600 */
1601 if (!dwc->imod_interval &&
1602 DWC3_VER_IS(DWC3, 300A))
1603 dwc->imod_interval = 1;
1604
1605 /* Check the maximum_speed parameter */
1606 switch (dwc->maximum_speed) {
1607 case USB_SPEED_FULL:
1608 case USB_SPEED_HIGH:
1609 break;
1610 case USB_SPEED_SUPER:
1611 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
1612 dev_warn(dev, "UDC doesn't support Gen 1\n");
1613 break;
1614 case USB_SPEED_SUPER_PLUS:
1615 if ((DWC3_IP_IS(DWC32) &&
1616 hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
1617 (!DWC3_IP_IS(DWC32) &&
1618 hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
1619 dev_warn(dev, "UDC doesn't support SSP\n");
1620 break;
1621 default:
1622 dev_err(dev, "invalid maximum_speed parameter %d\n",
1623 dwc->maximum_speed);
1624 fallthrough;
1625 case USB_SPEED_UNKNOWN:
1626 switch (hwparam_gen) {
1627 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1628 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1629 break;
1630 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1631 if (DWC3_IP_IS(DWC32))
1632 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1633 else
1634 dwc->maximum_speed = USB_SPEED_SUPER;
1635 break;
1636 case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
1637 dwc->maximum_speed = USB_SPEED_HIGH;
1638 break;
1639 default:
1640 dwc->maximum_speed = USB_SPEED_SUPER;
1641 break;
1642 }
1643 break;
1644 }
1645
1646 /*
1647 * Currently the controller does not have visibility into the HW
1648 * parameter to determine the maximum number of lanes the HW supports.
1649 * If the number of lanes is not specified in the device property, then
1650 * set the default to support dual-lane for DWC_usb32 and single-lane
1651 * for DWC_usb31 for super-speed-plus.
1652 */
1653 if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
1654 switch (dwc->max_ssp_rate) {
1655 case USB_SSP_GEN_2x1:
1656 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
1657 dev_warn(dev, "UDC only supports Gen 1\n");
1658 break;
1659 case USB_SSP_GEN_1x2:
1660 case USB_SSP_GEN_2x2:
1661 if (DWC3_IP_IS(DWC31))
1662 dev_warn(dev, "UDC only supports single lane\n");
1663 break;
1664 case USB_SSP_GEN_UNKNOWN:
1665 default:
1666 switch (hwparam_gen) {
1667 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1668 if (DWC3_IP_IS(DWC32))
1669 dwc->max_ssp_rate = USB_SSP_GEN_2x2;
1670 else
1671 dwc->max_ssp_rate = USB_SSP_GEN_2x1;
1672 break;
1673 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1674 if (DWC3_IP_IS(DWC32))
1675 dwc->max_ssp_rate = USB_SSP_GEN_1x2;
1676 break;
1677 }
1678 break;
1679 }
1680 }
1681 }
1682
1683 static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
1684 {
1685 struct device *dev = dwc->dev;
1686 struct device_node *np_phy;
1687 struct extcon_dev *edev = NULL;
1688 const char *name;
1689
1690 if (device_property_read_bool(dev, "extcon"))
1691 return extcon_get_edev_by_phandle(dev, 0);
1692
1693 /*
1694 * Device tree platforms should get extcon via phandle.
1695 * On ACPI platforms, we get the name from a device property.
1696 * This device property is for kernel internal use only and
1697 * is expected to be set by the glue code.
1698 */
1699 if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
1700 return extcon_get_extcon_dev(name);
1701
1702 /*
1703 * Check explicitly if "usb-role-switch" is used since
1704 * extcon_find_edev_by_node() can not be used to check the absence of
1705 * an extcon device. In the absence of a device it will always return
1706 * EPROBE_DEFER.
1707 */
1708 if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
1709 device_property_read_bool(dev, "usb-role-switch"))
1710 return NULL;
1711
1712 /*
1713 * Try to get an extcon device from the USB PHY controller's "port"
1714 * node. Check if it has the "port" node first, to avoid printing the
1715 * error message from underlying code, as it's a valid case: extcon
1716 * device (and "port" node) may be missing in case of "usb-role-switch"
1717 * or OTG mode.
1718 */
1719 np_phy = of_parse_phandle(dev->of_node, "phys", 0);
1720 if (of_graph_is_present(np_phy)) {
1721 struct device_node *np_conn;
1722
1723 np_conn = of_graph_get_remote_node(np_phy, -1, -1);
1724 if (np_conn)
1725 edev = extcon_find_edev_by_node(np_conn);
1726 of_node_put(np_conn);
1727 }
1728 of_node_put(np_phy);
1729
1730 return edev;
1731 }
1732
1733 static int dwc3_get_clocks(struct dwc3 *dwc)
1734 {
1735 struct device *dev = dwc->dev;
1736
1737 if (!dev->of_node)
1738 return 0;
1739
1740 /*
1741 * Clocks are optional, but new DT platforms should support all clocks
1742 * as required by the DT-binding.
1743 * Some devices have different clock names in legacy device trees,
1744 * check for them to retain backwards compatibility.
1745 */
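/*
 * Illustrative binding usage (phandles are hypothetical):
 *   clocks = <&clk_bus>, <&clk_ref>, <&clk_suspend>;
 *   clock-names = "bus_early", "ref", "suspend";
 */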
1746 dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
1747 if (IS_ERR(dwc->bus_clk)) {
1748 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
1749 "could not get bus clock\n");
1750 }
1751
1752 if (dwc->bus_clk == NULL) {
1753 dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
1754 if (IS_ERR(dwc->bus_clk)) {
1755 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
1756 "could not get bus clock\n");
1757 }
1758 }
1759
1760 dwc->ref_clk = devm_clk_get_optional(dev, "ref");
1761 if (IS_ERR(dwc->ref_clk)) {
1762 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
1763 "could not get ref clock\n");
1764 }
1765
1766 if (dwc->ref_clk == NULL) {
1767 dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
1768 if (IS_ERR(dwc->ref_clk)) {
1769 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
1770 "could not get ref clock\n");
1771 }
1772 }
1773
1774 dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
1775 if (IS_ERR(dwc->susp_clk)) {
1776 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
1777 "could not get suspend clock\n");
1778 }
1779
1780 if (dwc->susp_clk == NULL) {
1781 dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
1782 if (IS_ERR(dwc->susp_clk)) {
1783 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
1784 "could not get suspend clock\n");
1785 }
1786 }
1787
1788 return 0;
1789 }
1790
1791 static int dwc3_probe(struct platform_device *pdev)
1792 {
1793 struct device *dev = &pdev->dev;
1794 struct resource *res, dwc_res;
1795 void __iomem *regs;
1796 struct dwc3 *dwc;
1797 int ret;
1798
1799 dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
1800 if (!dwc)
1801 return -ENOMEM;
1802
1803 dwc->dev = dev;
1804
1805 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1806 if (!res) {
1807 dev_err(dev, "missing memory resource\n");
1808 return -ENODEV;
1809 }
1810
1811 dwc->xhci_resources[0].start = res->start;
1812 dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
1813 DWC3_XHCI_REGS_END;
1814 dwc->xhci_resources[0].flags = res->flags;
1815 dwc->xhci_resources[0].name = res->name;
1816
1817 /*
1818 * Request memory region but exclude xHCI regs,
1819 * since it will be requested by the xhci-plat driver.
1820 */
1821 dwc_res = *res;
1822 dwc_res.start += DWC3_GLOBALS_REGS_START;
1823
1824 if (dev->of_node) {
1825 struct device_node *parent = of_get_parent(dev->of_node);
1826
1827 if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
1828 dwc_res.start -= DWC3_GLOBALS_REGS_START;
1829 dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
1830 }
1831
1832 of_node_put(parent);
1833 }
1834
1835 regs = devm_ioremap_resource(dev, &dwc_res);
1836 if (IS_ERR(regs))
1837 return PTR_ERR(regs);
1838
1839 dwc->regs = regs;
1840 dwc->regs_size = resource_size(&dwc_res);
1841
1842 dwc3_get_properties(dwc);
1843
1844 dwc->reset = devm_reset_control_array_get_optional_shared(dev);
1845 if (IS_ERR(dwc->reset)) {
1846 ret = PTR_ERR(dwc->reset);
1847 goto err_put_psy;
1848 }
1849
1850 ret = dwc3_get_clocks(dwc);
1851 if (ret)
1852 goto err_put_psy;
1853
1854 ret = reset_control_deassert(dwc->reset);
1855 if (ret)
1856 goto err_put_psy;
1857
1858 ret = dwc3_clk_enable(dwc);
1859 if (ret)
1860 goto err_assert_reset;
1861
1862 if (!dwc3_core_is_valid(dwc)) {
1863 dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
1864 ret = -ENODEV;
1865 goto err_disable_clks;
1866 }
1867
1868 platform_set_drvdata(pdev, dwc);
1869 dwc3_cache_hwparams(dwc);
1870
1871 if (!dwc->sysdev_is_parent &&
1872 DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
1873 ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
1874 if (ret)
1875 goto err_disable_clks;
1876 }
1877
1878 spin_lock_init(&dwc->lock);
1879 mutex_init(&dwc->mutex);
1880
1881 pm_runtime_get_noresume(dev);
1882 pm_runtime_set_active(dev);
1883 pm_runtime_use_autosuspend(dev);
1884 pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
1885 pm_runtime_enable(dev);
1886
1887 pm_runtime_forbid(dev);
1888
1889 ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
1890 if (ret) {
1891 dev_err(dwc->dev, "failed to allocate event buffers\n");
1892 ret = -ENOMEM;
1893 goto err_allow_rpm;
1894 }
1895
1896 dwc->edev = dwc3_get_extcon(dwc);
1897 if (IS_ERR(dwc->edev)) {
1898 ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
1899 goto err_free_event_buffers;
1900 }
1901
1902 ret = dwc3_get_dr_mode(dwc);
1903 if (ret)
1904 goto err_free_event_buffers;
1905
1906 ret = dwc3_core_init(dwc);
1907 if (ret) {
1908 dev_err_probe(dev, ret, "failed to initialize core\n");
1909 goto err_free_event_buffers;
1910 }
1911
1912 dwc3_check_params(dwc);
1913 dwc3_debugfs_init(dwc);
1914
1915 ret = dwc3_core_init_mode(dwc);
1916 if (ret)
1917 goto err_exit_debugfs;
1918
1919 pm_runtime_put(dev);
1920
1921 return 0;
1922
1923 err_exit_debugfs:
1924 dwc3_debugfs_exit(dwc);
1925 dwc3_event_buffers_cleanup(dwc);
1926 dwc3_phy_power_off(dwc);
1927 dwc3_phy_exit(dwc);
1928 dwc3_ulpi_exit(dwc);
1929 err_free_event_buffers:
1930 dwc3_free_event_buffers(dwc);
1931 err_allow_rpm:
1932 pm_runtime_allow(dev);
1933 pm_runtime_disable(dev);
1934 pm_runtime_dont_use_autosuspend(dev);
1935 pm_runtime_set_suspended(dev);
1936 pm_runtime_put_noidle(dev);
1937 err_disable_clks:
1938 dwc3_clk_disable(dwc);
1939 err_assert_reset:
1940 reset_control_assert(dwc->reset);
1941 err_put_psy:
1942 if (dwc->usb_psy)
1943 power_supply_put(dwc->usb_psy);
1944
1945 return ret;
1946 }
1947
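/**
 * dwc3_remove - Unbind the dwc3 core from a platform device
 * @pdev: the platform device being removed
 */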
1948 static void dwc3_remove(struct platform_device *pdev)
1949 {
1950 struct dwc3 *dwc = platform_get_drvdata(pdev);
1951
1952 pm_runtime_get_sync(&pdev->dev);
1953
1954 dwc3_core_exit_mode(dwc);
1955 dwc3_debugfs_exit(dwc);
1956
1957 dwc3_core_exit(dwc);
1958 dwc3_ulpi_exit(dwc);
1959
1960 pm_runtime_allow(&pdev->dev);
1961 pm_runtime_disable(&pdev->dev);
1962 pm_runtime_dont_use_autosuspend(&pdev->dev);
1963 pm_runtime_put_noidle(&pdev->dev);
1964 /*
1965 * HACK: Clear the driver data, which is currently accessed by parent
1966 * glue drivers, before allowing the parent to suspend.
1967 */
1968 platform_set_drvdata(pdev, NULL);
1969 pm_runtime_set_suspended(&pdev->dev);
1970
1971 dwc3_free_event_buffers(dwc);
1972
1973 if (dwc->usb_psy)
1974 power_supply_put(dwc->usb_psy);
1975 }
1976
1977 #ifdef CONFIG_PM
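/**
 * dwc3_core_init_for_resume - Re-initialize the controller on resume
 * @dwc: pointer to our context structure
 *
 * Deasserts the reset, re-enables the clocks and runs dwc3_core_init(),
 * undoing the earlier steps if a later one fails.
 */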
1978 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
1979 {
1980 int ret;
1981
1982 ret = reset_control_deassert(dwc->reset);
1983 if (ret)
1984 return ret;
1985
1986 ret = dwc3_clk_enable(dwc);
1987 if (ret)
1988 goto assert_reset;
1989
1990 ret = dwc3_core_init(dwc);
1991 if (ret)
1992 goto disable_clks;
1993
1994 return 0;
1995
1996 disable_clks:
1997 dwc3_clk_disable(dwc);
1998 assert_reset:
1999 reset_control_assert(dwc->reset);
2000
2001 return ret;
2002 }
2003
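/**
 * dwc3_suspend_common - Role-specific suspend handling
 * @dwc: pointer to our context structure
 * @msg: PM message, distinguishes runtime from system suspend
 */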
2004 static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
2005 {
2006 unsigned long flags;
2007 u32 reg;
2008
2009 switch (dwc->current_dr_role) {
2010 case DWC3_GCTL_PRTCAP_DEVICE:
2011 if (pm_runtime_suspended(dwc->dev))
2012 break;
2013 dwc3_gadget_suspend(dwc);
2014 synchronize_irq(dwc->irq_gadget);
2015 dwc3_core_exit(dwc);
2016 break;
2017 case DWC3_GCTL_PRTCAP_HOST:
2018 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2019 dwc3_core_exit(dwc);
2020 break;
2021 }
2022
2023 /* Let the controller suspend the HSPHY before the PHY driver suspends */
2024 if (dwc->dis_u2_susphy_quirk ||
2025 dwc->dis_enblslpm_quirk) {
2026 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2027 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
2028 DWC3_GUSB2PHYCFG_SUSPHY;
2029 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2030
2031 /* Give the USB2 PHY some time to suspend */
2032 usleep_range(5000, 6000);
2033 }
2034
2035 phy_pm_runtime_put_sync(dwc->usb2_generic_phy);
2036 phy_pm_runtime_put_sync(dwc->usb3_generic_phy);
2037 break;
2038 case DWC3_GCTL_PRTCAP_OTG:
2039 /* do nothing during runtime_suspend */
2040 if (PMSG_IS_AUTO(msg))
2041 break;
2042
2043 if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2044 spin_lock_irqsave(&dwc->lock, flags);
2045 dwc3_gadget_suspend(dwc);
2046 spin_unlock_irqrestore(&dwc->lock, flags);
2047 synchronize_irq(dwc->irq_gadget);
2048 }
2049
2050 dwc3_otg_exit(dwc);
2051 dwc3_core_exit(dwc);
2052 break;
2053 default:
2054 /* do nothing */
2055 break;
2056 }
2057
2058 return 0;
2059 }
2060
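/**
 * dwc3_resume_common - Role-specific resume handling
 * @dwc: pointer to our context structure
 * @msg: PM message, distinguishes runtime from system resume
 */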
2061 static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
2062 {
2063 unsigned long flags;
2064 int ret;
2065 u32 reg;
2066
2067 switch (dwc->current_dr_role) {
2068 case DWC3_GCTL_PRTCAP_DEVICE:
2069 ret = dwc3_core_init_for_resume(dwc);
2070 if (ret)
2071 return ret;
2072
2073 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
2074 dwc3_gadget_resume(dwc);
2075 break;
2076 case DWC3_GCTL_PRTCAP_HOST:
2077 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2078 ret = dwc3_core_init_for_resume(dwc);
2079 if (ret)
2080 return ret;
2081 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
2082 break;
2083 }
2084 /* Restore GUSB2PHYCFG bits that were modified in suspend */
2085 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2086 if (dwc->dis_u2_susphy_quirk)
2087 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
2088
2089 if (dwc->dis_enblslpm_quirk)
2090 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
2091
2092 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2093
2094 phy_pm_runtime_get_sync(dwc->usb2_generic_phy);
2095 phy_pm_runtime_get_sync(dwc->usb3_generic_phy);
2096 break;
2097 case DWC3_GCTL_PRTCAP_OTG:
2098 /* nothing to do on runtime_resume */
2099 if (PMSG_IS_AUTO(msg))
2100 break;
2101
2102 ret = dwc3_core_init_for_resume(dwc);
2103 if (ret)
2104 return ret;
2105
2106 dwc3_set_prtcap(dwc, dwc->current_dr_role);
2107
2108 dwc3_otg_init(dwc);
2109 if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
2110 dwc3_otg_host_init(dwc);
2111 } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2112 spin_lock_irqsave(&dwc->lock, flags);
2113 dwc3_gadget_resume(dwc);
2114 spin_unlock_irqrestore(&dwc->lock, flags);
2115 }
2116
2117 break;
2118 default:
2119 /* do nothing */
2120 break;
2121 }
2122
2123 return 0;
2124 }
2125
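/**
 * dwc3_runtime_checks - Check whether runtime suspend is currently allowed
 * @dwc: pointer to our context structure
 *
 * Returns -EBUSY while the gadget is connected, 0 otherwise.
 */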
2126 static int dwc3_runtime_checks(struct dwc3 *dwc)
2127 {
2128 switch (dwc->current_dr_role) {
2129 case DWC3_GCTL_PRTCAP_DEVICE:
2130 if (dwc->connected)
2131 return -EBUSY;
2132 break;
2133 case DWC3_GCTL_PRTCAP_HOST:
2134 default:
2135 /* do nothing */
2136 break;
2137 }
2138
2139 return 0;
2140 }
2141
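/**
 * dwc3_runtime_suspend - Runtime PM suspend callback
 * @dev: the dwc3 device
 */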
2142 static int dwc3_runtime_suspend(struct device *dev)
2143 {
2144 struct dwc3 *dwc = dev_get_drvdata(dev);
2145 int ret;
2146
2147 if (dwc3_runtime_checks(dwc))
2148 return -EBUSY;
2149
2150 ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
2151 if (ret)
2152 return ret;
2153
2154 return 0;
2155 }
2156
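/**
 * dwc3_runtime_resume - Runtime PM resume callback
 * @dev: the dwc3 device
 */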
2157 static int dwc3_runtime_resume(struct device *dev)
2158 {
2159 struct dwc3 *dwc = dev_get_drvdata(dev);
2160 int ret;
2161
2162 ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
2163 if (ret)
2164 return ret;
2165
2166 switch (dwc->current_dr_role) {
2167 case DWC3_GCTL_PRTCAP_DEVICE:
2168 dwc3_gadget_process_pending_events(dwc);
2169 break;
2170 case DWC3_GCTL_PRTCAP_HOST:
2171 default:
2172 /* do nothing */
2173 break;
2174 }
2175
2176 pm_runtime_mark_last_busy(dev);
2177
2178 return 0;
2179 }
2180
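/**
 * dwc3_runtime_idle - Runtime PM idle callback
 * @dev: the dwc3 device
 */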
2181 static int dwc3_runtime_idle(struct device *dev)
2182 {
2183 struct dwc3 *dwc = dev_get_drvdata(dev);
2184
2185 switch (dwc->current_dr_role) {
2186 case DWC3_GCTL_PRTCAP_DEVICE:
2187 if (dwc3_runtime_checks(dwc))
2188 return -EBUSY;
2189 break;
2190 case DWC3_GCTL_PRTCAP_HOST:
2191 default:
2192 /* do nothing */
2193 break;
2194 }
2195
2196 pm_runtime_mark_last_busy(dev);
2197 pm_runtime_autosuspend(dev);
2198
2199 return 0;
2200 }
2201 #endif /* CONFIG_PM */
2202
2203 #ifdef CONFIG_PM_SLEEP
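/**
 * dwc3_suspend - System sleep suspend callback
 * @dev: the dwc3 device
 */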
2204 static int dwc3_suspend(struct device *dev)
2205 {
2206 struct dwc3 *dwc = dev_get_drvdata(dev);
2207 int ret;
2208
2209 ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
2210 if (ret)
2211 return ret;
2212
2213 pinctrl_pm_select_sleep_state(dev);
2214
2215 return 0;
2216 }
2217
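/**
 * dwc3_resume - System sleep resume callback
 * @dev: the dwc3 device
 */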
2218 static int dwc3_resume(struct device *dev)
2219 {
2220 struct dwc3 *dwc = dev_get_drvdata(dev);
2221 int ret;
2222
2223 pinctrl_pm_select_default_state(dev);
2224
2225 ret = dwc3_resume_common(dwc, PMSG_RESUME);
2226 if (ret)
2227 return ret;
2228
2229 pm_runtime_disable(dev);
2230 pm_runtime_set_active(dev);
2231 pm_runtime_enable(dev);
2232
2233 return 0;
2234 }
2235
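/**
 * dwc3_complete - Late system resume fixup
 * @dev: the dwc3 device
 *
 * Re-applies the split transaction disable quirk when resuming in host
 * mode with the dis_split_quirk property set.
 */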
2236 static void dwc3_complete(struct device *dev)
2237 {
2238 struct dwc3 *dwc = dev_get_drvdata(dev);
2239 u32 reg;
2240
2241 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
2242 dwc->dis_split_quirk) {
2243 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
2244 reg |= DWC3_GUCTL3_SPLITDISABLE;
2245 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
2246 }
2247 }
2248 #else
2249 #define dwc3_complete NULL
2250 #endif /* CONFIG_PM_SLEEP */
2251
2252 static const struct dev_pm_ops dwc3_dev_pm_ops = {
2253 SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
2254 .complete = dwc3_complete,
2255 SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
2256 dwc3_runtime_idle)
2257 };
2258
2259 #ifdef CONFIG_OF
2260 static const struct of_device_id of_dwc3_match[] = {
2261 {
2262 .compatible = "snps,dwc3"
2263 },
2264 {
2265 .compatible = "synopsys,dwc3"
2266 },
2267 { },
2268 };
2269 MODULE_DEVICE_TABLE(of, of_dwc3_match);
2270 #endif
2271
2272 #ifdef CONFIG_ACPI
2273
2274 #define ACPI_ID_INTEL_BSW "808622B7"
2275
2276 static const struct acpi_device_id dwc3_acpi_match[] = {
2277 { ACPI_ID_INTEL_BSW, 0 },
2278 { },
2279 };
2280 MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
2281 #endif
2282
2283 static struct platform_driver dwc3_driver = {
2284 .probe = dwc3_probe,
2285 .remove_new = dwc3_remove,
2286 .driver = {
2287 .name = "dwc3",
2288 .of_match_table = of_match_ptr(of_dwc3_match),
2289 .acpi_match_table = ACPI_PTR(dwc3_acpi_match),
2290 .pm = &dwc3_dev_pm_ops,
2291 },
2292 };
2293
2294 module_platform_driver(dwc3_driver);
2295
2296 MODULE_ALIAS("platform:dwc3");
2297 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
2298 MODULE_LICENSE("GPL v2");
2299 MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
2300