// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

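/* Per-port wake bits: wake on over-current, disconnect, and connect enable */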
#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
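 *
 * Example (as used in xhci_halt() below), waiting up to XHCI_MAX_HALT_USEC
 * for the HCHalted status bit to be set:
 *   xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
 *                  XHCI_MAX_HALT_USEC);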
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, usec);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

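	/* Mask off all interrupt enables; also clear Run/Stop unless the HC
	 * is already halted.
	 */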
	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret)
		/* Clear state flags, including dying, halted, and removing */
		xhci->xhc_state = 0;

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access.  Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			 "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int err, i;
	u64 val;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			STS_FATAL, STS_FATAL,
			XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

#ifdef CONFIG_USB_PCI
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	/*
	 * TODO: Check with MSI SoC for sysdev
	 */
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with the max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
	 *   additional vector to ensure an interrupt is always available.
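	 *   (e.g. with 4 online CPUs this requests min(5, HCS_MAX_INTRS)
	 *   vectors)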
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
			PCI_IRQ_MSIX);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		return ret;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
				"xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	pci_free_irq_vectors(pdev);
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	/* return if using legacy interrupt */
	if (hcd->irq > 0)
		return;

	if (hcd->msix_enabled) {
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	} else {
		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
	}

	pci_free_irq_vectors(pdev);
	hcd->msix_enabled = 0;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	if (hcd->msix_enabled) {
		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(pci_irq_vector(pdev, i));
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret) {
		hcd->msi_enabled = 1;
		return 0;
	}

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes causes ports behind it to enter compliance mode. The quirk
 * creates a timer that polls the link state of each host controller port
 * every 2 seconds and recovers a port by issuing a Warm Reset if compliance
 * mode is detected; otherwise the port would become "dead" (no device
 * connections or disconnections would be detected anymore). Because no status
 * event is generated when entering compliance mode (per the xhci spec), this
 * quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");

	xhci_dbc_init(xhci);

	xhci_debugfs_init(xhci);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
static void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_dbc_exit(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
static void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't properly save the command ring pointer across suspend, so
 * we need to re-program it on resume.  Unfortunately, the pointer must be
 * 64-byte aligned, because of the reserved bits in the command ring dequeue
 * pointer register.  Therefore, we can't just set the dequeue pointer back in
 * the middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	unsigned long flags;
	u32 t1, t2, portsc;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable usb3 ports Wake bits */
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		t1 = readl(ports[port_index]->addr);
		portsc = t1;
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2) {
			writel(t2, ports[port_index]->addr);
			xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
				 xhci->usb3_rhub.hcd->self.busnum,
				 port_index + 1, portsc, t2);
		}
	}

	/* disable usb2 ports Wake bits */
	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		t1 = readl(ports[port_index]->addr);
		portsc = t1;
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2) {
			writel(t2, ports[port_index]->addr);
			xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
				 xhci->usb2_rhub.hcd->self.busnum,
				 port_index + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	xhci_dbc_suspend(xhci);

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoints */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
		      STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
				STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS; the driver polls for BIT(8) to
		 * clear, which never happens, so it assumes that the
		 * controller is not responding and times out. To work
		 * around this, check that the SRE and HCE bits are not
		 * set (as per xhci Section 5.4.2) and bypass the
		 * timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
				((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 *
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */

	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
		hibernated = true;

	if (!hibernated) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the
		 * controller restore, so set the timeout to 100 ms. The xHCI
		 * specification doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
			      STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

done:
	if (retval == 0) {
		/* Resume root hubs only when have pending events. */
		if (xhci_pending_portevent(xhci)) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * always needs to be re-initialized after a system resume, since the
	 * ports may suffer the compliance mode issue again. It doesn't matter
	 * whether the ports entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/*
 * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT):
 * we'll copy the actual data into the TRB address register instead. This is
 * limited to transfers up to 8 bytes on output endpoints of any kind with
 * wMaxPacketSize >= 8 bytes. If suitable for IDT, only one Transfer TRB per
 * TD is allowed.
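 *
 * For example, a small OUT transfer such as an 8-byte HID output report can
 * be placed directly in the TRB this way, skipping the DMA mapping entirely.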
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
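 *
 * Worked examples: ep 2 OUT (0x02) -> (2 * 2) + 0 - 1 = 3;
 * ep 2 IN (0x82) -> (2 * 2) + 1 - 1 = 4; ep 0 (control) -> 0.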
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
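 * For example, ep_index 2 -> number = DIV_ROUND_UP(2, 2) = 1 and, since
 * 2 % 2 == 0, direction = USB_DIR_IN, giving endpoint address 0x81 (ep 1 IN).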
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					  "virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv *urb_priv;
	int num_tds;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		return -ESHUTDOWN;
	}
	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
		return -ENODEV;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;

	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}
	}

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			 urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}
	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
			  *ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	/*
	 * Check that the ring has not been re-allocated since the URB was
	 * enqueued. If it has, make sure none of the ring-related pointers in
	 * this URB private data are touched, such as td_list, otherwise we
	 * would overwrite freed data.
	 */
1648 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1649 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1650 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1651 td = &urb_priv->td[i];
1652 if (!list_empty(&td->cancelled_td_list))
1653 list_del_init(&td->cancelled_td_list);
1654 }
1655 goto err_giveback;
1656 }
1657
1658 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1659 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1660 "HC halted, freeing TD manually.");
1661 for (i = urb_priv->num_tds_done;
1662 i < urb_priv->num_tds;
1663 i++) {
1664 td = &urb_priv->td[i];
1665 if (!list_empty(&td->td_list))
1666 list_del_init(&td->td_list);
1667 if (!list_empty(&td->cancelled_td_list))
1668 list_del_init(&td->cancelled_td_list);
1669 }
1670 goto err_giveback;
1671 }
1672
1673 i = urb_priv->num_tds_done;
1674 if (i < urb_priv->num_tds)
1675 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1676 "Cancel URB %p, dev %s, ep 0x%x, "
1677 "starting at offset 0x%llx",
1678 urb, urb->dev->devpath,
1679 urb->ep->desc.bEndpointAddress,
1680 (unsigned long long) xhci_trb_virt_to_dma(
1681 urb_priv->td[i].start_seg,
1682 urb_priv->td[i].first_trb));
1683
1684 for (; i < urb_priv->num_tds; i++) {
1685 td = &urb_priv->td[i];
1686 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1687 }
1688
1689 /* Queue a stop endpoint command, but only if this is
1690 * the first cancellation to be handled.
1691 */
1692 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1693 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1694 if (!command) {
1695 ret = -ENOMEM;
1696 goto done;
1697 }
1698 ep->ep_state |= EP_STOP_CMD_PENDING;
1699 ep->stop_cmd_timer.expires = jiffies +
1700 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1701 add_timer(&ep->stop_cmd_timer);
1702 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1703 ep_index, 0);
1704 xhci_ring_cmd_db(xhci);
1705 }
1706 done:
1707 spin_unlock_irqrestore(&xhci->lock, flags);
1708 return ret;
1709
1710 err_giveback:
1711 if (urb_priv)
1712 xhci_urb_free_priv(urb_priv);
1713 usb_hcd_unlink_urb_from_ep(hcd, urb);
1714 spin_unlock_irqrestore(&xhci->lock, flags);
1715 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1716 return ret;
1717 }
1718
1719 /* Drop an endpoint from a new bandwidth configuration for this device.
1720 * Only one call to this function is allowed per endpoint before
1721 * check_bandwidth() or reset_bandwidth() must be called.
1722 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1723 * add the endpoint to the schedule with possibly new parameters denoted by a
1724 * different endpoint descriptor in usb_host_endpoint.
1725 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1726 * not allowed.
1727 *
1728 * The USB core will not allow URBs to be queued to an endpoint that is being
1729 * disabled, so there's no need for mutual exclusion to protect
1730 * the xhci->devs[slot_id] structure.
1731 */
1732 static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1733 struct usb_host_endpoint *ep)
1734 {
1735 struct xhci_hcd *xhci;
1736 struct xhci_container_ctx *in_ctx, *out_ctx;
1737 struct xhci_input_control_ctx *ctrl_ctx;
1738 unsigned int ep_index;
1739 struct xhci_ep_ctx *ep_ctx;
1740 u32 drop_flag;
1741 u32 new_add_flags, new_drop_flags;
1742 int ret;
1743
1744 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1745 if (ret <= 0)
1746 return ret;
1747 xhci = hcd_to_xhci(hcd);
1748 if (xhci->xhc_state & XHCI_STATE_DYING)
1749 return -ENODEV;
1750
1751 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1752 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1753 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1754 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1755 __func__, drop_flag);
1756 return 0;
1757 }
1758
1759 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1760 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1761 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1762 if (!ctrl_ctx) {
1763 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1764 __func__);
1765 return 0;
1766 }
1767
1768 ep_index = xhci_get_endpoint_index(&ep->desc);
1769 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1770 /* If the HC already knows the endpoint is disabled,
1771 * or the HCD has noted it is disabled, ignore this request
1772 */
1773 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1774 le32_to_cpu(ctrl_ctx->drop_flags) &
1775 xhci_get_endpoint_flag(&ep->desc)) {
1776 /* Do not warn when called after a usb_device_reset */
1777 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1778 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1779 __func__, ep);
1780 return 0;
1781 }
1782
1783 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1784 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1785
1786 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1787 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1788
1789 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1790
1791 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1792
1793 if (xhci->quirks & XHCI_MTK_HOST)
1794 xhci_mtk_drop_ep_quirk(hcd, udev, ep);
1795
1796 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1797 (unsigned int) ep->desc.bEndpointAddress,
1798 udev->slot_id,
1799 (unsigned int) new_drop_flags,
1800 (unsigned int) new_add_flags);
1801 return 0;
1802 }
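
/*
 * Worked example of the context-flag math above (a sketch, assuming the
 * usual xHCI DCI layout, where xhci_get_endpoint_index() maps a
 * non-control endpoint to (ep_num * 2) + (dir_in ? 1 : 0) - 1 and
 * xhci_get_endpoint_flag() returns BIT(ep_index + 1)):
 *
 *   bulk IN  0x81 -> ep_index 2 -> drop_flag = BIT(3) = 0x8
 *   bulk OUT 0x02 -> ep_index 3 -> drop_flag = BIT(4) = 0x10
 *
 * Dropping both would leave ctrl_ctx->drop_flags = 0x18, with the same
 * bits cleared from add_flags, which is what the debug print above shows.
 */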
1803
1804 /* Add an endpoint to a new possible bandwidth configuration for this device.
1805 * Only one call to this function is allowed per endpoint before
1806 * check_bandwidth() or reset_bandwidth() must be called.
1807 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1808 * add the endpoint to the schedule with possibly new parameters denoted by a
1809 * different endpoint descriptor in usb_host_endpoint.
1810 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1811 * not allowed.
1812 *
1813 * The USB core will not allow URBs to be queued to an endpoint until the
1814 * configuration or alt setting is installed in the device, so there's no need
1815 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1816 */
1817 static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1818 struct usb_host_endpoint *ep)
1819 {
1820 struct xhci_hcd *xhci;
1821 struct xhci_container_ctx *in_ctx;
1822 unsigned int ep_index;
1823 struct xhci_input_control_ctx *ctrl_ctx;
1824 struct xhci_ep_ctx *ep_ctx;
1825 u32 added_ctxs;
1826 u32 new_add_flags, new_drop_flags;
1827 struct xhci_virt_device *virt_dev;
1828 int ret = 0;
1829
1830 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1831 if (ret <= 0) {
1832 /* So we won't queue a reset ep command for a root hub */
1833 ep->hcpriv = NULL;
1834 return ret;
1835 }
1836 xhci = hcd_to_xhci(hcd);
1837 if (xhci->xhc_state & XHCI_STATE_DYING)
1838 return -ENODEV;
1839
1840 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1841 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1842 /* FIXME when we have to issue an evaluate endpoint command to
1843 * deal with ep0 max packet size changing once we get the
1844 * descriptors
1845 */
1846 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1847 __func__, added_ctxs);
1848 return 0;
1849 }
1850
1851 virt_dev = xhci->devs[udev->slot_id];
1852 in_ctx = virt_dev->in_ctx;
1853 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1854 if (!ctrl_ctx) {
1855 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1856 __func__);
1857 return 0;
1858 }
1859
1860 ep_index = xhci_get_endpoint_index(&ep->desc);
1861 /* If this endpoint is already in use, and the upper layers are trying
1862 * to add it again without dropping it, reject the addition.
1863 */
1864 if (virt_dev->eps[ep_index].ring &&
1865 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1866 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1867 "without dropping it.\n",
1868 (unsigned int) ep->desc.bEndpointAddress);
1869 return -EINVAL;
1870 }
1871
1872 /* If the HCD has already noted the endpoint is enabled,
1873 * ignore this request.
1874 */
1875 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1876 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1877 __func__, ep);
1878 return 0;
1879 }
1880
1881 /*
1882 * Configuration and alternate setting changes must be done in
1883 * process context, not interrupt context (or so the documentation
1884 * for usb_set_interface() and usb_set_configuration() claims).
1885 */
1886 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1887 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1888 __func__, ep->desc.bEndpointAddress);
1889 return -ENOMEM;
1890 }
1891
1892 if (xhci->quirks & XHCI_MTK_HOST) {
1893 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1894 if (ret < 0) {
1895 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1896 virt_dev->eps[ep_index].new_ring = NULL;
1897 return ret;
1898 }
1899 }
1900
1901 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1902 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1903
1904 /* If xhci_endpoint_disable() was called for this endpoint, but the
1905 * xHC hasn't been notified yet through the check_bandwidth() call,
1906 * this re-adds a new state for the endpoint from the new endpoint
1907 * descriptors. We must drop and re-add this endpoint, so we leave the
1908 * drop flags alone.
1909 */
1910 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1911
1912 /* Store the usb_device pointer for later use */
1913 ep->hcpriv = udev;
1914
1915 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1916 trace_xhci_add_endpoint(ep_ctx);
1917
1918 xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
1919
1920 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1921 (unsigned int) ep->desc.bEndpointAddress,
1922 udev->slot_id,
1923 (unsigned int) new_drop_flags,
1924 (unsigned int) new_add_flags);
1925 return 0;
1926 }
1927
1928 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1929 {
1930 struct xhci_input_control_ctx *ctrl_ctx;
1931 struct xhci_ep_ctx *ep_ctx;
1932 struct xhci_slot_ctx *slot_ctx;
1933 int i;
1934
1935 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1936 if (!ctrl_ctx) {
1937 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1938 __func__);
1939 return;
1940 }
1941
1942 /* When a device's add flag and drop flag are zero, any subsequent
1943 * configure endpoint command will leave that endpoint's state
1944 * untouched. Make sure we don't leave any old state in the input
1945 * endpoint contexts.
1946 */
1947 ctrl_ctx->drop_flags = 0;
1948 ctrl_ctx->add_flags = 0;
1949 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1950 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1951 /* Endpoint 0 is always valid */
1952 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1953 for (i = 1; i < 31; i++) {
1954 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1955 ep_ctx->ep_info = 0;
1956 ep_ctx->ep_info2 = 0;
1957 ep_ctx->deq = 0;
1958 ep_ctx->tx_info = 0;
1959 }
1960 }
1961
1962 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1963 struct usb_device *udev, u32 *cmd_status)
1964 {
1965 int ret;
1966
1967 switch (*cmd_status) {
1968 case COMP_COMMAND_ABORTED:
1969 case COMP_COMMAND_RING_STOPPED:
1970 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1971 ret = -ETIME;
1972 break;
1973 case COMP_RESOURCE_ERROR:
1974 dev_warn(&udev->dev,
1975 "Not enough host controller resources for new device state.\n");
1976 ret = -ENOMEM;
1977 /* FIXME: can we allocate more resources for the HC? */
1978 break;
1979 case COMP_BANDWIDTH_ERROR:
1980 case COMP_SECONDARY_BANDWIDTH_ERROR:
1981 dev_warn(&udev->dev,
1982 "Not enough bandwidth for new device state.\n");
1983 ret = -ENOSPC;
1984 /* FIXME: can we go back to the old state? */
1985 break;
1986 case COMP_TRB_ERROR:
1987 /* the HCD set up something wrong */
1988 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1989 "add flag = 1, "
1990 "and endpoint is not disabled.\n");
1991 ret = -EINVAL;
1992 break;
1993 case COMP_INCOMPATIBLE_DEVICE_ERROR:
1994 dev_warn(&udev->dev,
1995 "ERROR: Incompatible device for endpoint configure command.\n");
1996 ret = -ENODEV;
1997 break;
1998 case COMP_SUCCESS:
1999 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2000 "Successful Endpoint Configure command");
2001 ret = 0;
2002 break;
2003 default:
2004 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2005 *cmd_status);
2006 ret = -EINVAL;
2007 break;
2008 }
2009 return ret;
2010 }
2011
2012 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2013 struct usb_device *udev, u32 *cmd_status)
2014 {
2015 int ret;
2016
2017 switch (*cmd_status) {
2018 case COMP_COMMAND_ABORTED:
2019 case COMP_COMMAND_RING_STOPPED:
2020 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2021 ret = -ETIME;
2022 break;
2023 case COMP_PARAMETER_ERROR:
2024 dev_warn(&udev->dev,
2025 "WARN: xHCI driver setup invalid evaluate context command.\n");
2026 ret = -EINVAL;
2027 break;
2028 case COMP_SLOT_NOT_ENABLED_ERROR:
2029 dev_warn(&udev->dev,
2030 "WARN: slot not enabled for evaluate context command.\n");
2031 ret = -EINVAL;
2032 break;
2033 case COMP_CONTEXT_STATE_ERROR:
2034 dev_warn(&udev->dev,
2035 "WARN: invalid context state for evaluate context command.\n");
2036 ret = -EINVAL;
2037 break;
2038 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2039 dev_warn(&udev->dev,
2040 "ERROR: Incompatible device for evaluate context command.\n");
2041 ret = -ENODEV;
2042 break;
2043 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2044 /* Max Exit Latency too large error */
2045 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2046 ret = -EINVAL;
2047 break;
2048 case COMP_SUCCESS:
2049 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2050 "Successful evaluate context command");
2051 ret = 0;
2052 break;
2053 default:
2054 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2055 *cmd_status);
2056 ret = -EINVAL;
2057 break;
2058 }
2059 return ret;
2060 }
2061
2062 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2063 struct xhci_input_control_ctx *ctrl_ctx)
2064 {
2065 u32 valid_add_flags;
2066 u32 valid_drop_flags;
2067
2068 /* Ignore the slot flag (bit 0), and the default control endpoint flag
2069 * (bit 1). The default control endpoint is added during the Address
2070 * Device command and is never removed until the slot is disabled.
2071 */
2072 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2073 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2074
2075 /* Use hweight32 to count the number of ones in the add flags, or
2076 * number of endpoints added. Don't count endpoints that are changed
2077 * (both added and dropped).
2078 */
2079 return hweight32(valid_add_flags) -
2080 hweight32(valid_add_flags & valid_drop_flags);
2081 }
2082
2083 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2084 struct xhci_input_control_ctx *ctrl_ctx)
2085 {
2086 u32 valid_add_flags;
2087 u32 valid_drop_flags;
2088
2089 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2090 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2091
2092 return hweight32(valid_drop_flags) -
2093 hweight32(valid_add_flags & valid_drop_flags);
2094 }
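
/*
 * A short worked example for the two counting helpers above (flag values
 * invented for illustration): with add_flags = 0x1c (context bits 2..4
 * added) and drop_flags = 0x10 (the bit-4 endpoint also dropped, i.e.
 * changed), shifting out the slot and ep0 bits gives valid_add_flags = 0x7
 * and valid_drop_flags = 0x4, so:
 *
 *   new eps     = hweight32(0x7) - hweight32(0x7 & 0x4) = 3 - 1 = 2
 *   dropped eps = hweight32(0x4) - hweight32(0x7 & 0x4) = 1 - 1 = 0
 *
 * A changed endpoint (both added and dropped) thus counts as neither new
 * nor dropped, matching the comments above.
 */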
2095
2096 /*
2097 * We need to reserve the new number of endpoints before the configure endpoint
2098 * command completes. We can't subtract the dropped endpoints from the number
2099 * of active endpoints until the command completes because we can oversubscribe
2100 * the host in this case:
2101 *
2102 * - the first configure endpoint command drops more endpoints than it adds
2103 * - a second configure endpoint command that adds more endpoints is queued
2104 * - the first configure endpoint command fails, so the config is unchanged
2105 * - the second command may succeed even though there aren't enough resources
2106 *
2107 * Must be called with xhci->lock held.
2108 */
2109 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2110 struct xhci_input_control_ctx *ctrl_ctx)
2111 {
2112 u32 added_eps;
2113
2114 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2115 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2116 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2117 "Not enough ep ctxs: "
2118 "%u active, need to add %u, limit is %u.",
2119 xhci->num_active_eps, added_eps,
2120 xhci->limit_active_eps);
2121 return -ENOMEM;
2122 }
2123 xhci->num_active_eps += added_eps;
2124 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2125 "Adding %u ep ctxs, %u now active.", added_eps,
2126 xhci->num_active_eps);
2127 return 0;
2128 }
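
/*
 * Numeric illustration of the oversubscription race described above
 * (numbers invented): with limit_active_eps = 32 and num_active_eps = 30,
 * a first command that drops 4 endpoints and adds none must not decrement
 * the count early.  If a second command adding 4 endpoints were checked
 * against 30 - 4 = 26 it would pass, but if the first command then failed
 * the host would end up with 30 + 4 = 34 active endpoint contexts, over
 * the limit.  Reserving at submission time (30 + 4 > 32 -> -ENOMEM)
 * closes that window.
 */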
2129
2130 /*
2131 * The xHC failed the configure endpoint command for some other reason, so we
2132 * need to revert the resources that the failed configuration would have used.
2133 *
2134 * Must be called with xhci->lock held.
2135 */
2136 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2137 struct xhci_input_control_ctx *ctrl_ctx)
2138 {
2139 u32 num_failed_eps;
2140
2141 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2142 xhci->num_active_eps -= num_failed_eps;
2143 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2144 "Removing %u failed ep ctxs, %u now active.",
2145 num_failed_eps,
2146 xhci->num_active_eps);
2147 }
2148
2149 /*
2150 * Now that the command has completed, clean up the active endpoint count by
2151 * subtracting out the endpoints that were dropped (but not changed).
2152 *
2153 * Must be called with xhci->lock held.
2154 */
2155 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2156 struct xhci_input_control_ctx *ctrl_ctx)
2157 {
2158 u32 num_dropped_eps;
2159
2160 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2161 xhci->num_active_eps -= num_dropped_eps;
2162 if (num_dropped_eps)
2163 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2164 "Removing %u dropped ep ctxs, %u now active.",
2165 num_dropped_eps,
2166 xhci->num_active_eps);
2167 }
2168
2169 static unsigned int xhci_get_block_size(struct usb_device *udev)
2170 {
2171 switch (udev->speed) {
2172 case USB_SPEED_LOW:
2173 case USB_SPEED_FULL:
2174 return FS_BLOCK;
2175 case USB_SPEED_HIGH:
2176 return HS_BLOCK;
2177 case USB_SPEED_SUPER:
2178 case USB_SPEED_SUPER_PLUS:
2179 return SS_BLOCK;
2180 case USB_SPEED_UNKNOWN:
2181 case USB_SPEED_WIRELESS:
2182 default:
2183 /* Should never happen */
2184 return 1;
2185 }
2186 }
2187
2188 static unsigned int
2189 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2190 {
2191 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2192 return LS_OVERHEAD;
2193 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2194 return FS_OVERHEAD;
2195 return HS_OVERHEAD;
2196 }
2197
2198 /* If we are changing a LS/FS device under a HS hub,
2199 * make sure (if we are activating a new TT) that the HS bus has enough
2200 * bandwidth for this new TT.
2201 */
2202 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2203 struct xhci_virt_device *virt_dev,
2204 int old_active_eps)
2205 {
2206 struct xhci_interval_bw_table *bw_table;
2207 struct xhci_tt_bw_info *tt_info;
2208
2209 /* Find the bandwidth table for the root port this TT is attached to. */
2210 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2211 tt_info = virt_dev->tt_info;
2212 /* If this TT already had active endpoints, the bandwidth for this TT
2213 * has already been added. Removing all periodic endpoints (and thus
2214 * making the TT inactive) will only decrease the bandwidth used.
2215 */
2216 if (old_active_eps)
2217 return 0;
2218 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2219 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2220 return -ENOMEM;
2221 return 0;
2222 }
2223 /* Not sure why we would have no new active endpoints...
2224 *
2225 * Maybe because of an Evaluate Context change for a hub update or a
2226 * control endpoint 0 max packet size change?
2227 * FIXME: skip the bandwidth calculation in that case.
2228 */
2229 return 0;
2230 }
2231
2232 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2233 struct xhci_virt_device *virt_dev)
2234 {
2235 unsigned int bw_reserved;
2236
2237 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2238 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2239 return -ENOMEM;
2240
2241 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2242 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2243 return -ENOMEM;
2244
2245 return 0;
2246 }
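
/*
 * Sketch of the check above, assuming SS_BW_RESERVED is a percentage
 * (say 10) and SS_BW_LIMIT_IN a block budget (say 3906; both values
 * illustrative): bw_reserved = DIV_ROUND_UP(10 * 3906, 100) = 391, so new
 * periodic IN traffic is refused once ss_bw_in exceeds 3906 - 391 = 3515
 * blocks.  IN and OUT are budgeted independently because SuperSpeed links
 * are dual-simplex.
 */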
2247
2248 /*
2249 * This algorithm is a very conservative estimate of the worst-case scheduling
2250 * scenario for any one interval. The hardware dynamically schedules the
2251 * packets, so we can't tell which microframe could be the limiting factor in
2252 * the bandwidth scheduling. This only takes into account periodic endpoints.
2253 *
2254 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2255 * case scenario. Instead, we come up with an estimate that is no less than
2256 * the worst case bandwidth used for any one microframe, but may be an
2257 * over-estimate.
2258 *
2259 * We walk the requirements for each endpoint by interval, starting with the
2260 * smallest interval, and place packets in the schedule where there is only one
2261 * possible way to schedule packets for that interval. In order to simplify
2262 * this algorithm, we record the largest max packet size for each interval, and
2263 * assume all packets will be that size.
2264 *
2265 * For interval 0, we obviously must schedule all packets for each interval.
2266 * The bandwidth for interval 0 is just the amount of data to be transmitted
2267 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2268 * the number of packets).
2269 *
2270 * For interval 1, we have two possible microframes to schedule those packets
2271 * in. For this algorithm, if we can schedule the same number of packets for
2272 * each possible scheduling opportunity (each microframe), we will do so. The
2273 * remaining number of packets will be saved to be transmitted in the gaps in
2274 * the next interval's scheduling sequence.
2275 *
2276 * As we move those remaining packets to be scheduled with interval 2 packets,
2277 * we have to double the number of remaining packets to transmit. This is
2278 * because the intervals are actually powers of 2, and we would be transmitting
2279 * the previous interval's packets twice in this interval. We also have to be
2280 * sure that when we look at the largest max packet size for this interval, we
2281 * also look at the largest max packet size for the remaining packets and take
2282 * the greater of the two.
2283 *
2284 * The algorithm continues to evenly distribute packets in each scheduling
2285 * opportunity, and push the remaining packets out, until we get to the last
2286 * interval. Then those packets and their associated overhead are just added
2287 * to the bandwidth used.
2288 */
2289 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2290 struct xhci_virt_device *virt_dev,
2291 int old_active_eps)
2292 {
2293 unsigned int bw_reserved;
2294 unsigned int max_bandwidth;
2295 unsigned int bw_used;
2296 unsigned int block_size;
2297 struct xhci_interval_bw_table *bw_table;
2298 unsigned int packet_size = 0;
2299 unsigned int overhead = 0;
2300 unsigned int packets_transmitted = 0;
2301 unsigned int packets_remaining = 0;
2302 unsigned int i;
2303
2304 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2305 return xhci_check_ss_bw(xhci, virt_dev);
2306
2307 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2308 max_bandwidth = HS_BW_LIMIT;
2309 /* Convert percent of bus BW reserved to blocks reserved */
2310 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2311 } else {
2312 max_bandwidth = FS_BW_LIMIT;
2313 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2314 }
2315
2316 bw_table = virt_dev->bw_table;
2317 /* We need to translate the max packet size and max ESIT payloads into
2318 * the units the hardware uses.
2319 */
2320 block_size = xhci_get_block_size(virt_dev->udev);
2321
2322 /* If we are manipulating a LS/FS device under a HS hub, double check
2323 * that the HS bus has enough bandwidth if we are activating a new TT.
2324 */
2325 if (virt_dev->tt_info) {
2326 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2327 "Recalculating BW for rootport %u",
2328 virt_dev->real_port);
2329 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2330 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2331 "newly activated TT.\n");
2332 return -ENOMEM;
2333 }
2334 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2335 "Recalculating BW for TT slot %u port %u",
2336 virt_dev->tt_info->slot_id,
2337 virt_dev->tt_info->ttport);
2338 } else {
2339 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2340 "Recalculating BW for rootport %u",
2341 virt_dev->real_port);
2342 }
2343
2344 /* Add in how much bandwidth will be used for interval zero, or the
2345 * rounded max ESIT payload + number of packets * largest overhead.
2346 */
2347 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2348 bw_table->interval_bw[0].num_packets *
2349 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2350
2351 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2352 unsigned int bw_added;
2353 unsigned int largest_mps;
2354 unsigned int interval_overhead;
2355
2356 /*
2357 * How many packets could we transmit in this interval?
2358 * If packets didn't fit in the previous interval, we will need
2359 * to transmit that many packets twice within this interval.
2360 */
2361 packets_remaining = 2 * packets_remaining +
2362 bw_table->interval_bw[i].num_packets;
2363
2364 /* Find the largest max packet size of this or the previous
2365 * interval.
2366 */
2367 if (list_empty(&bw_table->interval_bw[i].endpoints))
2368 largest_mps = 0;
2369 else {
2370 struct xhci_virt_ep *virt_ep;
2371 struct list_head *ep_entry;
2372
2373 ep_entry = bw_table->interval_bw[i].endpoints.next;
2374 virt_ep = list_entry(ep_entry,
2375 struct xhci_virt_ep, bw_endpoint_list);
2376 /* Convert to blocks, rounding up */
2377 largest_mps = DIV_ROUND_UP(
2378 virt_ep->bw_info.max_packet_size,
2379 block_size);
2380 }
2381 if (largest_mps > packet_size)
2382 packet_size = largest_mps;
2383
2384 /* Use the larger overhead of this or the previous interval. */
2385 interval_overhead = xhci_get_largest_overhead(
2386 &bw_table->interval_bw[i]);
2387 if (interval_overhead > overhead)
2388 overhead = interval_overhead;
2389
2390 /* How many packets can we evenly distribute across
2391 * (1 << (i + 1)) possible scheduling opportunities?
2392 */
2393 packets_transmitted = packets_remaining >> (i + 1);
2394
2395 /* Add in the bandwidth used for those scheduled packets */
2396 bw_added = packets_transmitted * (overhead + packet_size);
2397
2398 /* How many packets do we have remaining to transmit? */
2399 packets_remaining = packets_remaining % (1 << (i + 1));
2400
2401 /* What largest max packet size should those packets have? */
2402 /* If we've transmitted all packets, don't carry over the
2403 * largest packet size.
2404 */
2405 if (packets_remaining == 0) {
2406 packet_size = 0;
2407 overhead = 0;
2408 } else if (packets_transmitted > 0) {
2409 /* Otherwise if we do have remaining packets, and we've
2410 * scheduled some packets in this interval, take the
2411 * largest max packet size from endpoints with this
2412 * interval.
2413 */
2414 packet_size = largest_mps;
2415 overhead = interval_overhead;
2416 }
2417 /* Otherwise carry over packet_size and overhead from the last
2418 * time we had a remainder.
2419 */
2420 bw_used += bw_added;
2421 if (bw_used > max_bandwidth) {
2422 xhci_warn(xhci, "Not enough bandwidth. "
2423 "Proposed: %u, Max: %u\n",
2424 bw_used, max_bandwidth);
2425 return -ENOMEM;
2426 }
2427 }
2428 /*
2429 * Ok, we know we have some packets left over after even-handedly
2430 * scheduling interval 15. We don't know which microframes they will
2431 * fit into, so we over-schedule and say they will be scheduled every
2432 * microframe.
2433 */
2434 if (packets_remaining > 0)
2435 bw_used += overhead + packet_size;
2436
2437 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2438 unsigned int port_index = virt_dev->real_port - 1;
2439
2440 /* OK, we're manipulating a HS device attached to a
2441 * root port bandwidth domain. Include the number of active TTs
2442 * in the bandwidth used.
2443 */
2444 bw_used += TT_HS_OVERHEAD *
2445 xhci->rh_bw[port_index].num_active_tts;
2446 }
2447
2448 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2449 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2450 "Available: %u " "percent",
2451 bw_used, max_bandwidth, bw_reserved,
2452 (max_bandwidth - bw_used - bw_reserved) * 100 /
2453 max_bandwidth);
2454
2455 bw_used += bw_reserved;
2456 if (bw_used > max_bandwidth) {
2457 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2458 bw_used, max_bandwidth);
2459 return -ENOMEM;
2460 }
2461
2462 bw_table->bw_used = bw_used;
2463 return 0;
2464 }
2465
2466 static bool xhci_is_async_ep(unsigned int ep_type)
2467 {
2468 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2469 ep_type != ISOC_IN_EP &&
2470 ep_type != INT_IN_EP);
2471 }
2472
2473 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2474 {
2475 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2476 }
2477
2478 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2479 {
2480 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2481
2482 if (ep_bw->ep_interval == 0)
2483 return SS_OVERHEAD_BURST +
2484 (ep_bw->mult * ep_bw->num_packets *
2485 (SS_OVERHEAD + mps));
2486 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2487 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2488 1 << ep_bw->ep_interval);
2489
2490 }
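
/*
 * Reading the helper above with invented numbers: if SS_BLOCK were 16, a
 * 1024-byte max packet would cost mps = 64 blocks.  An endpoint serviced
 * every interval (ep_interval == 0) is charged the full per-ESIT cost,
 * mult * num_packets packets of (SS_OVERHEAD + mps) blocks plus one burst
 * overhead; longer intervals average that cost (plus the burst overhead)
 * over the 2^ep_interval service opportunities, rounding up so the
 * estimate stays conservative.
 */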
2491
2492 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2493 struct xhci_bw_info *ep_bw,
2494 struct xhci_interval_bw_table *bw_table,
2495 struct usb_device *udev,
2496 struct xhci_virt_ep *virt_ep,
2497 struct xhci_tt_bw_info *tt_info)
2498 {
2499 struct xhci_interval_bw *interval_bw;
2500 int normalized_interval;
2501
2502 if (xhci_is_async_ep(ep_bw->type))
2503 return;
2504
2505 if (udev->speed >= USB_SPEED_SUPER) {
2506 if (xhci_is_sync_in_ep(ep_bw->type))
2507 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2508 xhci_get_ss_bw_consumed(ep_bw);
2509 else
2510 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2511 xhci_get_ss_bw_consumed(ep_bw);
2512 return;
2513 }
2514
2515 /* SuperSpeed endpoints never get added to intervals in the table, so
2516 * this check is only valid for HS/FS/LS devices.
2517 */
2518 if (list_empty(&virt_ep->bw_endpoint_list))
2519 return;
2520 /* For LS/FS devices, we need to translate the interval expressed in
2521 * microframes to frames.
2522 */
2523 if (udev->speed == USB_SPEED_HIGH)
2524 normalized_interval = ep_bw->ep_interval;
2525 else
2526 normalized_interval = ep_bw->ep_interval - 3;
2527
2528 if (normalized_interval == 0)
2529 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2530 interval_bw = &bw_table->interval_bw[normalized_interval];
2531 interval_bw->num_packets -= ep_bw->num_packets;
2532 switch (udev->speed) {
2533 case USB_SPEED_LOW:
2534 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2535 break;
2536 case USB_SPEED_FULL:
2537 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2538 break;
2539 case USB_SPEED_HIGH:
2540 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2541 break;
2542 case USB_SPEED_SUPER:
2543 case USB_SPEED_SUPER_PLUS:
2544 case USB_SPEED_UNKNOWN:
2545 case USB_SPEED_WIRELESS:
2546 /* Should never happen because only LS/FS/HS endpoints will get
2547 * added to the endpoint list.
2548 */
2549 return;
2550 }
2551 if (tt_info)
2552 tt_info->active_eps -= 1;
2553 list_del_init(&virt_ep->bw_endpoint_list);
2554 }
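
/*
 * Note on the interval normalization above: endpoint contexts store
 * ep_interval as a power-of-two exponent in 125 us microframes, while the
 * LS/FS bandwidth table is indexed in 1 ms frames, so subtracting 3
 * divides the period by 2^3 = 8 microframes per frame.  A full-speed
 * interrupt endpoint with ep_interval = 5 (32 microframes = 4 ms), for
 * example, lands in interval_bw[2].
 */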
2555
2556 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2557 struct xhci_bw_info *ep_bw,
2558 struct xhci_interval_bw_table *bw_table,
2559 struct usb_device *udev,
2560 struct xhci_virt_ep *virt_ep,
2561 struct xhci_tt_bw_info *tt_info)
2562 {
2563 struct xhci_interval_bw *interval_bw;
2564 struct xhci_virt_ep *smaller_ep;
2565 int normalized_interval;
2566
2567 if (xhci_is_async_ep(ep_bw->type))
2568 return;
2569
2570 if (udev->speed >= USB_SPEED_SUPER) {
2571 if (xhci_is_sync_in_ep(ep_bw->type))
2572 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2573 xhci_get_ss_bw_consumed(ep_bw);
2574 else
2575 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2576 xhci_get_ss_bw_consumed(ep_bw);
2577 return;
2578 }
2579
2580 /* For LS/FS devices, we need to translate the interval expressed in
2581 * microframes to frames.
2582 */
2583 if (udev->speed == USB_SPEED_HIGH)
2584 normalized_interval = ep_bw->ep_interval;
2585 else
2586 normalized_interval = ep_bw->ep_interval - 3;
2587
2588 if (normalized_interval == 0)
2589 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2590 interval_bw = &bw_table->interval_bw[normalized_interval];
2591 interval_bw->num_packets += ep_bw->num_packets;
2592 switch (udev->speed) {
2593 case USB_SPEED_LOW:
2594 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2595 break;
2596 case USB_SPEED_FULL:
2597 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2598 break;
2599 case USB_SPEED_HIGH:
2600 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2601 break;
2602 case USB_SPEED_SUPER:
2603 case USB_SPEED_SUPER_PLUS:
2604 case USB_SPEED_UNKNOWN:
2605 case USB_SPEED_WIRELESS:
2606 /* Should never happen because only LS/FS/HS endpoints will get
2607 * added to the endpoint list.
2608 */
2609 return;
2610 }
2611
2612 if (tt_info)
2613 tt_info->active_eps += 1;
2614 /* Insert the endpoint into the list, largest max packet size first. */
2615 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2616 bw_endpoint_list) {
2617 if (ep_bw->max_packet_size >=
2618 smaller_ep->bw_info.max_packet_size) {
2619 /* Add the new ep before the smaller endpoint */
2620 list_add_tail(&virt_ep->bw_endpoint_list,
2621 &smaller_ep->bw_endpoint_list);
2622 return;
2623 }
2624 }
2625 /* Add the new endpoint at the end of the list. */
2626 list_add_tail(&virt_ep->bw_endpoint_list,
2627 &interval_bw->endpoints);
2628 }
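
/*
 * Ordering example for the insertion loop above: if an interval list
 * already holds endpoints with max packet sizes 512 and 64, a new
 * 256-byte endpoint is linked in between them.  Keeping the list sorted
 * largest-first lets xhci_check_bw_table() read the largest max packet
 * size for an interval straight from the head entry.
 */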
2629
2630 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2631 struct xhci_virt_device *virt_dev,
2632 int old_active_eps)
2633 {
2634 struct xhci_root_port_bw_info *rh_bw_info;
2635 if (!virt_dev->tt_info)
2636 return;
2637
2638 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2639 if (old_active_eps == 0 &&
2640 virt_dev->tt_info->active_eps != 0) {
2641 rh_bw_info->num_active_tts += 1;
2642 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2643 } else if (old_active_eps != 0 &&
2644 virt_dev->tt_info->active_eps == 0) {
2645 rh_bw_info->num_active_tts -= 1;
2646 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2647 }
2648 }
2649
2650 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2651 struct xhci_virt_device *virt_dev,
2652 struct xhci_container_ctx *in_ctx)
2653 {
2654 struct xhci_bw_info ep_bw_info[31];
2655 int i;
2656 struct xhci_input_control_ctx *ctrl_ctx;
2657 int old_active_eps = 0;
2658
2659 if (virt_dev->tt_info)
2660 old_active_eps = virt_dev->tt_info->active_eps;
2661
2662 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2663 if (!ctrl_ctx) {
2664 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2665 __func__);
2666 return -ENOMEM;
2667 }
2668
2669 for (i = 0; i < 31; i++) {
2670 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2671 continue;
2672
2673 /* Make a copy of the BW info in case we need to revert this */
2674 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2675 sizeof(ep_bw_info[i]));
2676 /* Drop the endpoint from the interval table if the endpoint is
2677 * being dropped or changed.
2678 */
2679 if (EP_IS_DROPPED(ctrl_ctx, i))
2680 xhci_drop_ep_from_interval_table(xhci,
2681 &virt_dev->eps[i].bw_info,
2682 virt_dev->bw_table,
2683 virt_dev->udev,
2684 &virt_dev->eps[i],
2685 virt_dev->tt_info);
2686 }
2687 /* Overwrite the information stored in the endpoints' bw_info */
2688 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2689 for (i = 0; i < 31; i++) {
2690 /* Add any changed or added endpoints to the interval table */
2691 if (EP_IS_ADDED(ctrl_ctx, i))
2692 xhci_add_ep_to_interval_table(xhci,
2693 &virt_dev->eps[i].bw_info,
2694 virt_dev->bw_table,
2695 virt_dev->udev,
2696 &virt_dev->eps[i],
2697 virt_dev->tt_info);
2698 }
2699
2700 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2701 /* Ok, this fits in the bandwidth we have.
2702 * Update the number of active TTs.
2703 */
2704 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2705 return 0;
2706 }
2707
2708 /* We don't have enough bandwidth for this, revert the stored info. */
2709 for (i = 0; i < 31; i++) {
2710 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2711 continue;
2712
2713 /* Drop the new copies of any added or changed endpoints from
2714 * the interval table.
2715 */
2716 if (EP_IS_ADDED(ctrl_ctx, i)) {
2717 xhci_drop_ep_from_interval_table(xhci,
2718 &virt_dev->eps[i].bw_info,
2719 virt_dev->bw_table,
2720 virt_dev->udev,
2721 &virt_dev->eps[i],
2722 virt_dev->tt_info);
2723 }
2724 /* Revert the endpoint back to its old information */
2725 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2726 sizeof(ep_bw_info[i]));
2727 /* Add any changed or dropped endpoints back into the table */
2728 if (EP_IS_DROPPED(ctrl_ctx, i))
2729 xhci_add_ep_to_interval_table(xhci,
2730 &virt_dev->eps[i].bw_info,
2731 virt_dev->bw_table,
2732 virt_dev->udev,
2733 &virt_dev->eps[i],
2734 virt_dev->tt_info);
2735 }
2736 return -ENOMEM;
2737 }
2738
2739
2740 /* Issue a configure endpoint command or evaluate context command
2741 * and wait for it to finish.
2742 */
2743 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2744 struct usb_device *udev,
2745 struct xhci_command *command,
2746 bool ctx_change, bool must_succeed)
2747 {
2748 int ret;
2749 unsigned long flags;
2750 struct xhci_input_control_ctx *ctrl_ctx;
2751 struct xhci_virt_device *virt_dev;
2752 struct xhci_slot_ctx *slot_ctx;
2753
2754 if (!command)
2755 return -EINVAL;
2756
2757 spin_lock_irqsave(&xhci->lock, flags);
2758
2759 if (xhci->xhc_state & XHCI_STATE_DYING) {
2760 spin_unlock_irqrestore(&xhci->lock, flags);
2761 return -ESHUTDOWN;
2762 }
2763
2764 virt_dev = xhci->devs[udev->slot_id];
2765
2766 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2767 if (!ctrl_ctx) {
2768 spin_unlock_irqrestore(&xhci->lock, flags);
2769 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2770 __func__);
2771 return -ENOMEM;
2772 }
2773
2774 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2775 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2776 spin_unlock_irqrestore(&xhci->lock, flags);
2777 xhci_warn(xhci, "Not enough host resources, "
2778 "active endpoint contexts = %u\n",
2779 xhci->num_active_eps);
2780 return -ENOMEM;
2781 }
2782 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2783 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2784 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2785 xhci_free_host_resources(xhci, ctrl_ctx);
2786 spin_unlock_irqrestore(&xhci->lock, flags);
2787 xhci_warn(xhci, "Not enough bandwidth\n");
2788 return -ENOMEM;
2789 }
2790
2791 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2792
2793 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2794 trace_xhci_configure_endpoint(slot_ctx);
2795
2796 if (!ctx_change)
2797 ret = xhci_queue_configure_endpoint(xhci, command,
2798 command->in_ctx->dma,
2799 udev->slot_id, must_succeed);
2800 else
2801 ret = xhci_queue_evaluate_context(xhci, command,
2802 command->in_ctx->dma,
2803 udev->slot_id, must_succeed);
2804 if (ret < 0) {
2805 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2806 xhci_free_host_resources(xhci, ctrl_ctx);
2807 spin_unlock_irqrestore(&xhci->lock, flags);
2808 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2809 "FIXME allocate a new ring segment");
2810 return -ENOMEM;
2811 }
2812 xhci_ring_cmd_db(xhci);
2813 spin_unlock_irqrestore(&xhci->lock, flags);
2814
2815 /* Wait for the configure endpoint command to complete */
2816 wait_for_completion(command->completion);
2817
2818 if (!ctx_change)
2819 ret = xhci_configure_endpoint_result(xhci, udev,
2820 &command->status);
2821 else
2822 ret = xhci_evaluate_context_result(xhci, udev,
2823 &command->status);
2824
2825 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2826 spin_lock_irqsave(&xhci->lock, flags);
2827 /* If the command failed, remove the reserved resources.
2828 * Otherwise, clean up the estimate to include dropped eps.
2829 */
2830 if (ret)
2831 xhci_free_host_resources(xhci, ctrl_ctx);
2832 else
2833 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2834 spin_unlock_irqrestore(&xhci->lock, flags);
2835 }
2836 return ret;
2837 }
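
/*
 * Call-pattern sketch for the helper above, as used by
 * xhci_check_bandwidth() below: the caller allocates an xhci_command with
 * a completion, fills command->in_ctx with the add/drop flags, and this
 * function queues either a Configure Endpoint or an Evaluate Context TRB,
 * rings the command doorbell, sleeps on command->completion, and finally
 * translates command->status into an errno.  Any XHCI_EP_LIMIT_QUIRK
 * reservation is settled once the outcome is known.
 */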
2838
2839 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2840 struct xhci_virt_device *vdev, int i)
2841 {
2842 struct xhci_virt_ep *ep = &vdev->eps[i];
2843
2844 if (ep->ep_state & EP_HAS_STREAMS) {
2845 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2846 xhci_get_endpoint_address(i));
2847 xhci_free_stream_info(xhci, ep->stream_info);
2848 ep->stream_info = NULL;
2849 ep->ep_state &= ~EP_HAS_STREAMS;
2850 }
2851 }
2852
2853 /* Called after one or more calls to xhci_add_endpoint() or
2854 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2855 * to call xhci_reset_bandwidth().
2856 *
2857 * Since we are in the middle of changing either configuration or
2858 * installing a new alt setting, the USB core won't allow URBs to be
2859 * enqueued for any endpoint on the old config or interface. Nothing
2860 * else should be touching the xhci->devs[slot_id] structure, so we
2861 * don't need to take the xhci->lock for manipulating that.
2862 */
2863 static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2864 {
2865 int i;
2866 int ret = 0;
2867 struct xhci_hcd *xhci;
2868 struct xhci_virt_device *virt_dev;
2869 struct xhci_input_control_ctx *ctrl_ctx;
2870 struct xhci_slot_ctx *slot_ctx;
2871 struct xhci_command *command;
2872
2873 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2874 if (ret <= 0)
2875 return ret;
2876 xhci = hcd_to_xhci(hcd);
2877 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2878 (xhci->xhc_state & XHCI_STATE_REMOVING))
2879 return -ENODEV;
2880
2881 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2882 virt_dev = xhci->devs[udev->slot_id];
2883
2884 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2885 if (!command)
2886 return -ENOMEM;
2887
2888 command->in_ctx = virt_dev->in_ctx;
2889
2890 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2891 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2892 if (!ctrl_ctx) {
2893 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2894 __func__);
2895 ret = -ENOMEM;
2896 goto command_cleanup;
2897 }
2898 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2899 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2900 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2901
2902 /* Don't issue the command if there are no endpoints to update. */
2903 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2904 ctrl_ctx->drop_flags == 0) {
2905 ret = 0;
2906 goto command_cleanup;
2907 }
2908 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2909 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2910 for (i = 31; i >= 1; i--) {
2911 __le32 le32 = cpu_to_le32(BIT(i));
2912
2913 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2914 || (ctrl_ctx->add_flags & le32) || i == 1) {
2915 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2916 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2917 break;
2918 }
2919 }
2920
2921 ret = xhci_configure_endpoint(xhci, udev, command,
2922 false, false);
2923 if (ret)
2924 /* Caller should call reset_bandwidth() */
2925 goto command_cleanup;
2926
2927 /* Free any rings that were dropped, but not changed. */
2928 for (i = 1; i < 31; i++) {
2929 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2930 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2931 xhci_free_endpoint_ring(xhci, virt_dev, i);
2932 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2933 }
2934 }
2935 xhci_zero_in_ctx(xhci, virt_dev);
2936 /*
2937 * Install any rings for completely new endpoints or changed endpoints,
2938 * and free any old rings from changed endpoints.
2939 */
2940 for (i = 1; i < 31; i++) {
2941 if (!virt_dev->eps[i].new_ring)
2942 continue;
2943 /* Only free the old ring if it exists.
2944 * It may not if this is the first add of an endpoint.
2945 */
2946 if (virt_dev->eps[i].ring) {
2947 xhci_free_endpoint_ring(xhci, virt_dev, i);
2948 }
2949 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2950 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2951 virt_dev->eps[i].new_ring = NULL;
2952 }
2953 command_cleanup:
2954 kfree(command->completion);
2955 kfree(command);
2956
2957 return ret;
2958 }
2959
2960 static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2961 {
2962 struct xhci_hcd *xhci;
2963 struct xhci_virt_device *virt_dev;
2964 int i, ret;
2965
2966 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2967 if (ret <= 0)
2968 return;
2969 xhci = hcd_to_xhci(hcd);
2970
2971 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2972 virt_dev = xhci->devs[udev->slot_id];
2973 /* Free any rings allocated for added endpoints */
2974 for (i = 0; i < 31; i++) {
2975 if (virt_dev->eps[i].new_ring) {
2976 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
2977 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2978 virt_dev->eps[i].new_ring = NULL;
2979 }
2980 }
2981 xhci_zero_in_ctx(xhci, virt_dev);
2982 }
2983
2984 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2985 struct xhci_container_ctx *in_ctx,
2986 struct xhci_container_ctx *out_ctx,
2987 struct xhci_input_control_ctx *ctrl_ctx,
2988 u32 add_flags, u32 drop_flags)
2989 {
2990 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2991 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2992 xhci_slot_copy(xhci, in_ctx, out_ctx);
2993 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2994 }
2995
2996 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2997 unsigned int slot_id, unsigned int ep_index,
2998 struct xhci_dequeue_state *deq_state)
2999 {
3000 struct xhci_input_control_ctx *ctrl_ctx;
3001 struct xhci_container_ctx *in_ctx;
3002 struct xhci_ep_ctx *ep_ctx;
3003 u32 added_ctxs;
3004 dma_addr_t addr;
3005
3006 in_ctx = xhci->devs[slot_id]->in_ctx;
3007 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
3008 if (!ctrl_ctx) {
3009 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3010 __func__);
3011 return;
3012 }
3013
3014 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
3015 xhci->devs[slot_id]->out_ctx, ep_index);
3016 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
3017 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
3018 deq_state->new_deq_ptr);
3019 if (addr == 0) {
3020 xhci_warn(xhci, "WARN Cannot submit config ep after "
3021 "reset ep command\n");
3022 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
3023 deq_state->new_deq_seg,
3024 deq_state->new_deq_ptr);
3025 return;
3026 }
3027 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
3028
3029 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
3030 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
3031 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
3032 added_ctxs, added_ctxs);
3033 }
3034
3035 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
3036 unsigned int stream_id, struct xhci_td *td)
3037 {
3038 struct xhci_dequeue_state deq_state;
3039 struct usb_device *udev = td->urb->dev;
3040
3041 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3042 "Cleaning up stalled endpoint ring");
3043 /* We need to move the HW's dequeue pointer past this TD,
3044 * or it will attempt to resend it on the next doorbell ring.
3045 */
3046 xhci_find_new_dequeue_state(xhci, udev->slot_id,
3047 ep_index, stream_id, td, &deq_state);
3048
3049 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
3050 return;
3051
3052 /* HW with the reset endpoint quirk will use the saved dequeue state to
3053 * issue a configure endpoint command later.
3054 */
3055 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
3056 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3057 "Queueing new dequeue state");
3058 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
3059 ep_index, &deq_state);
3060 } else {
3061 /* Better hope no one uses the input context between now and the
3062 * reset endpoint completion!
3063 * XXX: No idea how this hardware will react when stream rings
3064 * are enabled.
3065 */
3066 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3067 "Setting up input context for "
3068 "configure endpoint command");
3069 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
3070 ep_index, &deq_state);
3071 }
3072 }
3073
3074 static void xhci_endpoint_disable(struct usb_hcd *hcd,
3075 struct usb_host_endpoint *host_ep)
3076 {
3077 struct xhci_hcd *xhci;
3078 struct xhci_virt_device *vdev;
3079 struct xhci_virt_ep *ep;
3080 struct usb_device *udev;
3081 unsigned long flags;
3082 unsigned int ep_index;
3083
3084 xhci = hcd_to_xhci(hcd);
3085 rescan:
3086 spin_lock_irqsave(&xhci->lock, flags);
3087
3088 udev = (struct usb_device *)host_ep->hcpriv;
3089 if (!udev || !udev->slot_id)
3090 goto done;
3091
3092 vdev = xhci->devs[udev->slot_id];
3093 if (!vdev)
3094 goto done;
3095
3096 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3097 ep = &vdev->eps[ep_index];
3098 if (!ep)
3099 goto done;
3100
3101 /* wait for hub_tt_work to finish clearing hub TT */
3102 if (ep->ep_state & EP_CLEARING_TT) {
3103 spin_unlock_irqrestore(&xhci->lock, flags);
3104 schedule_timeout_uninterruptible(1);
3105 goto rescan;
3106 }
3107
3108 if (ep->ep_state)
3109 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3110 ep->ep_state);
3111 done:
3112 host_ep->hcpriv = NULL;
3113 spin_unlock_irqrestore(&xhci->lock, flags);
3114 }
3115
3116 /*
3117 * Called after usb core issues a clear halt control message.
3118 * The host side of the halt should already be cleared by a reset endpoint
3119 * command issued when the STALL event was received.
3120 *
3121 * The reset endpoint command may only be issued to endpoints in the halted
3122 * state. For software that wishes to reset the data toggle or sequence number
3123 * of an endpoint that isn't in the halted state this function will issue a
3124 * configure endpoint command with the Drop and Add bits set for the target
3125 * endpoint. Refer to the additional note in xHCI specification section 4.6.8.
3126 */
3127
3128 static void xhci_endpoint_reset(struct usb_hcd *hcd,
3129 struct usb_host_endpoint *host_ep)
3130 {
3131 struct xhci_hcd *xhci;
3132 struct usb_device *udev;
3133 struct xhci_virt_device *vdev;
3134 struct xhci_virt_ep *ep;
3135 struct xhci_input_control_ctx *ctrl_ctx;
3136 struct xhci_command *stop_cmd, *cfg_cmd;
3137 unsigned int ep_index;
3138 unsigned long flags;
3139 u32 ep_flag;
3140 int err;
3141
3142 xhci = hcd_to_xhci(hcd);
3143 if (!host_ep->hcpriv)
3144 return;
3145 udev = (struct usb_device *) host_ep->hcpriv;
3146 vdev = xhci->devs[udev->slot_id];
3147
3148 /*
3149 * vdev may be lost due to xHC restore error and re-initialization
3150 * during S3/S4 resume. A new vdev will be allocated later by
3151 * xhci_discover_or_reset_device()
3152 */
3153 if (!udev->slot_id || !vdev)
3154 return;
3155 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3156 ep = &vdev->eps[ep_index];
3157 if (!ep)
3158 return;
3159
3160 /* Bail out if toggle is already being cleared by an endpoint reset */
3161 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3162 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3163 return;
3164 }
3165 /* Only interrupt and bulk endpoints use data toggles; see USB 2.0 spec section 5.5.4 */
3166 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3167 usb_endpoint_xfer_isoc(&host_ep->desc))
3168 return;
3169
3170 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3171
3172 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3173 return;
3174
3175 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3176 if (!stop_cmd)
3177 return;
3178
3179 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3180 if (!cfg_cmd)
3181 goto cleanup;
3182
3183 spin_lock_irqsave(&xhci->lock, flags);
3184
3185 /* block queuing new trbs and ringing ep doorbell */
3186 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3187
3188 /*
3189 * Make sure endpoint ring is empty before resetting the toggle/seq.
3190 * The driver is required to synchronously cancel all transfer requests.
3191 * Stop the endpoint to force the xHC to update the output context.
3192 */
3193
3194 if (!list_empty(&ep->ring->td_list)) {
3195 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3196 spin_unlock_irqrestore(&xhci->lock, flags);
3197 xhci_free_command(xhci, cfg_cmd);
3198 goto cleanup;
3199 }
3200
3201 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3202 ep_index, 0);
3203 if (err < 0) {
3204 spin_unlock_irqrestore(&xhci->lock, flags);
3205 xhci_free_command(xhci, cfg_cmd);
3206 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
3207 __func__, err);
3208 goto cleanup;
3209 }
3210
3211 xhci_ring_cmd_db(xhci);
3212 spin_unlock_irqrestore(&xhci->lock, flags);
3213
3214 wait_for_completion(stop_cmd->completion);
3215
3216 spin_lock_irqsave(&xhci->lock, flags);
3217
3218 /* config ep command clears toggle if add and drop ep flags are set */
3219 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3220 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3221 ctrl_ctx, ep_flag, ep_flag);
3222 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3223
3224 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3225 udev->slot_id, false);
3226 if (err < 0) {
3227 spin_unlock_irqrestore(&xhci->lock, flags);
3228 xhci_free_command(xhci, cfg_cmd);
3229 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
3230 __func__, err);
3231 goto cleanup;
3232 }
3233
3234 xhci_ring_cmd_db(xhci);
3235 spin_unlock_irqrestore(&xhci->lock, flags);
3236
3237 wait_for_completion(cfg_cmd->completion);
3238
3239 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3240 xhci_free_command(xhci, cfg_cmd);
3241 cleanup:
3242 xhci_free_command(xhci, stop_cmd);
3243 }
3244
3245 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3246 struct usb_device *udev, struct usb_host_endpoint *ep,
3247 unsigned int slot_id)
3248 {
3249 int ret;
3250 unsigned int ep_index;
3251 unsigned int ep_state;
3252
3253 if (!ep)
3254 return -EINVAL;
3255 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3256 if (ret <= 0)
3257 return -EINVAL;
3258 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3259 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3260 " descriptor for ep 0x%x does not support streams\n",
3261 ep->desc.bEndpointAddress);
3262 return -EINVAL;
3263 }
3264
3265 ep_index = xhci_get_endpoint_index(&ep->desc);
3266 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3267 if (ep_state & EP_HAS_STREAMS ||
3268 ep_state & EP_GETTING_STREAMS) {
3269 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3270 "already has streams set up.\n",
3271 ep->desc.bEndpointAddress);
3272 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3273 "dynamic stream context array reallocation.\n");
3274 return -EINVAL;
3275 }
3276 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3277 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3278 "endpoint 0x%x; URBs are pending.\n",
3279 ep->desc.bEndpointAddress);
3280 return -EINVAL;
3281 }
3282 return 0;
3283 }
3284
3285 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3286 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3287 {
3288 unsigned int max_streams;
3289
3290 /* The stream context array size must be a power of two */
3291 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3292 /*
3293 * Find out how many primary stream array entries the host controller
3294 * supports. Later we may use secondary stream arrays (similar to 2nd
3295 * level page entries), but that's an optional feature for xHCI host
3296 * controllers. xHCs must support at least 4 stream IDs.
3297 */
3298 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3299 if (*num_stream_ctxs > max_streams) {
3300 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3301 max_streams);
3302 *num_stream_ctxs = max_streams;
3303 *num_streams = max_streams;
3304 }
3305 }
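
/*
 * Worked example, with assumed numbers: a driver asking for 23 stream IDs
 * needs a stream context array of roundup_pow_of_two(23) = 32 entries.
 * If HCC_MAX_PSA() reports that the host supports only 16 primary stream
 * array entries, both the array size and the stream ID count are clamped
 * down to 16.
 */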
3306
3307 /* Returns an error code if one of the endpoints already has streams.
3308 * This does not change any data structures, it only checks and gathers
3309 * information.
3310 */
3311 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3312 struct usb_device *udev,
3313 struct usb_host_endpoint **eps, unsigned int num_eps,
3314 unsigned int *num_streams, u32 *changed_ep_bitmask)
3315 {
3316 unsigned int max_streams;
3317 unsigned int endpoint_flag;
3318 int i;
3319 int ret;
3320
3321 for (i = 0; i < num_eps; i++) {
3322 ret = xhci_check_streams_endpoint(xhci, udev,
3323 eps[i], udev->slot_id);
3324 if (ret < 0)
3325 return ret;
3326
3327 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3328 if (max_streams < (*num_streams - 1)) {
3329 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3330 eps[i]->desc.bEndpointAddress,
3331 max_streams);
3332 *num_streams = max_streams+1;
3333 }
3334
3335 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3336 if (*changed_ep_bitmask & endpoint_flag)
3337 return -EINVAL;
3338 *changed_ep_bitmask |= endpoint_flag;
3339 }
3340 return 0;
3341 }
3342
3343 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3344 struct usb_device *udev,
3345 struct usb_host_endpoint **eps, unsigned int num_eps)
3346 {
3347 u32 changed_ep_bitmask = 0;
3348 unsigned int slot_id;
3349 unsigned int ep_index;
3350 unsigned int ep_state;
3351 int i;
3352
3353 slot_id = udev->slot_id;
3354 if (!xhci->devs[slot_id])
3355 return 0;
3356
3357 for (i = 0; i < num_eps; i++) {
3358 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3359 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3360 /* Are streams already being freed for the endpoint? */
3361 if (ep_state & EP_GETTING_NO_STREAMS) {
3362 xhci_warn(xhci, "WARN Can't disable streams for "
3363 "endpoint 0x%x, "
3364 "streams are being disabled already\n",
3365 eps[i]->desc.bEndpointAddress);
3366 return 0;
3367 }
3368 /* Are there actually any streams to free? */
3369 if (!(ep_state & EP_HAS_STREAMS) &&
3370 !(ep_state & EP_GETTING_STREAMS)) {
3371 xhci_warn(xhci, "WARN Can't disable streams for "
3372 "endpoint 0x%x, "
3373 "streams are already disabled!\n",
3374 eps[i]->desc.bEndpointAddress);
3375 xhci_warn(xhci, "WARN xhci_free_streams() called "
3376 "with non-streams endpoint\n");
3377 return 0;
3378 }
3379 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3380 }
3381 return changed_ep_bitmask;
3382 }
3383
3384 /*
3385 * The USB device drivers use this function (through the HCD interface in USB
3386 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3387 * coordinate mass storage command queueing across multiple endpoints (basically
3388 * a stream ID == a task ID).
3389 *
3390 * Setting up streams involves allocating the same size stream context array
3391 * for each endpoint and issuing a configure endpoint command for all endpoints.
3392 *
3393 * Don't allow the call to succeed if one endpoint only supports one stream
3394 * (which means it doesn't support streams at all).
3395 *
3396 * Drivers may get less stream IDs than they asked for, if the host controller
3397 * hardware or endpoints claim they can't support the number of requested
3398 * stream IDs.
3399 */
3400 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3401 struct usb_host_endpoint **eps, unsigned int num_eps,
3402 unsigned int num_streams, gfp_t mem_flags)
3403 {
3404 int i, ret;
3405 struct xhci_hcd *xhci;
3406 struct xhci_virt_device *vdev;
3407 struct xhci_command *config_cmd;
3408 struct xhci_input_control_ctx *ctrl_ctx;
3409 unsigned int ep_index;
3410 unsigned int num_stream_ctxs;
3411 unsigned int max_packet;
3412 unsigned long flags;
3413 u32 changed_ep_bitmask = 0;
3414
3415 if (!eps)
3416 return -EINVAL;
3417
3418 /* Add one to the number of streams requested to account for
3419 * stream 0 that is reserved for xHCI usage.
3420 */
3421 num_streams += 1;
3422 xhci = hcd_to_xhci(hcd);
3423 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3424 num_streams);
3425
3426 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3427 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3428 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3429 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3430 return -ENOSYS;
3431 }
3432
3433 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3434 if (!config_cmd)
3435 return -ENOMEM;
3436
3437 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3438 if (!ctrl_ctx) {
3439 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3440 __func__);
3441 xhci_free_command(xhci, config_cmd);
3442 return -ENOMEM;
3443 }
3444
3445 /* Check to make sure all endpoints are not already configured for
3446 * streams. While we're at it, find the maximum number of streams that
3447 * all the endpoints will support and check for duplicate endpoints.
3448 */
3449 spin_lock_irqsave(&xhci->lock, flags);
3450 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3451 num_eps, &num_streams, &changed_ep_bitmask);
3452 if (ret < 0) {
3453 xhci_free_command(xhci, config_cmd);
3454 spin_unlock_irqrestore(&xhci->lock, flags);
3455 return ret;
3456 }
3457 if (num_streams <= 1) {
3458 xhci_warn(xhci, "WARN: endpoints can't handle "
3459 "more than one stream.\n");
3460 xhci_free_command(xhci, config_cmd);
3461 spin_unlock_irqrestore(&xhci->lock, flags);
3462 return -EINVAL;
3463 }
3464 vdev = xhci->devs[udev->slot_id];
3465 /* Mark each endpoint as being in transition, so
3466 * xhci_urb_enqueue() will reject all URBs.
3467 */
3468 for (i = 0; i < num_eps; i++) {
3469 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3470 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3471 }
3472 spin_unlock_irqrestore(&xhci->lock, flags);
3473
3474 /* Setup internal data structures and allocate HW data structures for
3475 * streams (but don't install the HW structures in the input context
3476 * until we're sure all memory allocation succeeded).
3477 */
3478 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3479 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3480 num_stream_ctxs, num_streams);
3481
3482 for (i = 0; i < num_eps; i++) {
3483 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3484 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3485 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3486 num_stream_ctxs,
3487 num_streams,
3488 max_packet, mem_flags);
3489 if (!vdev->eps[ep_index].stream_info)
3490 goto cleanup;
3491 /* Set maxPstreams in endpoint context and update deq ptr to
3492 * point to stream context array. FIXME
3493 */
3494 }
3495
3496 /* Set up the input context for a configure endpoint command. */
3497 for (i = 0; i < num_eps; i++) {
3498 struct xhci_ep_ctx *ep_ctx;
3499
3500 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3501 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3502
3503 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3504 vdev->out_ctx, ep_index);
3505 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3506 vdev->eps[ep_index].stream_info);
3507 }
3508 /* Tell the HW to drop its old copy of the endpoint context info
3509 * and add the updated copy from the input context.
3510 */
3511 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3512 vdev->out_ctx, ctrl_ctx,
3513 changed_ep_bitmask, changed_ep_bitmask);
3514
3515 /* Issue and wait for the configure endpoint command */
3516 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3517 false, false);
3518
3519 /* xHC rejected the configure endpoint command for some reason, so we
3520 * leave the old ring intact and free our internal streams data
3521 * structure.
3522 */
3523 if (ret < 0)
3524 goto cleanup;
3525
3526 spin_lock_irqsave(&xhci->lock, flags);
3527 for (i = 0; i < num_eps; i++) {
3528 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3529 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3530 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3531 udev->slot_id, ep_index);
3532 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3533 }
3534 xhci_free_command(xhci, config_cmd);
3535 spin_unlock_irqrestore(&xhci->lock, flags);
3536
3537 /* Subtract 1 for stream 0, which drivers can't use */
3538 return num_streams - 1;
3539
3540 cleanup:
3541 /* If it didn't work, free the streams! */
3542 for (i = 0; i < num_eps; i++) {
3543 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3544 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3545 vdev->eps[ep_index].stream_info = NULL;
3546 /* FIXME Unset maxPstreams in endpoint context and
3547 * update deq ptr to point to the normal endpoint ring.
3548 */
3549 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3550 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3551 xhci_endpoint_zero(xhci, vdev, eps[i]);
3552 }
3553 xhci_free_command(xhci, config_cmd);
3554 return -ENOMEM;
3555 }
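
/*
 * Illustrative sketch, not part of this driver: class drivers reach
 * xhci_alloc_streams() through the usb_alloc_streams() wrapper in the USB
 * core. The interface pointer, endpoint names and stream count below are
 * made-up example values.
 *
 *	struct usb_host_endpoint *eps[] = { data_in_ep, data_out_ep };
 *	int streams;
 *
 *	streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16, GFP_NOIO);
 *	if (streams < 0)
 *		return streams;		(no streams: fall back to normal URBs)
 *
 * On success, stream IDs 1..streams may be used by setting urb->stream_id
 * before submitting URBs to those endpoints.
 */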
3556
3557 /* Transition the endpoint from using streams to being a "normal" endpoint
3558 * without streams.
3559 *
3560 * Modify the endpoint context state, submit a configure endpoint command,
3561 * and free all endpoint rings for streams if that completes successfully.
3562 */
3563 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3564 struct usb_host_endpoint **eps, unsigned int num_eps,
3565 gfp_t mem_flags)
3566 {
3567 int i, ret;
3568 struct xhci_hcd *xhci;
3569 struct xhci_virt_device *vdev;
3570 struct xhci_command *command;
3571 struct xhci_input_control_ctx *ctrl_ctx;
3572 unsigned int ep_index;
3573 unsigned long flags;
3574 u32 changed_ep_bitmask;
3575
3576 xhci = hcd_to_xhci(hcd);
3577 vdev = xhci->devs[udev->slot_id];
3578
3579 /* Set up a configure endpoint command to remove the streams rings */
3580 spin_lock_irqsave(&xhci->lock, flags);
3581 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3582 udev, eps, num_eps);
3583 if (changed_ep_bitmask == 0) {
3584 spin_unlock_irqrestore(&xhci->lock, flags);
3585 return -EINVAL;
3586 }
3587
3588 /* Use the xhci_command structure from the first endpoint. We may have
3589 * allocated too many, but the driver may call xhci_free_streams() for
3590 * each endpoint it grouped into one call to xhci_alloc_streams().
3591 */
3592 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3593 command = vdev->eps[ep_index].stream_info->free_streams_command;
3594 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3595 if (!ctrl_ctx) {
3596 spin_unlock_irqrestore(&xhci->lock, flags);
3597 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3598 __func__);
3599 return -EINVAL;
3600 }
3601
3602 for (i = 0; i < num_eps; i++) {
3603 struct xhci_ep_ctx *ep_ctx;
3604
3605 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3606 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3607 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3608 EP_GETTING_NO_STREAMS;
3609
3610 xhci_endpoint_copy(xhci, command->in_ctx,
3611 vdev->out_ctx, ep_index);
3612 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3613 &vdev->eps[ep_index]);
3614 }
3615 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3616 vdev->out_ctx, ctrl_ctx,
3617 changed_ep_bitmask, changed_ep_bitmask);
3618 spin_unlock_irqrestore(&xhci->lock, flags);
3619
3620 /* Issue and wait for the configure endpoint command,
3621 * which must succeed.
3622 */
3623 ret = xhci_configure_endpoint(xhci, udev, command,
3624 false, true);
3625
3626 /* xHC rejected the configure endpoint command for some reason, so we
3627 * leave the streams rings intact.
3628 */
3629 if (ret < 0)
3630 return ret;
3631
3632 spin_lock_irqsave(&xhci->lock, flags);
3633 for (i = 0; i < num_eps; i++) {
3634 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3635 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3636 vdev->eps[ep_index].stream_info = NULL;
3637 /* FIXME Unset maxPstreams in endpoint context and
3638 * update deq ptr to point to the normal endpoint ring.
3639 */
3640 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3641 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3642 }
3643 spin_unlock_irqrestore(&xhci->lock, flags);
3644
3645 return 0;
3646 }
3647
3648 /*
3649 * Deletes endpoint resources for endpoints that were active before a Reset
3650 * Device command, or a Disable Slot command. The Reset Device command leaves
3651 * the control endpoint intact, whereas the Disable Slot command deletes it.
3652 *
3653 * Must be called with xhci->lock held.
3654 */
3655 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3656 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3657 {
3658 int i;
3659 unsigned int num_dropped_eps = 0;
3660 unsigned int drop_flags = 0;
3661
3662 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3663 if (virt_dev->eps[i].ring) {
3664 drop_flags |= 1 << i;
3665 num_dropped_eps++;
3666 }
3667 }
3668 xhci->num_active_eps -= num_dropped_eps;
3669 if (num_dropped_eps)
3670 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3671 "Dropped %u ep ctxs, flags = 0x%x, "
3672 "%u now active.",
3673 num_dropped_eps, drop_flags,
3674 xhci->num_active_eps);
3675 }
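
/*
 * Example with assumed endpoints: the loop above uses the standard xHCI
 * endpoint context index (ep0 = 0, EP n OUT = 2n - 1, EP n IN = 2n), so a
 * device that had only EP 1 IN and EP 2 OUT active yields
 * drop_flags = (1 << 2) | (1 << 3) and num_dropped_eps = 2.
 */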
3676
3677 /*
3678 * This submits a Reset Device Command, which will set the device state to 0,
3679 * set the device address to 0, and disable all the endpoints except the default
3680 * control endpoint. The USB core should come back and call
3681 * xhci_address_device(), and then re-set up the configuration. If this is
3682 * called because of a usb_reset_and_verify_device(), then the old alternate
3683 * settings will be re-installed through the normal bandwidth allocation
3684 * functions.
3685 *
3686 * Wait for the Reset Device command to finish. Remove all structures
3687 * associated with the endpoints that were disabled. Clear the input device
3688 * structure? Reset the control endpoint 0 max packet size?
3689 *
3690 * If the virt_dev to be reset does not exist or does not match the udev,
3691 * it means the device is lost, possibly due to the xHC restore error and
3692 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3693 * re-allocate the device.
3694 */
3695 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3696 struct usb_device *udev)
3697 {
3698 int ret, i;
3699 unsigned long flags;
3700 struct xhci_hcd *xhci;
3701 unsigned int slot_id;
3702 struct xhci_virt_device *virt_dev;
3703 struct xhci_command *reset_device_cmd;
3704 struct xhci_slot_ctx *slot_ctx;
3705 int old_active_eps = 0;
3706
3707 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3708 if (ret <= 0)
3709 return ret;
3710 xhci = hcd_to_xhci(hcd);
3711 slot_id = udev->slot_id;
3712 virt_dev = xhci->devs[slot_id];
3713 if (!virt_dev) {
3714 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3715 "not exist. Re-allocate the device\n", slot_id);
3716 ret = xhci_alloc_dev(hcd, udev);
3717 if (ret == 1)
3718 return 0;
3719 else
3720 return -EINVAL;
3721 }
3722
3723 if (virt_dev->tt_info)
3724 old_active_eps = virt_dev->tt_info->active_eps;
3725
3726 if (virt_dev->udev != udev) {
3727 /* If the virt_dev and the udev do not match, this virt_dev
3728 * may belong to another udev.
3729 * Re-allocate the device.
3730 */
3731 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3732 "not match the udev. Re-allocate the device\n",
3733 slot_id);
3734 ret = xhci_alloc_dev(hcd, udev);
3735 if (ret == 1)
3736 return 0;
3737 else
3738 return -EINVAL;
3739 }
3740
3741 /* If device is not setup, there is no point in resetting it */
3742 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3743 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3744 SLOT_STATE_DISABLED)
3745 return 0;
3746
3747 trace_xhci_discover_or_reset_device(slot_ctx);
3748
3749 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3750 /* Allocate the command structure that holds the struct completion.
3751 * Assume we're in process context, since the normal device reset
3752 * process has to wait for the device anyway. Storage devices are
3753 * reset as part of error handling, so use GFP_NOIO instead of
3754 * GFP_KERNEL.
3755 */
3756 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3757 if (!reset_device_cmd) {
3758 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3759 return -ENOMEM;
3760 }
3761
3762 /* Attempt to submit the Reset Device command to the command ring */
3763 spin_lock_irqsave(&xhci->lock, flags);
3764
3765 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3766 if (ret) {
3767 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3768 spin_unlock_irqrestore(&xhci->lock, flags);
3769 goto command_cleanup;
3770 }
3771 xhci_ring_cmd_db(xhci);
3772 spin_unlock_irqrestore(&xhci->lock, flags);
3773
3774 /* Wait for the Reset Device command to finish */
3775 wait_for_completion(reset_device_cmd->completion);
3776
3777 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3778 * unless we tried to reset a slot ID that wasn't enabled,
3779 * or the device wasn't in the addressed or configured state.
3780 */
3781 ret = reset_device_cmd->status;
3782 switch (ret) {
3783 case COMP_COMMAND_ABORTED:
3784 case COMP_COMMAND_RING_STOPPED:
3785 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3786 ret = -ETIME;
3787 goto command_cleanup;
3788 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
3789 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
3790 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3791 slot_id,
3792 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3793 xhci_dbg(xhci, "Not freeing device rings.\n");
3794 /* Don't treat this as an error. May change my mind later. */
3795 ret = 0;
3796 goto command_cleanup;
3797 case COMP_SUCCESS:
3798 xhci_dbg(xhci, "Successful reset device command.\n");
3799 break;
3800 default:
3801 if (xhci_is_vendor_info_code(xhci, ret))
3802 break;
3803 xhci_warn(xhci, "Unknown completion code %u for "
3804 "reset device command.\n", ret);
3805 ret = -EINVAL;
3806 goto command_cleanup;
3807 }
3808
3809 /* Free up host controller endpoint resources */
3810 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3811 spin_lock_irqsave(&xhci->lock, flags);
3812 /* Don't delete the default control endpoint resources */
3813 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3814 spin_unlock_irqrestore(&xhci->lock, flags);
3815 }
3816
3817 /* Everything but endpoint 0 is disabled, so free the rings. */
3818 for (i = 1; i < 31; i++) {
3819 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3820
3821 if (ep->ep_state & EP_HAS_STREAMS) {
3822 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3823 xhci_get_endpoint_address(i));
3824 xhci_free_stream_info(xhci, ep->stream_info);
3825 ep->stream_info = NULL;
3826 ep->ep_state &= ~EP_HAS_STREAMS;
3827 }
3828
3829 if (ep->ring) {
3830 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3831 xhci_free_endpoint_ring(xhci, virt_dev, i);
3832 }
3833 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3834 xhci_drop_ep_from_interval_table(xhci,
3835 &virt_dev->eps[i].bw_info,
3836 virt_dev->bw_table,
3837 udev,
3838 &virt_dev->eps[i],
3839 virt_dev->tt_info);
3840 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3841 }
3842 /* If necessary, update the number of active TTs on this root port */
3843 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3844 virt_dev->flags = 0;
3845 ret = 0;
3846
3847 command_cleanup:
3848 xhci_free_command(xhci, reset_device_cmd);
3849 return ret;
3850 }
3851
3852 /*
3853 * At this point, the struct usb_device is about to go away, the device has
3854 * disconnected, and all traffic has been stopped and the endpoints have been
3855 * disabled. Free any HC data structures associated with that device.
3856 */
3857 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3858 {
3859 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3860 struct xhci_virt_device *virt_dev;
3861 struct xhci_slot_ctx *slot_ctx;
3862 int i, ret;
3863
3864 #ifndef CONFIG_USB_DEFAULT_PERSIST
3865 /*
3866 * We called pm_runtime_get_noresume when the device was attached.
3867 * Decrement the counter here to allow controller to runtime suspend
3868 * if no devices remain.
3869 */
3870 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3871 pm_runtime_put_noidle(hcd->self.controller);
3872 #endif
3873
3874 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3875 /* If the host is halted due to driver unload, we still need to free the
3876 * device.
3877 */
3878 if (ret <= 0 && ret != -ENODEV)
3879 return;
3880
3881 virt_dev = xhci->devs[udev->slot_id];
3882 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3883 trace_xhci_free_dev(slot_ctx);
3884
3885 /* Stop any wayward timer functions (which may grab the lock) */
3886 for (i = 0; i < 31; i++) {
3887 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3888 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3889 }
3890 virt_dev->udev = NULL;
3891 ret = xhci_disable_slot(xhci, udev->slot_id);
3892 if (ret)
3893 xhci_free_virt_device(xhci, udev->slot_id);
3894 }
3895
3896 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3897 {
3898 struct xhci_command *command;
3899 unsigned long flags;
3900 u32 state;
3901 int ret = 0;
3902
3903 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
3904 if (!command)
3905 return -ENOMEM;
3906
3907 xhci_debugfs_remove_slot(xhci, slot_id);
3908
3909 spin_lock_irqsave(&xhci->lock, flags);
3910 /* Don't disable the slot if the host controller is dead. */
3911 state = readl(&xhci->op_regs->status);
3912 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3913 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3914 spin_unlock_irqrestore(&xhci->lock, flags);
3915 kfree(command);
3916 return -ENODEV;
3917 }
3918
3919 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3920 slot_id);
3921 if (ret) {
3922 spin_unlock_irqrestore(&xhci->lock, flags);
3923 kfree(command);
3924 return ret;
3925 }
3926 xhci_ring_cmd_db(xhci);
3927 spin_unlock_irqrestore(&xhci->lock, flags);
3928 return ret;
3929 }
3930
3931 /*
3932 * Checks if we have enough host controller resources for the default control
3933 * endpoint.
3934 *
3935 * Must be called with xhci->lock held.
3936 */
3937 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3938 {
3939 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3940 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3941 "Not enough ep ctxs: "
3942 "%u active, need to add 1, limit is %u.",
3943 xhci->num_active_eps, xhci->limit_active_eps);
3944 return -ENOMEM;
3945 }
3946 xhci->num_active_eps += 1;
3947 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3948 "Adding 1 ep ctx, %u now active.",
3949 xhci->num_active_eps);
3950 return 0;
3951 }
3952
3953
3954 /*
3955 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3956 * timed out, or allocating memory failed. Returns 1 on success.
3957 */
3958 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3959 {
3960 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3961 struct xhci_virt_device *vdev;
3962 struct xhci_slot_ctx *slot_ctx;
3963 unsigned long flags;
3964 int ret, slot_id;
3965 struct xhci_command *command;
3966
3967 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3968 if (!command)
3969 return 0;
3970
3971 spin_lock_irqsave(&xhci->lock, flags);
3972 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3973 if (ret) {
3974 spin_unlock_irqrestore(&xhci->lock, flags);
3975 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3976 xhci_free_command(xhci, command);
3977 return 0;
3978 }
3979 xhci_ring_cmd_db(xhci);
3980 spin_unlock_irqrestore(&xhci->lock, flags);
3981
3982 wait_for_completion(command->completion);
3983 slot_id = command->slot_id;
3984
3985 if (!slot_id || command->status != COMP_SUCCESS) {
3986 xhci_err(xhci, "Error while assigning device slot ID\n");
3987 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3988 HCS_MAX_SLOTS(
3989 readl(&xhci->cap_regs->hcs_params1)));
3990 xhci_free_command(xhci, command);
3991 return 0;
3992 }
3993
3994 xhci_free_command(xhci, command);
3995
3996 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3997 spin_lock_irqsave(&xhci->lock, flags);
3998 ret = xhci_reserve_host_control_ep_resources(xhci);
3999 if (ret) {
4000 spin_unlock_irqrestore(&xhci->lock, flags);
4001 xhci_warn(xhci, "Not enough host resources, "
4002 "active endpoint contexts = %u\n",
4003 xhci->num_active_eps);
4004 goto disable_slot;
4005 }
4006 spin_unlock_irqrestore(&xhci->lock, flags);
4007 }
4008 /* Use GFP_NOIO, since this function can be called from
4009 * xhci_discover_or_reset_device(), which may be called as part of
4010 * mass storage driver error handling.
4011 */
4012 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4013 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4014 goto disable_slot;
4015 }
4016 vdev = xhci->devs[slot_id];
4017 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4018 trace_xhci_alloc_dev(slot_ctx);
4019
4020 udev->slot_id = slot_id;
4021
4022 xhci_debugfs_create_slot(xhci, slot_id);
4023
4024 #ifndef CONFIG_USB_DEFAULT_PERSIST
4025 /*
4026 * If resetting upon resume, we can't put the controller into runtime
4027 * suspend if there is a device attached.
4028 */
4029 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4030 pm_runtime_get_noresume(hcd->self.controller);
4031 #endif
4032
4033 /* Is this a LS or FS device under a HS hub? */
4034 /* Hub or peripheral? */
4035 return 1;
4036
4037 disable_slot:
4038 ret = xhci_disable_slot(xhci, udev->slot_id);
4039 if (ret)
4040 xhci_free_virt_device(xhci, udev->slot_id);
4041
4042 return 0;
4043 }
4044
4045 /*
4046 * Issue an Address Device command and optionally send a corresponding
4047 * SetAddress request to the device.
4048 */
4049 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4050 enum xhci_setup_dev setup)
4051 {
4052 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4053 unsigned long flags;
4054 struct xhci_virt_device *virt_dev;
4055 int ret = 0;
4056 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4057 struct xhci_slot_ctx *slot_ctx;
4058 struct xhci_input_control_ctx *ctrl_ctx;
4059 u64 temp_64;
4060 struct xhci_command *command = NULL;
4061
4062 mutex_lock(&xhci->mutex);
4063
4064 if (xhci->xhc_state) { /* dying, removing or halted */
4065 ret = -ESHUTDOWN;
4066 goto out;
4067 }
4068
4069 if (!udev->slot_id) {
4070 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4071 "Bad Slot ID %d", udev->slot_id);
4072 ret = -EINVAL;
4073 goto out;
4074 }
4075
4076 virt_dev = xhci->devs[udev->slot_id];
4077
4078 if (WARN_ON(!virt_dev)) {
4079 /*
4080 * In a plug/unplug torture test with an NEC controller,
4081 * a NULL pointer dereference was observed once due to virt_dev = 0.
4082 * Print useful debug rather than crash if it is observed again!
4083 */
4084 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4085 udev->slot_id);
4086 ret = -EINVAL;
4087 goto out;
4088 }
4089 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4090 trace_xhci_setup_device_slot(slot_ctx);
4091
4092 if (setup == SETUP_CONTEXT_ONLY) {
4093 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4094 SLOT_STATE_DEFAULT) {
4095 xhci_dbg(xhci, "Slot already in default state\n");
4096 goto out;
4097 }
4098 }
4099
4100 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4101 if (!command) {
4102 ret = -ENOMEM;
4103 goto out;
4104 }
4105
4106 command->in_ctx = virt_dev->in_ctx;
4107
4108 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4109 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4110 if (!ctrl_ctx) {
4111 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4112 __func__);
4113 ret = -EINVAL;
4114 goto out;
4115 }
4116 /*
4117 * If this is the first Set Address since device plug-in or
4118 * virt_device reallocation after a resume with an xHCI power loss,
4119 * then set up the slot context.
4120 */
4121 if (!slot_ctx->dev_info)
4122 xhci_setup_addressable_virt_dev(xhci, udev);
4123 /* Otherwise, update the control endpoint ring enqueue pointer. */
4124 else
4125 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4126 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4127 ctrl_ctx->drop_flags = 0;
4128
4129 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4130 le32_to_cpu(slot_ctx->dev_info) >> 27);
4131
4132 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4133 spin_lock_irqsave(&xhci->lock, flags);
4134 trace_xhci_setup_device(virt_dev);
4135 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4136 udev->slot_id, setup);
4137 if (ret) {
4138 spin_unlock_irqrestore(&xhci->lock, flags);
4139 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4140 "FIXME: allocate a command ring segment");
4141 goto out;
4142 }
4143 xhci_ring_cmd_db(xhci);
4144 spin_unlock_irqrestore(&xhci->lock, flags);
4145
4146 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
4147 wait_for_completion(command->completion);
4148
4149 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
4150 * the SetAddress() "recovery interval" required by USB and aborting the
4151 * command on a timeout.
4152 */
4153 switch (command->status) {
4154 case COMP_COMMAND_ABORTED:
4155 case COMP_COMMAND_RING_STOPPED:
4156 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4157 ret = -ETIME;
4158 break;
4159 case COMP_CONTEXT_STATE_ERROR:
4160 case COMP_SLOT_NOT_ENABLED_ERROR:
4161 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4162 act, udev->slot_id);
4163 ret = -EINVAL;
4164 break;
4165 case COMP_USB_TRANSACTION_ERROR:
4166 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4167
4168 mutex_unlock(&xhci->mutex);
4169 ret = xhci_disable_slot(xhci, udev->slot_id);
4170 if (!ret)
4171 xhci_alloc_dev(hcd, udev);
4172 kfree(command->completion);
4173 kfree(command);
4174 return -EPROTO;
4175 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4176 dev_warn(&udev->dev,
4177 "ERROR: Incompatible device for setup %s command\n", act);
4178 ret = -ENODEV;
4179 break;
4180 case COMP_SUCCESS:
4181 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4182 "Successful setup %s command", act);
4183 break;
4184 default:
4185 xhci_err(xhci,
4186 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4187 act, command->status);
4188 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4189 ret = -EINVAL;
4190 break;
4191 }
4192 if (ret)
4193 goto out;
4194 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4195 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4196 "Op regs DCBAA ptr = %#016llx", temp_64);
4197 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4198 "Slot ID %d dcbaa entry @%p = %#016llx",
4199 udev->slot_id,
4200 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4201 (unsigned long long)
4202 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4203 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4204 "Output Context DMA address = %#08llx",
4205 (unsigned long long)virt_dev->out_ctx->dma);
4206 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4207 le32_to_cpu(slot_ctx->dev_info) >> 27);
4208 /*
4209 * USB core uses address 1 for the roothubs, so we add one to the
4210 * address given back to us by the HC.
4211 */
4212 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4213 le32_to_cpu(slot_ctx->dev_info) >> 27);
4214 /* Zero the input context control for later use */
4215 ctrl_ctx->add_flags = 0;
4216 ctrl_ctx->drop_flags = 0;
4217 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4218 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4219
4220 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4221 "Internal device address = %d",
4222 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4223 out:
4224 mutex_unlock(&xhci->mutex);
4225 if (command) {
4226 kfree(command->completion);
4227 kfree(command);
4228 }
4229 return ret;
4230 }
4231
4232 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4233 {
4234 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4235 }
4236
4237 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4238 {
4239 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4240 }
4241
4242 /*
4243 * Translate the port index into the real index in the HW port status
4244 * registers. Calculate the offset between the port's PORTSC register
4245 * and the port status base, then divide by the number of registers
4246 * per port to get the real index. The raw port number is 1-based.
4247 */
4248 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4249 {
4250 struct xhci_hub *rhub;
4251
4252 rhub = xhci_get_rhub(hcd);
4253 return rhub->ports[port1 - 1]->hw_portnum + 1;
4254 }
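
/*
 * Example, assuming a made-up topology: if the six hardware ports are laid
 * out with USB3 ports at hw_portnum 0..1 and USB2 ports at hw_portnum 2..5,
 * then port1 == 2 on the USB2 roothub resolves to hw_portnum 3 and the
 * function returns raw port number 4.
 */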
4255
4256 /*
4257 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4258 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4259 */
4260 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4261 struct usb_device *udev, u16 max_exit_latency)
4262 {
4263 struct xhci_virt_device *virt_dev;
4264 struct xhci_command *command;
4265 struct xhci_input_control_ctx *ctrl_ctx;
4266 struct xhci_slot_ctx *slot_ctx;
4267 unsigned long flags;
4268 int ret;
4269
4270 spin_lock_irqsave(&xhci->lock, flags);
4271
4272 virt_dev = xhci->devs[udev->slot_id];
4273
4274 /*
4275 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
4276 * xHC was re-initialized. Exit latency will be set later after
4277 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4278 */
4279
4280 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4281 spin_unlock_irqrestore(&xhci->lock, flags);
4282 return 0;
4283 }
4284
4285 /* Attempt to issue an Evaluate Context command to change the MEL. */
4286 command = xhci->lpm_command;
4287 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4288 if (!ctrl_ctx) {
4289 spin_unlock_irqrestore(&xhci->lock, flags);
4290 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4291 __func__);
4292 return -ENOMEM;
4293 }
4294
4295 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4296 spin_unlock_irqrestore(&xhci->lock, flags);
4297
4298 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4299 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4300 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4301 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4302 slot_ctx->dev_state = 0;
4303
4304 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4305 "Set up evaluate context for LPM MEL change.");
4306
4307 /* Issue and wait for the evaluate context command. */
4308 ret = xhci_configure_endpoint(xhci, udev, command,
4309 true, true);
4310
4311 if (!ret) {
4312 spin_lock_irqsave(&xhci->lock, flags);
4313 virt_dev->current_mel = max_exit_latency;
4314 spin_unlock_irqrestore(&xhci->lock, flags);
4315 }
4316 return ret;
4317 }
4318
4319 #ifdef CONFIG_PM
4320
4321 /* BESL to HIRD Encoding array for USB2 LPM */
4322 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4323 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4324
4325 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4326 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4327 struct usb_device *udev)
4328 {
4329 int u2del, besl, besl_host;
4330 int besl_device = 0;
4331 u32 field;
4332
4333 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4334 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4335
4336 if (field & USB_BESL_SUPPORT) {
4337 for (besl_host = 0; besl_host < 16; besl_host++) {
4338 if (xhci_besl_encoding[besl_host] >= u2del)
4339 break;
4340 }
4341 /* Use baseline BESL value as default */
4342 if (field & USB_BESL_BASELINE_VALID)
4343 besl_device = USB_GET_BESL_BASELINE(field);
4344 else if (field & USB_BESL_DEEP_VALID)
4345 besl_device = USB_GET_BESL_DEEP(field);
4346 } else {
4347 if (u2del <= 50)
4348 besl_host = 0;
4349 else
4350 besl_host = (u2del - 51) / 75 + 1;
4351 }
4352
4353 besl = besl_host + besl_device;
4354 if (besl > 15)
4355 besl = 15;
4356
4357 return besl;
4358 }
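
/*
 * Worked example with an assumed U2DEL of 400 (microseconds): without BESL
 * support, besl_host = (400 - 51) / 75 + 1 = 5. With BESL support, the
 * table scan picks the first entry >= 400, i.e. besl_host = 4 since
 * xhci_besl_encoding[4] == 400; any device-preferred BESL value is then
 * added on top, capped at 15.
 */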
4359
4360 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4361 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4362 {
4363 u32 field;
4364 int l1;
4365 int besld = 0;
4366 int hirdm = 0;
4367
4368 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4369
4370 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4371 l1 = udev->l1_params.timeout / 256;
4372
4373 /* device has preferred BESLD */
4374 if (field & USB_BESL_DEEP_VALID) {
4375 besld = USB_GET_BESL_DEEP(field);
4376 hirdm = 1;
4377 }
4378
4379 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4380 }
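
/*
 * Worked example with assumed values: an L1 timeout of 5120us gives
 * l1 = 5120 / 256 = 20, and a device advertising a deep BESL of 10 yields
 * PORT_BESLD(10) | PORT_L1_TIMEOUT(20) | PORT_HIRDM(1) for PORTHLPMC.
 */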
4381
4382 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4383 struct usb_device *udev, int enable)
4384 {
4385 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4386 struct xhci_port **ports;
4387 __le32 __iomem *pm_addr, *hlpm_addr;
4388 u32 pm_val, hlpm_val, field;
4389 unsigned int port_num;
4390 unsigned long flags;
4391 int hird, exit_latency;
4392 int ret;
4393
4394 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4395 !udev->lpm_capable)
4396 return -EPERM;
4397
4398 if (!udev->parent || udev->parent->parent ||
4399 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4400 return -EPERM;
4401
4402 if (udev->usb2_hw_lpm_capable != 1)
4403 return -EPERM;
4404
4405 spin_lock_irqsave(&xhci->lock, flags);
4406
4407 ports = xhci->usb2_rhub.ports;
4408 port_num = udev->portnum - 1;
4409 pm_addr = ports[port_num]->addr + PORTPMSC;
4410 pm_val = readl(pm_addr);
4411 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4412
4413 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4414 enable ? "enable" : "disable", port_num + 1);
4415
4416 if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
4417 /* Host supports BESL timeout instead of HIRD */
4418 if (udev->usb2_hw_lpm_besl_capable) {
4419 /* if device doesn't have a preferred BESL value use a
4420 * default one which works with mixed HIRD and BESL
4421 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
4422 */
4423 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4424 if ((field & USB_BESL_SUPPORT) &&
4425 (field & USB_BESL_BASELINE_VALID))
4426 hird = USB_GET_BESL_BASELINE(field);
4427 else
4428 hird = udev->l1_params.besl;
4429
4430 exit_latency = xhci_besl_encoding[hird];
4431 spin_unlock_irqrestore(&xhci->lock, flags);
4432
4433 /* The USB 3.0 code dedicates one xhci->lpm_command->in_ctx
4434 * input context for link power management evaluate
4435 * context commands. It is protected by hcd->bandwidth
4436 * mutex and is shared by all devices. We need to set
4437 * the max ext latency in USB 2 BESL LPM as well, so
4438 * use the same mutex and xhci_change_max_exit_latency()
4439 */
4440 mutex_lock(hcd->bandwidth_mutex);
4441 ret = xhci_change_max_exit_latency(xhci, udev,
4442 exit_latency);
4443 mutex_unlock(hcd->bandwidth_mutex);
4444
4445 if (ret < 0)
4446 return ret;
4447 spin_lock_irqsave(&xhci->lock, flags);
4448
4449 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4450 writel(hlpm_val, hlpm_addr);
4451 /* flush write */
4452 readl(hlpm_addr);
4453 } else {
4454 hird = xhci_calculate_hird_besl(xhci, udev);
4455 }
4456
4457 pm_val &= ~PORT_HIRD_MASK;
4458 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4459 writel(pm_val, pm_addr);
4460 pm_val = readl(pm_addr);
4461 pm_val |= PORT_HLE;
4462 writel(pm_val, pm_addr);
4463 /* flush write */
4464 readl(pm_addr);
4465 } else {
4466 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4467 writel(pm_val, pm_addr);
4468 /* flush write */
4469 readl(pm_addr);
4470 if (udev->usb2_hw_lpm_besl_capable) {
4471 spin_unlock_irqrestore(&xhci->lock, flags);
4472 mutex_lock(hcd->bandwidth_mutex);
4473 xhci_change_max_exit_latency(xhci, udev, 0);
4474 mutex_unlock(hcd->bandwidth_mutex);
4475 return 0;
4476 }
4477 }
4478
4479 spin_unlock_irqrestore(&xhci->lock, flags);
4480 return 0;
4481 }
4482
4483 /* Check if a USB2 port supports a given extended capability protocol.
4484 * Only USB2 ports' extended protocol capability values are cached.
4485 * Return 1 if the capability is supported.
4486 */
4487 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4488 unsigned capability)
4489 {
4490 u32 port_offset, port_count;
4491 int i;
4492
4493 for (i = 0; i < xhci->num_ext_caps; i++) {
4494 if (xhci->ext_caps[i] & capability) {
4495 /* port offsets start at 1 */
4496 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4497 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4498 if (port >= port_offset &&
4499 port < port_offset + port_count)
4500 return 1;
4501 }
4502 }
4503 return 0;
4504 }
4505
4506 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4507 {
4508 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4509 int portnum = udev->portnum - 1;
4510
4511 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4512 return 0;
4513
4514 /* so far we only support LPM for non-hub devices connected to the root hub */
4515 if (!udev->parent || udev->parent->parent ||
4516 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4517 return 0;
4518
4519 if (xhci->hw_lpm_support == 1 &&
4520 xhci_check_usb2_port_capability(
4521 xhci, portnum, XHCI_HLC)) {
4522 udev->usb2_hw_lpm_capable = 1;
4523 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4524 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4525 if (xhci_check_usb2_port_capability(xhci, portnum,
4526 XHCI_BLC))
4527 udev->usb2_hw_lpm_besl_capable = 1;
4528 }
4529
4530 return 0;
4531 }
4532
4533 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4534
4535 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4536 static unsigned long long xhci_service_interval_to_ns(
4537 struct usb_endpoint_descriptor *desc)
4538 {
4539 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4540 }
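
/*
 * Worked example: bInterval = 4 gives (1 << 3) * 125us * 1000ns/us, so this
 * helper returns 1000000, i.e. a 1ms service interval.
 */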
4541
4542 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4543 enum usb3_link_state state)
4544 {
4545 unsigned long long sel;
4546 unsigned long long pel;
4547 unsigned int max_sel_pel;
4548 char *state_name;
4549
4550 switch (state) {
4551 case USB3_LPM_U1:
4552 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4553 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4554 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4555 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4556 state_name = "U1";
4557 break;
4558 case USB3_LPM_U2:
4559 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4560 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4561 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4562 state_name = "U2";
4563 break;
4564 default:
4565 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4566 __func__);
4567 return USB3_LPM_DISABLED;
4568 }
4569
4570 if (sel <= max_sel_pel && pel <= max_sel_pel)
4571 return USB3_LPM_DEVICE_INITIATED;
4572
4573 if (sel > max_sel_pel)
4574 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4575 "due to long SEL %llu ms\n",
4576 state_name, sel);
4577 else
4578 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4579 "due to long PEL %llu ms\n",
4580 state_name, pel);
4581 return USB3_LPM_DISABLED;
4582 }
4583
4584 /* The U1 timeout should be the maximum of the following values:
4585 * - For control endpoints, U1 system exit latency (SEL) * 3
4586 * - For bulk endpoints, U1 SEL * 5
4587 * - For interrupt endpoints:
4588 * - Notification EPs, U1 SEL * 3
4589 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4590 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4591 */
4592 static unsigned long long xhci_calculate_intel_u1_timeout(
4593 struct usb_device *udev,
4594 struct usb_endpoint_descriptor *desc)
4595 {
4596 unsigned long long timeout_ns;
4597 int ep_type;
4598 int intr_type;
4599
4600 ep_type = usb_endpoint_type(desc);
4601 switch (ep_type) {
4602 case USB_ENDPOINT_XFER_CONTROL:
4603 timeout_ns = udev->u1_params.sel * 3;
4604 break;
4605 case USB_ENDPOINT_XFER_BULK:
4606 timeout_ns = udev->u1_params.sel * 5;
4607 break;
4608 case USB_ENDPOINT_XFER_INT:
4609 intr_type = usb_endpoint_interrupt_type(desc);
4610 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4611 timeout_ns = udev->u1_params.sel * 3;
4612 break;
4613 }
4614 /* Otherwise the calculation is the same as isoc eps */
4615 /* fall through */
4616 case USB_ENDPOINT_XFER_ISOC:
4617 timeout_ns = xhci_service_interval_to_ns(desc);
4618 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4619 if (timeout_ns < udev->u1_params.sel * 2)
4620 timeout_ns = udev->u1_params.sel * 2;
4621 break;
4622 default:
4623 return 0;
4624 }
4625
4626 return timeout_ns;
4627 }
4628
4629 /* Returns the hub-encoded U1 timeout value. */
4630 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4631 struct usb_device *udev,
4632 struct usb_endpoint_descriptor *desc)
4633 {
4634 unsigned long long timeout_ns;
4635
4636 /* Prevent U1 if service interval is shorter than U1 exit latency */
4637 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4638 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4639 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4640 return USB3_LPM_DISABLED;
4641 }
4642 }
4643
4644 if (xhci->quirks & XHCI_INTEL_HOST)
4645 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4646 else
4647 timeout_ns = udev->u1_params.sel;
4648
4649 /* The U1 timeout is encoded in 1us intervals.
4650 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4651 */
4652 if (timeout_ns == USB3_LPM_DISABLED)
4653 timeout_ns = 1;
4654 else
4655 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4656
4657 /* If the necessary timeout value is bigger than what we can set in the
4658 * USB 3.0 hub, we have to disable hub-initiated U1.
4659 */
4660 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4661 return timeout_ns;
4662 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4663 "due to long timeout %llu ms\n", timeout_ns);
4664 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4665 }
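
/*
 * Worked example with an assumed U1 SEL of 400ns: on an Intel host a bulk
 * endpoint gets timeout_ns = 5 * 400 = 2000, which is encoded as
 * DIV_ROUND_UP(2000, 1000) = 2, i.e. a 2us hub-initiated U1 timeout.
 */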
4666
4667 /* The U2 timeout should be the maximum of:
4668 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4669 * - largest bInterval of any active periodic endpoint (to avoid going
4670 * into lower power link states between intervals).
4671 * - the U2 Exit Latency of the device
4672 */
4673 static unsigned long long xhci_calculate_intel_u2_timeout(
4674 struct usb_device *udev,
4675 struct usb_endpoint_descriptor *desc)
4676 {
4677 unsigned long long timeout_ns;
4678 unsigned long long u2_del_ns;
4679
4680 timeout_ns = 10 * 1000 * 1000;
4681
4682 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4683 (xhci_service_interval_to_ns(desc) > timeout_ns))
4684 timeout_ns = xhci_service_interval_to_ns(desc);
4685
4686 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4687 if (u2_del_ns > timeout_ns)
4688 timeout_ns = u2_del_ns;
4689
4690 return timeout_ns;
4691 }
4692
4693 /* Returns the hub-encoded U2 timeout value. */
4694 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4695 struct usb_device *udev,
4696 struct usb_endpoint_descriptor *desc)
4697 {
4698 unsigned long long timeout_ns;
4699
4700 /* Prevent U2 if service interval is shorter than U2 exit latency */
4701 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4702 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4703 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4704 return USB3_LPM_DISABLED;
4705 }
4706 }
4707
4708 if (xhci->quirks & XHCI_INTEL_HOST)
4709 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4710 else
4711 timeout_ns = udev->u2_params.sel;
4712
4713 /* The U2 timeout is encoded in 256us intervals */
4714 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4715 /* If the necessary timeout value is bigger than what we can set in the
4716 * USB 3.0 hub, we have to disable hub-initiated U2.
4717 */
4718 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4719 return timeout_ns;
4720 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4721 "due to long timeout %llu ms\n", timeout_ns);
4722 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4723 }
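
/*
 * Worked example: the Intel 10ms minimum becomes
 * DIV_ROUND_UP(10000000, 256 * 1000) = 40, so the hub is programmed with a
 * U2 timeout of 40 * 256us = 10.24ms.
 */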
4724
4725 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4726 struct usb_device *udev,
4727 struct usb_endpoint_descriptor *desc,
4728 enum usb3_link_state state,
4729 u16 *timeout)
4730 {
4731 if (state == USB3_LPM_U1)
4732 return xhci_calculate_u1_timeout(xhci, udev, desc);
4733 else if (state == USB3_LPM_U2)
4734 return xhci_calculate_u2_timeout(xhci, udev, desc);
4735
4736 return USB3_LPM_DISABLED;
4737 }
4738
4739 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4740 struct usb_device *udev,
4741 struct usb_endpoint_descriptor *desc,
4742 enum usb3_link_state state,
4743 u16 *timeout)
4744 {
4745 u16 alt_timeout;
4746
4747 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4748 desc, state, timeout);
4749
4750 /* If we found we can't enable hub-initiated LPM, and
4751 * the U1 or U2 exit latency was too high to allow
4752 * device-initiated LPM as well, then we will disable LPM
4753 * for this device, so stop searching any further.
4754 */
4755 if (alt_timeout == USB3_LPM_DISABLED) {
4756 *timeout = alt_timeout;
4757 return -E2BIG;
4758 }
4759 if (alt_timeout > *timeout)
4760 *timeout = alt_timeout;
4761 return 0;
4762 }
4763
4764 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4765 struct usb_device *udev,
4766 struct usb_host_interface *alt,
4767 enum usb3_link_state state,
4768 u16 *timeout)
4769 {
4770 int j;
4771
4772 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4773 if (xhci_update_timeout_for_endpoint(xhci, udev,
4774 &alt->endpoint[j].desc, state, timeout))
4775 return -E2BIG;
4776 continue;
4777 }
4778 return 0;
4779 }
4780
4781 static int xhci_check_intel_tier_policy(struct usb_device *udev,
4782 enum usb3_link_state state)
4783 {
4784 struct usb_device *parent;
4785 unsigned int num_hubs;
4786
4787 if (state == USB3_LPM_U2)
4788 return 0;
4789
4790 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4791 for (parent = udev->parent, num_hubs = 0; parent->parent;
4792 parent = parent->parent)
4793 num_hubs++;
4794
4795 if (num_hubs < 2)
4796 return 0;
4797
4798 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4799 " below second-tier hub.\n");
4800 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4801 "to decrease power consumption.\n");
4802 return -E2BIG;
4803 }
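
/*
 * Example: a device plugged into a root port or a single external hub ends
 * up with num_hubs == 0 or 1 and keeps U1 enabled; behind two chained
 * external hubs num_hubs == 2 and U1 is rejected with -E2BIG.
 */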
4804
4805 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4806 struct usb_device *udev,
4807 enum usb3_link_state state)
4808 {
4809 if (xhci->quirks & XHCI_INTEL_HOST)
4810 return xhci_check_intel_tier_policy(udev, state);
4811 else
4812 return 0;
4813 }
4814
4815 /* Returns the U1 or U2 timeout that should be enabled.
4816 * If the tier check or timeout setting functions return with a non-zero exit
4817 * code, that means the timeout value has been finalized and we shouldn't look
4818 * at any more endpoints.
4819 */
4820 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4821 struct usb_device *udev, enum usb3_link_state state)
4822 {
4823 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4824 struct usb_host_config *config;
4825 char *state_name;
4826 int i;
4827 u16 timeout = USB3_LPM_DISABLED;
4828
4829 if (state == USB3_LPM_U1)
4830 state_name = "U1";
4831 else if (state == USB3_LPM_U2)
4832 state_name = "U2";
4833 else {
4834 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4835 state);
4836 return timeout;
4837 }
4838
4839 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4840 return timeout;
4841
4842 /* Gather some information about the currently installed configuration
4843 * and alternate interface settings.
4844 */
4845 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4846 state, &timeout))
4847 return timeout;
4848
4849 config = udev->actconfig;
4850 if (!config)
4851 return timeout;
4852
4853 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4854 struct usb_driver *driver;
4855 struct usb_interface *intf = config->interface[i];
4856
4857 if (!intf)
4858 continue;
4859
4860 /* Check if any currently bound drivers want hub-initiated LPM
4861 * disabled.
4862 */
4863 if (intf->dev.driver) {
4864 driver = to_usb_driver(intf->dev.driver);
4865 if (driver && driver->disable_hub_initiated_lpm) {
4866 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4867 state_name, driver->name);
4868 timeout = xhci_get_timeout_no_hub_lpm(udev,
4869 state);
4870 if (timeout == USB3_LPM_DISABLED)
4871 return timeout;
4872 }
4873 }
4874
4875 /* Not sure how this could happen... */
4876 if (!intf->cur_altsetting)
4877 continue;
4878
4879 if (xhci_update_timeout_for_interface(xhci, udev,
4880 intf->cur_altsetting,
4881 state, &timeout))
4882 return timeout;
4883 }
4884 return timeout;
4885 }
4886
4887 static int calculate_max_exit_latency(struct usb_device *udev,
4888 enum usb3_link_state state_changed,
4889 u16 hub_encoded_timeout)
4890 {
4891 unsigned long long u1_mel_us = 0;
4892 unsigned long long u2_mel_us = 0;
4893 unsigned long long mel_us = 0;
4894 bool disabling_u1;
4895 bool disabling_u2;
4896 bool enabling_u1;
4897 bool enabling_u2;
4898
4899 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4900 hub_encoded_timeout == USB3_LPM_DISABLED);
4901 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4902 hub_encoded_timeout == USB3_LPM_DISABLED);
4903
4904 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4905 hub_encoded_timeout != USB3_LPM_DISABLED);
4906 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4907 hub_encoded_timeout != USB3_LPM_DISABLED);
4908
4909 /* If U1 was already enabled and we're not disabling it,
4910 * or we're going to enable U1, account for the U1 max exit latency.
4911 */
4912 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4913 enabling_u1)
4914 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4915 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4916 enabling_u2)
4917 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4918
4919 if (u1_mel_us > u2_mel_us)
4920 mel_us = u1_mel_us;
4921 else
4922 mel_us = u2_mel_us;
4923 /* xHCI host controller max exit latency field is only 16 bits wide. */
4924 if (mel_us > MAX_EXIT) {
4925 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4926 "is too big.\n", mel_us);
4927 return -E2BIG;
4928 }
4929 return mel_us;
4930 }
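
/*
 * Worked example, with made-up numbers: if U1 stays enabled with
 * udev->u1_params.mel = 4500 ns and U2 is being enabled with
 * udev->u2_params.mel = 12000 ns, DIV_ROUND_UP(mel, 1000) yields 5 us and
 * 12 us respectively, so the Max Exit Latency programmed for the device is
 * max(5, 12) = 12 us, well under the 16-bit MAX_EXIT limit.
 */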
4931
4932 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4933 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4934 struct usb_device *udev, enum usb3_link_state state)
4935 {
4936 struct xhci_hcd *xhci;
4937 u16 hub_encoded_timeout;
4938 int mel;
4939 int ret;
4940
4941 xhci = hcd_to_xhci(hcd);
4942 /* The LPM timeout values are pretty host-controller specific, so don't
4943 * enable hub-initiated timeouts unless the vendor has provided
4944 * information about their timeout algorithm.
4945 */
4946 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4947 !xhci->devs[udev->slot_id])
4948 return USB3_LPM_DISABLED;
4949
4950 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4951 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4952 if (mel < 0) {
4953 /* Max Exit Latency is too big, disable LPM. */
4954 hub_encoded_timeout = USB3_LPM_DISABLED;
4955 mel = 0;
4956 }
4957
4958 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4959 if (ret)
4960 return ret;
4961 return hub_encoded_timeout;
4962 }
4963
4964 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4965 struct usb_device *udev, enum usb3_link_state state)
4966 {
4967 struct xhci_hcd *xhci;
4968 u16 mel;
4969
4970 xhci = hcd_to_xhci(hcd);
4971 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4972 !xhci->devs[udev->slot_id])
4973 return 0;
4974
4975 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4976 return xhci_change_max_exit_latency(xhci, udev, mel);
4977 }
4978 #else /* CONFIG_PM */
4979
4980 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4981 struct usb_device *udev, int enable)
4982 {
4983 return 0;
4984 }
4985
4986 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4987 {
4988 return 0;
4989 }
4990
4991 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4992 struct usb_device *udev, enum usb3_link_state state)
4993 {
4994 return USB3_LPM_DISABLED;
4995 }
4996
4997 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4998 struct usb_device *udev, enum usb3_link_state state)
4999 {
5000 return 0;
5001 }
5002 #endif /* CONFIG_PM */
5003
5004 /*-------------------------------------------------------------------------*/
5005
5006 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
5007 * internal data structures for the device.
5008 */
5009 static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5010 struct usb_tt *tt, gfp_t mem_flags)
5011 {
5012 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5013 struct xhci_virt_device *vdev;
5014 struct xhci_command *config_cmd;
5015 struct xhci_input_control_ctx *ctrl_ctx;
5016 struct xhci_slot_ctx *slot_ctx;
5017 unsigned long flags;
5018 unsigned think_time;
5019 int ret;
5020
5021 /* Ignore root hubs */
5022 if (!hdev->parent)
5023 return 0;
5024
5025 vdev = xhci->devs[hdev->slot_id];
5026 if (!vdev) {
5027 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5028 return -EINVAL;
5029 }
5030
5031 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5032 if (!config_cmd)
5033 return -ENOMEM;
5034
5035 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5036 if (!ctrl_ctx) {
5037 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5038 __func__);
5039 xhci_free_command(xhci, config_cmd);
5040 return -ENOMEM;
5041 }
5042
5043 spin_lock_irqsave(&xhci->lock, flags);
5044 if (hdev->speed == USB_SPEED_HIGH &&
5045 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5046 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5047 xhci_free_command(xhci, config_cmd);
5048 spin_unlock_irqrestore(&xhci->lock, flags);
5049 return -ENOMEM;
5050 }
5051
5052 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5053 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5054 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5055 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5056 /*
5057 * refer to xHCI spec section 6.2.2: MTT should be 0 for a full-speed
5058 * hub, but it may already have been set to 1 when the xHCI virtual
5059 * device was set up, so clear it anyway.
5060 */
5061 if (tt->multi)
5062 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5063 else if (hdev->speed == USB_SPEED_FULL)
5064 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5065
5066 if (xhci->hci_version > 0x95) {
5067 xhci_dbg(xhci, "xHCI version %x needs hub "
5068 "TT think time and number of ports\n",
5069 (unsigned int) xhci->hci_version);
5070 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5071 /* Set TT think time - convert from ns to FS bit times.
5072 * 0 = 8 FS bit times, 1 = 16 FS bit times,
5073 * 2 = 24 FS bit times, 3 = 32 FS bit times.
5074 *
5075 * xHCI 1.0: this field shall be 0 if the device is not a
5076 * High-speed hub.
5077 */
5078 think_time = tt->think_time;
5079 if (think_time != 0)
5080 think_time = (think_time / 666) - 1;
5081 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5082 slot_ctx->tt_info |=
5083 cpu_to_le32(TT_THINK_TIME(think_time));
5084 } else {
5085 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5086 "TT think time or number of ports\n",
5087 (unsigned int) xhci->hci_version);
5088 }
5089 slot_ctx->dev_state = 0;
5090 spin_unlock_irqrestore(&xhci->lock, flags);
5091
5092 xhci_dbg(xhci, "Set up %s for hub device.\n",
5093 (xhci->hci_version > 0x95) ?
5094 "configure endpoint" : "evaluate context");
5095
5096 /* Issue and wait for the configure endpoint or
5097 * evaluate context command.
5098 */
5099 if (xhci->hci_version > 0x95)
5100 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5101 false, false);
5102 else
5103 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5104 true, false);
5105
5106 xhci_free_command(xhci, config_cmd);
5107 return ret;
5108 }
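
/*
 * Illustrative sketch, not part of the driver: the think-time conversion
 * above maps nanoseconds to the slot context encoding, where one step is
 * eight full-speed bit times (one FS bit time is ~83 ns, so eight are
 * ~666 ns). For example, tt->think_time = 1332 ns encodes as
 * 1332 / 666 - 1 = 1, i.e. "16 FS bit times". The helper name below is
 * hypothetical.
 */
static unsigned int __maybe_unused
xhci_example_tt_think_time(unsigned int think_time_ns)
{
	/* 0 = 8 FS bit times, 1 = 16, 2 = 24, 3 = 32 */
	if (think_time_ns == 0)
		return 0;
	return (think_time_ns / 666) - 1;
}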
5109
5110 static int xhci_get_frame(struct usb_hcd *hcd)
5111 {
5112 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5113 /* EHCI mods by the periodic size. Why? */
5114 return readl(&xhci->run_regs->microframe_index) >> 3;
5115 }
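
/*
 * Worked example: MFINDEX advances once per 125 us microframe, and there
 * are eight microframes per 1 ms frame, so the shift above divides by
 * eight; e.g. a microframe index of 8019 reads back as frame 1002.
 */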
5116
5117 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5118 {
5119 struct xhci_hcd *xhci;
5120 /*
5121 * TODO: Check with DWC3 clients for sysdev according to
5122 * quirks
5123 */
5124 struct device *dev = hcd->self.sysdev;
5125 unsigned int minor_rev;
5126 int retval;
5127
5128 /* Accept arbitrarily long scatter-gather lists */
5129 hcd->self.sg_tablesize = ~0;
5130
5131 /* support to build packet from discontinuous buffers */
5132 hcd->self.no_sg_constraint = 1;
5133
5134 /* XHCI controllers don't stop the ep queue on short packets :| */
5135 hcd->self.no_stop_on_short = 1;
5136
5137 xhci = hcd_to_xhci(hcd);
5138
5139 if (usb_hcd_is_primary_hcd(hcd)) {
5140 xhci->main_hcd = hcd;
5141 xhci->usb2_rhub.hcd = hcd;
5142 /* Mark the first roothub as being USB 2.0.
5143 * The xHCI driver will register the USB 3.0 roothub.
5144 */
5145 hcd->speed = HCD_USB2;
5146 hcd->self.root_hub->speed = USB_SPEED_HIGH;
5147 /*
5148 * USB 2.0 roothub under xHCI has an integrated TT,
5149 * (rate matching hub) as opposed to having an OHCI/UHCI
5150 * companion controller.
5151 */
5152 hcd->has_tt = 1;
5153 } else {
5154 /*
5155 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
5156 * should return 0x31 for sbrn, or that the minor revision
5157 * is a two-digit BCD value containing the minor and sub-minor numbers.
5158 * This was later clarified in xHCI 1.2.
5159 *
5160 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
5161 * minor revision set to 0x1 instead of 0x10.
5162 */
5163 if (xhci->usb3_rhub.min_rev == 0x1)
5164 minor_rev = 1;
5165 else
5166 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5167
5168 switch (minor_rev) {
5169 case 2:
5170 hcd->speed = HCD_USB32;
5171 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5172 hcd->self.root_hub->rx_lanes = 2;
5173 hcd->self.root_hub->tx_lanes = 2;
5174 break;
5175 case 1:
5176 hcd->speed = HCD_USB31;
5177 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5178 break;
5179 }
5180 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5181 minor_rev,
5182 minor_rev ? "Enhanced " : "");
5183
5184 xhci->usb3_rhub.hcd = hcd;
5185 /* xHCI private pointer was set in xhci_pci_probe for the second
5186 * registered roothub.
5187 */
5188 return 0;
5189 }
5190
5191 mutex_init(&xhci->mutex);
5192 xhci->cap_regs = hcd->regs;
5193 xhci->op_regs = hcd->regs +
5194 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5195 xhci->run_regs = hcd->regs +
5196 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5197 /* Cache read-only capability registers */
5198 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5199 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5200 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5201 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
5202 xhci->hci_version = HC_VERSION(xhci->hcc_params);
5203 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5204 if (xhci->hci_version > 0x100)
5205 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5206
5207 xhci->quirks |= quirks;
5208
5209 get_quirks(dev, xhci);
5210
5211 /* xHCI controllers that follow the xHCI 1.0 spec can give a spurious
5212 * success event after a short transfer. This quirk makes the driver
5213 * ignore such spurious events.
5214 */
5215 if (xhci->hci_version > 0x96)
5216 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5217
5218 /* Make sure the HC is halted. */
5219 retval = xhci_halt(xhci);
5220 if (retval)
5221 return retval;
5222
5223 xhci_zero_64b_regs(xhci);
5224
5225 xhci_dbg(xhci, "Resetting HCD\n");
5226 /* Reset the internal HC memory state and registers. */
5227 retval = xhci_reset(xhci);
5228 if (retval)
5229 return retval;
5230 xhci_dbg(xhci, "Reset complete\n");
5231
5232 /*
5233 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
5234 * of HCCPARAMS1 is set to 1 even though the xHC does not actually
5235 * support 64-bit address memory pointers. So this driver clears the
5236 * AC64 bit of xhci->hcc_params here, which makes the code below fall
5237 * back to dma_set_coherent_mask(dev, DMA_BIT_MASK(32)).
5238 */
5239 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5240 xhci->hcc_params &= ~BIT(0);
5241
5242 /* Set dma_mask and coherent_dma_mask to 64 bits
5243 * if the xHC supports 64-bit addressing */
5244 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5245 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
5246 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5247 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5248 } else {
5249 /*
5250 * This is to avoid error in cases where a 32-bit USB
5251 * controller is used on a 64-bit capable system.
5252 */
5253 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5254 if (retval)
5255 return retval;
5256 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5257 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5258 }
5259
5260 xhci_dbg(xhci, "Calling HCD init\n");
5261 /* Initialize HCD and host controller data structures. */
5262 retval = xhci_init(hcd);
5263 if (retval)
5264 return retval;
5265 xhci_dbg(xhci, "Called HCD init\n");
5266
5267 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5268 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5269
5270 return 0;
5271 }
5272 EXPORT_SYMBOL_GPL(xhci_gen_setup);
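
/*
 * Illustrative sketch, not part of the driver: decoding the two-digit BCD
 * minor revision handled in xhci_gen_setup() above. A compliant host
 * reports USB 3.1 as min_rev 0x10 (minor 1, sub-minor 0); some early
 * hosts report 0x1 instead, and both must decode to 1. The helper name is
 * hypothetical.
 */
static unsigned int __maybe_unused xhci_example_minor_rev(u8 min_rev)
{
	/* Quirk: early hosts put the minor number in the low nibble. */
	if (min_rev == 0x1)
		return 1;

	/* Otherwise the high nibble is the minor revision. */
	return min_rev / 0x10;
}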
5273
5274 static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5275 struct usb_host_endpoint *ep)
5276 {
5277 struct xhci_hcd *xhci;
5278 struct usb_device *udev;
5279 unsigned int slot_id;
5280 unsigned int ep_index;
5281 unsigned long flags;
5282
5283 xhci = hcd_to_xhci(hcd);
5284
5285 spin_lock_irqsave(&xhci->lock, flags);
5286 udev = (struct usb_device *)ep->hcpriv;
5287 slot_id = udev->slot_id;
5288 ep_index = xhci_get_endpoint_index(&ep->desc);
5289
5290 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5291 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5292 spin_unlock_irqrestore(&xhci->lock, flags);
5293 }
5294
5295 static const struct hc_driver xhci_hc_driver = {
5296 .description = "xhci-hcd",
5297 .product_desc = "xHCI Host Controller",
5298 .hcd_priv_size = sizeof(struct xhci_hcd),
5299
5300 /*
5301 * generic hardware linkage
5302 */
5303 .irq = xhci_irq,
5304 .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
5305
5306 /*
5307 * basic lifecycle operations
5308 */
5309 .reset = NULL, /* set in xhci_init_driver() */
5310 .start = xhci_run,
5311 .stop = xhci_stop,
5312 .shutdown = xhci_shutdown,
5313
5314 /*
5315 * managing i/o requests and associated device resources
5316 */
5317 .map_urb_for_dma = xhci_map_urb_for_dma,
5318 .urb_enqueue = xhci_urb_enqueue,
5319 .urb_dequeue = xhci_urb_dequeue,
5320 .alloc_dev = xhci_alloc_dev,
5321 .free_dev = xhci_free_dev,
5322 .alloc_streams = xhci_alloc_streams,
5323 .free_streams = xhci_free_streams,
5324 .add_endpoint = xhci_add_endpoint,
5325 .drop_endpoint = xhci_drop_endpoint,
5326 .endpoint_disable = xhci_endpoint_disable,
5327 .endpoint_reset = xhci_endpoint_reset,
5328 .check_bandwidth = xhci_check_bandwidth,
5329 .reset_bandwidth = xhci_reset_bandwidth,
5330 .address_device = xhci_address_device,
5331 .enable_device = xhci_enable_device,
5332 .update_hub_device = xhci_update_hub_device,
5333 .reset_device = xhci_discover_or_reset_device,
5334
5335 /*
5336 * scheduling support
5337 */
5338 .get_frame_number = xhci_get_frame,
5339
5340 /*
5341 * root hub support
5342 */
5343 .hub_control = xhci_hub_control,
5344 .hub_status_data = xhci_hub_status_data,
5345 .bus_suspend = xhci_bus_suspend,
5346 .bus_resume = xhci_bus_resume,
5347 .get_resuming_ports = xhci_get_resuming_ports,
5348
5349 /*
5350 * call back when device connected and addressed
5351 */
5352 .update_device = xhci_update_device,
5353 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5354 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5355 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5356 .find_raw_port_number = xhci_find_raw_port_number,
5357 .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5358 };
5359
5360 void xhci_init_driver(struct hc_driver *drv,
5361 const struct xhci_driver_overrides *over)
5362 {
5363 BUG_ON(!over);
5364
5365 /* Copy the generic table to drv then apply the overrides */
5366 *drv = xhci_hc_driver;
5367
5368 if (over) {
5369 drv->hcd_priv_size += over->extra_priv_size;
5370 if (over->reset)
5371 drv->reset = over->reset;
5372 if (over->start)
5373 drv->start = over->start;
5374 }
5375 }
5376 EXPORT_SYMBOL_GPL(xhci_init_driver);
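
/*
 * Illustrative usage sketch, not from this file: platform glue drivers
 * build their hc_driver by copying the generic table above and layering
 * overrides on top via xhci_init_driver(). All example_* names below are
 * hypothetical.
 */
static void example_quirks(struct device *dev, struct xhci_hcd *xhci)
{
	/* A glue driver would OR its platform-specific quirks in here. */
}

static int example_setup(struct usb_hcd *hcd)
{
	return xhci_gen_setup(hcd, example_quirks);
}

static const struct xhci_driver_overrides example_overrides = {
	.reset = example_setup,
};

static struct hc_driver example_hc_driver;

static void __maybe_unused example_init(void)
{
	/* example_hc_driver gets .reset = example_setup, rest generic. */
	xhci_init_driver(&example_hc_driver, &example_overrides);
}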
5377
5378 MODULE_DESCRIPTION(DRIVER_DESC);
5379 MODULE_AUTHOR(DRIVER_AUTHOR);
5380 MODULE_LICENSE("GPL");
5381
5382 static int __init xhci_hcd_init(void)
5383 {
5384 /*
5385 * Check the compiler generated sizes of structures that must be laid
5386 * out in specific ways for hardware access.
5387 */
5388 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5389 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5390 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5391 /* xhci_device_control has eight fields, and also
5392 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5393 */
5394 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5395 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5396 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5397 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5398 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5399 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
5400 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5401
5402 if (usb_disabled())
5403 return -ENODEV;
5404
5405 xhci_debugfs_create_root();
5406
5407 return 0;
5408 }
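
/*
 * Worked example: the run-register check above expands to
 * (8 + 8 * 128) * 32 / 8 = 4128 bytes, i.e. eight 32-bit fields plus
 * 128 interrupter register sets of eight 32-bit registers each.
 */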
5409
5410 /*
5411 * If an init function is provided, an exit function must also be provided
5412 * to allow module unload.
5413 */
5414 static void __exit xhci_hcd_fini(void)
5415 {
5416 xhci_debugfs_remove_root();
5417 }
5418
5419 module_init(xhci_hcd_init);
5420 module_exit(xhci_hcd_fini);
5421