Lines Matching refs:xhci

89 void xhci_quiesce(struct xhci_hcd *xhci) in xhci_quiesce() argument
96 halted = readl(&xhci->op_regs->status) & STS_HALT; in xhci_quiesce()
100 cmd = readl(&xhci->op_regs->command); in xhci_quiesce()
102 writel(cmd, &xhci->op_regs->command); in xhci_quiesce()
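
The xhci_quiesce() fragments above show a plain read-modify-write on the USBCMD register: if the controller is not already halted, the run bit is cleared and the hardware halts on its own. A minimal userspace model of that pattern, with a volatile uint32_t standing in for the memory-mapped register (bit positions per the xHCI spec; the real driver also masks interrupt-enable bits, omitted here):

    #include <stdint.h>

    #define STS_HALT (1u << 0)   /* USBSTS.HCHalted */
    #define CMD_RUN  (1u << 0)   /* USBCMD.Run/Stop */

    static void quiesce_model(volatile uint32_t *status,
                              volatile uint32_t *command)
    {
        uint32_t cmd;

        if (*status & STS_HALT)
            return;              /* already halted, nothing to clear */
        cmd = *command;
        cmd &= ~CMD_RUN;         /* drop Run/Stop; the HC halts itself */
        *command = cmd;
    }
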
113 int xhci_halt(struct xhci_hcd *xhci) in xhci_halt() argument
116 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); in xhci_halt()
117 xhci_quiesce(xhci); in xhci_halt()
119 ret = xhci_handshake(&xhci->op_regs->status, in xhci_halt()
122 xhci_warn(xhci, "Host halt failed, %d\n", ret); in xhci_halt()
125 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_halt()
126 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_halt()
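
xhci_halt() quiesces the controller and then polls USBSTS until HCHalted asserts, via xhci_handshake(). A sketch of that polling idiom, assuming a busy-wait loop in place of the driver's roughly 1us-per-iteration delay (handshake_model is an illustrative name, not the driver's API):

    #include <stdint.h>
    #include <errno.h>

    /* poll until (reg & mask) == done, or give up after ~usec tries */
    static int handshake_model(volatile uint32_t *reg, uint32_t mask,
                               uint32_t done, int usec)
    {
        while (usec-- > 0) {
            if ((*reg & mask) == done)
                return 0;
            /* the real xhci_handshake() delays ~1us per poll */
        }
        return -ETIMEDOUT;
    }

xhci_halt() is then roughly handshake_model(status, STS_HALT, STS_HALT, timeout): wait for the halt bit to become set.
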
133 int xhci_start(struct xhci_hcd *xhci) in xhci_start() argument
138 temp = readl(&xhci->op_regs->command); in xhci_start()
140 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", in xhci_start()
142 writel(temp, &xhci->op_regs->command); in xhci_start()
148 ret = xhci_handshake(&xhci->op_regs->status, in xhci_start()
151 xhci_err(xhci, "Host took too long to start, " in xhci_start()
156 xhci->xhc_state = 0; in xhci_start()
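
The start path mirrors the halt path: set Run/Stop, then wait for HCHalted to deassert. A sketch reusing handshake_model() and the defines from the sketches above (the ~16ms budget matches the driver's XHCI_MAX_HALT_USEC, but treat the value as illustrative):

    static int start_model(volatile uint32_t *command,
                           volatile uint32_t *status)
    {
        *command |= CMD_RUN;                      /* set Run/Stop */
        /* wait for HCHalted to clear */
        return handshake_model(status, STS_HALT, 0, 16 * 1000);
    }
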
168 int xhci_reset(struct xhci_hcd *xhci) in xhci_reset() argument
174 state = readl(&xhci->op_regs->status); in xhci_reset()
177 xhci_warn(xhci, "Host not accessible, reset failed.\n"); in xhci_reset()
182 xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); in xhci_reset()
186 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); in xhci_reset()
187 command = readl(&xhci->op_regs->command); in xhci_reset()
189 writel(command, &xhci->op_regs->command); in xhci_reset()
198 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_reset()
201 ret = xhci_handshake(&xhci->op_regs->command, in xhci_reset()
206 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) in xhci_reset()
207 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); in xhci_reset()
209 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_reset()
215 ret = xhci_handshake(&xhci->op_regs->status, in xhci_reset()
219 xhci->bus_state[i].port_c_suspend = 0; in xhci_reset()
220 xhci->bus_state[i].suspended_ports = 0; in xhci_reset()
221 xhci->bus_state[i].resuming_ports = 0; in xhci_reset()
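
xhci_reset() sets HCRST and waits twice: first for the controller to clear HCRST, then for Controller Not Ready (CNR) to deassert before any other register is touched. A sketch of that two-stage wait, reusing handshake_model() (10-second budgets as in the driver of this era):

    #define CMD_RESET (1u << 1)   /* USBCMD.HCRST */
    #define STS_CNR   (1u << 11)  /* USBSTS.Controller Not Ready */

    static int reset_model(volatile uint32_t *command,
                           volatile uint32_t *status)
    {
        *command |= CMD_RESET;
        /* the HC clears HCRST when the reset completes */
        if (handshake_model(command, CMD_RESET, 0, 10 * 1000 * 1000))
            return -ETIMEDOUT;
        /* no register access is allowed until CNR deasserts */
        return handshake_model(status, STS_CNR, 0, 10 * 1000 * 1000);
    }
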
227 static void xhci_zero_64b_regs(struct xhci_hcd *xhci) in xhci_zero_64b_regs() argument
229 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; in xhci_zero_64b_regs()
247 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group) in xhci_zero_64b_regs()
250 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); in xhci_zero_64b_regs()
253 val = readl(&xhci->op_regs->command); in xhci_zero_64b_regs()
255 writel(val, &xhci->op_regs->command); in xhci_zero_64b_regs()
258 val = readl(&xhci->op_regs->status); in xhci_zero_64b_regs()
260 writel(val, &xhci->op_regs->status); in xhci_zero_64b_regs()
263 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_zero_64b_regs()
265 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); in xhci_zero_64b_regs()
266 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_zero_64b_regs()
268 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); in xhci_zero_64b_regs()
270 for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) { in xhci_zero_64b_regs()
273 ir = &xhci->run_regs->ir_set[i]; in xhci_zero_64b_regs()
274 val = xhci_read_64(xhci, &ir->erst_base); in xhci_zero_64b_regs()
276 xhci_write_64(xhci, 0, &ir->erst_base); in xhci_zero_64b_regs()
277 val = xhci_read_64(xhci, &ir->erst_dequeue); in xhci_zero_64b_regs()
279 xhci_write_64(xhci, 0, &ir->erst_dequeue); in xhci_zero_64b_regs()
283 err = xhci_handshake(&xhci->op_regs->status, in xhci_zero_64b_regs()
287 xhci_info(xhci, "Fault detected\n"); in xhci_zero_64b_regs()
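
xhci_zero_64b_regs() only rewrites a 64-bit register when its upper half is non-zero, and the xhci_read_64()/xhci_write_64() accessors split each access into two 32-bit halves, low word first. A model of that split (array layout assumed low-word-first, as in the xHCI register map):

    #include <stdint.h>

    static void write64_model(uint64_t val, volatile uint32_t reg[2])
    {
        reg[0] = (uint32_t)val;          /* low 32 bits first */
        reg[1] = (uint32_t)(val >> 32);  /* then the high 32 bits */
    }

The zeroing loop then reduces to: if the value read back has any upper bits set, write64_model(0, reg).
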
294 static int xhci_setup_msi(struct xhci_hcd *xhci) in xhci_setup_msi() argument
300 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_setup_msi()
304 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
310 0, "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msi()
312 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
323 static int xhci_setup_msix(struct xhci_hcd *xhci) in xhci_setup_msix() argument
326 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_setup_msix()
336 xhci->msix_count = min(num_online_cpus() + 1, in xhci_setup_msix()
337 HCS_MAX_INTRS(xhci->hcs_params1)); in xhci_setup_msix()
339 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count, in xhci_setup_msix()
342 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msix()
347 for (i = 0; i < xhci->msix_count; i++) { in xhci_setup_msix()
349 "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msix()
358 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); in xhci_setup_msix()
360 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); in xhci_setup_msix()
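
The MSI-X setup fragments size the vector count as one per online CPU plus one, capped by the interrupter count the controller advertises in HCSPARAMS1. Modeled plainly (names are illustrative):

    static int msix_count_model(int online_cpus, int hc_max_intrs)
    {
        int want = online_cpus + 1;   /* one per CPU, plus one spare */
        return want < hc_max_intrs ? want : hc_max_intrs;
    }
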
366 static void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
368 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_cleanup_msix()
371 if (xhci->quirks & XHCI_PLAT) in xhci_cleanup_msix()
381 for (i = 0; i < xhci->msix_count; i++) in xhci_cleanup_msix()
382 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); in xhci_cleanup_msix()
384 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci)); in xhci_cleanup_msix()
391 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
393 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_msix_sync_irqs()
399 for (i = 0; i < xhci->msix_count; i++) in xhci_msix_sync_irqs()
406 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_try_enable_msi() local
411 if (xhci->quirks & XHCI_PLAT) in xhci_try_enable_msi()
414 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_try_enable_msi()
419 if (xhci->quirks & XHCI_BROKEN_MSI) in xhci_try_enable_msi()
427 ret = xhci_setup_msix(xhci); in xhci_try_enable_msi()
430 ret = xhci_setup_msi(xhci); in xhci_try_enable_msi()
438 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); in xhci_try_enable_msi()
451 xhci_err(xhci, "request interrupt %d failed\n", in xhci_try_enable_msi()
466 static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
470 static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
478 struct xhci_hcd *xhci; in compliance_mode_recovery() local
484 xhci = from_timer(xhci, t, comp_mode_recovery_timer); in compliance_mode_recovery()
485 rhub = &xhci->usb3_rhub; in compliance_mode_recovery()
494 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
497 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
499 hcd = xhci->shared_hcd; in compliance_mode_recovery()
508 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1)) in compliance_mode_recovery()
509 mod_timer(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery()
523 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) in compliance_mode_recovery_timer_init() argument
525 xhci->port_status_u0 = 0; in compliance_mode_recovery_timer_init()
526 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery, in compliance_mode_recovery_timer_init()
528 xhci->comp_mode_recovery_timer.expires = jiffies + in compliance_mode_recovery_timer_init()
531 add_timer(&xhci->comp_mode_recovery_timer); in compliance_mode_recovery_timer_init()
532 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery_timer_init()
563 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) in xhci_all_ports_seen_u0() argument
565 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); in xhci_all_ports_seen_u0()
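
xhci_all_ports_seen_u0() treats port_status_u0 as a bitmask with one bit per USB3 root-hub port; all ports have been seen in U0 exactly when every low bit up to num_ports is set. The same test in isolation (assumes num_ports < 32, since the shift would otherwise overflow):

    #include <stdint.h>

    static int all_ports_seen_u0_model(uint32_t port_status_u0,
                                       unsigned int num_ports)
    {
        return port_status_u0 == ((1u << num_ports) - 1);
    }
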
578 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_init() local
581 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); in xhci_init()
582 spin_lock_init(&xhci->lock); in xhci_init()
583 if (xhci->hci_version == 0x95 && link_quirk) { in xhci_init()
584 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_init()
586 xhci->quirks |= XHCI_LINK_TRB_QUIRK; in xhci_init()
588 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_init()
591 retval = xhci_mem_init(xhci, GFP_KERNEL); in xhci_init()
592 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); in xhci_init()
596 xhci->quirks |= XHCI_COMP_MODE_QUIRK; in xhci_init()
597 compliance_mode_recovery_timer_init(xhci); in xhci_init()
606 static int xhci_run_finished(struct xhci_hcd *xhci) in xhci_run_finished() argument
608 if (xhci_start(xhci)) { in xhci_run_finished()
609 xhci_halt(xhci); in xhci_run_finished()
612 xhci->shared_hcd->state = HC_STATE_RUNNING; in xhci_run_finished()
613 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_run_finished()
615 if (xhci->quirks & XHCI_NEC_HOST) in xhci_run_finished()
616 xhci_ring_cmd_db(xhci); in xhci_run_finished()
618 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run_finished()
640 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_run() local
648 return xhci_run_finished(xhci); in xhci_run()
650 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); in xhci_run()
656 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_run()
658 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
661 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
663 temp = readl(&xhci->ir_set->irq_control); in xhci_run()
665 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; in xhci_run()
666 writel(temp, &xhci->ir_set->irq_control); in xhci_run()
669 temp = readl(&xhci->op_regs->command); in xhci_run()
671 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
673 writel(temp, &xhci->op_regs->command); in xhci_run()
675 temp = readl(&xhci->ir_set->irq_pending); in xhci_run()
676 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
678 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); in xhci_run()
679 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); in xhci_run()
681 if (xhci->quirks & XHCI_NEC_HOST) { in xhci_run()
684 command = xhci_alloc_command(xhci, false, GFP_KERNEL); in xhci_run()
688 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, in xhci_run()
691 xhci_free_command(xhci, command); in xhci_run()
693 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
696 xhci_dbc_init(xhci); in xhci_run()
698 xhci_debugfs_init(xhci); in xhci_run()
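
Among the xhci_run() fragments, the interrupt-moderation write converts a nanosecond interval into the IMODI field, which counts 250ns ticks and is 16 bits wide. The conversion on its own:

    #define ER_IRQ_INTERVAL_MASK 0xffffu  /* 16-bit IMODI field */

    static uint32_t imod_field_model(uint32_t imod_interval_ns)
    {
        return (imod_interval_ns / 250) & ER_IRQ_INTERVAL_MASK;
    }

For example, a 40000ns interval yields 160 ticks.
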
716 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_stop() local
718 mutex_lock(&xhci->mutex); in xhci_stop()
723 xhci->shared_hcd = NULL; in xhci_stop()
724 mutex_unlock(&xhci->mutex); in xhci_stop()
728 xhci_dbc_exit(xhci); in xhci_stop()
730 spin_lock_irq(&xhci->lock); in xhci_stop()
731 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_stop()
732 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_stop()
733 xhci_halt(xhci); in xhci_stop()
734 xhci_reset(xhci); in xhci_stop()
735 spin_unlock_irq(&xhci->lock); in xhci_stop()
737 xhci_cleanup_msix(xhci); in xhci_stop()
740 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_stop()
741 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_stop()
742 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_stop()
743 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_stop()
748 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_stop()
751 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
753 temp = readl(&xhci->op_regs->status); in xhci_stop()
754 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); in xhci_stop()
755 temp = readl(&xhci->ir_set->irq_pending); in xhci_stop()
756 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_stop()
758 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); in xhci_stop()
759 xhci_mem_cleanup(xhci); in xhci_stop()
760 xhci_debugfs_exit(xhci); in xhci_stop()
761 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
763 readl(&xhci->op_regs->status)); in xhci_stop()
764 mutex_unlock(&xhci->mutex); in xhci_stop()
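
The interrupt-disable sequence in xhci_stop() writes USBSTS back with the low write-1-to-clear bits masked off and only STS_EINT set, so the pending event interrupt is acknowledged without accidentally clearing other RW1C status bits. In isolation:

    #define STS_EINT (1u << 3)   /* Event Interrupt, write-1-to-clear */

    static uint32_t ack_eint_model(uint32_t status)
    {
        /* zero the RW1C bits we must not clear, then set only EINT */
        return (status & ~0x1fffu) | STS_EINT;
    }
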
778 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_shutdown() local
780 if (xhci->quirks & XHCI_SPURIOUS_REBOOT) in xhci_shutdown()
783 spin_lock_irq(&xhci->lock); in xhci_shutdown()
784 xhci_halt(xhci); in xhci_shutdown()
786 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) in xhci_shutdown()
787 xhci_reset(xhci); in xhci_shutdown()
788 spin_unlock_irq(&xhci->lock); in xhci_shutdown()
790 xhci_cleanup_msix(xhci); in xhci_shutdown()
792 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_shutdown()
794 readl(&xhci->op_regs->status)); in xhci_shutdown()
797 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) in xhci_shutdown()
802 static void xhci_save_registers(struct xhci_hcd *xhci) in xhci_save_registers() argument
804 xhci->s3.command = readl(&xhci->op_regs->command); in xhci_save_registers()
805 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); in xhci_save_registers()
806 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_save_registers()
807 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); in xhci_save_registers()
808 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); in xhci_save_registers()
809 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); in xhci_save_registers()
810 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_save_registers()
811 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_save_registers()
812 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); in xhci_save_registers()
815 static void xhci_restore_registers(struct xhci_hcd *xhci) in xhci_restore_registers() argument
817 writel(xhci->s3.command, &xhci->op_regs->command); in xhci_restore_registers()
818 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); in xhci_restore_registers()
819 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); in xhci_restore_registers()
820 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); in xhci_restore_registers()
821 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); in xhci_restore_registers()
822 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); in xhci_restore_registers()
823 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); in xhci_restore_registers()
824 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); in xhci_restore_registers()
825 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); in xhci_restore_registers()
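
xhci_save_registers()/xhci_restore_registers() persist, across a suspend that may cut power, exactly the operational and interrupter registers the controller cannot restore itself. A plain struct mirroring that set (field names follow the fragments; this is a model, not the driver's struct xhci_hcd layout):

    #include <stdint.h>

    struct s3_regs_model {
        uint32_t command, dev_nt, config_reg;      /* op regs       */
        uint64_t dcbaa_ptr;                        /* DCBAA base    */
        uint32_t erst_size;                        /* interrupter 0 */
        uint64_t erst_base, erst_dequeue;
        uint32_t irq_pending, irq_control;
    };

Save fills the struct from the registers; restore writes each field back in the same order the fragments show.
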
828 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) in xhci_set_cmd_ring_deq() argument
833 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
835 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in xhci_set_cmd_ring_deq()
836 xhci->cmd_ring->dequeue) & in xhci_set_cmd_ring_deq()
838 xhci->cmd_ring->cycle_state; in xhci_set_cmd_ring_deq()
839 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_set_cmd_ring_deq()
842 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
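
xhci_set_cmd_ring_deq() rebuilds the CRCR value by keeping the reserved low bits of the old value, substituting the dequeue TRB's DMA address in bits 63:6, and OR-ing in the consumer cycle state. The composition alone:

    #include <stdint.h>

    #define CMD_RING_RSVD_BITS 0x3fULL   /* CRCR bits 5:0 */

    static uint64_t cmd_ring_ptr_model(uint64_t old, uint64_t deq_dma,
                                       int cycle_state)
    {
        return (old & CMD_RING_RSVD_BITS) |        /* keep low bits */
               (deq_dma & ~CMD_RING_RSVD_BITS) |   /* new dequeue   */
               (uint64_t)cycle_state;              /* ring cycle    */
    }
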
854 static void xhci_clear_command_ring(struct xhci_hcd *xhci) in xhci_clear_command_ring() argument
859 ring = xhci->cmd_ring; in xhci_clear_command_ring()
889 xhci_set_cmd_ring_deq(xhci); in xhci_clear_command_ring()
892 static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) in xhci_disable_port_wake_on_bits() argument
899 spin_lock_irqsave(&xhci->lock, flags); in xhci_disable_port_wake_on_bits()
902 port_index = xhci->usb3_rhub.num_ports; in xhci_disable_port_wake_on_bits()
903 ports = xhci->usb3_rhub.ports; in xhci_disable_port_wake_on_bits()
913 port_index = xhci->usb2_rhub.num_ports; in xhci_disable_port_wake_on_bits()
914 ports = xhci->usb2_rhub.ports; in xhci_disable_port_wake_on_bits()
923 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_port_wake_on_bits()
926 static bool xhci_pending_portevent(struct xhci_hcd *xhci) in xhci_pending_portevent() argument
933 status = readl(&xhci->op_regs->status); in xhci_pending_portevent()
942 port_index = xhci->usb2_rhub.num_ports; in xhci_pending_portevent()
943 ports = xhci->usb2_rhub.ports; in xhci_pending_portevent()
950 port_index = xhci->usb3_rhub.num_ports; in xhci_pending_portevent()
951 ports = xhci->usb3_rhub.ports; in xhci_pending_portevent()
967 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) in xhci_suspend() argument
971 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_suspend()
978 xhci->shared_hcd->state != HC_STATE_SUSPENDED) in xhci_suspend()
981 xhci_dbc_suspend(xhci); in xhci_suspend()
985 xhci_disable_port_wake_on_bits(xhci); in xhci_suspend()
988 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); in xhci_suspend()
991 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_suspend()
992 del_timer_sync(&xhci->shared_hcd->rh_timer); in xhci_suspend()
994 if (xhci->quirks & XHCI_SUSPEND_DELAY) in xhci_suspend()
997 spin_lock_irq(&xhci->lock); in xhci_suspend()
999 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_suspend()
1004 command = readl(&xhci->op_regs->command); in xhci_suspend()
1006 writel(command, &xhci->op_regs->command); in xhci_suspend()
1009 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1; in xhci_suspend()
1011 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
1013 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); in xhci_suspend()
1014 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1017 xhci_clear_command_ring(xhci); in xhci_suspend()
1020 xhci_save_registers(xhci); in xhci_suspend()
1023 command = readl(&xhci->op_regs->command); in xhci_suspend()
1025 writel(command, &xhci->op_regs->command); in xhci_suspend()
1026 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
1028 xhci_warn(xhci, "WARN: xHC save state timeout\n"); in xhci_suspend()
1029 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1032 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1038 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_suspend()
1039 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_suspend()
1040 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_suspend()
1041 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_suspend()
1048 xhci_msix_sync_irqs(xhci); in xhci_suspend()
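
After halting, xhci_suspend() triggers Controller Save State (CMD_CSS) and waits for the save-state status flag to clear. Sketched with handshake_model() from earlier (the 20ms budget is illustrative):

    #define CMD_CSS  (1u << 8)   /* USBCMD.Controller Save State */
    #define STS_SAVE (1u << 8)   /* USBSTS.Save State Status     */

    static int save_state_model(volatile uint32_t *command,
                                volatile uint32_t *status)
    {
        *command |= CMD_CSS;                  /* start the state save */
        return handshake_model(status, STS_SAVE, 0, 20 * 1000);
    }
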
1060 int xhci_resume(struct xhci_hcd *xhci, bool hibernated) in xhci_resume() argument
1063 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_resume()
1074 if (time_before(jiffies, xhci->bus_state[0].next_statechange) || in xhci_resume()
1076 xhci->bus_state[1].next_statechange)) in xhci_resume()
1080 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_resume()
1082 spin_lock_irq(&xhci->lock); in xhci_resume()
1083 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_resume()
1088 xhci_restore_registers(xhci); in xhci_resume()
1090 xhci_set_cmd_ring_deq(xhci); in xhci_resume()
1093 command = readl(&xhci->op_regs->command); in xhci_resume()
1095 writel(command, &xhci->op_regs->command); in xhci_resume()
1101 if (xhci_handshake(&xhci->op_regs->status, in xhci_resume()
1103 xhci_warn(xhci, "WARN: xHC restore state timeout\n"); in xhci_resume()
1104 spin_unlock_irq(&xhci->lock); in xhci_resume()
1107 temp = readl(&xhci->op_regs->status); in xhci_resume()
1113 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_resume()
1114 !(xhci_all_ports_seen_u0(xhci))) { in xhci_resume()
1115 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_resume()
1116 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_resume()
1121 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); in xhci_resume()
1122 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); in xhci_resume()
1124 xhci_dbg(xhci, "Stop HCD\n"); in xhci_resume()
1125 xhci_halt(xhci); in xhci_resume()
1126 xhci_zero_64b_regs(xhci); in xhci_resume()
1127 xhci_reset(xhci); in xhci_resume()
1128 spin_unlock_irq(&xhci->lock); in xhci_resume()
1129 xhci_cleanup_msix(xhci); in xhci_resume()
1131 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); in xhci_resume()
1132 temp = readl(&xhci->op_regs->status); in xhci_resume()
1133 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); in xhci_resume()
1134 temp = readl(&xhci->ir_set->irq_pending); in xhci_resume()
1135 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_resume()
1137 xhci_dbg(xhci, "cleaning up memory\n"); in xhci_resume()
1138 xhci_mem_cleanup(xhci); in xhci_resume()
1139 xhci_debugfs_exit(xhci); in xhci_resume()
1140 xhci_dbg(xhci, "xhci_stop completed - status = %x\n", in xhci_resume()
1141 readl(&xhci->op_regs->status)); in xhci_resume()
1150 secondary_hcd = xhci->shared_hcd; in xhci_resume()
1152 xhci_dbg(xhci, "Initialize the xhci_hcd\n"); in xhci_resume()
1158 xhci_dbg(xhci, "Start the primary HCD\n"); in xhci_resume()
1161 xhci_dbg(xhci, "Start the secondary HCD\n"); in xhci_resume()
1165 xhci->shared_hcd->state = HC_STATE_SUSPENDED; in xhci_resume()
1170 command = readl(&xhci->op_regs->command); in xhci_resume()
1172 writel(command, &xhci->op_regs->command); in xhci_resume()
1173 xhci_handshake(&xhci->op_regs->status, STS_HALT, in xhci_resume()
1185 spin_unlock_irq(&xhci->lock); in xhci_resume()
1187 xhci_dbc_resume(xhci); in xhci_resume()
1192 if (xhci_pending_portevent(xhci)) { in xhci_resume()
1193 usb_hcd_resume_root_hub(xhci->shared_hcd); in xhci_resume()
1204 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) in xhci_resume()
1205 compliance_mode_recovery_timer_init(xhci); in xhci_resume()
1207 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) in xhci_resume()
1211 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); in xhci_resume()
1212 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_resume()
1213 usb_hcd_poll_rh_status(xhci->shared_hcd); in xhci_resume()
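
The resume path is the mirror image: restore the saved registers, then trigger Controller Restore State (CMD_CRS) and wait for the restore-status flag to clear before restarting the controller. Again with handshake_model() (timeout illustrative):

    #define CMD_CRS     (1u << 9)   /* USBCMD.Controller Restore State */
    #define STS_RESTORE (1u << 9)   /* USBSTS.Restore State Status     */

    static int restore_state_model(volatile uint32_t *command,
                                   volatile uint32_t *status)
    {
        *command |= CMD_CRS;               /* replay the saved state */
        return handshake_model(status, STS_RESTORE, 0, 20 * 1000);
    }
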
1290 struct xhci_hcd *xhci; in xhci_check_args() local
1302 xhci = hcd_to_xhci(hcd); in xhci_check_args()
1304 if (!udev->slot_id || !xhci->devs[udev->slot_id]) { in xhci_check_args()
1305 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", in xhci_check_args()
1310 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_args()
1312 xhci_dbg(xhci, "xHCI %s called with udev and " in xhci_check_args()
1318 if (xhci->xhc_state & XHCI_STATE_HALTED) in xhci_check_args()
1324 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1334 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, in xhci_check_maxpacket() argument
1345 out_ctx = xhci->devs[slot_id]->out_ctx; in xhci_check_maxpacket()
1346 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_check_maxpacket()
1350 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1352 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1355 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1358 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1366 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_check_maxpacket()
1370 command->in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_check_maxpacket()
1373 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_maxpacket()
1379 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_check_maxpacket()
1380 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_check_maxpacket()
1382 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_check_maxpacket()
1389 ret = xhci_configure_endpoint(xhci, urb->dev, command, in xhci_check_maxpacket()
1409 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_urb_enqueue() local
1423 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_urb_enqueue()
1427 xhci_dbg(xhci, "urb submitted during PCI suspend\n"); in xhci_urb_enqueue()
1457 ret = xhci_check_maxpacket(xhci, slot_id, in xhci_urb_enqueue()
1467 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1469 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_urb_enqueue()
1470 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", in xhci_urb_enqueue()
1476 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", in xhci_urb_enqueue()
1482 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n"); in xhci_urb_enqueue()
1490 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1494 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1498 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1502 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1511 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1551 struct xhci_hcd *xhci; in xhci_urb_dequeue() local
1560 xhci = hcd_to_xhci(hcd); in xhci_urb_dequeue()
1561 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_dequeue()
1571 vdev = xhci->devs[urb->dev->slot_id]; in xhci_urb_dequeue()
1578 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_urb_dequeue()
1583 temp = readl(&xhci->op_regs->status); in xhci_urb_dequeue()
1584 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) { in xhci_urb_dequeue()
1585 xhci_hc_died(xhci); in xhci_urb_dequeue()
1595 xhci_err(xhci, "Canceled URB td not found on endpoint ring"); in xhci_urb_dequeue()
1604 if (xhci->xhc_state & XHCI_STATE_HALTED) { in xhci_urb_dequeue()
1605 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1621 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1639 command = xhci_alloc_command(xhci, false, GFP_ATOMIC); in xhci_urb_dequeue()
1648 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, in xhci_urb_dequeue()
1650 xhci_ring_cmd_db(xhci); in xhci_urb_dequeue()
1653 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1660 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1681 struct xhci_hcd *xhci; in xhci_drop_endpoint() local
1693 xhci = hcd_to_xhci(hcd); in xhci_drop_endpoint()
1694 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_drop_endpoint()
1697 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_drop_endpoint()
1700 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", in xhci_drop_endpoint()
1705 in_ctx = xhci->devs[udev->slot_id]->in_ctx; in xhci_drop_endpoint()
1706 out_ctx = xhci->devs[udev->slot_id]->out_ctx; in xhci_drop_endpoint()
1709 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_drop_endpoint()
1715 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_drop_endpoint()
1723 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) in xhci_drop_endpoint()
1724 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", in xhci_drop_endpoint()
1735 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); in xhci_drop_endpoint()
1737 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); in xhci_drop_endpoint()
1739 if (xhci->quirks & XHCI_MTK_HOST) in xhci_drop_endpoint()
1742 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_drop_endpoint()
1766 struct xhci_hcd *xhci; in xhci_add_endpoint() local
1781 xhci = hcd_to_xhci(hcd); in xhci_add_endpoint()
1782 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_add_endpoint()
1791 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", in xhci_add_endpoint()
1796 virt_dev = xhci->devs[udev->slot_id]; in xhci_add_endpoint()
1800 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_add_endpoint()
1811 xhci_warn(xhci, "Trying to add endpoint 0x%x " in xhci_add_endpoint()
1821 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", in xhci_add_endpoint()
1831 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { in xhci_add_endpoint()
1837 if (xhci->quirks & XHCI_MTK_HOST) { in xhci_add_endpoint()
1840 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring); in xhci_add_endpoint()
1860 xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); in xhci_add_endpoint()
1862 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_add_endpoint()
1870 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) in xhci_zero_in_ctx() argument
1879 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_zero_in_ctx()
1891 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_zero_in_ctx()
1896 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); in xhci_zero_in_ctx()
1904 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, in xhci_configure_endpoint_result() argument
1912 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); in xhci_configure_endpoint_result()
1941 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint_result()
1946 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_configure_endpoint_result()
1954 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, in xhci_evaluate_context_result() argument
1962 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); in xhci_evaluate_context_result()
1991 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_evaluate_context_result()
1996 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_evaluate_context_result()
2004 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, in xhci_count_num_new_endpoints() argument
2025 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, in xhci_count_num_dropped_endpoints() argument
2051 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, in xhci_reserve_host_resources() argument
2056 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_reserve_host_resources()
2057 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { in xhci_reserve_host_resources()
2058 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2061 xhci->num_active_eps, added_eps, in xhci_reserve_host_resources()
2062 xhci->limit_active_eps); in xhci_reserve_host_resources()
2065 xhci->num_active_eps += added_eps; in xhci_reserve_host_resources()
2066 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2068 xhci->num_active_eps); in xhci_reserve_host_resources()
2078 static void xhci_free_host_resources(struct xhci_hcd *xhci, in xhci_free_host_resources() argument
2083 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_free_host_resources()
2084 xhci->num_active_eps -= num_failed_eps; in xhci_free_host_resources()
2085 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_host_resources()
2088 xhci->num_active_eps); in xhci_free_host_resources()
2097 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, in xhci_finish_resource_reservation() argument
2102 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); in xhci_finish_resource_reservation()
2103 xhci->num_active_eps -= num_dropped_eps; in xhci_finish_resource_reservation()
2105 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_finish_resource_reservation()
2108 xhci->num_active_eps); in xhci_finish_resource_reservation()
2144 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, in xhci_check_tt_bw_table() argument
2152 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; in xhci_check_tt_bw_table()
2174 static int xhci_check_ss_bw(struct xhci_hcd *xhci, in xhci_check_ss_bw() argument
2231 static int xhci_check_bw_table(struct xhci_hcd *xhci, in xhci_check_bw_table() argument
2247 return xhci_check_ss_bw(xhci, virt_dev); in xhci_check_bw_table()
2268 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2271 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_check_bw_table()
2272 xhci_warn(xhci, "Not enough bandwidth on HS bus for " in xhci_check_bw_table()
2276 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2281 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2364 xhci_warn(xhci, "Not enough bandwidth. " in xhci_check_bw_table()
2387 xhci->rh_bw[port_index].num_active_tts; in xhci_check_bw_table()
2390 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2399 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", in xhci_check_bw_table()
2434 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, in xhci_drop_ep_from_interval_table() argument
2449 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= in xhci_drop_ep_from_interval_table()
2452 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= in xhci_drop_ep_from_interval_table()
2498 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, in xhci_add_ep_to_interval_table() argument
2514 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += in xhci_add_ep_to_interval_table()
2517 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += in xhci_add_ep_to_interval_table()
2572 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, in xhci_update_tt_active_eps() argument
2580 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; in xhci_update_tt_active_eps()
2592 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, in xhci_reserve_bandwidth() argument
2606 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_reserve_bandwidth()
2622 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2630 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); in xhci_reserve_bandwidth()
2634 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2642 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_reserve_bandwidth()
2646 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_reserve_bandwidth()
2659 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2671 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2685 static int xhci_configure_endpoint(struct xhci_hcd *xhci, in xhci_configure_endpoint() argument
2699 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2701 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_configure_endpoint()
2702 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2706 virt_dev = xhci->devs[udev->slot_id]; in xhci_configure_endpoint()
2710 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2711 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_configure_endpoint()
2716 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && in xhci_configure_endpoint()
2717 xhci_reserve_host_resources(xhci, ctrl_ctx)) { in xhci_configure_endpoint()
2718 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2719 xhci_warn(xhci, "Not enough host resources, " in xhci_configure_endpoint()
2721 xhci->num_active_eps); in xhci_configure_endpoint()
2724 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && in xhci_configure_endpoint()
2725 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { in xhci_configure_endpoint()
2726 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2727 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2728 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2729 xhci_warn(xhci, "Not enough bandwidth\n"); in xhci_configure_endpoint()
2733 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); in xhci_configure_endpoint()
2737 ret = xhci_queue_configure_endpoint(xhci, command, in xhci_configure_endpoint()
2741 ret = xhci_queue_evaluate_context(xhci, command, in xhci_configure_endpoint()
2745 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2746 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2747 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2748 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint()
2752 xhci_ring_cmd_db(xhci); in xhci_configure_endpoint()
2753 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2759 ret = xhci_configure_endpoint_result(xhci, udev, in xhci_configure_endpoint()
2762 ret = xhci_evaluate_context_result(xhci, udev, in xhci_configure_endpoint()
2765 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_configure_endpoint()
2766 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2771 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2773 xhci_finish_resource_reservation(xhci, ctrl_ctx); in xhci_configure_endpoint()
2774 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2779 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, in xhci_check_bw_drop_ep_streams() argument
2785 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", in xhci_check_bw_drop_ep_streams()
2787 xhci_free_stream_info(xhci, ep->stream_info); in xhci_check_bw_drop_ep_streams()
2807 struct xhci_hcd *xhci; in xhci_check_bandwidth() local
2816 xhci = hcd_to_xhci(hcd); in xhci_check_bandwidth()
2817 if ((xhci->xhc_state & XHCI_STATE_DYING) || in xhci_check_bandwidth()
2818 (xhci->xhc_state & XHCI_STATE_REMOVING)) in xhci_check_bandwidth()
2821 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_check_bandwidth()
2822 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_bandwidth()
2824 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_check_bandwidth()
2833 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_bandwidth()
2849 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_check_bandwidth()
2861 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_check_bandwidth()
2871 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2872 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2875 xhci_zero_in_ctx(xhci, virt_dev); in xhci_check_bandwidth()
2887 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2889 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2902 struct xhci_hcd *xhci; in xhci_reset_bandwidth() local
2909 xhci = hcd_to_xhci(hcd); in xhci_reset_bandwidth()
2911 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_reset_bandwidth()
2912 virt_dev = xhci->devs[udev->slot_id]; in xhci_reset_bandwidth()
2916 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); in xhci_reset_bandwidth()
2917 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); in xhci_reset_bandwidth()
2921 xhci_zero_in_ctx(xhci, virt_dev); in xhci_reset_bandwidth()
2924 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_config_ep() argument
2932 xhci_slot_copy(xhci, in_ctx, out_ctx); in xhci_setup_input_ctx_for_config_ep()
2936 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_quirk() argument
2946 in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_setup_input_ctx_for_quirk()
2949 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_input_ctx_for_quirk()
2954 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_setup_input_ctx_for_quirk()
2955 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_setup_input_ctx_for_quirk()
2956 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); in xhci_setup_input_ctx_for_quirk()
2960 xhci_warn(xhci, "WARN Cannot submit config ep after " in xhci_setup_input_ctx_for_quirk()
2962 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", in xhci_setup_input_ctx_for_quirk()
2970 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, in xhci_setup_input_ctx_for_quirk()
2971 xhci->devs[slot_id]->out_ctx, ctrl_ctx, in xhci_setup_input_ctx_for_quirk()
2975 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, in xhci_cleanup_stalled_ring() argument
2981 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_cleanup_stalled_ring()
2986 xhci_find_new_dequeue_state(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
2995 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { in xhci_cleanup_stalled_ring()
2996 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_cleanup_stalled_ring()
2998 xhci_queue_new_dequeue_state(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
3006 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_cleanup_stalled_ring()
3009 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
3029 struct xhci_hcd *xhci; in xhci_endpoint_reset() local
3039 xhci = hcd_to_xhci(hcd); in xhci_endpoint_reset()
3043 vdev = xhci->devs[udev->slot_id]; in xhci_endpoint_reset()
3062 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT); in xhci_endpoint_reset()
3066 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT); in xhci_endpoint_reset()
3070 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_reset()
3083 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3084 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3087 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0); in xhci_endpoint_reset()
3088 xhci_ring_cmd_db(xhci); in xhci_endpoint_reset()
3089 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3093 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_reset()
3097 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, in xhci_endpoint_reset()
3099 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); in xhci_endpoint_reset()
3101 xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, in xhci_endpoint_reset()
3103 xhci_ring_cmd_db(xhci); in xhci_endpoint_reset()
3104 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3109 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3111 xhci_free_command(xhci, stop_cmd); in xhci_endpoint_reset()
3114 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, in xhci_check_streams_endpoint() argument
3124 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); in xhci_check_streams_endpoint()
3128 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" in xhci_check_streams_endpoint()
3135 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_check_streams_endpoint()
3138 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " in xhci_check_streams_endpoint()
3141 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " in xhci_check_streams_endpoint()
3145 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { in xhci_check_streams_endpoint()
3146 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " in xhci_check_streams_endpoint()
3154 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, in xhci_calculate_streams_entries() argument
3167 max_streams = HCC_MAX_PSA(xhci->hcc_params); in xhci_calculate_streams_entries()
3169 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", in xhci_calculate_streams_entries()
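
xhci_calculate_streams_entries() sizes the stream-context array: it must hold the requested stream IDs plus stream 0, rounded up to a power of two, and is capped by HCC_MAX_PSA from the capability registers. The kernel uses roundup_pow_of_two(); a standalone equivalent:

    static unsigned int roundup_pow2_model(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

So for 30 requested streams the array gets roundup_pow2_model(30 + 1) = 32 entries, unless the controller's limit is lower.
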
3180 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, in xhci_calculate_streams_and_bitmask() argument
3191 ret = xhci_check_streams_endpoint(xhci, udev, in xhci_calculate_streams_and_bitmask()
3198 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", in xhci_calculate_streams_and_bitmask()
3212 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, in xhci_calculate_no_streams_bitmask() argument
3223 if (!xhci->devs[slot_id]) in xhci_calculate_no_streams_bitmask()
3228 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_calculate_no_streams_bitmask()
3231 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3240 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3244 xhci_warn(xhci, "WARN xhci_free_streams() called " in xhci_calculate_no_streams_bitmask()
3274 struct xhci_hcd *xhci; in xhci_alloc_streams() local
3291 xhci = hcd_to_xhci(hcd); in xhci_alloc_streams()
3292 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", in xhci_alloc_streams()
3296 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || in xhci_alloc_streams()
3297 HCC_MAX_PSA(xhci->hcc_params) < 4) { in xhci_alloc_streams()
3298 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); in xhci_alloc_streams()
3302 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); in xhci_alloc_streams()
3308 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_alloc_streams()
3310 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3318 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3319 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, in xhci_alloc_streams()
3322 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3323 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3327 xhci_warn(xhci, "WARN: endpoints can't handle " in xhci_alloc_streams()
3329 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3330 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3333 vdev = xhci->devs[udev->slot_id]; in xhci_alloc_streams()
3341 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3347 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); in xhci_alloc_streams()
3348 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", in xhci_alloc_streams()
3354 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, in xhci_alloc_streams()
3370 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); in xhci_alloc_streams()
3372 xhci_endpoint_copy(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3374 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, in xhci_alloc_streams()
3380 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3385 ret = xhci_configure_endpoint(xhci, udev, config_cmd, in xhci_alloc_streams()
3395 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3399 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", in xhci_alloc_streams()
3403 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3404 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3413 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_alloc_streams()
3420 xhci_endpoint_zero(xhci, vdev, eps[i]); in xhci_alloc_streams()
3422 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3437 struct xhci_hcd *xhci; in xhci_free_streams() local
3445 xhci = hcd_to_xhci(hcd); in xhci_free_streams()
3446 vdev = xhci->devs[udev->slot_id]; in xhci_free_streams()
3449 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3450 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, in xhci_free_streams()
3453 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3465 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3466 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_free_streams()
3475 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_free_streams()
3476 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= in xhci_free_streams()
3479 xhci_endpoint_copy(xhci, command->in_ctx, in xhci_free_streams()
3484 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, in xhci_free_streams()
3487 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3492 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_free_streams()
3501 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3504 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_free_streams()
3512 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3524 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, in xhci_free_device_endpoint_resources() argument
3537 xhci->num_active_eps -= num_dropped_eps; in xhci_free_device_endpoint_resources()
3539 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_device_endpoint_resources()
3543 xhci->num_active_eps); in xhci_free_device_endpoint_resources()
3569 struct xhci_hcd *xhci; in xhci_discover_or_reset_device() local
3579 xhci = hcd_to_xhci(hcd); in xhci_discover_or_reset_device()
3581 virt_dev = xhci->devs[slot_id]; in xhci_discover_or_reset_device()
3583 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3600 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3611 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_discover_or_reset_device()
3618 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); in xhci_discover_or_reset_device()
3625 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); in xhci_discover_or_reset_device()
3627 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); in xhci_discover_or_reset_device()
3632 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3634 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); in xhci_discover_or_reset_device()
3636 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_discover_or_reset_device()
3637 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3640 xhci_ring_cmd_db(xhci); in xhci_discover_or_reset_device()
3641 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3654 xhci_warn(xhci, "Timeout waiting for reset device command\n"); in xhci_discover_or_reset_device()
3659 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", in xhci_discover_or_reset_device()
3661 xhci_get_slot_state(xhci, virt_dev->out_ctx)); in xhci_discover_or_reset_device()
3662 xhci_dbg(xhci, "Not freeing device rings.\n"); in xhci_discover_or_reset_device()
3667 xhci_dbg(xhci, "Successful reset device command.\n"); in xhci_discover_or_reset_device()
3670 if (xhci_is_vendor_info_code(xhci, ret)) in xhci_discover_or_reset_device()
3672 xhci_warn(xhci, "Unknown completion code %u for " in xhci_discover_or_reset_device()
3679 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_discover_or_reset_device()
3680 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3682 xhci_free_device_endpoint_resources(xhci, virt_dev, false); in xhci_discover_or_reset_device()
3683 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3691 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", in xhci_discover_or_reset_device()
3693 xhci_free_stream_info(xhci, ep->stream_info); in xhci_discover_or_reset_device()
3699 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); in xhci_discover_or_reset_device()
3700 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_discover_or_reset_device()
3703 xhci_drop_ep_from_interval_table(xhci, in xhci_discover_or_reset_device()
3712 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_discover_or_reset_device()
3716 xhci_free_command(xhci, reset_device_cmd); in xhci_discover_or_reset_device()
3727 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_free_dev() local
3738 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_free_dev()
3749 virt_dev = xhci->devs[udev->slot_id]; in xhci_free_dev()
3750 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_free_dev()
3758 xhci_debugfs_remove_slot(xhci, udev->slot_id); in xhci_free_dev()
3760 ret = xhci_disable_slot(xhci, udev->slot_id); in xhci_free_dev()
3762 xhci_free_virt_device(xhci, udev->slot_id); in xhci_free_dev()
3765 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) in xhci_disable_slot() argument
3772 command = xhci_alloc_command(xhci, false, GFP_KERNEL); in xhci_disable_slot()
3776 spin_lock_irqsave(&xhci->lock, flags); in xhci_disable_slot()
3778 state = readl(&xhci->op_regs->status); in xhci_disable_slot()
3779 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || in xhci_disable_slot()
3780 (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_disable_slot()
3781 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3786 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, in xhci_disable_slot()
3789 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3793 xhci_ring_cmd_db(xhci); in xhci_disable_slot()
3794 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3804 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) in xhci_reserve_host_control_ep_resources() argument
3806 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { in xhci_reserve_host_control_ep_resources()
3807 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3810 xhci->num_active_eps, xhci->limit_active_eps); in xhci_reserve_host_control_ep_resources()
3813 xhci->num_active_eps += 1; in xhci_reserve_host_control_ep_resources()
3814 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3816 xhci->num_active_eps); in xhci_reserve_host_control_ep_resources()
3827 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_alloc_dev() local
3834 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_alloc_dev()
3838 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3839 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); in xhci_alloc_dev()
3841 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3842 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_alloc_dev()
3843 xhci_free_command(xhci, command); in xhci_alloc_dev()
3846 xhci_ring_cmd_db(xhci); in xhci_alloc_dev()
3847 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3853 xhci_err(xhci, "Error while assigning device slot ID\n"); in xhci_alloc_dev()
3854 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", in xhci_alloc_dev()
3856 readl(&xhci->cap_regs->hcs_params1))); in xhci_alloc_dev()
3857 xhci_free_command(xhci, command); in xhci_alloc_dev()
3861 xhci_free_command(xhci, command); in xhci_alloc_dev()
3863 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_alloc_dev()
3864 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3865 ret = xhci_reserve_host_control_ep_resources(xhci); in xhci_alloc_dev()
3867 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3868 xhci_warn(xhci, "Not enough host resources, " in xhci_alloc_dev()
3870 xhci->num_active_eps); in xhci_alloc_dev()
3873 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3879 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { in xhci_alloc_dev()
3880 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); in xhci_alloc_dev()
3883 vdev = xhci->devs[slot_id]; in xhci_alloc_dev()
3884 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); in xhci_alloc_dev()
3889 xhci_debugfs_create_slot(xhci, slot_id); in xhci_alloc_dev()
3896 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_alloc_dev()
3905 ret = xhci_disable_slot(xhci, udev->slot_id); in xhci_alloc_dev()
3907 xhci_free_virt_device(xhci, udev->slot_id); in xhci_alloc_dev()
3923 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_setup_device() local
3929 mutex_lock(&xhci->mutex); in xhci_setup_device()
3931 if (xhci->xhc_state) { /* dying, removing or halted */ in xhci_setup_device()
3937 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3943 virt_dev = xhci->devs[udev->slot_id]; in xhci_setup_device()
3951 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", in xhci_setup_device()
3956 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
3962 xhci_dbg(xhci, "Slot already in default state\n"); in xhci_setup_device()
3967 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_setup_device()
3975 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_setup_device()
3978 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_device()
3989 xhci_setup_addressable_virt_dev(xhci, udev); in xhci_setup_device()
3992 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); in xhci_setup_device()
3996 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
3999 spin_lock_irqsave(&xhci->lock, flags); in xhci_setup_device()
4001 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, in xhci_setup_device()
4004 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
4005 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4009 xhci_ring_cmd_db(xhci); in xhci_setup_device()
4010 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
4022 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); in xhci_setup_device()
4027 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", in xhci_setup_device()
4034 mutex_unlock(&xhci->mutex); in xhci_setup_device()
4035 ret = xhci_disable_slot(xhci, udev->slot_id); in xhci_setup_device()
4047 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4051 xhci_err(xhci, in xhci_setup_device()
4054 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); in xhci_setup_device()
4060 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_setup_device()
4061 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4063 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4066 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], in xhci_setup_device()
4068 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); in xhci_setup_device()
4069 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4072 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
4078 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, in xhci_setup_device()
4084 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4088 mutex_unlock(&xhci->mutex); in xhci_setup_device()
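The lines above trace xhci_setup_device()'s standard command pattern: the whole operation is serialized against teardown by xhci->mutex, the Address Device TRB is queued under xhci->lock, the command-ring doorbell is rung, and the caller then sleeps on the command's completion, which the interrupt handler signals. A condensed sketch with error paths trimmed; the `setup` argument (SETUP_CONTEXT_ADDRESS vs. SETUP_CONTEXT_ONLY) comes from the full function, not these excerpts:

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
					udev->slot_id, setup);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		/* the real code also drops xhci->mutex before returning */
		return ret;
	}
	xhci_ring_cmd_db(xhci);			/* tell the HC to fetch it */
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* The IRQ handler completes the command; sleep until then. */
	wait_for_completion(command->completion);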
4124 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, in xhci_change_max_exit_latency() argument
4134 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4136 virt_dev = xhci->devs[udev->slot_id]; in xhci_change_max_exit_latency()
4145 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4150 command = xhci->lpm_command; in xhci_change_max_exit_latency()
4153 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4154 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_change_max_exit_latency()
4159 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); in xhci_change_max_exit_latency()
4160 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4163 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); in xhci_change_max_exit_latency()
4168 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_change_max_exit_latency()
4172 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_change_max_exit_latency()
4176 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4178 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
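xhci_change_max_exit_latency() is a one-field context update: copy the device's current output slot context into the command's input context, rewrite the Max Exit Latency bits, and submit through xhci_configure_endpoint() with ctx_change set so an Evaluate Context command is used. A sketch of the field update; MAX_EXIT is the mask macro from the driver's xhci.h, and the exact masking here is illustrative:

	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32)MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);

	/* both flags true in the driver: Evaluate Context, must succeed */
	ret = xhci_configure_endpoint(xhci, udev, command, true, true);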
4190 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, in xhci_calculate_hird_besl() argument
4197 u2del = HCS_U2_LATENCY(xhci->hcs_params3); in xhci_calculate_hird_besl()
4249 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_set_usb2_hardware_lpm() local
4258 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || in xhci_set_usb2_hardware_lpm()
4269 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4271 ports = xhci->usb2_rhub.ports; in xhci_set_usb2_hardware_lpm()
4278 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", in xhci_set_usb2_hardware_lpm()
4281 if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { in xhci_set_usb2_hardware_lpm()
4295 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4305 ret = xhci_change_max_exit_latency(xhci, udev, in xhci_set_usb2_hardware_lpm()
4311 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4318 hird = xhci_calculate_hird_besl(xhci, udev); in xhci_set_usb2_hardware_lpm()
4335 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4337 xhci_change_max_exit_latency(xhci, udev, 0); in xhci_set_usb2_hardware_lpm()
4343 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
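On the enable side, xhci_set_usb2_hardware_lpm() programs the per-port power-management register with the HIRD/BESL latency from xhci_calculate_hird_besl(), plus the device slot and the hardware-LPM-enable bit. The register offset and field macros below (PORTPMSC, PORT_HIRD_MASK, PORT_HIRD, PORT_RWE, PORT_L1DS, PORT_HLE) are quoted from the driver's xhci.h as best I recall; treat the exact bit layout as an assumption, not a spec citation:

	__le32 __iomem *pm_addr = ports[port_num]->addr + PORTPMSC;
	u32 temp;

	hird = xhci_calculate_hird_besl(xhci, udev);
	temp = readl(pm_addr);
	temp &= ~PORT_HIRD_MASK;
	/* latency value, remote-wake enable, device slot, HLE on */
	temp |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id) | PORT_HLE;
	writel(temp, pm_addr);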
4351 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, in xhci_check_usb2_port_capability() argument
4357 for (i = 0; i < xhci->num_ext_caps; i++) { in xhci_check_usb2_port_capability()
4358 if (xhci->ext_caps[i] & capability) { in xhci_check_usb2_port_capability()
4360 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; in xhci_check_usb2_port_capability()
4361 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); in xhci_check_usb2_port_capability()
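xhci_check_usb2_port_capability() answers "does root-hub port N support capability X" (e.g. XHCI_HLC for hardware LPM) by scanning the cached extended-capability words, each of which advertises a contiguous, 1-based range of ports. The loop below mirrors the calls in the fragments; the range test is reconstructed from the function's evident intent:

	for (i = 0; i < xhci->num_ext_caps; i++) {
		if (xhci->ext_caps[i] & capability) {
			/* capability port offsets are 1-based */
			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
			if (port >= port_offset &&
			    port < port_offset + port_count)
				return 1;
		}
	}
	return 0;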
4372 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_device() local
4375 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support || in xhci_update_device()
4384 if (xhci->hw_lpm_support == 1 && in xhci_update_device()
4386 xhci, portnum, XHCI_HLC)) { in xhci_update_device()
4390 if (xhci_check_usb2_port_capability(xhci, portnum, in xhci_update_device()
4495 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, in xhci_calculate_u1_timeout() argument
4501 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u1_timeout()
4551 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, in xhci_calculate_u2_timeout() argument
4557 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u2_timeout()
4574 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_call_host_update_timeout_for_endpoint() argument
4581 return xhci_calculate_u1_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
4583 return xhci_calculate_u2_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
4588 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_update_timeout_for_endpoint() argument
4596 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_endpoint()
4613 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, in xhci_update_timeout_for_interface() argument
4622 if (xhci_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_interface()
4654 static int xhci_check_tier_policy(struct xhci_hcd *xhci, in xhci_check_tier_policy() argument
4658 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_check_tier_policy()
4672 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_calculate_lpm_timeout() local
4688 if (xhci_check_tier_policy(xhci, udev, state) < 0) in xhci_calculate_lpm_timeout()
4694 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, in xhci_calculate_lpm_timeout()
4726 if (xhci_update_timeout_for_interface(xhci, udev, in xhci_calculate_lpm_timeout()
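The U1/U2 timeout machinery above reduces to one aggregation rule: every endpoint proposes an exit-latency timeout, the device-wide value may only grow toward the largest (most conservative) proposal, and a single USB3_LPM_DISABLED vote aborts the walk. A simplified sketch of xhci_update_timeout_for_endpoint() under that reading; the USB3_LPM_* constants are the standard ch9 values, and the function name is mine:

	static int example_update_timeout(struct xhci_hcd *xhci,
					  struct usb_device *udev,
					  struct usb_endpoint_descriptor *desc,
					  enum usb3_link_state state,
					  u16 *timeout)
	{
		u16 alt;

		alt = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
								 desc, state,
								 timeout);
		/* an endpoint that cannot tolerate LPM vetoes it outright */
		if (alt == USB3_LPM_DISABLED) {
			*timeout = alt;
			return -E2BIG;
		}
		if (alt > *timeout)	/* keep the longest required timeout */
			*timeout = alt;
		return 0;
	}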
4783 struct xhci_hcd *xhci; in xhci_enable_usb3_lpm_timeout() local
4788 xhci = hcd_to_xhci(hcd); in xhci_enable_usb3_lpm_timeout()
4793 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_enable_usb3_lpm_timeout()
4794 !xhci->devs[udev->slot_id]) in xhci_enable_usb3_lpm_timeout()
4805 ret = xhci_change_max_exit_latency(xhci, udev, mel); in xhci_enable_usb3_lpm_timeout()
4814 struct xhci_hcd *xhci; in xhci_disable_usb3_lpm_timeout() local
4817 xhci = hcd_to_xhci(hcd); in xhci_disable_usb3_lpm_timeout()
4818 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_disable_usb3_lpm_timeout()
4819 !xhci->devs[udev->slot_id]) in xhci_disable_usb3_lpm_timeout()
4823 return xhci_change_max_exit_latency(xhci, udev, mel); in xhci_disable_usb3_lpm_timeout()
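Both USB3 LPM entry points are thin wrappers over xhci_change_max_exit_latency(): enable computes a Max Exit Latency for the requested link state, disable simply drops it to zero. A sketch of the disable path shown above, assuming the driver's headers:

	static int example_disable_usb3_lpm(struct usb_hcd *hcd,
					    struct usb_device *udev)
	{
		struct xhci_hcd *xhci = hcd_to_xhci(hcd);
		u16 mel = 0;	/* MEL of 0 turns U1/U2 off for the slot */

		if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
		    !xhci->devs[udev->slot_id])
			return 0;

		return xhci_change_max_exit_latency(xhci, udev, mel);
	}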
4859 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_hub_device() local
4872 vdev = xhci->devs[hdev->slot_id]; in xhci_update_hub_device()
4874 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); in xhci_update_hub_device()
4878 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); in xhci_update_hub_device()
4884 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_update_hub_device()
4886 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
4890 spin_lock_irqsave(&xhci->lock, flags); in xhci_update_hub_device()
4892 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { in xhci_update_hub_device()
4893 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); in xhci_update_hub_device()
4894 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
4895 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
4899 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); in xhci_update_hub_device()
4901 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); in xhci_update_hub_device()
4913 if (xhci->hci_version > 0x95) { in xhci_update_hub_device()
4914 xhci_dbg(xhci, "xHCI version %x needs hub " in xhci_update_hub_device()
4916 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
4928 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) in xhci_update_hub_device()
4932 xhci_dbg(xhci, "xHCI version %x doesn't need hub " in xhci_update_hub_device()
4934 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
4937 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
4939 xhci_dbg(xhci, "Set up %s for hub device.\n", in xhci_update_hub_device()
4940 (xhci->hci_version > 0x95) ? in xhci_update_hub_device()
4946 if (xhci->hci_version > 0x95) in xhci_update_hub_device()
4947 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
4950 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
4953 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
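The tail of xhci_update_hub_device() encodes the version split its own debug message names: hosts newer than xHCI 0.95 take the hub fields via a full Configure Endpoint command, while 0.95 hosts use Evaluate Context. The fourth argument (ctx_change) selects between the two; the flag values below are quoted from the full function, with the elided arguments filled in from it:

	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
					      false, false);	/* Configure Endpoint */
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
					      true, false);	/* Evaluate Context */

	xhci_free_command(xhci, config_cmd);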
4959 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_get_frame() local
4961 return readl(&xhci->run_regs->microframe_index) >> 3; in xhci_get_frame()
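xhci_get_frame() converts the running MFINDEX register, which counts 125 us microframes, to the 1 ms frame number the USB core expects; shifting right by three divides by the eight microframes per frame. The same arithmetic as a standalone helper (the name is mine, not the driver's):

	static inline unsigned int mfindex_to_frame(u32 mfindex)
	{
		return mfindex >> 3;	/* 8 x 125 us microframes = 1 ms frame */
	}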
4966 struct xhci_hcd *xhci; in xhci_gen_setup() local
4984 xhci = hcd_to_xhci(hcd); in xhci_gen_setup()
4987 xhci->main_hcd = hcd; in xhci_gen_setup()
4988 xhci->usb2_rhub.hcd = hcd; in xhci_gen_setup()
5005 minor_rev = xhci->usb3_rhub.min_rev; in xhci_gen_setup()
5010 xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n", in xhci_gen_setup()
5014 xhci->usb3_rhub.hcd = hcd; in xhci_gen_setup()
5021 mutex_init(&xhci->mutex); in xhci_gen_setup()
5022 xhci->cap_regs = hcd->regs; in xhci_gen_setup()
5023 xhci->op_regs = hcd->regs + in xhci_gen_setup()
5024 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); in xhci_gen_setup()
5025 xhci->run_regs = hcd->regs + in xhci_gen_setup()
5026 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); in xhci_gen_setup()
5028 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); in xhci_gen_setup()
5029 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); in xhci_gen_setup()
5030 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); in xhci_gen_setup()
5031 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); in xhci_gen_setup()
5032 xhci->hci_version = HC_VERSION(xhci->hcc_params); in xhci_gen_setup()
5033 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); in xhci_gen_setup()
5034 if (xhci->hci_version > 0x100) in xhci_gen_setup()
5035 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); in xhci_gen_setup()
5037 xhci->quirks |= quirks; in xhci_gen_setup()
5039 get_quirks(dev, xhci); in xhci_gen_setup()
5045 if (xhci->hci_version > 0x96) in xhci_gen_setup()
5046 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; in xhci_gen_setup()
5049 retval = xhci_halt(xhci); in xhci_gen_setup()
5053 xhci_zero_64b_regs(xhci); in xhci_gen_setup()
5055 xhci_dbg(xhci, "Resetting HCD\n"); in xhci_gen_setup()
5057 retval = xhci_reset(xhci); in xhci_gen_setup()
5060 xhci_dbg(xhci, "Reset complete\n"); in xhci_gen_setup()
5069 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) in xhci_gen_setup()
5070 xhci->hcc_params &= ~BIT(0); in xhci_gen_setup()
5074 if (HCC_64BIT_ADDR(xhci->hcc_params) && in xhci_gen_setup()
5076 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); in xhci_gen_setup()
5086 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); in xhci_gen_setup()
5090 xhci_dbg(xhci, "Calling HCD init\n"); in xhci_gen_setup()
5095 xhci_dbg(xhci, "Called HCD init\n"); in xhci_gen_setup()
5097 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", in xhci_gen_setup()
5098 xhci->hcc_params, xhci->hci_version, xhci->quirks); in xhci_gen_setup()
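Besides mapping the capability, operational, and runtime register windows and halting and resetting the controller, xhci_gen_setup() picks the DMA mask from the AC64 bit in hcc_params (cleared earlier when the XHCI_NO_64BIT_SUPPORT quirk applies). A condensed sketch of that selection using the standard dma_set_mask()/dma_set_coherent_mask() kernel APIs; the fallback comment paraphrases the driver's intent:

	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
	    !dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/* e.g. a 32-bit controller in a 64-bit capable system */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}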