// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * ...
 * until you reach a non-link TRB.
 */

#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
                         u32 field1, u32 field2,
                         u32 field3, u32 field4, bool command_must_succeed);

dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                                union xhci_trb *trb)
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}
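/*
 * Note: a TRB is 16 bytes, so the address math above is just base + index.
 * For example, the third TRB of a segment whose seg->dma is 0x1000 maps to
 * 0x1000 + 2 * sizeof(union xhci_trb) = 0x1020.
 */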
static bool trb_is_noop(union xhci_trb *trb)
{
        return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
        return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
        return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
                             struct xhci_segment *seg, union xhci_trb *trb)
{
        return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
        return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
        struct urb_priv *urb_priv = td->urb->hcpriv;

        return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
        struct urb_priv *urb_priv = urb->hcpriv;

        urb_priv->num_tds_done++;
}
static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
        if (trb_is_link(trb)) {
                /* unchain chained link TRBs */
                trb->link.control &= cpu_to_le32(~TRB_CHAIN);
        } else {
                trb->generic.field[0] = 0;
                trb->generic.field[1] = 0;
                trb->generic.field[2] = 0;
                /* Preserve only the cycle bit of this TRB */
                trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
                trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
        }
}
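/*
 * In short: a link TRB is only unchained, since its pointer and type must
 * survive for the ring to stay connected, while a normal TRB is wiped
 * except for its cycle bit, which has to keep matching the ring's cycle
 * state so the controller's ownership view of the ring is not corrupted.
 */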
static void next_trb(struct xhci_hcd *xhci,
                struct xhci_ring *ring,
                struct xhci_segment **seg,
                union xhci_trb **trb)
{
        if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
                (*trb)++;
        }
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        unsigned int link_trb_count = 0;

        /* event ring doesn't have link trbs, check for last trb */
        if (ring->type == TYPE_EVENT) {
                if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
                        ring->dequeue++;
                        goto out;
                }
                if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
                        ring->cycle_state ^= 1;
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
                goto out;
        }

        /* All other rings have link trbs */
        if (!trb_is_link(ring->dequeue)) {
                if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
                        xhci_warn(xhci, "Missing link TRB at end of segment\n");
                } else {
                        ring->dequeue++;
                        ring->num_trbs_free++;
                }
        }

        while (trb_is_link(ring->dequeue)) {
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;

                if (link_trb_count++ > ring->num_segs) {
                        xhci_warn(xhci, "Ring is an endless link TRB loop\n");
                        break;
                }
        }
out:
        trace_xhci_inc_deq(ring);
}
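/*
 * Dequeue rules, summarized: event rings have no link TRBs, so the cycle
 * state only flips when the dequeue pointer wraps from the last segment
 * back to the first.  All other rings step over link TRBs, and the
 * link_trb_count guard bails out if more consecutive link TRBs are seen
 * than the ring has segments, which can only happen on a corrupted ring.
 */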
/*
 * ...
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                        bool more_trbs_coming)
{
        u32 chain;
        union xhci_trb *next;
        unsigned int link_trb_count = 0;

        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

        /* If this is not event ring, there is one less usable TRB */
        if (!trb_is_link(ring->enqueue))
                ring->num_trbs_free--;

        if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
                xhci_err(xhci, "Tried to move enqueue past ring segment\n");
                return;
        }

        next = ++(ring->enqueue);

        /* Update the dequeue pointer further if that was a link TRB */
        while (trb_is_link(next)) {
                ...
                /* If we're not dealing with 0.95 hardware or isoc rings on
                 * AMD 0.96 host, carry over the chain bit of the previous TRB
                 * (which may mean the chain bit is cleared).
                 */
                if (!(ring->type == TYPE_ISOC &&
                      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
                    !xhci_link_trb_quirk(xhci)) {
                        next->link.control &= cpu_to_le32(~TRB_CHAIN);
                        next->link.control |= cpu_to_le32(chain);
                }
                /* Give this link TRB to the hardware */
                wmb();
                next->link.control ^= cpu_to_le32(TRB_CYCLE);

                /* Toggle the cycle bit after the last ring segment. */
                if (link_trb_toggles_cycle(next))
                        ring->cycle_state ^= 1;

                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;

                if (link_trb_count++ > ring->num_segs) {
                        xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
                        break;
                }
        }
}
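/*
 * The enqueue side hands a link TRB over to the hardware by carrying the
 * previous TRB's chain bit into it and then toggling its cycle bit; the
 * cycle toggle is what actually transfers ownership, so the wmb() above
 * ensures the other fields are visible to the controller first.
 */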
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
{
        int num_trbs_in_deq_seg;

        if (ring->num_trbs_free < num_trbs)
                return 0;

        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
                        return 0;
        }

        return 1;
}
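/*
 * Rule of thumb for the second check above: on a transfer ring the
 * enqueue pointer must never advance into the segment the dequeue
 * pointer is parked in, so TRBs already consumed in the dequeue segment
 * do not count as usable room.
 */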
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
                return;

        xhci_dbg(xhci, "// Ding dong!\n");

        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
        return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
        return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
                                        cmd_list);
}
/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * ...
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
                                         struct xhci_command *cur_cmd)
{
        struct xhci_command *i_cmd;

        /* Turn all aborted commands in list to no-ops, then restart */
        list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

                if (i_cmd->status != COMP_COMMAND_ABORTED)
                        continue;

                i_cmd->status = COMP_COMMAND_RING_STOPPED;

                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
                         i_cmd->command_trb);

                trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

                /*
                 * ...
                 * completion event is received for these no-op commands
                 */
        }

        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        /* ring command ring doorbell to restart the command ring */
        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
            !(xhci->xhc_state & XHCI_STATE_DYING)) {
                xhci->current_cmd = cur_cmd;
                xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
                xhci_ring_cmd_db(xhci);
        }
}
/* Must be called with xhci->lock held, releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
        struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
        union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
        u64 crcr;
        int ret;

        xhci_dbg(xhci, "Abort command ring\n");

        reinit_completion(&xhci->cmd_ring_stop_completion);

        /*
         * Make sure the upper dword of CRCR is valid, pointing at the next
         * command, to avoid corrupting the command ring pointer in case the
         * command ring is stopped by the time the upper dword is written.
         */
        next_trb(xhci, NULL, &new_seg, &new_deq);
        if (trb_is_link(new_deq))
                next_trb(xhci, NULL, &new_seg, &new_deq);

        crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
        xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

        /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
         * completion of the Command Abort operation. If CRR is not negated in 5
         * seconds then driver handles it as if host died (-ENODEV).
         * In the future we should distinguish between -ENODEV and -ETIMEDOUT
         * and try to recover a -ETIMEDOUT with a host controller reset.
         */
        ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                             CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
        if (ret < 0) {
                xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
                xhci_halt(xhci);
                xhci_hc_died(xhci);
                return ret;
        }

        spin_unlock_irqrestore(&xhci->lock, flags);
        ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
                                          msecs_to_jiffies(2000));
        spin_lock_irqsave(&xhci->lock, flags);
        if (!ret) {
                xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
                xhci_cleanup_command_queue(xhci);
        } else {
                xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
        }
        return 0;
}
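/*
 * Aborting is a two step handshake: first wait up to 5 seconds for the
 * controller to clear CMD_RING_RUNNING in CRCR, then wait up to another
 * 2 seconds for the command ring stopped completion event.  Only if that
 * event never arrives is the whole command queue torn down.
 */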
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                           unsigned int slot_id,
                           unsigned int ep_index,
                           unsigned int stream_id)
{
        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        unsigned int ep_state = ep->ep_state;

        /* Don't ring the doorbell for this endpoint while there are pending
         * cancellations or a Set TR Deq command outstanding.
         */
        if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING))
                return;

        writel(DB_VALUE(ep_index, stream_id), db_addr);
        /* flush the write */
        readl(db_addr);
}

static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                                           unsigned int slot_id,
                                           unsigned int ep_index)
{
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];

        /* A ring has pending URBs if its TD list is not empty */
        if (!(ep->ep_state & EP_HAS_STREAMS)) {
                if (ep->ring && !(list_empty(&ep->ring->td_list)))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
                return;
        }

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
             stream_id++) {
                struct xhci_stream_info *stream_info = ep->stream_info;
                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
                                              stream_id);
        }
}

void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                                         unsigned int slot_id,
                                         unsigned int ep_index)
{
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
                                             unsigned int slot_id,
                                             unsigned int ep_index)
{
        if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
                xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
                return NULL;
        }
        if (ep_index >= EP_CTX_PER_DEV) {
                xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
                return NULL;
        }
        if (!xhci->devs[slot_id]) {
                xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
                return NULL;
        }

        return &xhci->devs[slot_id]->eps[ep_index];
}

static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
                                              struct xhci_virt_ep *ep,
                                              unsigned int stream_id)
{
        /* common case, no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (!ep->stream_info)
                return NULL;

        if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
                xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
                          stream_id, ep->vdev->slot_id, ep->ep_index);
                return NULL;
        }

        return ep->stream_info->stream_rings[stream_id];
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
                                              unsigned int slot_id,
                                              unsigned int ep_index,
                                              unsigned int stream_id)
{
        struct xhci_virt_ep *ep;

        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return NULL;

        return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}
/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
                           unsigned int ep_index,
                           unsigned int stream_id)
{
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_stream_ctx *st_ctx;
        struct xhci_virt_ep *ep;

        ep = &vdev->eps[ep_index];

        if (ep->ep_state & EP_HAS_STREAMS) {
                st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
                return le64_to_cpu(st_ctx->stream_ring);
        }
        ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
        return le64_to_cpu(ep_ctx->deq);
}
static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id, struct xhci_td *td)
{
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_virt_ep *ep = &dev->eps[ep_index];
        struct xhci_ring *ep_ring;
        struct xhci_segment *new_seg;
        struct xhci_segment *halted_seg = NULL;
        union xhci_trb *new_deq;
        union xhci_trb *halted_trb;
        struct xhci_command *cmd;
        int index = 0;
        dma_addr_t addr;
        u64 hw_dequeue;
        bool cycle_found = false;
        bool td_last_trb_found = false;
        u32 new_cycle;
        int ret;

        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
                          stream_id);
                return -ENODEV;
        }

        /*
         * A cancelled TD can complete with a stall if HW cached the trb.
         * In this case driver can't find td, but if the ring is empty we
         * can move the dequeue pointer to the current enqueue position.
         */
        if (!td) {
                if (list_empty(&ep_ring->td_list)) {
                        new_seg = ep_ring->enq_seg;
                        new_deq = ep_ring->enqueue;
                        new_cycle = ep_ring->cycle_state;
                        xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
                        goto deq_found;
                } else {
                        xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
                        return -EINVAL;
                }
        }

        hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
        new_seg = ep_ring->deq_seg;
        new_deq = ep_ring->dequeue;

        /*
         * Quirk: xHC write-back of the DCS field in the hardware dequeue
         * pointer is wrong - use the cycle state of the TRB pointed to by
         * the hardware dequeue pointer instead.
         */
        if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
            !(ep->ep_state & EP_HAS_STREAMS))
                halted_seg = trb_in_td(xhci, td->start_seg,
                                       td->first_trb, td->last_trb,
                                       hw_dequeue & ~0xf, false);
        if (halted_seg) {
                index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
                         sizeof(*halted_trb);
                halted_trb = &halted_seg->trbs[index];
                new_cycle = halted_trb->generic.field[3] & 0x1;
                xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
                         (u8)(hw_dequeue & 0x1), index, new_cycle);
        } else {
                new_cycle = hw_dequeue & 0x1;
        }

        /*
         * Walk the ring until both the TRB after the TD's last TRB and the
         * hardware dequeue position (for the cycle state) have been found.
         */
        do {
                if (!cycle_found &&
                    xhci_trb_virt_to_dma(new_seg, new_deq) ==
                    (dma_addr_t)(hw_dequeue & ~0xf)) {
                        cycle_found = true;
                        if (td_last_trb_found)
                                break;
                }
                if (new_deq == td->last_trb)
                        td_last_trb_found = true;

                if (cycle_found && trb_is_link(new_deq) &&
                    link_trb_toggles_cycle(new_deq))
                        new_cycle ^= 0x1;

                next_trb(xhci, ep_ring, &new_seg, &new_deq);

                /* Search wrapped around, bail out */
                if (new_deq == ep->ring->dequeue) {
                        xhci_err(xhci, "Error: Failed finding new dequeue state\n");
                        return -EINVAL;
                }

        } while (!cycle_found || !td_last_trb_found);

deq_found:
        addr = xhci_trb_virt_to_dma(new_seg, new_deq);
        if (addr == 0) {
                xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
                xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
                return -EINVAL;
        }

        if ((ep->ep_state & SET_DEQ_PENDING)) {
                xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
                          &addr);
                return -EBUSY;
        }

        /* This function gets called from contexts where it cannot sleep */
        cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
        if (!cmd) {
                xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
                return -ENOMEM;
        }

        ret = queue_command(xhci, cmd,
                lower_32_bits(addr) | new_cycle, upper_32_bits(addr),
                ...);
        if (ret < 0) {
                xhci_free_command(xhci, cmd);
                return ret;
        }

        ep->queued_deq_seg = new_seg;
        ep->queued_deq_ptr = new_deq;

        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                       "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);

        /*
         * Stop the TD queueing code from ringing the doorbell until this Set
         * TR Deq command completes; the HC won't set the dequeue pointer if
         * the ring is running.
         */
        ep->ep_state |= SET_DEQ_PENDING;
        xhci_ring_cmd_db(xhci);
        return 0;
}
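/*
 * Walkthrough of the search above: starting from the current hardware
 * dequeue position, step TRB by TRB, toggling new_cycle at every link
 * TRB that has the toggle-cycle bit set, until the TRB following
 * td->last_trb is reached; that position and cycle state are then handed
 * to the controller with a Set TR Dequeue Pointer command.
 */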
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                       struct xhci_td *td, bool flip_cycle)
{
        struct xhci_segment *seg = td->start_seg;
        union xhci_trb *trb = td->first_trb;

        while (1) {
                trb_to_noop(trb, TRB_TR_NOOP);

                /* flip cycle if asked to */
                if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
                        trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

                if (trb == td->last_trb)
                        break;

                next_trb(xhci, ep_ring, &seg, &trb);
        }
}
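/*
 * When flip_cycle is set, every TRB strictly between the TD's first and
 * last TRB also gets its cycle bit inverted, so the hardware no longer
 * treats the no-op'ed middle of the TD as owned by it.
 */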
/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
                                     struct xhci_td *cur_td, int status)
{
        struct urb *urb = cur_td->urb;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                usb_amd_quirk_pll_enable();
                }
        }
        ...
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
                                        struct xhci_ring *ring,
                                        struct xhci_td *td)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        struct xhci_segment *seg = td->bounce_seg;
        struct urb *urb = td->urb;
        size_t len;

        if (!ring || !seg || !urb)
                return;

        if (usb_urb_dir_out(urb)) {
                dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                                 DMA_TO_DEVICE);
                return;
        }

        dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                         DMA_FROM_DEVICE);
        /* for in transfers we need to copy the data from bounce to sg */
        if (urb->num_sgs) {
                len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
                                           seg->bounce_len, seg->bounce_offs);
                if (len != seg->bounce_len)
                        xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
                                  len, seg->bounce_len);
        } else {
                memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
                       seg->bounce_len);
        }
        seg->bounce_len = 0;
        seg->bounce_offs = 0;
}
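/*
 * A bounce buffer only exists when the TD's data had to be linearized to
 * satisfy the controller's TD fragment rules at a ring segment boundary;
 * for IN transfers the data is copied back out of the bounce buffer into
 * the URB's scatterlist or linear buffer before the TD is retired.
 */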
static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
                           struct xhci_ring *ep_ring, int status)
{
        struct urb *urb;

        urb = td->urb;

        /* if a bounce buffer was used to align this td then unmap it */
        xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

        /* Do one last check of the actual transfer length.
         * If the host controller said we transferred more data than the buffer
         * length, urb->actual_length will be a very big number (since it's
         * unsigned).  Play it safe and say we didn't transfer anything.
         */
        if (urb->actual_length > urb->transfer_buffer_length) {
                xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
                          urb->transfer_buffer_length, urb->actual_length);
                urb->actual_length = 0;
                status = 0;
        }
        /* TD might be removed from td_list if we are giving back a cancelled URB */
        if (!list_empty(&td->td_list))
                list_del_init(&td->td_list);
        /* Giving back a cancelled URB, or if a slated TD completed anyway */
        if (!list_empty(&td->cancelled_td_list))
                list_del_init(&td->cancelled_td_list);

        inc_td_cnt(urb);
        /* Giveback the urb when all the tds are completed */
        if (last_td_in_urb(td)) {
                if ((urb->actual_length != urb->transfer_buffer_length &&
                     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
                    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
                        xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
                                 urb, urb->actual_length,
                                 urb->transfer_buffer_length, status);

                /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
                        status = 0;
                xhci_giveback_urb_in_irq(xhci, td, status);
        }

        return 0;
}
static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
{
        struct xhci_ring *ring;
        struct xhci_td *td, *tmp_td;

        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
                                 cancelled_td_list) {

                ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);

                if (td->cancel_status == TD_CLEARED) {
                        xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
                                 __func__, td->urb);
                        xhci_td_cleanup(ep->xhci, td, ring, td->status);
                } else {
                        xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
                                 __func__, td->urb, td->cancel_status);
                }
                if (ep->xhci->xhc_state & XHCI_STATE_DYING)
                        return;
        }
}
static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
                                unsigned int ep_index,
                                enum xhci_ep_reset_type reset_type)
{
        struct xhci_command *command;
        int ret = 0;

        command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
        if (!command) {
                ret = -ENOMEM;
                goto done;
        }

        xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
                 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
                 ep_index, slot_id);

        ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
done:
        if (ret)
                xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
                         slot_id, ep_index, ret);
        return ret;
}

static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
                                       struct xhci_virt_ep *ep,
                                       unsigned int stream_id,
                                       struct xhci_td *td,
                                       enum xhci_ep_reset_type reset_type)
{
        unsigned int slot_id = ep->vdev->slot_id;
        int err;

        /*
         * Avoid resetting endpoint if link is inactive. Can cause host hang.
         * Device will be reset soon to recover the link so don't do anything
         */
        if (ep->vdev->flags & VDEV_PORT_ERROR)
                return -ENODEV;

        /* add td to cancelled list and let reset ep handler take care of it */
        if (reset_type == EP_HARD_RESET) {
                ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
                if (td && list_empty(&td->cancelled_td_list)) {
                        list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
                        td->cancel_status = TD_HALTED;
                }
        }

        if (ep->ep_state & EP_HALTED) {
                xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
                         ep->ep_index);
                return 0;
        }

        err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
        if (err)
                return err;

        ep->ep_state |= EP_HALTED;

        xhci_ring_cmd_db(xhci);

        return 0;
}
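/*
 * Ordering note: a hard reset marks the endpoint EP_HARD_CLEAR_TOGGLE
 * and puts the halted TD on the cancelled list before the Reset Endpoint
 * command is queued, and EP_HALTED guards against submitting a second
 * reset for the same halt.
 */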
/*
 * Fix up the ep ring first, so HW stops executing cancelled TDs.
 * We have the xHCI lock, so nothing can modify this list until we drop it.
 * We're also in the event handler, so we can't get re-interrupted if another
 * Stop Endpoint command completes.
 */
static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_td *td = NULL;
        struct xhci_td *tmp_td = NULL;
        struct xhci_td *cached_td = NULL;
        struct xhci_ring *ring;
        u64 hw_deq;
        unsigned int slot_id = ep->vdev->slot_id;
        int err;

        xhci = ep->xhci;

        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                               "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
                               (unsigned long long)xhci_trb_virt_to_dma(
                                       td->start_seg, td->first_trb),
                               td->urb->stream_id, td->urb);
                list_del_init(&td->td_list);
                ring = xhci_urb_to_transfer_ring(xhci, td->urb);
                if (!ring) {
                        xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
                                  td->urb, td->urb->stream_id);
                        continue;
                }
                /*
                 * If a ring stopped on the TD we need to cancel then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
                                         td->urb->stream_id);
                hw_deq &= ~0xf;

                if (td->cancel_status == TD_HALTED ||
                    trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
                        switch (td->cancel_status) {
                        case TD_CLEARED: /* TD is already no-op */
                        case TD_CLEARING_CACHE: /* set TR deq command already queued */
                                break;
                        case TD_DIRTY: /* TD is cached, clear it */
                        case TD_HALTED:
                                td->cancel_status = TD_CLEARING_CACHE;
                                if (cached_td)
                                        /* FIXME  stream case, several stopped rings */
                                        xhci_dbg(xhci,
                                                 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
                                                 td->urb->stream_id, td->urb,
                                                 cached_td->urb->stream_id, cached_td->urb);
                                cached_td = td;
                                break;
                        }
                } else {
                        td_to_noop(xhci, ring, td, false);
                        td->cancel_status = TD_CLEARED;
                }
        }

        /* If there's no need to move the dequeue pointer then we're done */
        if (!cached_td)
                return 0;

        err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
                                        cached_td->urb->stream_id,
                                        cached_td);
        if (err) {
                /* Failed to move past cached td, just set cached TDs to no-op */
                list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
                        if (td->cancel_status != TD_CLEARING_CACHE)
                                continue;
                        xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
                                 td->urb);
                        td_to_noop(xhci, ring, td, false);
                        td->cancel_status = TD_CLEARED;
                }
        }
        return 0;
}
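/*
 * Two cases fall out of the loop above: TDs the controller may still
 * have cached (its dequeue pointer stopped inside them) are marked
 * TD_CLEARING_CACHE and flushed with one Set TR Dequeue command, while
 * TDs the hardware never reached are simply rewritten as no-ops and
 * marked TD_CLEARED.  Only one Set TR Deq command can be pending at a
 * time, hence the single cached_td.
 */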
/*
 * Returns the TD the endpoint ring halted on.
 * Only call for non-running rings without streams.
 */
static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
{
        struct xhci_td *td;
        u64 hw_deq;

        if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
                hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
                hw_deq &= ~0xf;
                td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
                if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
                              td->last_trb, hw_deq, false))
                        return td;
        }
        return NULL;
}
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                                    union xhci_trb *trb, u32 comp_code)
{
        unsigned int ep_index;
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_td *td = NULL;
        enum xhci_ep_reset_type reset_type;
        struct xhci_command *command;
        int err;

        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
                if (!xhci->devs[slot_id])
                        xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
                                  slot_id);
                return;
        }

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return;

        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

        if (comp_code == COMP_CONTEXT_STATE_ERROR) {
                /*
                 * The stop endpoint command raced with the endpoint changing
                 * state.  The proper error code is unknown here: it would be
                 * -EPIPE if the device side of the endpoint halted (aka STALL),
                 * and -EPROTO if not (transaction error).  We use -EPROTO; if
                 * the device is stalled it will return a stall error on the
                 * next transfer, which then will return -EPIPE, and the device
                 * side stall is noted and cleared by the class driver.
                 */
                switch (GET_EP_CTX_STATE(ep_ctx)) {
                case EP_STATE_HALTED:
                        xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
                        if (ep->ep_state & EP_HAS_STREAMS) {
                                reset_type = EP_SOFT_RESET;
                        } else {
                                reset_type = EP_HARD_RESET;
                                td = find_halted_td(ep);
                                if (td)
                                        td->status = -EPROTO;
                        }
                        /* reset ep, reset handler cleans up cancelled tds */
                        err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
                                                          reset_type);
                        if (err)
                                break;
                        ep->ep_state &= ~EP_STOP_CMD_PENDING;
                        return;
                case EP_STATE_RUNNING:
                        /* Race: HW handled stop ep cmd before ep was running */
                        xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");

                        command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
                        if (!command) {
                                ep->ep_state &= ~EP_STOP_CMD_PENDING;
                                return;
                        }
                        xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
                        xhci_ring_cmd_db(xhci);

                        return;
                default:
                        break;
                }
        }

        /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
        xhci_invalidate_cancelled_tds(ep);
        ep->ep_state &= ~EP_STOP_CMD_PENDING;

        /* Otherwise ring the doorbell(s) to restart queued transfers */
        xhci_giveback_invalidated_tds(ep);
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_td *cur_td;
        struct xhci_td *tmp;

        list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
                list_del_init(&cur_td->td_list);

                if (!list_empty(&cur_td->cancelled_td_list))
                        list_del_init(&cur_td->cancelled_td_list);

                xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

                inc_td_cnt(cur_td->urb);
                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
                                    int slot_id, int ep_index)
{
        struct xhci_td *cur_td;
        struct xhci_td *tmp;
        struct xhci_virt_ep *ep;
        struct xhci_ring *ring;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        if ((ep->ep_state & EP_HAS_STREAMS) ||
            (ep->ep_state & EP_GETTING_NO_STREAMS)) {
                int stream_id;

                for (stream_id = 1; stream_id < ep->stream_info->num_streams;
                     stream_id++) {
                        ring = ep->stream_info->stream_rings[stream_id];
                        if (!ring)
                                continue;

                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                       "Killing URBs for slot ID %u, ep index %u, stream %u",
                                       slot_id, ep_index, stream_id);

                        xhci_kill_ring_urbs(xhci, ring);
                }
        } else {
                ring = ep->ring;
                if (!ring)
                        return;
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                               "Killing URBs for slot ID %u, ep index %u",
                               slot_id, ep_index);
                xhci_kill_ring_urbs(xhci, ring);
        }

        list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
                                 cancelled_td_list) {
                list_del_init(&cur_td->cancelled_td_list);
                inc_td_cnt(cur_td->urb);

                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}
/*
 * ...
 * Call with xhci->lock held.
 * lock is released and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
        int i, j;

        if (xhci->xhc_state & XHCI_STATE_DYING)
                return;

        xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
        xhci->xhc_state |= XHCI_STATE_DYING;

        xhci_cleanup_command_queue(xhci);

        /* return any pending urbs, remove may be waiting for them */
        for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; j++)
                        xhci_kill_endpoint_urbs(xhci, i, j);
        }

        /* inform usb core hc died if PCI remove isn't already handling it */
        if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
                usb_hc_died(xhci_to_hcd(xhci));
}
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
                struct xhci_virt_device *dev,
                struct xhci_ring *ep_ring,
                unsigned int ep_index)
{
        union xhci_trb *dequeue_temp;
        int num_trbs_free_temp;
        bool revert = false;

        num_trbs_free_temp = ep_ring->num_trbs_free;
        dequeue_temp = ep_ring->dequeue;

        /* If we get two back-to-back stalls, and the first stalled transfer
         * ends just before a link TRB, the dequeue pointer will be left on
         * the link TRB by the code in the while loop.  So we have to update
         * the dequeue pointer one segment further, or we'll jump off
         * the segment into la-la-land.
         */
        if (trb_is_link(ep_ring->dequeue)) {
                ep_ring->deq_seg = ep_ring->deq_seg->next;
                ep_ring->dequeue = ep_ring->deq_seg->trbs;
        }

        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
                ep_ring->dequeue++;
                if (trb_is_link(ep_ring->dequeue)) {
                        if (ep_ring->dequeue ==
                            dev->eps[ep_index].queued_deq_ptr)
                                break;
                        ep_ring->deq_seg = ep_ring->deq_seg->next;
                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
                }
                if (ep_ring->dequeue == dequeue_temp) {
                        revert = true;
                        break;
                }
        }

        if (revert) {
                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
                ep_ring->num_trbs_free = num_trbs_free_temp;
        }
}
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                                    union xhci_trb *trb, u32 cmd_comp_code)
{
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_td *td, *tmp_td;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return;

        ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
                          stream_id);
                goto cleanup;
        }

        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
        slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);

        if (cmd_comp_code != COMP_SUCCESS) {
                unsigned int ep_state;
                unsigned int slot_state;

                switch (cmd_comp_code) {
                case COMP_TRB_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
                        break;
                case COMP_CONTEXT_STATE_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
                        ep_state = GET_EP_CTX_STATE(ep_ctx);
                        slot_state = le32_to_cpu(slot_ctx->dev_state);
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                       "Slot state = %u, EP state = %u",
                                       slot_state, ep_state);
                        break;
                case COMP_SLOT_NOT_ENABLED_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
                                  slot_id);
                        break;
                default:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
                                  cmd_comp_code);
                        break;
                }
        } else {
                u64 deq;
                /* 4.6.10 deq ptr is written to the stream ctx for streams */
                if (ep->ep_state & EP_HAS_STREAMS) {
                        struct xhci_stream_ctx *ctx =
                                &ep->stream_info->stream_ctx_array[stream_id];
                        deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
                } else {
                        deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
                }
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                               "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
                if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
                                         ep->queued_deq_ptr) == deq) {
                        /* Update the ring's dequeue segment and dequeue pointer
                         * to reflect the new position.
                         */
                        update_ring_for_set_deq_completion(xhci, ep->vdev,
                                                           ep_ring, ep_index);
                } else {
                        xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
                                  ep->queued_deq_seg, ep->queued_deq_ptr);
                }
        }

        /* HW cached TDs cleared from cache, give them back */
        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
                                 cancelled_td_list) {
                ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
                if (td->cancel_status == TD_CLEARING_CACHE) {
                        td->cancel_status = TD_CLEARED;
                        xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
                                 __func__, td->urb);
                        xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
                } else {
                        xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
                                 __func__, td->urb, td->cancel_status);
                }
        }
cleanup:
        ep->ep_state &= ~SET_DEQ_PENDING;
        ep->queued_deq_seg = NULL;
        ep->queued_deq_ptr = NULL;
        /* Restart any rings with pending URBs */
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                                     union xhci_trb *trb, u32 cmd_comp_code)
{
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        unsigned int ep_index;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return;

        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

        /* This command will only fail if the endpoint wasn't halted,
         * but we don't care.
         */
        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                       "Ignoring reset ep completion code of %u", cmd_comp_code);

        /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
        xhci_invalidate_cancelled_tds(ep);

        /* Clear our internal halted state */
        ep->ep_state &= ~EP_HALTED;

        xhci_giveback_invalidated_tds(ep);

        /* if this was a soft reset, then restart */
        if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
                                        struct xhci_command *command,
                                        u32 cmd_comp_code)
{
        if (cmd_comp_code == COMP_SUCCESS)
                command->slot_id = slot_id;
        else
                command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *virt_dev;
        struct xhci_slot_ctx *slot_ctx;

        virt_dev = xhci->devs[slot_id];
        if (!virt_dev)
                return;

        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);

        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
                /* Delete default control endpoint resources */
                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
                                      u32 cmd_comp_code)
{
        struct xhci_virt_device *virt_dev;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        unsigned int ep_index;
        u32 add_flags;

        virt_dev = xhci->devs[slot_id];
        if (!virt_dev)
                return;

        ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
        if (!ctrl_ctx) {
                xhci_warn(xhci, "Could not get input context, bad type.\n");
                return;
        }

        add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        /* Input ctx add_flags are the endpoint index plus one */
        ep_index = xhci_last_valid_endpoint(add_flags) - 1;

        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
        ...
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *vdev;
        struct xhci_slot_ctx *slot_ctx;

        vdev = xhci->devs[slot_id];
        if (!vdev)
                return;
        slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
        ...
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *vdev;
        struct xhci_slot_ctx *slot_ctx;

        vdev = xhci->devs[slot_id];
        if (!vdev) {
                xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
                          slot_id);
                return;
        }
        slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);

        xhci_dbg(xhci, "Completed reset device command.\n");
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
                                       struct xhci_event_cmd *event)
{
        if (!(xhci->quirks & XHCI_NEC_HOST)) {
                xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
                return;
        }
        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                       "NEC firmware version %2x.%02x",
                       NEC_FW_MAJOR(le32_to_cpu(event->status)),
                       NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
        list_del(&cmd->cmd_list);

        if (cmd->completion) {
                cmd->status = status;
                complete(cmd->completion);
        } else {
                kfree(cmd);
        }
}
void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
        struct xhci_command *cur_cmd, *tmp_cmd;

        xhci->current_cmd = NULL;
        list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
                xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
        struct xhci_hcd *xhci;
        unsigned long flags;
        char str[XHCI_MSG_MAX];
        u64 hw_ring_state;
        u32 cmd_field3;
        u32 usbsts;

        xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

        spin_lock_irqsave(&xhci->lock, flags);

        /*
         * If timeout work is pending, or current_cmd is NULL, it means we
         * raced with command completion. Command is handled so just return.
         */
        if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
        usbsts = readl(&xhci->op_regs->status);
        xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));

        /* Bail out and tear down xhci if a stop endpoint command failed */
        if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
                struct xhci_virt_ep *ep;

                xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");

                ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
                                      TRB_TO_EP_INDEX(cmd_field3));
                if (ep)
                        ep->ep_state &= ~EP_STOP_CMD_PENDING;

                xhci_halt(xhci);
                xhci_hc_died(xhci);
                goto time_out_completed;
        }

        /* mark this command to be cancelled */
        xhci->current_cmd->status = COMP_COMMAND_ABORTED;

        /* Make sure command ring is running before aborting it */
        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if (hw_ring_state == ~(u64)0) {
                xhci_hc_died(xhci);
                goto time_out_completed;
        }

        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
            (hw_ring_state & CMD_RING_RUNNING)) {
                /* Prevent new doorbell, and start command abort */
                xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
                xhci_dbg(xhci, "Command timeout\n");
                xhci_abort_cmd_ring(xhci, flags);
                goto time_out_completed;
        }

        /* host removed. Bail out */
        if (xhci->xhc_state & XHCI_STATE_REMOVING) {
                xhci_dbg(xhci, "host removed, ring start fail?\n");
                xhci_cleanup_command_queue(xhci);
                goto time_out_completed;
        }

        /* command timeout on stopped ring, ring can't be aborted */
        xhci_dbg(xhci, "Command timeout on stopped ring\n");
        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
        spin_unlock_irqrestore(&xhci->lock, flags);
}
static void handle_cmd_completion(struct xhci_hcd *xhci,
                                  struct xhci_event_cmd *event)
{
        unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        u64 cmd_dma;
        dma_addr_t cmd_dequeue_dma;
        u32 cmd_comp_code;
        union xhci_trb *cmd_trb;
        struct xhci_command *cmd;
        u32 cmd_type;

        if (slot_id >= MAX_HC_SLOTS) {
                xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
                return;
        }

        cmd_dma = le64_to_cpu(event->cmd_trb);
        cmd_trb = xhci->cmd_ring->dequeue;

        trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                               cmd_trb);
        /*
         * Check whether the completion event is for our internal kept
         * command.
         */
        if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
                xhci_warn(xhci,
                          "ERROR mismatched command completion event\n");
                return;
        }

        cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

        cancel_delayed_work(&xhci->cmd_timer);

        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

        /* If CMD ring stopped we own the trbs between enqueue and dequeue */
        if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
                complete_all(&xhci->cmd_ring_stop_completion);
                return;
        }

        if (cmd->command_trb != xhci->cmd_ring->dequeue) {
                xhci_err(xhci,
                         "Command completion event does not match command\n");
                return;
        }

        /*
         * Host aborted the command ring, check if the current command was
         * supposed to be aborted, otherwise continue normally.
         * The command ring is stopped now, but the xHC will issue a Command
         * Ring Stopped event which will cause us to restart it.
         */
        if (cmd_comp_code == COMP_COMMAND_ABORTED) {
                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
                if (cmd->status == COMP_COMMAND_ABORTED) {
                        if (xhci->current_cmd == cmd)
                                xhci->current_cmd = NULL;
                        goto event_handled;
                }
        }

        cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
        switch (cmd_type) {
        case TRB_ENABLE_SLOT:
                xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
                break;
        case TRB_DISABLE_SLOT:
                xhci_handle_cmd_disable_slot(xhci, slot_id);
                break;
        case TRB_CONFIG_EP:
                if (!cmd->completion)
                        xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
                break;
        case TRB_ADDR_DEV:
                xhci_handle_cmd_addr_dev(xhci, slot_id);
                break;
        case TRB_STOP_RING:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                if (!cmd->completion)
                        xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
                                                cmd_comp_code);
                break;
        case TRB_SET_DEQ:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_CMD_NOOP:
                /* Is this an aborted command turned to NO-OP? */
                if (cmd->status == COMP_COMMAND_RING_STOPPED)
                        cmd_comp_code = COMP_COMMAND_RING_STOPPED;
                break;
        case TRB_RESET_EP:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_RESET_DEV:
                /* SLOT_ID field in reset device cmd completion event TRB is 0.
                 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
                 */
                slot_id = TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3]));
                xhci_handle_cmd_reset_dev(xhci, slot_id);
                break;
        case TRB_NEC_GET_FW:
                xhci_handle_cmd_nec_get_fw(xhci, event);
                break;
        default:
                /* Skip over unknown commands on the event ring */
                xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
                break;
        }

        /* restart timer if this wasn't the last command */
        if (!list_is_singular(&xhci->cmd_list)) {
                xhci->current_cmd = list_first_entry(&cmd->cmd_list,
                                                     struct xhci_command, cmd_list);
                xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
        } else if (xhci->current_cmd == cmd) {
                xhci->current_cmd = NULL;
        }

event_handled:
        xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

        inc_deq(xhci, xhci->cmd_ring);
}
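/*
 * The sanity checks above hinge on the event's pointer field: the DMA
 * address carried in a command completion event must match the DMA
 * address of the TRB at the driver's own command ring dequeue, otherwise
 * driver and controller have lost sync on the command ring.
 */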
static void handle_vendor_event(struct xhci_hcd *xhci,
                                union xhci_trb *event, u32 trb_type)
{
        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
                handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_device_notification(struct xhci_hcd *xhci,
                                       union xhci_trb *event)
{
        u32 slot_id;
        struct usb_device *udev;

        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
        if (!xhci->devs[slot_id]) {
                xhci_warn(xhci, "Device Notification event for "
                          "unused slot %u\n", slot_id);
                return;
        }

        xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
                 slot_id);
        udev = xhci->devs[slot_id]->udev;
        if (udev && udev->parent)
                usb_wakeup_notification(udev->parent, udev->portnum);
}

/*
 * Quirk handler for errata seen on Cavium ThunderX2 processor XHCI
 * Controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * ...
 */
static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 pll_lock_check;
        u32 retry_count = 4;

        do {
                /* Assert PHY reset */
                writel(0x6F, hcd->regs + 0x1048);

                /* De-assert the PHY reset */
                writel(0x7F, hcd->regs + 0x1048);

                pll_lock_check = readl(hcd->regs + 0x1070);
        } while (!(pll_lock_check & 0x1) && --retry_count);
}
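/*
 * The 0x1048 and 0x1070 offsets above are ThunderX2 specific registers,
 * apparently PHY reset control and PLL lock status: the loop asserts the
 * PHY reset (0x6F), de-asserts it (0x7F), then polls bit 0 at 0x1070
 * until the PLL reports lock or the retries run out.
 */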
static void handle_port_status(struct xhci_hcd *xhci,
                               union xhci_trb *event)
{
        struct usb_hcd *hcd;
        u32 port_id;
        u32 portsc, cmd_reg;
        int max_ports;
        int slot_id;
        unsigned int hcd_portnum;
        struct xhci_bus_state *bus_state;
        bool bogus_port_status = false;
        struct xhci_port *port;

        /* Port status change events always have a successful completion code */
        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
                xhci_warn(xhci,
                          "WARN: xHC returned failed port status event\n");

        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);

        if ((port_id <= 0) || (port_id > max_ports)) {
                xhci_warn(xhci, "Port change event with invalid port ID %d\n",
                          port_id);
                inc_deq(xhci, xhci->event_ring);
                return;
        }

        port = &xhci->hw_ports[port_id - 1];
        if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
                xhci_warn(xhci, "Port change event, no port for port ID %u\n",
                          port_id);
                bogus_port_status = true;
                goto cleanup;
        }

        /* We might get interrupts after shared_hcd is removed */
        if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
                xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
                bogus_port_status = true;
                goto cleanup;
        }

        hcd = port->rhub->hcd;
        bus_state = &port->rhub->bus_state;
        hcd_portnum = port->hcd_portnum;
        portsc = readl(port->addr);

        xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
                 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);

        if (hcd->state == HC_STATE_SUSPENDED) {
                xhci_dbg(xhci, "resume root hub\n");
                usb_hcd_resume_root_hub(hcd);
        }

        if (hcd->speed >= HCD_USB3 &&
            (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
                slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
                if (slot_id && xhci->devs[slot_id])
                        xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
        }

        if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);

                cmd_reg = readl(&xhci->op_regs->command);
                if (!(cmd_reg & CMD_RUN)) {
                        xhci_warn(xhci, "xHC is not running.\n");
                        goto cleanup;
                }

                if (DEV_SUPERSPEED_ANY(portsc)) {
                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
                        /* Set a flag to say the port signaled remote wakeup,
                         * so we can tell the difference between the end of
                         * device and host initiated resume.
                         */
                        bus_state->port_remote_wakeup |= 1 << hcd_portnum;
                        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
                        usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
                        xhci_set_link_state(xhci, port, XDEV_U0);
                        /* Need to wait until the next link state change
                         * indicates the device is actually in U0.
                         */
                        bogus_port_status = true;
                        goto cleanup;
                } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
                        bus_state->resume_done[hcd_portnum] = jiffies +
                                msecs_to_jiffies(USB_RESUME_TIMEOUT);
                        set_bit(hcd_portnum, &bus_state->resuming_ports);
                        /* Do the rest in GetPortStatus after resume time delay.
                         * Avoid polling roothub status before that so that a
                         * usb device auto-resume latency around ~40ms.
                         */
                        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
                        mod_timer(&hcd->rh_timer,
                                  bus_state->resume_done[hcd_portnum]);
                        usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
                        bogus_port_status = true;
                }
        }

        if ((portsc & PORT_PLC) &&
            DEV_SUPERSPEED_ANY(portsc) &&
            ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
             (portsc & PORT_PLS_MASK) == XDEV_U1 ||
             (portsc & PORT_PLS_MASK) == XDEV_U2)) {
                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
                complete(&bus_state->u3exit_done[hcd_portnum]);
                /* We've just brought the device into U0/1/2 through either the
                 * Resume state after a device remote wakeup, or through the
                 * U3Exit state after a host-initiated resume. If it's a device
                 * initiated remote wake, don't pass up the link state change,
                 * so the roothub behavior is consistent with external
                 * USB 3.0 hub behavior.
                 */
                slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
                if (slot_id && xhci->devs[slot_id])
                        xhci_ring_device(xhci, slot_id);
                if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
                        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
                        usb_wakeup_notification(hcd->self.root_hub,
                                                hcd_portnum + 1);
                        bogus_port_status = true;
                        goto cleanup;
                }
        }

        /*
         * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
         * RExit to a disconnect state).  If so, let the driver know it's out
         * of the RExit state.
         */
        if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
            test_and_clear_bit(hcd_portnum,
                               &bus_state->rexit_ports)) {
                complete(&bus_state->rexit_done[hcd_portnum]);
                bogus_port_status = true;
                goto cleanup;
        }

        if (hcd->speed < HCD_USB3) {
                xhci_test_and_clear_bit(xhci, port, PORT_PLC);
                if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
                    (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
                        xhci_cavium_reset_phy_quirk(xhci);
        }

cleanup:
        /* Update event ring dequeue pointer before dropping the lock */
        inc_deq(xhci, xhci->event_ring);

        /* Don't make the USB core poll the roothub if we got a bad port status
         * change event.
         */
        if (bogus_port_status)
                return;

        /*
         * xHCI port-status-change events occur when the "or" of all the
         * status-change bits in the portsc register changes from 0 to 1.
         * New status changes won't cause an event if any other change
         * bits are still set.  When an event occurs, switch over to
         * polling to avoid losing status changes.
         */
        xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
                 __func__, hcd->self.busnum);
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        spin_unlock(&xhci->lock);
        /* Pass this up to the core */
        usb_hcd_poll_rh_status(hcd);
        spin_lock(&xhci->lock);
}
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is
 * a TRB in this TD, this function returns that TRB's segment, otherwise NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
                               struct xhci_segment *start_seg,
                               union xhci_trb *start_trb,
                               union xhci_trb *end_trb,
                               dma_addr_t suspect_dma,
                               bool debug)
{
        dma_addr_t start_dma;
        dma_addr_t end_seg_dma;
        dma_addr_t end_trb_dma;
        struct xhci_segment *cur_seg;

        start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
        cur_seg = start_seg;

        do {
                if (start_dma == 0)
                        return NULL;
                /* We may get an event for a Link TRB in the middle of a TD */
                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
                /* If the end TRB isn't in this segment, this is set to 0 */
                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

                if (debug)
                        xhci_warn(xhci,
                                  "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
                                  (unsigned long long)suspect_dma,
                                  (unsigned long long)start_dma,
                                  (unsigned long long)end_trb_dma,
                                  (unsigned long long)cur_seg->dma,
                                  (unsigned long long)end_seg_dma);

                if (end_trb_dma > 0) {
                        /* The end TRB is in this segment, so suspect should be here */
                        if (start_dma <= end_trb_dma) {
                                if (suspect_dma >= start_dma &&
                                    suspect_dma <= end_trb_dma)
                                        return cur_seg;
                        } else {
                                /* Case for one segment with
                                 * a TD wrapped around to the top
                                 */
                                if ((suspect_dma >= start_dma &&
                                     suspect_dma <= end_seg_dma) ||
                                    (suspect_dma >= cur_seg->dma &&
                                     suspect_dma <= end_trb_dma))
                                        return cur_seg;
                        }
                        return NULL;
                } else {
                        /* Might still be somewhere in this segment */
                        if (suspect_dma >= start_dma &&
                            suspect_dma <= end_seg_dma)
                                return cur_seg;
                }
                cur_seg = cur_seg->next;
                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
        } while (cur_seg != start_seg);

        return NULL;
}
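/*
 * trb_in_td() works purely on bus addresses: a TD occupies the inclusive
 * DMA range from its first to its last TRB, possibly spanning several
 * segments, so each segment is tested in turn.  For example, a TD running
 * from 0x1010 in one segment to 0x2040 in the next matches an event DMA
 * of 0x2000.
 */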
static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
                                     struct xhci_virt_ep *ep)
{
        /*
         * As part of low/full-speed endpoint-halt processing
         * we must clear the TT buffer (USB 2.0 specification 11.17.5).
         */
        if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
            (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
            !(ep->ep_state & EP_CLEARING_TT)) {
                ep->ep_state |= EP_CLEARING_TT;
                td->urb->ep->hcpriv = td->urb->dev;
                if (usb_hub_clear_tt_buffer(td->urb))
                        ep->ep_state &= ~EP_CLEARING_TT;
        }
}

/*
 * ...
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
                                             struct xhci_ep_ctx *ep_ctx,
                                             unsigned int trb_comp_code)
{
        ...
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
        if (trb_comp_code >= 224 && trb_comp_code <= 255) {
                /* Vendor defined "informational" completion code,
                 * treat as not-an-error.
                 */
                xhci_dbg(xhci, "Vendor defined info completion code %u\n",
                         trb_comp_code);
                xhci_dbg(xhci, "Treating code as success.\n");
                return 1;
        }
        return 0;
}
static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                     struct xhci_ring *ep_ring, struct xhci_td *td,
                     u32 trb_comp_code)
{
        struct xhci_ep_ctx *ep_ctx;

        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);

        switch (trb_comp_code) {
        ...
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_SPLIT_TRANSACTION_ERROR:
                /*
                 * If the endpoint context is not halted we might be racing
                 * with a reset endpoint command issued by the stop endpoint
                 * completion handler.  In that case the TD is already on the
                 * cancelled list and EP_HALTED is set.
                 */
                if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
                        if ((ep->ep_state & EP_HALTED) &&
                            !list_empty(&td->cancelled_td_list)) {
                                xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
                                         (unsigned long long)xhci_trb_virt_to_dma(
                                                 td->start_seg, td->first_trb));
                                return 0;
                        }
                        /* endpoint not halted, don't reset it */
                        break;
                }
                /* Almost same procedure as for STALL_ERROR below */
                xhci_clear_hub_tt_buffer(xhci, td, ep);
                xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
                                            EP_HARD_RESET);
                return 0;
        case COMP_STALL_ERROR:
                /*
                 * The xhci internal endpoint state will go to a "halt" state
                 * for any stall, including the default control pipe protocol
                 * stall; only clear the hub TT buffer for non-default
                 * endpoints.
                 */
                if (ep->ep_index != 0)
                        xhci_clear_hub_tt_buffer(xhci, td, ep);

                xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
                                            EP_HARD_RESET);
                return 0;
        default:
                break;
        }

        /* Update ring dequeue pointer */
        ep_ring->dequeue = td->last_trb;
        ep_ring->deq_seg = td->last_trb_seg;
        ep_ring->num_trbs_free += td->num_trbs - 1;
        inc_deq(xhci, ep_ring);

        return xhci_td_cleanup(xhci, td, ep_ring, td->status);
}
/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
                           union xhci_trb *stop_trb)
{
        u32 sum;
        union xhci_trb *trb = ring->dequeue;
        struct xhci_segment *seg = ring->deq_seg;

        for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
                if (!trb_is_noop(trb) && !trb_is_link(trb))
                        sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
        }
        return sum;
}
/* Process ctrl tds, update urb status and actual_length */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                           struct xhci_ring *ep_ring, struct xhci_td *td,
                           union xhci_trb *ep_trb,
                           struct xhci_transfer_event *event)
{
        struct xhci_ep_ctx *ep_ctx;
        u32 trb_comp_code;
        u32 remaining, requested;
        u32 trb_type;

        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
        requested = td->urb->transfer_buffer_length;
        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

        switch (trb_comp_code) {
        case COMP_SUCCESS:
                if (trb_type != TRB_STATUS) {
                        xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
                                  (trb_type == TRB_DATA) ? "data" : "setup");
                        td->status = -ESHUTDOWN;
                        break;
                }
                td->status = 0;
                break;
        case COMP_SHORT_PACKET:
                td->status = 0;
                break;
        case COMP_STOPPED_SHORT_PACKET:
                if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
                        td->urb->actual_length = remaining;
                else
                        xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
                goto finish_td;
        case COMP_STOPPED:
                switch (trb_type) {
                case TRB_SETUP:
                        td->urb->actual_length = 0;
                        goto finish_td;
                case TRB_DATA:
                case TRB_NORMAL:
                        td->urb->actual_length = requested - remaining;
                        goto finish_td;
                case TRB_STATUS:
                        td->urb->actual_length = requested;
                        goto finish_td;
                default:
                        xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
                                  trb_type);
                        goto finish_td;
                }
        case COMP_STOPPED_LENGTH_INVALID:
                goto finish_td;
        default:
                if (!xhci_requires_manual_halt_cleanup(xhci,
                                                       ep_ctx, trb_comp_code))
                        break;
                xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
                         trb_comp_code, ep->ep_index);
                fallthrough;
        case COMP_STALL_ERROR:
                /* Did we transfer part of the data (middle) phase? */
                if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
                        td->urb->actual_length = requested - remaining;
                else if (!td->urb_length_set)
                        td->urb->actual_length = 0;
                goto finish_td;
        }

        /* stopped at setup stage, no data transferred */
        if (trb_type == TRB_SETUP)
                goto finish_td;

        /*
         * if on data stage then update the actual_length of the URB and flag
         * it as set, so it won't be overwritten in the event for the last TRB.
         */
        if (trb_type == TRB_DATA ||
            trb_type == TRB_NORMAL) {
                td->urb_length_set = true;
                td->urb->actual_length = requested - remaining;
                xhci_dbg(xhci, "Waiting for status stage event\n");
                return 0;
        }

        /* at status stage */
        if (!td->urb_length_set)
                td->urb->actual_length = requested;

finish_td:
        return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
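/*
 * urb_length_set is the glue between the two transfer events of a control
 * transfer: the data stage event records the actual length and sets the
 * flag, so the later status stage event does not overwrite it with the
 * full requested length.
 */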
/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                           struct xhci_ring *ep_ring, struct xhci_td *td,
                           union xhci_trb *ep_trb,
                           struct xhci_transfer_event *event)
{
        struct urb_priv *urb_priv;
        int idx;
        struct usb_iso_packet_descriptor *frame;
        u32 trb_comp_code;
        bool sum_trbs_for_length = false;
        u32 remaining, requested, ep_trb_len;
        int short_framestatus;

        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
        urb_priv = td->urb->hcpriv;
        idx = urb_priv->num_tds_done;
        frame = &td->urb->iso_frame_desc[idx];
        requested = frame->length;
        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
        ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
        short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
                -EREMOTEIO : 0;

        /* handle completion code */
        switch (trb_comp_code) {
        case COMP_SUCCESS:
                if (remaining) {
                        frame->status = short_framestatus;
                        if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
                                sum_trbs_for_length = true;
                        break;
                }
                frame->status = 0;
                break;
        case COMP_SHORT_PACKET:
                frame->status = short_framestatus;
                sum_trbs_for_length = true;
                break;
        case COMP_BANDWIDTH_OVERRUN_ERROR:
                frame->status = -ECOMM;
                break;
        case COMP_ISOCH_BUFFER_OVERRUN:
        case COMP_BABBLE_DETECTED_ERROR:
                frame->status = -EOVERFLOW;
                break;
        case COMP_INCOMPATIBLE_DEVICE_ERROR:
        case COMP_STALL_ERROR:
                frame->status = -EPROTO;
                break;
        case COMP_USB_TRANSACTION_ERROR:
                frame->status = -EPROTO;
                if (ep_trb != td->last_trb)
                        return 0;
                break;
        case COMP_STOPPED:
                sum_trbs_for_length = true;
                break;
        case COMP_STOPPED_SHORT_PACKET:
                /* field normally containing residue now contains transferred */
                frame->status = short_framestatus;
                requested = remaining;
                break;
        case COMP_STOPPED_LENGTH_INVALID:
                requested = 0;
                remaining = 0;
                break;
        default:
                sum_trbs_for_length = true;
                frame->status = -1;
                break;
        }

        if (sum_trbs_for_length)
                frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
                        ep_trb_len - remaining;
        else
                frame->actual_length = requested;

        td->urb->actual_length += frame->actual_length;

        return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
2416 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, in skip_isoc_td() argument
2423 urb_priv = td->urb->hcpriv; in skip_isoc_td()
2424 idx = urb_priv->num_tds_done; in skip_isoc_td()
2425 frame = &td->urb->iso_frame_desc[idx]; in skip_isoc_td()
2428 frame->status = -EXDEV; in skip_isoc_td()
2431 frame->actual_length = 0; in skip_isoc_td()
2434 ep->ring->dequeue = td->last_trb; in skip_isoc_td()
2435 ep->ring->deq_seg = td->last_trb_seg; in skip_isoc_td()
2436 ep->ring->num_trbs_free += td->num_trbs - 1; in skip_isoc_td()
2437 inc_deq(xhci, ep->ring); in skip_isoc_td()
2439 return xhci_td_cleanup(xhci, td, ep->ring, status); in skip_isoc_td()
2445 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_bulk_intr_td() argument
2453 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); in process_bulk_intr_td()
2454 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in process_bulk_intr_td()
2455 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); in process_bulk_intr_td()
2456 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); in process_bulk_intr_td()
2457 requested = td->urb->transfer_buffer_length; in process_bulk_intr_td()
2461 ep_ring->err_count = 0; in process_bulk_intr_td()
2463 if (ep_trb != td->last_trb || remaining) { in process_bulk_intr_td()
2464 xhci_warn(xhci, "WARN Successful completion on short TX\n"); in process_bulk_intr_td()
2465 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", in process_bulk_intr_td()
2466 td->urb->ep->desc.bEndpointAddress, in process_bulk_intr_td()
2469 td->status = 0; in process_bulk_intr_td()
2472 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", in process_bulk_intr_td()
2473 td->urb->ep->desc.bEndpointAddress, in process_bulk_intr_td()
2475 td->status = 0; in process_bulk_intr_td()
2478 td->urb->actual_length = remaining; in process_bulk_intr_td()
2486 if (xhci->quirks & XHCI_NO_SOFT_RETRY || in process_bulk_intr_td()
2487 (ep_ring->err_count++ > MAX_SOFT_RETRY) || in process_bulk_intr_td()
2488 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) in process_bulk_intr_td()
2491 td->status = 0; in process_bulk_intr_td()
2493 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, in process_bulk_intr_td()
2501 if (ep_trb == td->last_trb) in process_bulk_intr_td()
2502 td->urb->actual_length = requested - remaining; in process_bulk_intr_td()
2504 td->urb->actual_length = in process_bulk_intr_td()
2505 sum_trb_lengths(xhci, ep_ring, ep_trb) + in process_bulk_intr_td()
2506 ep_trb_len - remaining; in process_bulk_intr_td()
2509 xhci_warn(xhci, "bad transfer trb length %d in event trb\n", in process_bulk_intr_td()
2511 td->urb->actual_length = 0; in process_bulk_intr_td()
2514 return finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_bulk_intr_td()
2522 static int handle_tx_event(struct xhci_hcd *xhci, in handle_tx_event() argument
2533 int status = -EINPROGRESS; in handle_tx_event()
2540 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); in handle_tx_event()
2541 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; in handle_tx_event()
2542 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in handle_tx_event()
2543 ep_trb_dma = le64_to_cpu(event->buffer); in handle_tx_event()
2545 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in handle_tx_event()
2547 xhci_err(xhci, "ERROR Invalid Transfer event\n"); in handle_tx_event()
2552 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in handle_tx_event()
2555 xhci_err(xhci, in handle_tx_event()
2561 /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ in handle_tx_event()
2568 xhci_handle_halted_endpoint(xhci, ep, 0, NULL, in handle_tx_event()
2576 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", in handle_tx_event()
2582 /* Count current td numbers if ep->skip is set */ in handle_tx_event()
2583 if (ep->skip) { in handle_tx_event()
2584 list_for_each(tmp, &ep_ring->td_list) in handle_tx_event()
2594 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) in handle_tx_event()
2596 if (xhci->quirks & XHCI_TRUST_TX_LENGTH || in handle_tx_event()
2597 ep_ring->last_td_was_short) in handle_tx_event()
2600 xhci_warn_ratelimited(xhci, in handle_tx_event()
2608 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n", in handle_tx_event()
2612 xhci_dbg(xhci, in handle_tx_event()
2613 "Stopped on No-op or Link TRB for slot %u ep %u\n", in handle_tx_event()
2617 xhci_dbg(xhci, in handle_tx_event()
2623 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, in handle_tx_event()
2625 status = -EPIPE; in handle_tx_event()
2628 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n", in handle_tx_event()
2630 status = -EPROTO; in handle_tx_event()
2633 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n", in handle_tx_event()
2635 status = -EPROTO; in handle_tx_event()
2638 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n", in handle_tx_event()
2640 status = -EOVERFLOW; in handle_tx_event()
2644 xhci_warn(xhci, in handle_tx_event()
2647 status = -EILSEQ; in handle_tx_event()
2651 xhci_warn(xhci, in handle_tx_event()
2654 status = -ENOSR; in handle_tx_event()
2657 xhci_warn(xhci, in handle_tx_event()
2662 xhci_warn(xhci, in handle_tx_event()
2672 xhci_dbg(xhci, "underrun event on endpoint\n"); in handle_tx_event()
2673 if (!list_empty(&ep_ring->td_list)) in handle_tx_event()
2674 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " in handle_tx_event()
2676 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), in handle_tx_event()
2680 xhci_dbg(xhci, "overrun event on endpoint\n"); in handle_tx_event()
2681 if (!list_empty(&ep_ring->td_list)) in handle_tx_event()
2682 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " in handle_tx_event()
2684 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), in handle_tx_event()
2694 ep->skip = true; in handle_tx_event()
2695 xhci_dbg(xhci, in handle_tx_event()
2700 ep->skip = true; in handle_tx_event()
2701 xhci_dbg(xhci, in handle_tx_event()
2708 xhci_warn(xhci, in handle_tx_event()
2711 status = -EPROTO; in handle_tx_event()
2714 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { in handle_tx_event()
2718 xhci_warn(xhci, in handle_tx_event()
2728 if (list_empty(&ep_ring->td_list)) { in handle_tx_event()
2739 ep_ring->last_td_was_short)) { in handle_tx_event()
2740 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", in handle_tx_event()
2741 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), in handle_tx_event()
2744 if (ep->skip) { in handle_tx_event()
2745 ep->skip = false; in handle_tx_event()
2746 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n", in handle_tx_event()
2750 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, in handle_tx_event()
2752 xhci_handle_halted_endpoint(xhci, ep, in handle_tx_event()
2753 ep_ring->stream_id, in handle_tx_event()
2760 /* We've skipped all the TDs on the ep ring when ep->skip set */ in handle_tx_event()
2761 if (ep->skip && td_num == 0) { in handle_tx_event()
2762 ep->skip = false; in handle_tx_event()
2763 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n", in handle_tx_event()
2768 td = list_first_entry(&ep_ring->td_list, struct xhci_td, in handle_tx_event()
2770 if (ep->skip) in handle_tx_event()
2771 td_num--; in handle_tx_event()
2774 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, in handle_tx_event()
2775 td->last_trb, ep_trb_dma, false); in handle_tx_event()
2779 * is not in the current TD pointed to by ep_ring->dequeue because in handle_tx_event()
2791 if (!ep->skip || in handle_tx_event()
2792 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { in handle_tx_event()
2797 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && in handle_tx_event()
2798 ep_ring->last_td_was_short) { in handle_tx_event()
2799 ep_ring->last_td_was_short = false; in handle_tx_event()
2803 xhci_err(xhci, in handle_tx_event()
2808 trb_in_td(xhci, ep_ring->deq_seg, in handle_tx_event()
2809 ep_ring->dequeue, td->last_trb, in handle_tx_event()
2811 return -ESHUTDOWN; in handle_tx_event()
2814 skip_isoc_td(xhci, td, ep, status); in handle_tx_event()
2818 ep_ring->last_td_was_short = true; in handle_tx_event()
2820 ep_ring->last_td_was_short = false; in handle_tx_event()
2822 if (ep->skip) { in handle_tx_event()
2823 xhci_dbg(xhci, in handle_tx_event()
2826 ep->skip = false; in handle_tx_event()
2829 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / in handle_tx_event()
2836 * No-op TRB could trigger interrupts in a case where in handle_tx_event()
2845 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, in handle_tx_event()
2847 xhci_handle_halted_endpoint(xhci, ep, in handle_tx_event()
2848 ep_ring->stream_id, in handle_tx_event()
2853 td->status = status; in handle_tx_event()
2856 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) in handle_tx_event()
2857 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2858 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) in handle_tx_event()
2859 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2861 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2863 handling_skipped_tds = ep->skip && in handle_tx_event()
2872 inc_deq(xhci, xhci->event_ring); in handle_tx_event()
2875 * If ep->skip is set, it means there are missed tds on the in handle_tx_event()
2885 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", in handle_tx_event()
2887 xhci->event_ring->deq_seg, in handle_tx_event()
2888 xhci->event_ring->dequeue), in handle_tx_event()
2889 lower_32_bits(le64_to_cpu(event->buffer)), in handle_tx_event()
2890 upper_32_bits(le64_to_cpu(event->buffer)), in handle_tx_event()
2891 le32_to_cpu(event->transfer_len), in handle_tx_event()
2892 le32_to_cpu(event->flags)); in handle_tx_event()
2893 return -ENODEV; in handle_tx_event()
2897 * This function handles all OS-owned events on the event ring. It may drop
2898 * xhci->lock between event processing (e.g. to pass up port status changes).
2902 static int xhci_handle_event(struct xhci_hcd *xhci) in xhci_handle_event() argument
2910 if (!xhci->event_ring || !xhci->event_ring->dequeue) { in xhci_handle_event()
2911 xhci_err(xhci, "ERROR event ring not ready\n"); in xhci_handle_event()
2912 return -ENOMEM; in xhci_handle_event()
2915 event = xhci->event_ring->dequeue; in xhci_handle_event()
2917 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != in xhci_handle_event()
2918 xhci->event_ring->cycle_state) in xhci_handle_event()
2921 trace_xhci_handle_event(xhci->event_ring, &event->generic); in xhci_handle_event()
2928 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); in xhci_handle_event()
2933 handle_cmd_completion(xhci, &event->event_cmd); in xhci_handle_event()
2936 handle_port_status(xhci, event); in xhci_handle_event()
2940 ret = handle_tx_event(xhci, &event->trans_event); in xhci_handle_event()
2945 handle_device_notification(xhci, event); in xhci_handle_event()
2949 handle_vendor_event(xhci, event, trb_type); in xhci_handle_event()
2951 xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type); in xhci_handle_event()
2953 /* Any of the above functions may drop and re-acquire the lock, so check in xhci_handle_event()
2954 * to make sure a watchdog timer didn't mark the host as non-responsive. in xhci_handle_event()
2956 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_handle_event()
2957 xhci_dbg(xhci, "xHCI host dying, returning from " in xhci_handle_event()
2964 inc_deq(xhci, xhci->event_ring); in xhci_handle_event()
2974 * - When all events have finished
2975 * - To avoid "Event Ring Full Error" condition
2977 static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, in xhci_update_erst_dequeue() argument
2983 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_update_erst_dequeue()
2985 if (event_ring_deq != xhci->event_ring->dequeue) { in xhci_update_erst_dequeue()
2986 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, in xhci_update_erst_dequeue()
2987 xhci->event_ring->dequeue); in xhci_update_erst_dequeue()
2989 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n"); in xhci_update_erst_dequeue()
3005 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); in xhci_update_erst_dequeue()
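/*
 * Note: the value written back combines the new dequeue DMA address
 * with ERST_EHB.  The Event Handler Busy flag is write-1-to-clear, so a
 * single 64-bit write both advances the controller's view of the
 * dequeue pointer and clears the busy flag.
 */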
3009 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
3015 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_irq() local
3022 spin_lock(&xhci->lock); in xhci_irq()
3024 status = readl(&xhci->op_regs->status); in xhci_irq()
3026 xhci_hc_died(xhci); in xhci_irq()
3035 xhci_warn(xhci, "WARNING: Host System Error\n"); in xhci_irq()
3036 xhci_halt(xhci); in xhci_irq()
3043 * so we can receive interrupts from other MSI-X interrupters. in xhci_irq()
3047 writel(status, &xhci->op_regs->status); in xhci_irq()
3049 if (!hcd->msi_enabled) { in xhci_irq()
3051 irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_irq()
3053 writel(irq_pending, &xhci->ir_set->irq_pending); in xhci_irq()
3056 if (xhci->xhc_state & XHCI_STATE_DYING || in xhci_irq()
3057 xhci->xhc_state & XHCI_STATE_HALTED) { in xhci_irq()
3058 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " in xhci_irq()
3063 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_irq()
3064 xhci_write_64(xhci, temp_64 | ERST_EHB, in xhci_irq()
3065 &xhci->ir_set->erst_dequeue); in xhci_irq()
3070 event_ring_deq = xhci->event_ring->dequeue; in xhci_irq()
3074 while (xhci_handle_event(xhci) > 0) { in xhci_irq()
3077 xhci_update_erst_dequeue(xhci, event_ring_deq); in xhci_irq()
3078 event_ring_deq = xhci->event_ring->dequeue; in xhci_irq()
3080 /* ring is half-full, force isoc trbs to interrupt more often */ in xhci_irq()
3081 if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN) in xhci_irq()
3082 xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2; in xhci_irq()
3087 xhci_update_erst_dequeue(xhci, event_ring_deq); in xhci_irq()
3091 spin_unlock(&xhci->lock); in xhci_irq()
3110 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in queue_trb() argument
3116 trb = &ring->enqueue->generic; in queue_trb()
3117 trb->field[0] = cpu_to_le32(field1); in queue_trb()
3118 trb->field[1] = cpu_to_le32(field2); in queue_trb()
3119 trb->field[2] = cpu_to_le32(field3); in queue_trb()
3122 trb->field[3] = cpu_to_le32(field4); in queue_trb()
3126 inc_enq(xhci, ring, more_trbs_coming); in queue_trb()
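/*
 * Note: field[3] is deliberately written last: it carries the TRB's
 * cycle bit, and the controller treats the TRB as valid as soon as that
 * bit matches its cycle state, so the other three fields must already
 * be in memory when field[3] flips ownership.
 */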
3133 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, in prepare_ring() argument
3146 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); in prepare_ring()
3147 return -ENOENT; in prepare_ring()
3149 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); in prepare_ring()
3151 /* XXX not sure if this should be -ENOENT or not */ in prepare_ring()
3152 return -EINVAL; in prepare_ring()
3154 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); in prepare_ring()
3160 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); in prepare_ring()
3165 return -EINVAL; in prepare_ring()
3169 if (room_on_ring(xhci, ep_ring, num_trbs)) in prepare_ring()
3172 if (ep_ring == xhci->cmd_ring) { in prepare_ring()
3173 xhci_err(xhci, "Do not support expand command ring\n"); in prepare_ring()
3174 return -ENOMEM; in prepare_ring()
3177 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, in prepare_ring()
3179 num_trbs_needed = num_trbs - ep_ring->num_trbs_free; in prepare_ring()
3180 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, in prepare_ring()
3182 xhci_err(xhci, "Ring expansion failed\n"); in prepare_ring()
3183 return -ENOMEM; in prepare_ring()
3187 while (trb_is_link(ep_ring->enqueue)) { in prepare_ring()
3191 if (!xhci_link_trb_quirk(xhci) && in prepare_ring()
3192 !(ep_ring->type == TYPE_ISOC && in prepare_ring()
3193 (xhci->quirks & XHCI_AMD_0x96_HOST))) in prepare_ring()
3194 ep_ring->enqueue->link.control &= in prepare_ring()
3197 ep_ring->enqueue->link.control |= in prepare_ring()
3201 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); in prepare_ring()
3204 if (link_trb_toggles_cycle(ep_ring->enqueue)) in prepare_ring()
3205 ep_ring->cycle_state ^= 1; in prepare_ring()
3207 ep_ring->enq_seg = ep_ring->enq_seg->next; in prepare_ring()
3208 ep_ring->enqueue = ep_ring->enq_seg->trbs; in prepare_ring()
3211 if (link_trb_count++ > ep_ring->num_segs) { in prepare_ring()
3212 xhci_warn(xhci, "Ring is an endless link TRB loop\n"); in prepare_ring()
3213 return -EINVAL; in prepare_ring()
3217 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) { in prepare_ring()
3218 xhci_warn(xhci, "Missing link TRB at end of ring segment\n"); in prepare_ring()
3219 return -EINVAL; in prepare_ring()
3225 static int prepare_transfer(struct xhci_hcd *xhci, in prepare_transfer() argument
3238 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in prepare_transfer()
3240 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, in prepare_transfer()
3243 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", in prepare_transfer()
3245 return -EINVAL; in prepare_transfer()
3248 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), in prepare_transfer()
3253 urb_priv = urb->hcpriv; in prepare_transfer()
3254 td = &urb_priv->td[td_index]; in prepare_transfer()
3256 INIT_LIST_HEAD(&td->td_list); in prepare_transfer()
3257 INIT_LIST_HEAD(&td->cancelled_td_list); in prepare_transfer()
3260 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); in prepare_transfer()
3265 td->urb = urb; in prepare_transfer()
3267 list_add_tail(&td->td_list, &ep_ring->td_list); in prepare_transfer()
3268 td->start_seg = ep_ring->enq_seg; in prepare_transfer()
3269 td->first_trb = ep_ring->enqueue; in prepare_transfer()
3278 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), in count_trbs()
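/*
 * Sketch (standalone, not part of xhci-ring.c): the TRB math used by
 * count_trbs() above.  A single TRB's buffer may not cross a 64KB
 * boundary, so the count depends on the buffer's offset within its
 * first 64KB block as well as its length.  count_trbs_demo() is a
 * hypothetical stand-in that open-codes DIV_ROUND_UP().
 */
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE	(1U << 16)	/* 64KB per TRB */

static unsigned int count_trbs_demo(unsigned long long addr,
				    unsigned int len)
{
	/* pad by the misalignment, then round up to whole TRBs */
	return (len + (unsigned int)(addr & (TRB_MAX_BUFF_SIZE - 1)) +
		TRB_MAX_BUFF_SIZE - 1) / TRB_MAX_BUFF_SIZE;
}

int main(void)
{
	/* 70000 bytes at offset 0x100: 70256 bytes to cover -> 2 TRBs */
	printf("%u\n", count_trbs_demo(0x100, 70000));
	/* same length starting 256 bytes below a 64KB boundary -> 3 TRBs */
	printf("%u\n", count_trbs_demo(0xff00, 70000));
	return 0;
}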
3288 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length); in count_trbs_needed()
3296 full_len = urb->transfer_buffer_length; in count_sg_trbs_needed()
3298 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { in count_sg_trbs_needed()
3302 full_len -= len; in count_sg_trbs_needed()
3314 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); in count_isoc_trbs_needed()
3315 len = urb->iso_frame_desc[i].length; in count_isoc_trbs_needed()
3322 if (unlikely(running_total != urb->transfer_buffer_length)) in check_trb_math()
3323 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " in check_trb_math()
3326 urb->ep->desc.bEndpointAddress, in check_trb_math()
3328 urb->transfer_buffer_length, in check_trb_math()
3329 urb->transfer_buffer_length); in check_trb_math()
3332 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, in giveback_first_trb() argument
3342 start_trb->field[3] |= cpu_to_le32(start_cycle); in giveback_first_trb()
3344 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); in giveback_first_trb()
3345 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); in giveback_first_trb()
3348 static void check_interval(struct xhci_hcd *xhci, struct urb *urb, in check_interval() argument
3354 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); in check_interval()
3355 ep_interval = urb->interval; in check_interval()
3358 if (urb->dev->speed == USB_SPEED_LOW || in check_interval()
3359 urb->dev->speed == USB_SPEED_FULL) in check_interval()
3366 dev_dbg_ratelimited(&urb->dev->dev, in check_interval()
3367 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", in check_interval()
3370 urb->interval = xhci_interval; in check_interval()
3372 if (urb->dev->speed == USB_SPEED_LOW || in check_interval()
3373 urb->dev->speed == USB_SPEED_FULL) in check_interval()
3374 urb->interval /= 8; in check_interval()
3379 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3384 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_intr_tx() argument
3389 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); in xhci_queue_intr_tx()
3390 check_interval(xhci, urb, ep_ctx); in xhci_queue_intr_tx()
3392 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_intr_tx()
3396 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3405 * TD size = total_packet_count - packets_transferred
3407 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3415 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, in xhci_td_remainder() argument
3421 /* MTK xHCI 0.96 contains some features from 1.0 */ in xhci_td_remainder()
3422 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) in xhci_td_remainder()
3423 return ((td_total_len - transferred) >> 10); in xhci_td_remainder()
3425 /* One TRB with a zero-length data packet. */ in xhci_td_remainder()
3430 /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */ in xhci_td_remainder()
3431 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) in xhci_td_remainder()
3434 maxp = usb_endpoint_maxp(&urb->ep->desc); in xhci_td_remainder()
3438 return (total_packet_count - ((transferred + trb_buff_len) / maxp)); in xhci_td_remainder()
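/*
 * Sketch (standalone, not part of xhci-ring.c): the xHCI 1.0+ TD Size
 * arithmetic from xhci_td_remainder() above, worked with concrete
 * numbers.  td_size_demo() is hypothetical and ignores the pre-1.0 and
 * MTK special cases handled by the driver.
 */
#include <stdio.h>

static unsigned int td_size_demo(unsigned int transferred,
				 unsigned int trb_buff_len,
				 unsigned int td_total_len,
				 unsigned int maxp)
{
	/* DIV_ROUND_UP(td_total_len, maxp) */
	unsigned int total_packets = (td_total_len + maxp - 1) / maxp;

	if (transferred + trb_buff_len >= td_total_len)
		return 0;	/* last TRB of the TD: TD Size must be 0 */
	return total_packets - (transferred + trb_buff_len) / maxp;
}

int main(void)
{
	/*
	 * A 3000-byte TD with 512-byte packets is 6 packets.  After this
	 * TRB, 2048 bytes (4 full packets) will be on the wire, so the
	 * TD Size field is the 2 packets still outstanding.
	 */
	printf("%u\n", td_size_demo(1024, 1024, 3000, 512));
	return 0;
}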
3442 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, in xhci_align_td() argument
3445 struct device *dev = xhci_to_hcd(xhci)->self.controller; in xhci_align_td()
3451 max_pkt = usb_endpoint_maxp(&urb->ep->desc); in xhci_align_td()
3458 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n", in xhci_align_td()
3463 *trb_buff_len -= unalign; in xhci_align_td()
3464 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len); in xhci_align_td()
3473 new_buff_len = max_pkt - (enqd_len % max_pkt); in xhci_align_td()
3475 if (new_buff_len > (urb->transfer_buffer_length - enqd_len)) in xhci_align_td()
3476 new_buff_len = (urb->transfer_buffer_length - enqd_len); in xhci_align_td()
3480 if (urb->num_sgs) { in xhci_align_td()
3481 len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, in xhci_align_td()
3482 seg->bounce_buf, new_buff_len, enqd_len); in xhci_align_td()
3484 xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", in xhci_align_td()
3487 memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); in xhci_align_td()
3490 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, in xhci_align_td()
3493 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, in xhci_align_td()
3497 if (dma_mapping_error(dev, seg->bounce_dma)) { in xhci_align_td()
3499 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); in xhci_align_td()
3503 seg->bounce_len = new_buff_len; in xhci_align_td()
3504 seg->bounce_offs = enqd_len; in xhci_align_td()
3506 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len); in xhci_align_td()
3511 /* This is very similar to what ehci-q.c qtd_fill() does */
3512 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_bulk_tx() argument
3530 ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_bulk_tx()
3532 return -EINVAL; in xhci_queue_bulk_tx()
3534 full_len = urb->transfer_buffer_length; in xhci_queue_bulk_tx()
3536 if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) { in xhci_queue_bulk_tx()
3537 num_sgs = urb->num_mapped_sgs; in xhci_queue_bulk_tx()
3538 sg = urb->sg; in xhci_queue_bulk_tx()
3544 addr = (u64) urb->transfer_dma; in xhci_queue_bulk_tx()
3547 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3548 ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3553 urb_priv = urb->hcpriv; in xhci_queue_bulk_tx()
3555 /* Deal with URB_ZERO_PACKET - need one more td/trb */ in xhci_queue_bulk_tx()
3556 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1) in xhci_queue_bulk_tx()
3559 td = &urb_priv->td[0]; in xhci_queue_bulk_tx()
3566 start_trb = &ring->enqueue->generic; in xhci_queue_bulk_tx()
3567 start_cycle = ring->cycle_state; in xhci_queue_bulk_tx()
3570 /* Queue the TRBs, even if they are zero-length */ in xhci_queue_bulk_tx()
3580 trb_buff_len = full_len - enqd_len; in xhci_queue_bulk_tx()
3588 field |= ring->cycle_state; in xhci_queue_bulk_tx()
3595 if (trb_is_link(ring->enqueue + 1)) { in xhci_queue_bulk_tx()
3596 if (xhci_align_td(xhci, urb, enqd_len, in xhci_queue_bulk_tx()
3598 ring->enq_seg)) { in xhci_queue_bulk_tx()
3599 send_addr = ring->enq_seg->bounce_dma; in xhci_queue_bulk_tx()
3601 td->bounce_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3609 td->last_trb = ring->enqueue; in xhci_queue_bulk_tx()
3610 td->last_trb_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3612 memcpy(&send_addr, urb->transfer_buffer, in xhci_queue_bulk_tx()
3624 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len, in xhci_queue_bulk_tx()
3631 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, in xhci_queue_bulk_tx()
3636 td->num_trbs++; in xhci_queue_bulk_tx()
3642 --num_sgs; in xhci_queue_bulk_tx()
3643 sent_len -= block_len; in xhci_queue_bulk_tx()
3651 block_len -= sent_len; in xhci_queue_bulk_tx()
3656 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3657 ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3659 urb_priv->td[1].last_trb = ring->enqueue; in xhci_queue_bulk_tx()
3660 urb_priv->td[1].last_trb_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3661 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; in xhci_queue_bulk_tx()
3662 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); in xhci_queue_bulk_tx()
3663 urb_priv->td[1].num_trbs++; in xhci_queue_bulk_tx()
3667 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3672 /* Caller must have locked xhci->lock */
3673 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_ctrl_tx() argument
3686 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_ctrl_tx()
3688 return -EINVAL; in xhci_queue_ctrl_tx()
3694 if (!urb->setup_packet) in xhci_queue_ctrl_tx()
3695 return -EINVAL; in xhci_queue_ctrl_tx()
3704 if (urb->transfer_buffer_length > 0) in xhci_queue_ctrl_tx()
3706 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_ctrl_tx()
3707 ep_index, urb->stream_id, in xhci_queue_ctrl_tx()
3712 urb_priv = urb->hcpriv; in xhci_queue_ctrl_tx()
3713 td = &urb_priv->td[0]; in xhci_queue_ctrl_tx()
3714 td->num_trbs = num_trbs; in xhci_queue_ctrl_tx()
3721 start_trb = &ep_ring->enqueue->generic; in xhci_queue_ctrl_tx()
3722 start_cycle = ep_ring->cycle_state; in xhci_queue_ctrl_tx()
3724 /* Queue setup TRB - see section 6.4.1.2.1 */ in xhci_queue_ctrl_tx()
3726 setup = (struct usb_ctrlrequest *) urb->setup_packet; in xhci_queue_ctrl_tx()
3732 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ in xhci_queue_ctrl_tx()
3733 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { in xhci_queue_ctrl_tx()
3734 if (urb->transfer_buffer_length > 0) { in xhci_queue_ctrl_tx()
3735 if (setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3742 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3743 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, in xhci_queue_ctrl_tx()
3744 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, in xhci_queue_ctrl_tx()
3756 if (urb->transfer_buffer_length > 0) { in xhci_queue_ctrl_tx()
3761 memcpy(&addr, urb->transfer_buffer, in xhci_queue_ctrl_tx()
3762 urb->transfer_buffer_length); in xhci_queue_ctrl_tx()
3766 addr = (u64) urb->transfer_dma; in xhci_queue_ctrl_tx()
3769 remainder = xhci_td_remainder(xhci, 0, in xhci_queue_ctrl_tx()
3770 urb->transfer_buffer_length, in xhci_queue_ctrl_tx()
3771 urb->transfer_buffer_length, in xhci_queue_ctrl_tx()
3773 length_field = TRB_LEN(urb->transfer_buffer_length) | in xhci_queue_ctrl_tx()
3776 if (setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3778 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3782 field | ep_ring->cycle_state); in xhci_queue_ctrl_tx()
3786 td->last_trb = ep_ring->enqueue; in xhci_queue_ctrl_tx()
3787 td->last_trb_seg = ep_ring->enq_seg; in xhci_queue_ctrl_tx()
3789 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ in xhci_queue_ctrl_tx()
3791 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3795 queue_trb(xhci, ep_ring, false, in xhci_queue_ctrl_tx()
3800 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); in xhci_queue_ctrl_tx()
3802 giveback_first_trb(xhci, slot_id, ep_index, 0, in xhci_queue_ctrl_tx()
3813 * zero. Only xHCI 1.0 host controllers support this field.
3815 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, in xhci_get_burst_count() argument
3820 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) in xhci_get_burst_count()
3823 max_burst = urb->ep->ss_ep_comp.bMaxBurst; in xhci_get_burst_count()
3824 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; in xhci_get_burst_count()
3835 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, in xhci_get_last_burst_packet_count() argument
3841 if (xhci->hci_version < 0x100) in xhci_get_last_burst_packet_count()
3844 if (urb->dev->speed >= USB_SPEED_SUPER) { in xhci_get_last_burst_packet_count()
3846 max_burst = urb->ep->ss_ep_comp.bMaxBurst; in xhci_get_last_burst_packet_count()
3849 * number of packets, but the TLBPC field is zero-based. in xhci_get_last_burst_packet_count()
3853 return residue - 1; in xhci_get_last_burst_packet_count()
3857 return total_packet_count - 1; in xhci_get_last_burst_packet_count()
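/*
 * Sketch (standalone, not part of xhci-ring.c): the TBC/TLBPC
 * arithmetic from the two helpers above, with concrete SuperSpeed
 * numbers.  bMaxBurst = 3 means bursts of up to 4 packets.
 */
#include <stdio.h>

int main(void)
{
	unsigned int total_packets = 10, max_burst = 3;
	unsigned int burst_size = max_burst + 1;
	unsigned int residue = total_packets % burst_size;

	/* burst count, zero-based: DIV_ROUND_UP(10, 4) - 1 = 2 */
	printf("TBC   = %u\n",
	       (total_packets + burst_size - 1) / burst_size - 1);

	/* packets in the last burst, zero-based: 10 % 4 - 1 = 1; a zero
	 * residue would mean a full last burst, i.e. TLBPC = max_burst
	 */
	printf("TLBPC = %u\n", residue ? residue - 1 : max_burst);
	return 0;
}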
3867 static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, in xhci_get_isoc_frame_id() argument
3873 if (urb->dev->speed == USB_SPEED_LOW || in xhci_get_isoc_frame_id()
3874 urb->dev->speed == USB_SPEED_FULL) in xhci_get_isoc_frame_id()
3875 start_frame = urb->start_frame + index * urb->interval; in xhci_get_isoc_frame_id()
3877 start_frame = (urb->start_frame + index * urb->interval) >> 3; in xhci_get_isoc_frame_id()
3887 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_get_isoc_frame_id()
3888 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_get_isoc_frame_id()
3904 current_frame_id = readl(&xhci->run_regs->microframe_index); in xhci_get_isoc_frame_id()
3912 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", in xhci_get_isoc_frame_id()
3913 __func__, index, readl(&xhci->run_regs->microframe_index), in xhci_get_isoc_frame_id()
3919 ret = -EINVAL; in xhci_get_isoc_frame_id()
3923 ret = -EINVAL; in xhci_get_isoc_frame_id()
3925 ret = -EINVAL; in xhci_get_isoc_frame_id()
3929 if (ret == -EINVAL || start_frame == start_frame_id) { in xhci_get_isoc_frame_id()
3931 if (urb->dev->speed == USB_SPEED_LOW || in xhci_get_isoc_frame_id()
3932 urb->dev->speed == USB_SPEED_FULL) in xhci_get_isoc_frame_id()
3933 urb->start_frame = start_frame; in xhci_get_isoc_frame_id()
3935 urb->start_frame = start_frame << 3; in xhci_get_isoc_frame_id()
3941 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n", in xhci_get_isoc_frame_id()
3944 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n"); in xhci_get_isoc_frame_id()
3952 static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i) in trb_block_event_intr() argument
3954 if (xhci->hci_version < 0x100) in trb_block_event_intr()
3957 if (i == num_tds - 1) in trb_block_event_intr()
3963 if (i && xhci->quirks & XHCI_AVOID_BEI) in trb_block_event_intr()
3964 return !!(i % xhci->isoc_bei_interval); in trb_block_event_intr()
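/*
 * Worked example (illustrative): with XHCI_AVOID_BEI set,
 * isoc_bei_interval = 8 and a 20-TD URB, TDs 8 and 16 generate transfer
 * events to keep the event ring drained (i % 8 == 0), TD 19 generates
 * one because the last TD always must, and the remaining TDs are queued
 * with the Block Event Interrupt bit set so they complete silently.
 */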
3970 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx() argument
3988 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx()
3989 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; in xhci_queue_isoc_tx()
3991 num_tds = urb->number_of_packets; in xhci_queue_isoc_tx()
3993 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); in xhci_queue_isoc_tx()
3994 return -EINVAL; in xhci_queue_isoc_tx()
3996 start_addr = (u64) urb->transfer_dma; in xhci_queue_isoc_tx()
3997 start_trb = &ep_ring->enqueue->generic; in xhci_queue_isoc_tx()
3998 start_cycle = ep_ring->cycle_state; in xhci_queue_isoc_tx()
4000 urb_priv = urb->hcpriv; in xhci_queue_isoc_tx()
4001 /* Queue the TRBs for each TD, even if they are zero-length */ in xhci_queue_isoc_tx()
4009 addr = start_addr + urb->iso_frame_desc[i].offset; in xhci_queue_isoc_tx()
4010 td_len = urb->iso_frame_desc[i].length; in xhci_queue_isoc_tx()
4012 max_pkt = usb_endpoint_maxp(&urb->ep->desc); in xhci_queue_isoc_tx()
4015 /* A zero-length transfer still involves at least one packet. */ in xhci_queue_isoc_tx()
4018 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count); in xhci_queue_isoc_tx()
4019 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci, in xhci_queue_isoc_tx()
4024 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, in xhci_queue_isoc_tx()
4025 urb->stream_id, trbs_per_td, urb, i, mem_flags); in xhci_queue_isoc_tx()
4031 td = &urb_priv->td[i]; in xhci_queue_isoc_tx()
4032 td->num_trbs = trbs_per_td; in xhci_queue_isoc_tx()
4035 if (!(urb->transfer_flags & URB_ISO_ASAP) && in xhci_queue_isoc_tx()
4036 HCC_CFC(xhci->hcc_params)) { in xhci_queue_isoc_tx()
4037 frame_id = xhci_get_isoc_frame_id(xhci, urb, i); in xhci_queue_isoc_tx()
4049 (i ? ep_ring->cycle_state : !start_cycle); in xhci_queue_isoc_tx()
4051 /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */ in xhci_queue_isoc_tx()
4052 if (!xep->use_extended_tbc) in xhci_queue_isoc_tx()
4062 ep_ring->cycle_state; in xhci_queue_isoc_tx()
4069 if (j < trbs_per_td - 1) { in xhci_queue_isoc_tx()
4074 td->last_trb = ep_ring->enqueue; in xhci_queue_isoc_tx()
4075 td->last_trb_seg = ep_ring->enq_seg; in xhci_queue_isoc_tx()
4077 if (trb_block_event_intr(xhci, num_tds, i)) in xhci_queue_isoc_tx()
4086 remainder = xhci_td_remainder(xhci, running_total, in xhci_queue_isoc_tx()
4093 /* xhci 1.1 with ETE uses TD Size field for TBC */ in xhci_queue_isoc_tx()
4094 if (first_trb && xep->use_extended_tbc) in xhci_queue_isoc_tx()
4100 queue_trb(xhci, ep_ring, more_trbs_coming, in xhci_queue_isoc_tx()
4108 td_remain_len -= trb_buff_len; in xhci_queue_isoc_tx()
4113 xhci_err(xhci, "ISOC TD length unmatch\n"); in xhci_queue_isoc_tx()
4114 ret = -EINVAL; in xhci_queue_isoc_tx()
4120 if (HCC_CFC(xhci->hcc_params)) in xhci_queue_isoc_tx()
4121 xep->next_frame_id = urb->start_frame + num_tds * urb->interval; in xhci_queue_isoc_tx()
4123 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_queue_isoc_tx()
4124 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_queue_isoc_tx()
4127 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; in xhci_queue_isoc_tx()
4129 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_isoc_tx()
4135 for (i--; i >= 0; i--) in xhci_queue_isoc_tx()
4136 list_del_init(&urb_priv->td[i].td_list); in xhci_queue_isoc_tx()
4139 * into No-ops with a software-owned cycle bit. That way the hardware in xhci_queue_isoc_tx()
4141 * overwrite them. td->first_trb and td->start_seg are already set. in xhci_queue_isoc_tx()
4143 urb_priv->td[0].last_trb = ep_ring->enqueue; in xhci_queue_isoc_tx()
4145 td_to_noop(xhci, ep_ring, &urb_priv->td[0], true); in xhci_queue_isoc_tx()
4148 ep_ring->enqueue = urb_priv->td[0].first_trb; in xhci_queue_isoc_tx()
4149 ep_ring->enq_seg = urb_priv->td[0].start_seg; in xhci_queue_isoc_tx()
4150 ep_ring->cycle_state = start_cycle; in xhci_queue_isoc_tx()
4151 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp; in xhci_queue_isoc_tx()
4152 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); in xhci_queue_isoc_tx()
4159 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
4160 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
4163 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx_prepare() argument
4175 xdev = xhci->devs[slot_id]; in xhci_queue_isoc_tx_prepare()
4176 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx_prepare()
4177 ep_ring = xdev->eps[ep_index].ring; in xhci_queue_isoc_tx_prepare()
4178 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in xhci_queue_isoc_tx_prepare()
4181 num_tds = urb->number_of_packets; in xhci_queue_isoc_tx_prepare()
4188 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), in xhci_queue_isoc_tx_prepare()
4197 check_interval(xhci, urb, ep_ctx); in xhci_queue_isoc_tx_prepare()
4199 /* Calculate the start frame and put it in urb->start_frame. */ in xhci_queue_isoc_tx_prepare()
4200 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { in xhci_queue_isoc_tx_prepare()
4202 urb->start_frame = xep->next_frame_id; in xhci_queue_isoc_tx_prepare()
4207 start_frame = readl(&xhci->run_regs->microframe_index); in xhci_queue_isoc_tx_prepare()
4213 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_queue_isoc_tx_prepare()
4214 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_queue_isoc_tx_prepare()
4223 if (urb->dev->speed == USB_SPEED_LOW || in xhci_queue_isoc_tx_prepare()
4224 urb->dev->speed == USB_SPEED_FULL) { in xhci_queue_isoc_tx_prepare()
4225 start_frame = roundup(start_frame, urb->interval << 3); in xhci_queue_isoc_tx_prepare()
4226 urb->start_frame = start_frame >> 3; in xhci_queue_isoc_tx_prepare()
4228 start_frame = roundup(start_frame, urb->interval); in xhci_queue_isoc_tx_prepare()
4229 urb->start_frame = start_frame; in xhci_queue_isoc_tx_prepare()
4233 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free; in xhci_queue_isoc_tx_prepare()
4235 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_isoc_tx_prepare()
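/*
 * Worked example of the start-frame rounding above (illustrative,
 * ignoring the IST headroom added on the elided lines): if the
 * microframe index reads 1001 and a full-speed endpoint has
 * urb->interval = 4 frames (32 microframes), roundup(1001, 32) = 1024
 * and urb->start_frame = 1024 >> 3 = frame 128.
 */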
4245 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
4248 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in queue_command() argument
4252 int reserved_trbs = xhci->cmd_ring_reserved_trbs; in queue_command()
4255 if ((xhci->xhc_state & XHCI_STATE_DYING) || in queue_command()
4256 (xhci->xhc_state & XHCI_STATE_HALTED)) { in queue_command()
4257 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); in queue_command()
4258 return -ESHUTDOWN; in queue_command()
4264 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, in queue_command()
4267 xhci_err(xhci, "ERR: No room for command on command ring\n"); in queue_command()
4269 xhci_err(xhci, "ERR: Reserved TRB counting for " in queue_command()
4274 cmd->command_trb = xhci->cmd_ring->enqueue; in queue_command()
4277 if (list_empty(&xhci->cmd_list)) { in queue_command()
4278 xhci->current_cmd = cmd; in queue_command()
4279 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); in queue_command()
4282 list_add_tail(&cmd->cmd_list, &xhci->cmd_list); in queue_command()
4284 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, in queue_command()
4285 field4 | xhci->cmd_ring->cycle_state); in queue_command()
4290 int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_slot_control() argument
4293 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_slot_control()
4298 int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_address_device() argument
4301 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_address_device()
4307 int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_vendor_command() argument
4310 return queue_command(xhci, cmd, field1, field2, field3, field4, false); in xhci_queue_vendor_command()
4314 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_device() argument
4317 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_device()
4323 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, in xhci_queue_configure_endpoint() argument
4327 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_configure_endpoint()
4334 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_evaluate_context() argument
4337 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_evaluate_context()
4347 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_stop_endpoint() argument
4355 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_stop_endpoint()
4359 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_ep() argument
4370 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_ep()