// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * ...
 * until you reach a non-link TRB.
 */

#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"

static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
                         u32 field1, u32 field2, u32 field3, u32 field4,
                         bool command_must_succeed);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb)
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}

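/*
 * Worked example for the address math above (hypothetical values, not from
 * the driver): with seg->dma == 0x8000 and trb == &seg->trbs[4],
 * segment_offset is 4 and the function returns 0x8000 + 4 * 16 = 0x8040,
 * since each union xhci_trb is 16 bytes.
 */
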
static bool trb_is_noop(union xhci_trb *trb)
{
        return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
        return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
        return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
                             struct xhci_segment *seg, union xhci_trb *trb)
{
        return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
        return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

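/*
 * Illustrative sketch, not part of the driver: the predicates above are the
 * building blocks for walking a ring while skipping link TRBs, along the
 * lines of (seg, trb and cycle are hypothetical locals):
 *
 *      while (trb_is_link(trb)) {
 *              if (link_trb_toggles_cycle(trb))
 *                      cycle ^= 1;
 *              seg = seg->next;
 *              trb = seg->trbs;
 *      }
 */
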
static bool last_td_in_urb(struct xhci_td *td)
{
        struct urb_priv *urb_priv = td->urb->hcpriv;

        return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
        struct urb_priv *urb_priv = urb->hcpriv;

        urb_priv->num_tds_done++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
        if (trb_is_link(trb)) {
                /* Unchain chained link TRBs */
                trb->link.control &= cpu_to_le32(~TRB_CHAIN);
        } else {
                trb->generic.field[0] = 0;
                trb->generic.field[1] = 0;
                trb->generic.field[2] = 0;
                /* Preserve only the cycle bit of this TRB */
                trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
                trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
        }
}

/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
                     struct xhci_ring *ring,
                     struct xhci_segment **seg,
                     union xhci_trb **trb)
{
        if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
                (*trb)++;
        }
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        unsigned int link_trb_count = 0;

        /* event ring doesn't have link trbs, check for last trb */
        if (ring->type == TYPE_EVENT) {
                if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
                        ring->dequeue++;
                        return;
                }
                if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
                        ring->cycle_state ^= 1;
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
                return;
        }

        /* All other rings have link trbs */
        if (!trb_is_link(ring->dequeue)) {
                if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
                        xhci_warn(xhci, "Missing link TRB at end of segment\n");
                else
                        ring->dequeue++;
        }

        while (trb_is_link(ring->dequeue)) {
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;

                if (link_trb_count++ > ring->num_segs) {
                        xhci_warn(xhci, "Ring is an endless link TRB loop\n");
                        break;
                }
        }
}

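/*
 * Hedged background note (not in the original source): event ring segments
 * are described to the controller by the Event Ring Segment Table rather
 * than chained together with link TRBs, which is why the TYPE_EVENT branch
 * above only tests last_trb_on_seg()/last_trb_on_ring() instead of
 * trb_is_link().
 */
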
/*
 * ...
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                    bool more_trbs_coming)
{
        u32 chain;
        union xhci_trb *next;
        unsigned int link_trb_count = 0;

        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

        if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
                xhci_err(xhci, "Tried to move enqueue past ring segment\n");
                return;
        }

        next = ++(ring->enqueue);

        /* Update the enqueue pointer further if that was a link TRB */
        while (trb_is_link(next)) {
                /* ... */
                if (!(ring->type == TYPE_ISOC &&
                      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
                    !xhci_link_trb_quirk(xhci)) {
                        next->link.control &= cpu_to_le32(~TRB_CHAIN);
                        next->link.control |= cpu_to_le32(chain);
                }
                /* Give this link TRB to the hardware */
                wmb();
                next->link.control ^= cpu_to_le32(TRB_CYCLE);

                /* Toggle the cycle bit after the last ring segment. */
                if (link_trb_toggles_cycle(next))
                        ring->cycle_state ^= 1;

                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;

                if (link_trb_count++ > ring->num_segs) {
                        xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
                        break;
                }
        }
        /* ... */
}

/*
 * Return number of free normal TRBs from enqueue to dequeue pointer on ring.
 * Not counting an assumed link TRB at end of each TRBS_PER_SEGMENT sized segment.
 * Only for transfer and command rings where driver is the producer, not for
 * event rings.
 */
static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_segment *enq_seg = ring->enq_seg;
        union xhci_trb *enq = ring->enqueue;
        union xhci_trb *last_on_seg;
        unsigned int free = 0;
        int i = 0;

        /* Ring might be empty even if enq != deq if enq is left on a link trb */
        if (trb_is_link(enq)) {
                enq_seg = enq_seg->next;
                enq = enq_seg->trbs;
        }

        /* Empty ring, common case, don't walk the segments */
        if (enq == ring->dequeue)
                return ring->num_segs * (TRBS_PER_SEGMENT - 1);

        do {
                if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
                        return free + (ring->dequeue - enq);
                last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
                free += last_on_seg - enq;
                enq_seg = enq_seg->next;
                enq = enq_seg->trbs;
        } while (i++ <= ring->num_segs);

        return free;
}

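/*
 * Worked example (assuming the usual TRBS_PER_SEGMENT of 256): on a
 * two-segment ring with enqueue == dequeue the ring is empty and the
 * function reports 2 * (256 - 1) = 510 free TRBs; one slot per segment is
 * reserved for the link TRB and is never counted.
 */
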
static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
                                               unsigned int num_trbs)
{
        struct xhci_segment *seg;
        int trbs_past_seg;
        int enq_used;
        int new_segs;

        enq_used = ring->enqueue - ring->enq_seg->trbs;

        /* how many trbs will be queued past the enqueue segment? */
        trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);

        if (trbs_past_seg < 0)
                return 0;

        /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
        if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
                return 0;

        new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
        seg = ring->enq_seg;

        while (new_segs > 0) {
                seg = seg->next;
                if (seg == ring->deq_seg) {
                        xhci_dbg(xhci, "Ring expansion by %d segments needed\n",
                                 new_segs);
                        xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n",
                                 num_trbs, trbs_past_seg % (TRBS_PER_SEGMENT - 1));
                        return new_segs;
                }
                new_segs--;
        }

        return 0;
}

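/*
 * Worked example for new_segs above (assuming TRBS_PER_SEGMENT == 256):
 * with 10 TRBs already used in the enqueue segment and a 300-TRB request,
 * trbs_past_seg = 10 + 300 - 255 = 55 and new_segs = 1 + 55 / 255 = 1, so
 * expansion by one segment is reported only if the dequeue segment sits
 * within one hop of the enqueue segment.
 */
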
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
                return;

        xhci_dbg(xhci, "// Ding dong!\n");

        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
        return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
        return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
                                        cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
                                         struct xhci_command *cur_cmd)
{
        struct xhci_command *i_cmd;

        /* Turn all aborted commands in list to no-ops, then restart */
        list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

                if (i_cmd->status != COMP_COMMAND_ABORTED)
                        continue;

                i_cmd->status = COMP_COMMAND_RING_STOPPED;

                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
                         i_cmd->command_trb);

                trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

                /*
                 * Caller waiting for completion is called when command
                 * completion event is received for these no-op commands.
                 */
        }

        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        /* ring command ring doorbell to restart the command ring */
        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
            !(xhci->xhc_state & XHCI_STATE_DYING)) {
                xhci->current_cmd = cur_cmd;
                xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
                xhci_ring_cmd_db(xhci);
        }
}

/* Must be called with xhci->lock held, releases and acquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
        struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
        union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
        u64 crcr;
        int ret;

        xhci_dbg(xhci, "Abort command ring\n");

        reinit_completion(&xhci->cmd_ring_stop_completion);

        /*
         * The control bits like command stop, abort are located in the lower
         * dword of the command ring control register.
         * ...
         */
        next_trb(xhci, NULL, &new_seg, &new_deq);
        if (trb_is_link(new_deq))
                next_trb(xhci, NULL, &new_seg, &new_deq);

        crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
        xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

        /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
         * completion of the Command Abort operation. If CRR is not negated in 5
         * seconds then driver handles it as if host died (-ENODEV).
         * In the future we should distinguish between -ENODEV and -ETIMEDOUT
         * and try to recover a -ETIMEDOUT with a host controller reset.
         */
        ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                             CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
        if (ret < 0) {
                xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
                xhci_halt(xhci);
                xhci_hc_died(xhci);
                return ret;
        }
        /* ... */
        spin_unlock_irqrestore(&xhci->lock, flags);
        ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
                                          msecs_to_jiffies(2000));
        spin_lock_irqsave(&xhci->lock, flags);
        if (!ret) {
                xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
                xhci_cleanup_command_queue(xhci);
        } else {
                xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
        }
        return 0;
}

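/*
 * Hedged usage sketch (hypothetical caller, not from this file): the
 * function is entered with xhci->lock held and irqs saved in 'flags',
 * because it drops and re-takes that exact lock while waiting for the
 * command ring stop event:
 *
 *      spin_lock_irqsave(&xhci->lock, flags);
 *      ...
 *      xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
 *      xhci_abort_cmd_ring(xhci, flags);
 *      ...
 *      spin_unlock_irqrestore(&xhci->lock, flags);
 */
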
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                           unsigned int slot_id,
                           unsigned int ep_index,
                           unsigned int stream_id)
{
        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        unsigned int ep_state = ep->ep_state;

        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations because we don't want to interrupt processing.
         * ...
         */
        if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
            (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
                return;

        writel(DB_VALUE(ep_index, stream_id), db_addr);
        /* flush the write */
        readl(db_addr);
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                                           unsigned int slot_id,
                                           unsigned int ep_index)
{
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];

        /* A ring has pending URBs if its TD list is not empty */
        if (!(ep->ep_state & EP_HAS_STREAMS)) {
                if (ep->ring && !(list_empty(&ep->ring->td_list)))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
                return;
        }

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
             stream_id++) {
                struct xhci_stream_info *stream_info = ep->stream_info;

                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
                                              stream_id);
        }
}

void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                                         unsigned int slot_id,
                                         unsigned int ep_index)
{
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
                                             unsigned int slot_id,
                                             unsigned int ep_index)
{
        if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
                xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
                return NULL;
        }
        if (ep_index >= EP_CTX_PER_DEV) {
                xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
                return NULL;
        }
        if (!xhci->devs[slot_id]) {
                xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
                return NULL;
        }

        return &xhci->devs[slot_id]->eps[ep_index];
}

static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
                                              struct xhci_virt_ep *ep,
                                              unsigned int stream_id)
{
        /* common case, no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (!ep->stream_info)
                return NULL;

        if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
                xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
                          stream_id, ep->vdev->slot_id, ep->ep_index);
                return NULL;
        }

        return ep->stream_info->stream_rings[stream_id];
}

/*
 * Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
                                              unsigned int slot_id,
                                              unsigned int ep_index,
                                              unsigned int stream_id)
{
        struct xhci_virt_ep *ep;

        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return NULL;

        return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}

static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
                           unsigned int ep_index, unsigned int stream_id)
{
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_stream_ctx *st_ctx;
        struct xhci_virt_ep *ep;

        ep = &vdev->eps[ep_index];

        if (ep->ep_state & EP_HAS_STREAMS) {
                st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
                return le64_to_cpu(st_ctx->stream_ring);
        }
        ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
        return le64_to_cpu(ep_ctx->deq);
}

static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id, struct xhci_td *td)
{
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_virt_ep *ep = &dev->eps[ep_index];
        struct xhci_ring *ep_ring;
        struct xhci_command *cmd;
        struct xhci_segment *new_seg;
        union xhci_trb *new_deq;
        int new_cycle;
        dma_addr_t addr;
        u64 hw_dequeue;
        bool cycle_found = false;
        bool td_last_trb_found = false;
        u32 trb_sct = 0;
        int ret;

        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
                          stream_id);
                return -ENODEV;
        }

        /*
         * A cancelled TD can complete with a stall if HW cached the trb.
         * In this case driver can't find td, but if the ring is empty we
         * can move the dequeue pointer to the current enqueue position.
         */
        if (!td) {
                if (list_empty(&ep_ring->td_list)) {
                        new_seg = ep_ring->enq_seg;
                        new_deq = ep_ring->enqueue;
                        new_cycle = ep_ring->cycle_state;
                        xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
                        goto deq_found;
                } else {
                        xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
                        return -EINVAL;
                }
        }

        hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
        new_seg = ep_ring->deq_seg;
        new_deq = ep_ring->dequeue;
        new_cycle = hw_dequeue & 0x1;

        /*
         * Walk the ring until both the TRB at hw_dequeue and td->last_trb
         * have been found, tracking the cycle state on the way.
         */
        do {
                if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
                    == (dma_addr_t)(hw_dequeue & ~0xf)) {
                        cycle_found = true;
                        if (td_last_trb_found)
                                break;
                }
                if (new_deq == td->last_trb)
                        td_last_trb_found = true;

                if (cycle_found && trb_is_link(new_deq) &&
                    link_trb_toggles_cycle(new_deq))
                        new_cycle ^= 0x1;

                next_trb(xhci, ep_ring, &new_seg, &new_deq);

                /* Search wrapped around, bail out */
                if (new_deq == ep->ring->dequeue) {
                        xhci_err(xhci, "Error: Failed finding new dequeue state\n");
                        return -EINVAL;
                }

        } while (!cycle_found || !td_last_trb_found);

deq_found:
        addr = xhci_trb_virt_to_dma(new_seg, new_deq);
        if (addr == 0) {
                xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
                xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
                return -EINVAL;
        }

        if ((ep->ep_state & SET_DEQ_PENDING)) {
                xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
                          &addr);
                return -EBUSY;
        }

        /* This function gets called from contexts where it cannot sleep */
        cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
        if (!cmd) {
                xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
                return -ENOMEM;
        }

        if (stream_id)
                trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
        ret = queue_command(xhci, cmd,
                        lower_32_bits(addr) | trb_sct | new_cycle,
                        upper_32_bits(addr),
                        STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
                        EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
        if (ret < 0) {
                xhci_free_command(xhci, cmd);
                return ret;
        }

        ep->queued_deq_seg = new_seg;
        ep->queued_deq_ptr = new_deq;

        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                       "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);

        /*
         * Stop the TD queueing code from ringing the doorbell until this
         * command completes. The HC won't set the dequeue pointer if the
         * ring is running, and ringing the doorbell starts the ring running.
         */
        ep->ep_state |= SET_DEQ_PENDING;
        xhci_ring_cmd_db(xhci);
        return 0;
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                       struct xhci_td *td, bool flip_cycle)
{
        struct xhci_segment *seg = td->start_seg;
        union xhci_trb *trb = td->first_trb;

        while (1) {
                trb_to_noop(trb, TRB_TR_NOOP);

                /* flip cycle if asked to */
                if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
                        trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

                if (trb == td->last_trb)
                        break;

                next_trb(xhci, ep_ring, &seg, &trb);
        }
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
                                     struct xhci_td *cur_td, int status)
{
        struct urb *urb = cur_td->urb;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                usb_amd_quirk_pll_enable();
                }
        }
        xhci_urb_free_priv(urb_priv);
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        usb_hcd_giveback_urb(hcd, urb, status);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
                                        struct xhci_ring *ring, struct xhci_td *td)
{
        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
        struct xhci_segment *seg = td->bounce_seg;
        struct urb *urb = td->urb;
        size_t len;

        if (!ring || !seg || !urb)
                return;

        if (usb_urb_dir_out(urb)) {
                dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                                 DMA_TO_DEVICE);
                return;
        }

        dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                         DMA_FROM_DEVICE);
        /* for in transfers we need to copy the data from bounce to sg */
        if (urb->num_sgs) {
                len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
                                           seg->bounce_len, seg->bounce_offs);
                if (len != seg->bounce_len)
                        xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
                                  len, seg->bounce_len);
        } else {
                memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
                       seg->bounce_len);
        }
        seg->bounce_len = 0;
        seg->bounce_offs = 0;
}

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
                           struct xhci_ring *ep_ring, int status)
{
        struct urb *urb = NULL;

        /* Clean up the endpoint's TD list */
        urb = td->urb;

        /* if a bounce buffer was used to align this td then unmap it */
        xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

        /* Do one last check of the actual transfer length.
         * If the host controller said we transferred more data than the buffer
         * length, urb->actual_length will be a very big number (since it's
         * unsigned).  Play it safe and say we didn't transfer anything.
         */
        if (urb->actual_length > urb->transfer_buffer_length) {
                xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
                          urb->transfer_buffer_length, urb->actual_length);
                urb->actual_length = 0;
                status = 0;
        }
        /* TD might be removed from td_list if we are giving back a cancelled URB */
        if (!list_empty(&td->td_list))
                list_del_init(&td->td_list);
        /* Giving back a cancelled URB, or if a slated TD completed anyway */
        if (!list_empty(&td->cancelled_td_list))
                list_del_init(&td->cancelled_td_list);

        inc_td_cnt(urb);
        /* Giveback the urb when all the tds are completed */
        if (last_td_in_urb(td)) {
                if ((urb->actual_length != urb->transfer_buffer_length &&
                     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
                    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
                        xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
                                 urb, urb->actual_length,
                                 urb->transfer_buffer_length, status);

                /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
                        status = 0;
                xhci_giveback_urb_in_irq(xhci, td, status);
        }

        return 0;
}

/* Complete the cancelled URBs we unlinked from td_list. */
static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
{
        struct xhci_ring *ring;
        struct xhci_td *td, *tmp_td;

        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
                                 cancelled_td_list) {

                ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);

                if (td->cancel_status == TD_CLEARED) {
                        xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
                                 __func__, td->urb);
                        xhci_td_cleanup(ep->xhci, td, ring, td->status);
                } else {
                        xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
                                 __func__, td->urb, td->cancel_status);
                }
                if (ep->xhci->xhc_state & XHCI_STATE_DYING)
                        return;
        }
}

static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
                                unsigned int ep_index, enum xhci_ep_reset_type reset_type)
{
        struct xhci_command *command;
        int ret = 0;

        command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
        if (!command) {
                ret = -ENOMEM;
                goto done;
        }

        xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
                 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
                 ep_index, slot_id);

        ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
done:
        if (ret)
                xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
                         slot_id, ep_index, ret);
        return ret;
}

static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
                                       struct xhci_virt_ep *ep,
                                       struct xhci_td *td,
                                       enum xhci_ep_reset_type reset_type)
{
        unsigned int slot_id = ep->vdev->slot_id;
        int err;

        /*
         * Avoid resetting endpoint if link is inactive. Can cause host hang.
         * Device will be reset soon to recover the link so don't do anything.
         */
        if (ep->vdev->flags & VDEV_PORT_ERROR)
                return -ENODEV;

        /* add td to cancelled list and let reset ep handler take care of it */
        if (reset_type == EP_HARD_RESET) {
                ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
                if (td && list_empty(&td->cancelled_td_list)) {
                        list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
                        td->cancel_status = TD_HALTED;
                }
        }

        if (ep->ep_state & EP_HALTED) {
                xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
                         ep->ep_index);
                return 0;
        }

        err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
        if (err)
                return err;

        ep->ep_state |= EP_HALTED;

        xhci_ring_cmd_db(xhci);

        return 0;
}

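/*
 * Hedged note: in this file EP_HARD_RESET is used when the endpoint really
 * halted (e.g. a STALL, see finish_td() below), which also sets
 * EP_HARD_CLEAR_TOGGLE above, while EP_SOFT_RESET is used for the
 * transaction-error soft-retry path in process_bulk_intr_td().
 */
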
/*
 * Fix up the ep ring first, so HW stops executing cancelled TDs.
 * We have the xHCI lock, so nothing can modify this list until we drop it.
 * We're also in the event handler, so we can't get re-interrupted if another
 * Stop Endpoint command completes.
 */
static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_td *td = NULL;
        struct xhci_td *tmp_td = NULL;
        struct xhci_td *cached_td = NULL;
        struct xhci_ring *ring;
        u64 hw_deq;
        unsigned int slot_id = ep->vdev->slot_id;
        int err;

        xhci = ep->xhci;

        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                               "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
                               (unsigned long long)xhci_trb_virt_to_dma(
                                       td->start_seg, td->first_trb),
                               td->urb->stream_id, td->urb);
                list_del_init(&td->td_list);
                ring = xhci_urb_to_transfer_ring(xhci, td->urb);
                if (!ring) {
                        xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
                                  td->urb, td->urb->stream_id);
                        continue;
                }
                /*
                 * If a ring stopped on the TD we need to cancel then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
                                         td->urb->stream_id);
                hw_deq &= ~0xf;

                if (td->cancel_status == TD_HALTED ||
                    trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
                        switch (td->cancel_status) {
                        case TD_CLEARED: /* TD is already no-op */
                        case TD_CLEARING_CACHE: /* set TR deq command already queued */
                                break;
                        case TD_DIRTY: /* TD is cached, clear it */
                        case TD_HALTED:
                                td->cancel_status = TD_CLEARING_CACHE;
                                if (cached_td)
                                        xhci_dbg(xhci,
                                                 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
                                                 td->urb->stream_id, td->urb,
                                                 cached_td->urb->stream_id, cached_td->urb);
                                cached_td = td;
                                break;
                        }
                } else {
                        td_to_noop(xhci, ring, td, false);
                        td->cancel_status = TD_CLEARED;
                }
        }

        /* If there's no need to move the dequeue pointer then we're done */
        if (!cached_td)
                return 0;

        err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
                                        cached_td->urb->stream_id,
                                        cached_td);
        if (err) {
                /* Failed to move past cached td, just set cached TDs to no-op */
                list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
                        if (td->cancel_status != TD_CLEARING_CACHE)
                                continue;
                        xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
                                 td->urb);
                        td_to_noop(xhci, ring, td, false);
                        td->cancel_status = TD_CLEARED;
                }
        }
        return 0;
}

/*
 * Returns the TD the endpoint ring halted on.
 * Only call for non-running rings without streams.
 */
static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
{
        struct xhci_td *td;
        u64 hw_deq;

        if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
                hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
                hw_deq &= ~0xf;
                td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
                if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
                              td->last_trb, hw_deq, false))
                        return td;
        }
        return NULL;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                                    union xhci_trb *trb, u32 comp_code)
{
        unsigned int ep_index;
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_td *td = NULL;
        enum xhci_ep_reset_type reset_type;
        struct xhci_command *command;
        int err;

        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
                if (!xhci->devs[slot_id])
                        xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
                                  slot_id);
                return;
        }

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return;

        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

        if (comp_code == COMP_CONTEXT_STATE_ERROR) {
                /*
                 * If the stop endpoint command raced with a halting endpoint,
                 * reset the host side endpoint first.
                 *
                 * Proper error code is unknown here, it would be -EPIPE if device side
                 * of endpoint halted (aka STALL), and -EPROTO if not (transaction error).
                 * We use -EPROTO, if device is stalled it should return a stall error on
                 * next transfer, which then will return -EPIPE, and device side stall is
                 * noted and cleared by class driver.
                 */
                switch (GET_EP_CTX_STATE(ep_ctx)) {
                case EP_STATE_HALTED:
                        xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
                        if (ep->ep_state & EP_HAS_STREAMS) {
                                reset_type = EP_SOFT_RESET;
                        } else {
                                reset_type = EP_HARD_RESET;
                                td = find_halted_td(ep);
                                if (td)
                                        td->status = -EPROTO;
                        }
                        /* reset ep, reset handler cleans up cancelled tds */
                        err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
                        if (err)
                                break;
                        ep->ep_state &= ~EP_STOP_CMD_PENDING;
                        return;
                case EP_STATE_RUNNING:
                        /* Race, HW handled stop ep cmd before ep was running */
                        xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");

                        command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
                        if (!command) {
                                ep->ep_state &= ~EP_STOP_CMD_PENDING;
                                return;
                        }
                        xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
                        xhci_ring_cmd_db(xhci);

                        return;
                default:
                        break;
                }
        }

        /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
        xhci_invalidate_cancelled_tds(ep);
        ep->ep_state &= ~EP_STOP_CMD_PENDING;

        /* Otherwise ring the doorbell(s) to restart queued transfers */
        xhci_giveback_invalidated_tds(ep);
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_td *cur_td;
        struct xhci_td *tmp;

        list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
                list_del_init(&cur_td->td_list);

                if (!list_empty(&cur_td->cancelled_td_list))
                        list_del_init(&cur_td->cancelled_td_list);

                xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

                inc_td_cnt(cur_td->urb);
                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
                                    int slot_id, int ep_index)
{
        struct xhci_td *cur_td;
        struct xhci_td *tmp;
        struct xhci_virt_ep *ep;
        struct xhci_ring *ring;

        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return;

        if ((ep->ep_state & EP_HAS_STREAMS) ||
            (ep->ep_state & EP_GETTING_NO_STREAMS)) {
                int stream_id;

                for (stream_id = 1; stream_id < ep->stream_info->num_streams;
                     stream_id++) {
                        ring = ep->stream_info->stream_rings[stream_id];
                        if (!ring)
                                continue;

                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                       "Killing URBs for slot ID %u, ep index %u, stream %u",
                                       slot_id, ep_index, stream_id);
                        xhci_kill_ring_urbs(xhci, ring);
                }
        } else {
                ring = ep->ring;
                if (!ring)
                        return;
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                               "Killing URBs for slot ID %u, ep index %u",
                               slot_id, ep_index);
                xhci_kill_ring_urbs(xhci, ring);
        }

        list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
                                 cancelled_td_list) {
                list_del_init(&cur_td->cancelled_td_list);
                inc_td_cnt(cur_td->urb);

                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

/*
 * Host controller died, register read returns 0xffffffff.
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * lock is released and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
        int i, j;

        if (xhci->xhc_state & XHCI_STATE_DYING)
                return;

        xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
        xhci->xhc_state |= XHCI_STATE_DYING;

        xhci_cleanup_command_queue(xhci);

        /* return any pending urbs, remove may be waiting for them */
        for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; j++)
                        xhci_kill_endpoint_urbs(xhci, i, j);
        }

        /* inform usb core hc died if PCI remove isn't already handling it */
        if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
                usb_hc_died(xhci_to_hcd(xhci));
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
                                               struct xhci_virt_device *dev,
                                               struct xhci_ring *ep_ring,
                                               unsigned int ep_index)
{
        union xhci_trb *dequeue_temp;

        dequeue_temp = ep_ring->dequeue;

        /* If we get two back-to-back stalls, and the first stalled transfer
         * ends just before a link TRB, the dequeue pointer will be left on the
         * link TRB by the code in the while loop.  So we have to update the
         * dequeue pointer one segment further, or we'll jump off the segment
         * into la-la-land.
         */
        if (trb_is_link(ep_ring->dequeue)) {
                ep_ring->deq_seg = ep_ring->deq_seg->next;
                ep_ring->dequeue = ep_ring->deq_seg->trbs;
        }

        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->dequeue++;
                if (trb_is_link(ep_ring->dequeue)) {
                        if (ep_ring->dequeue ==
                            dev->eps[ep_index].queued_deq_ptr)
                                break;
                        ep_ring->deq_seg = ep_ring->deq_seg->next;
                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
                }
                if (ep_ring->dequeue == dequeue_temp) {
                        xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
                        break;
                }
        }
}

static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                                    union xhci_trb *trb, u32 cmd_comp_code)
{
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_td *td, *tmp_td;
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_ring *ep_ring;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return;

        ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
                          stream_id);
                goto cleanup;
        }

        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
        slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);

        if (cmd_comp_code != COMP_SUCCESS) {
                unsigned int ep_state;
                unsigned int slot_state;

                switch (cmd_comp_code) {
                case COMP_TRB_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
                        break;
                case COMP_CONTEXT_STATE_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
                        ep_state = GET_EP_CTX_STATE(ep_ctx);
                        slot_state = le32_to_cpu(slot_ctx->dev_state);
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                       "Slot state = %u, EP state = %u",
                                       slot_state, ep_state);
                        break;
                case COMP_SLOT_NOT_ENABLED_ERROR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
                                  slot_id);
                        break;
                default:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
                                  cmd_comp_code);
                        break;
                }
                /* ... */
        } else {
                u64 deq;
                /* 4.6.10 deq ptr is written to the stream ctx for streams */
                if (ep->ep_state & EP_HAS_STREAMS) {
                        struct xhci_stream_ctx *ctx =
                                &ep->stream_info->stream_ctx_array[stream_id];
                        deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
                } else {
                        deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
                }
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                               "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
                if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
                                         ep->queued_deq_ptr) == deq) {
                        /* Update the ring's dequeue segment and dequeue pointer
                         * to reflect the new position.
                         */
                        update_ring_for_set_deq_completion(xhci, ep->vdev,
                                                           ep_ring, ep_index);
                } else {
                        xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
                                  ep->queued_deq_seg, ep->queued_deq_ptr);
                }
        }

        /* HW cached TDs cleared from cache, give them back */
        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
                                 cancelled_td_list) {
                ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
                if (td->cancel_status == TD_CLEARING_CACHE) {
                        td->cancel_status = TD_CLEARED;
                        xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
                                 __func__, td->urb);
                        xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
                } else {
                        xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
                                 __func__, td->urb, td->cancel_status);
                }
        }
cleanup:
        ep->ep_state &= ~SET_DEQ_PENDING;
        ep->queued_deq_seg = NULL;
        ep->queued_deq_ptr = NULL;
        /* Restart any rings with pending URBs */
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                                     union xhci_trb *trb, u32 cmd_comp_code)
{
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        unsigned int ep_index;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return;

        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

        /* This command will only fail if the endpoint wasn't halted,
         * but we don't care.
         */
        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                       "Ignoring reset ep completion code of %u", cmd_comp_code);

        /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
        xhci_invalidate_cancelled_tds(ep);

        /* Clear our internal halted state */
        ep->ep_state &= ~EP_HALTED;

        xhci_giveback_invalidated_tds(ep);

        /* if this was a soft reset, then restart */
        if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
                                        struct xhci_command *command, u32 cmd_comp_code)
{
        if (cmd_comp_code == COMP_SUCCESS)
                command->slot_id = slot_id;
        else
                command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *virt_dev;
        struct xhci_slot_ctx *slot_ctx;

        virt_dev = xhci->devs[slot_id];
        if (!virt_dev)
                return;

        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);

        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
                /* Delete default control endpoint resources */
                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
                                      u32 cmd_comp_code)
{
        struct xhci_virt_device *virt_dev;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        unsigned int ep_index;
        u32 add_flags;

        /*
         * Configure endpoint commands can come from the USB core configuration
         * or alt setting changes, or when streams were being configured.
         */
        virt_dev = xhci->devs[slot_id];
        if (!virt_dev)
                return;
        ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
        if (!ctrl_ctx) {
                xhci_warn(xhci, "Could not get input context, bad type.\n");
                return;
        }

        add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        /* Input ctx add_flags are the endpoint index plus one */
        ep_index = xhci_last_valid_endpoint(add_flags) - 1;

        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
        /* ... */
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *vdev;
        struct xhci_slot_ctx *slot_ctx;

        vdev = xhci->devs[slot_id];
        if (!vdev)
                return;
        slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
        /* ... */
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *vdev;
        struct xhci_slot_ctx *slot_ctx;

        vdev = xhci->devs[slot_id];
        if (!vdev) {
                xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
                          slot_id);
                return;
        }
        slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);

        xhci_dbg(xhci, "Completed reset device command.\n");
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
                                       struct xhci_event_cmd *event)
{
        if (!(xhci->quirks & XHCI_NEC_HOST)) {
                xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
                return;
        }
        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                       "NEC firmware version %2x.%02x",
                       NEC_FW_MAJOR(le32_to_cpu(event->status)),
                       NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
        list_del(&cmd->cmd_list);

        if (cmd->completion) {
                cmd->status = status;
                complete(cmd->completion);
        } else {
                kfree(cmd);
        }
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
        struct xhci_command *cur_cmd, *tmp_cmd;

        xhci->current_cmd = NULL;
        list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
                xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
        struct xhci_hcd *xhci;
        unsigned long flags;
        char str[XHCI_MSG_MAX];
        u64 hw_ring_state;
        u32 cmd_field3;
        u32 usbsts;

        xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

        spin_lock_irqsave(&xhci->lock, flags);

        /*
         * If timeout work is pending, or current_cmd is NULL, it means we
         * raced with command completion. Command is handled so just return.
         */
        if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
        usbsts = readl(&xhci->op_regs->status);
        xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));

        /* Bail out and tear down xhci if a stop endpoint command failed */
        if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
                struct xhci_virt_ep *ep;

                xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");

                ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
                                      TRB_TO_EP_INDEX(cmd_field3));
                if (ep)
                        ep->ep_state &= ~EP_STOP_CMD_PENDING;

                xhci_halt(xhci);
                xhci_hc_died(xhci);
                goto time_out_completed;
        }

        /* mark this command to be cancelled */
        xhci->current_cmd->status = COMP_COMMAND_ABORTED;

        /* Make sure command ring is running before aborting it */
        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if (hw_ring_state == ~(u64)0) {
                xhci_hc_died(xhci);
                goto time_out_completed;
        }

        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
            (hw_ring_state & CMD_RING_RUNNING)) {
                /* Prevent new doorbell, and start command abort */
                xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
                xhci_dbg(xhci, "Command timeout\n");
                xhci_abort_cmd_ring(xhci, flags);
                goto time_out_completed;
        }

        /* host removed. Bail out */
        if (xhci->xhc_state & XHCI_STATE_REMOVING) {
                xhci_dbg(xhci, "host removed, ring start fail?\n");
                xhci_cleanup_command_queue(xhci);

                goto time_out_completed;
        }

        /* command timeout on stopped ring, ring can't be aborted */
        xhci_dbg(xhci, "Command timeout on stopped ring\n");
        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
        spin_unlock_irqrestore(&xhci->lock, flags);
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
                                  struct xhci_event_cmd *event)
{
        unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        u64 cmd_dma;
        dma_addr_t cmd_dequeue_dma;
        u32 cmd_comp_code;
        union xhci_trb *cmd_trb;
        struct xhci_command *cmd;
        u32 cmd_type;

        if (slot_id >= MAX_HC_SLOTS) {
                xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
                return;
        }

        cmd_dma = le64_to_cpu(event->cmd_trb);
        cmd_trb = xhci->cmd_ring->dequeue;

        trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                               cmd_trb);
        /* Check whether the completion event matches our internal dequeue */
        if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
                xhci_warn(xhci,
                          "ERROR mismatched command completion event\n");
                return;
        }

        cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

        cancel_delayed_work(&xhci->cmd_timer);

        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

        /* If CMD ring stopped we own the trbs between enqueue and dequeue */
        if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
                complete_all(&xhci->cmd_ring_stop_completion);
                return;
        }

        if (cmd->command_trb != xhci->cmd_ring->dequeue) {
                xhci_err(xhci,
                         "Command completion event does not match command\n");
                return;
        }

        /*
         * Host aborted the command ring, check if the current command was
         * supposed to be aborted, otherwise continue normally.
         */
        if (cmd_comp_code == COMP_COMMAND_ABORTED) {
                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
                if (cmd->status == COMP_COMMAND_ABORTED) {
                        if (xhci->current_cmd == cmd)
                                xhci->current_cmd = NULL;
                        goto event_handled;
                }
        }

        cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
        switch (cmd_type) {
        case TRB_ENABLE_SLOT:
                xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
                break;
        case TRB_DISABLE_SLOT:
                xhci_handle_cmd_disable_slot(xhci, slot_id);
                break;
        case TRB_CONFIG_EP:
                if (!cmd->completion)
                        xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
                break;
        case TRB_ADDR_DEV:
                xhci_handle_cmd_addr_dev(xhci, slot_id);
                break;
        case TRB_STOP_RING:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                if (!cmd->completion)
                        xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
                                                cmd_comp_code);
                break;
        case TRB_SET_DEQ:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_CMD_NOOP:
                /* Is this an aborted command turned to NO-OP? */
                if (cmd->status == COMP_COMMAND_RING_STOPPED)
                        cmd_comp_code = COMP_COMMAND_RING_STOPPED;
                break;
        case TRB_RESET_EP:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_RESET_DEV:
                /* SLOT_ID field in reset device cmd completion event TRB is 0.
                 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
                 */
                slot_id = TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3]));
                xhci_handle_cmd_reset_dev(xhci, slot_id);
                break;
        case TRB_NEC_GET_FW:
                xhci_handle_cmd_nec_get_fw(xhci, event);
                break;
        default:
                /* Skip over unknown commands on the event ring */
                xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
                break;
        }

        /* restart timer if this wasn't the last command */
        if (!list_is_singular(&xhci->cmd_list)) {
                xhci->current_cmd = list_first_entry(&cmd->cmd_list,
                                                     struct xhci_command, cmd_list);
                xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
        } else if (xhci->current_cmd == cmd) {
                xhci->current_cmd = NULL;
        }

event_handled:
        xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

        inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
                                union xhci_trb *event, u32 trb_type)
{
        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
                handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_device_notification(struct xhci_hcd *xhci,
                                       union xhci_trb *event)
{
        u32 slot_id;
        struct usb_device *udev;

        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
        if (!xhci->devs[slot_id]) {
                xhci_warn(xhci, "Device Notification event for unused slot %u\n",
                          slot_id);
                return;
        }

        xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
                 slot_id);
        udev = xhci->devs[slot_id]->udev;
        if (udev && udev->parent)
                usb_wakeup_notification(udev->parent, udev->portnum);
}

/*
 * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
 * host controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * if a connection to a USB 1 device is followed by another connection
 * to a USB 2 device.
 *
 * Reset the PHY after the USB device is disconnected if device speed
 * is less than HCD_USB3.
 * Retry the reset sequence a maximum of 4 times checking the PLL lock status.
 */
static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 pll_lock_check;
        u32 retry_count = 4;

        do {
                /* Assert PHY reset */
                writel(0x6F, hcd->regs + 0x1048);
                udelay(10);
                /* De-assert the PHY reset */
                writel(0x7F, hcd->regs + 0x1048);
                udelay(200);
                pll_lock_check = readl(hcd->regs + 0x1070);
        } while (!(pll_lock_check & 0x1) && --retry_count);
}

static void handle_port_status(struct xhci_hcd *xhci,
                               struct xhci_interrupter *ir,
                               union xhci_trb *event)
{
        struct usb_hcd *hcd;
        u32 port_id;
        u32 portsc, cmd_reg;
        int max_ports;
        int slot_id;
        unsigned int hcd_portnum;
        struct xhci_bus_state *bus_state;
        bool bogus_port_status = false;
        struct xhci_port *port;

        /* Port status change events always have a successful completion code */
        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
                xhci_warn(xhci,
                          "WARN: xHC returned failed port status event\n");

        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);

        if ((port_id <= 0) || (port_id > max_ports)) {
                xhci_warn(xhci, "Port change event with invalid port ID %d\n",
                          port_id);
                inc_deq(xhci, ir->event_ring);
                return;
        }

        port = &xhci->hw_ports[port_id - 1];
        if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
                xhci_warn(xhci, "Port change event, no port for port ID %u\n",
                          port_id);
                bogus_port_status = true;
                goto cleanup;
        }

        /* We might get interrupts after shared_hcd is removed */
        if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
                xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
                bogus_port_status = true;
                goto cleanup;
        }

        hcd = port->rhub->hcd;
        bus_state = &port->rhub->bus_state;
        hcd_portnum = port->hcd_portnum;
        portsc = readl(port->addr);

        xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
                 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);

        if (hcd->state == HC_STATE_SUSPENDED) {
                xhci_dbg(xhci, "resume root hub\n");
                usb_hcd_resume_root_hub(hcd);
        }

        if (hcd->speed >= HCD_USB3 &&
            (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
                slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
                if (slot_id && xhci->devs[slot_id])
                        xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
        }

        if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);

                cmd_reg = readl(&xhci->op_regs->command);
                if (!(cmd_reg & CMD_RUN)) {
                        xhci_warn(xhci, "xHC is not running.\n");
                        goto cleanup;
                }

                if (DEV_SUPERSPEED_ANY(portsc)) {
                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
                        /* Set a flag to say the port signaled remote wakeup,
                         * so we can tell the difference between the end of
                         * device and host initiated resume.
                         */
                        bus_state->port_remote_wakeup |= 1 << hcd_portnum;
                        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
                        usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
                        xhci_set_link_state(xhci, port, XDEV_U0);
                        /* Need to wait until the next link state change
                         * indicates the device is actually in U0.
                         */
                        bogus_port_status = true;
                        goto cleanup;
                } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
                        port->resume_timestamp = jiffies +
                                msecs_to_jiffies(USB_RESUME_TIMEOUT);
                        set_bit(hcd_portnum, &bus_state->resuming_ports);
                        /* Do the rest in GetPortStatus after resume time delay.
                         * Avoid polling roothub status before that so that a
                         * usb device auto-resume latency around ~40ms.
                         */
                        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
                        mod_timer(&hcd->rh_timer,
                                  port->resume_timestamp);
                        usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
                        bogus_port_status = true;
                }
        }

        if ((portsc & PORT_PLC) &&
            DEV_SUPERSPEED_ANY(portsc) &&
            ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
             (portsc & PORT_PLS_MASK) == XDEV_U1 ||
             (portsc & PORT_PLS_MASK) == XDEV_U2)) {
                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
                complete(&port->u3exit_done);
                /* We've just brought the device into U0/1/2 through either the
                 * Resume state after a device remote wakeup, or through the
                 * U3Exit state after a host-initiated resume. If it's a device
                 * initiated remote wake, don't pass up the link state change,
                 * so the roothub behavior is consistent with external
                 * USB 3.0 hub behavior.
                 */
                slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
                if (slot_id && xhci->devs[slot_id])
                        xhci_ring_device(xhci, slot_id);
                if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
                        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
                        usb_wakeup_notification(hcd->self.root_hub,
                                                hcd_portnum + 1);
                        bogus_port_status = true;
                        goto cleanup;
                }
        }

        /*
         * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
         * RExit to a disconnect state).  If so, let the driver know it's
         * out of the RExit state.
         */
        if (hcd->speed < HCD_USB3 && port->rexit_active) {
                complete(&port->rexit_done);
                port->rexit_active = false;
                bogus_port_status = true;
                goto cleanup;
        }

        if (hcd->speed < HCD_USB3) {
                xhci_test_and_clear_bit(xhci, port, PORT_PLC);
                if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
                    (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
                        xhci_cavium_reset_phy_quirk(xhci);
        }

cleanup:
        /* Update event ring dequeue pointer before dropping the lock */
        inc_deq(xhci, ir->event_ring);

        /* Don't make the USB core poll the roothub if we got a bad port status
         * change event.
         */
        if (bogus_port_status)
                return;

        /*
         * xHCI port-status-change events occur when the "or" of all the
         * status-change bits in the portsc register changes from 0 to 1.
         * New status changes won't cause an event if any other change
         * bits are still set.  When an event occurs, switch over to
         * polling to avoid losing status changes.
         */
        xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
                 __func__, hcd->self.busnum);
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        spin_unlock(&xhci->lock);
        /* Pass this up to the core */
        usb_hcd_poll_rh_status(hcd);
        spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is
 * a TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
                               struct xhci_segment *start_seg,
                               union xhci_trb *start_trb,
                               union xhci_trb *end_trb,
                               dma_addr_t suspect_dma,
                               bool debug)
{
        dma_addr_t start_dma;
        dma_addr_t end_seg_dma;
        dma_addr_t end_trb_dma;
        struct xhci_segment *cur_seg;

        start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
        cur_seg = start_seg;

        do {
                if (start_dma == 0)
                        return NULL;
                /* We may get an event for a Link TRB in the middle of a TD */
                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
                /* If the end TRB isn't in this segment, this is set to 0 */
                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

                if (debug)
                        xhci_warn(xhci,
                                  "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
                                  (unsigned long long)suspect_dma,
                                  (unsigned long long)start_dma,
                                  (unsigned long long)end_trb_dma,
                                  (unsigned long long)cur_seg->dma,
                                  (unsigned long long)end_seg_dma);

                if (end_trb_dma > 0) {
                        /* The end TRB is in this segment, so suspect should be here */
                        if (start_dma <= end_trb_dma) {
                                if (suspect_dma >= start_dma &&
                                    suspect_dma <= end_trb_dma)
                                        return cur_seg;
                        } else {
                                /* Case for one segment with a TD wrapped around to the top */
                                if ((suspect_dma >= start_dma &&
                                     suspect_dma <= end_seg_dma) ||
                                    (suspect_dma >= cur_seg->dma &&
                                     suspect_dma <= end_trb_dma))
                                        return cur_seg;
                        }
                        return NULL;
                }
                /* Might still be somewhere in this segment */
                if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
                        return cur_seg;

                cur_seg = cur_seg->next;
                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
        } while (cur_seg != start_seg);

        return NULL;
}

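/*
 * Illustrative example (hypothetical addresses): for a TD whose TRBs occupy
 * the DMA range 0x1000..0x1040 inside one segment, suspect_dma == 0x1020
 * satisfies start_dma <= suspect_dma <= end_trb_dma and that segment is
 * returned; 0x2000 matches no segment walked and NULL is returned.
 */
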
static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
                                     struct xhci_virt_ep *ep)
{
        /*
         * As part of low/full-speed endpoint-halt processing
         * we must clear the TT buffer (USB 2.0 specification 11.17.5).
         */
        if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
            (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
            !(ep->ep_state & EP_CLEARING_TT)) {
                ep->ep_state |= EP_CLEARING_TT;
                td->urb->ep->hcpriv = td->urb->dev;
                if (usb_hub_clear_tt_buffer(td->urb))
                        ep->ep_state &= ~EP_CLEARING_TT;
        }
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
                                             struct xhci_ep_ctx *ep_ctx,
                                             unsigned int trb_comp_code)
{
        /* TRB completion codes that may require a manual halt cleanup */
        if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
            trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
            trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
                /*
                 * The 0.95 spec says a babbling control endpoint is not
                 * halted. The 0.96 spec says it is. Some HW claims to be 0.95
                 * compliant, but it halts the control endpoint anyway.
                 */
                if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
                        return 1;

        return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
        if (trb_comp_code >= 224 && trb_comp_code <= 255) {
                /* Vendor defined "informational" completion code,
                 * treat as not-an-error.
                 */
                xhci_dbg(xhci, "Vendor defined info completion code %u\n",
                         trb_comp_code);
                xhci_dbg(xhci, "Treating code as success.\n");
                return 1;
        }
        return 0;
}

static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                     struct xhci_ring *ep_ring, struct xhci_td *td,
                     u32 trb_comp_code)
{
        struct xhci_ep_ctx *ep_ctx;

        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);

        switch (trb_comp_code) {
        case COMP_STOPPED_LENGTH_INVALID:
        case COMP_STOPPED_SHORT_PACKET:
        case COMP_STOPPED:
                /*
                 * The "Stop Endpoint" completion will take care of any
                 * stopped TDs. A stopped TD may be restarted, so don't update
                 * the ring dequeue pointer or take this TD off any lists yet.
                 */
                return 0;
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_SPLIT_TRANSACTION_ERROR:
                if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
                        /*
                         * If EP_HALTED is set and the TD is on the cancelled
                         * list, the TD and dequeue pointer will be handled by
                         * the reset ep command completion.
                         */
                        if ((ep->ep_state & EP_HALTED) &&
                            !list_empty(&td->cancelled_td_list)) {
                                xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
                                         (unsigned long long)xhci_trb_virt_to_dma(
                                                 td->start_seg, td->first_trb));
                                return 0;
                        }
                        /* endpoint not halted, don't reset it */
                        break;
                }
                /* Almost same procedure as for STALL_ERROR below */
                xhci_clear_hub_tt_buffer(xhci, td, ep);
                xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
                return 0;
        case COMP_STALL_ERROR:
                /*
                 * xhci internal endpoint state will go to a "halt" state for
                 * any stall, including default control pipe protocol stall.
                 * To clear the host side halt we need to issue a reset endpoint
                 * command; the device side halt is cleared with a Clear Feature.
                 */
                if (ep->ep_index != 0)
                        xhci_clear_hub_tt_buffer(xhci, td, ep);

                xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);

                return 0; /* TD is cleaned up by the reset endpoint handler */
        default:
                break;
        }

        /* Update ring dequeue pointer */
        ep_ring->dequeue = td->last_trb;
        ep_ring->deq_seg = td->last_trb_seg;
        inc_deq(xhci, ep_ring);

        return xhci_td_cleanup(xhci, td, ep_ring, td->status);
}

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
                           union xhci_trb *stop_trb)
{
        u32 sum;
        union xhci_trb *trb = ring->dequeue;
        struct xhci_segment *seg = ring->deq_seg;

        for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
                if (!trb_is_noop(trb) && !trb_is_link(trb))
                        sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
        }
        return sum;
}

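/*
 * Hedged example: for a TD built from three 1024-byte normal TRBs, calling
 * sum_trb_lengths() with the third TRB as stop_trb returns 2048 -- the
 * lengths of the TRBs before stop_trb, excluding stop_trb itself, skipping
 * any no-op or link TRBs on the way.
 */
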
/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                           struct xhci_ring *ep_ring, struct xhci_td *td,
                           union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
        struct xhci_ep_ctx *ep_ctx;
        u32 trb_comp_code;
        u32 remaining, requested;
        u32 trb_type;

        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
        requested = td->urb->transfer_buffer_length;
        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

        switch (trb_comp_code) {
        case COMP_SUCCESS:
                if (trb_type != TRB_STATUS) {
                        xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
                                  (trb_type == TRB_DATA) ? "data" : "setup");
                        td->status = -ESHUTDOWN;
                        break;
                }
                td->status = 0;
                break;
        case COMP_SHORT_PACKET:
                td->status = 0;
                break;
        case COMP_STOPPED_SHORT_PACKET:
                if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
                        td->urb->actual_length = remaining;
                else
                        xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
                goto finish_td;
        case COMP_STOPPED:
                switch (trb_type) {
                case TRB_SETUP:
                        td->urb->actual_length = 0;
                        goto finish_td;
                case TRB_DATA:
                case TRB_NORMAL:
                        td->urb->actual_length = requested - remaining;
                        goto finish_td;
                case TRB_STATUS:
                        td->urb->actual_length = requested;
                        goto finish_td;
                default:
                        xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
                                  trb_type);
                        goto finish_td;
                }
        case COMP_STOPPED_LENGTH_INVALID:
                goto finish_td;
        default:
                if (!xhci_requires_manual_halt_cleanup(xhci,
                                                       ep_ctx, trb_comp_code))
                        break;
                xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
                         trb_comp_code, ep->ep_index);
                fallthrough;
        case COMP_STALL_ERROR:
                /* Did we transfer part of the data (middle) phase? */
                if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
                        td->urb->actual_length = requested - remaining;
                else if (!td->urb_length_set)
                        td->urb->actual_length = 0;
                goto finish_td;
        }

        /* stopped at setup stage, no data transferred */
        if (trb_type == TRB_SETUP)
                goto finish_td;

        /*
         * if on data stage then update the actual_length of the URB and flag
         * it as set, so it won't be overwritten in the event for the last TRB.
         */
        if (trb_type == TRB_DATA ||
            trb_type == TRB_NORMAL) {
                td->urb_length_set = true;
                td->urb->actual_length = requested - remaining;
                xhci_dbg(xhci, "Waiting for status stage event\n");
                return 0;
        }

        /* at status stage */
        if (!td->urb_length_set)
                td->urb->actual_length = requested;

finish_td:
        return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}

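/*
 * Hedged note: a control TD spans up to three stages (setup, optional data,
 * status), so process_ctrl_td() above can run more than once per URB; the
 * urb_length_set flag is what keeps a data-stage length from being
 * overwritten when the status-stage event arrives.
 */
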
/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
			   struct xhci_ring *ep_ring, struct xhci_td *td,
			   union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
	struct urb_priv *urb_priv;
	int idx;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool sum_trbs_for_length = false;
	u32 remaining, requested, ep_trb_len;
	int short_framestatus;

	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];
	requested = frame->length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
		-EREMOTEIO : 0;

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (remaining) {
			frame->status = short_framestatus;
			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
				sum_trbs_for_length = true;
			break;
		}
		frame->status = 0;
		break;
	case COMP_SHORT_PACKET:
		frame->status = short_framestatus;
		sum_trbs_for_length = true;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		frame->status = -ECOMM;
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
	case COMP_BABBLE_DETECTED_ERROR:
		frame->status = -EOVERFLOW;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
	case COMP_STALL_ERROR:
		frame->status = -EPROTO;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		frame->status = -EPROTO;
		if (ep_trb != td->last_trb)
			return 0;
		break;
	case COMP_STOPPED:
		sum_trbs_for_length = true;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		/* field normally containing residue now contains transferred */
		frame->status = short_framestatus;
		requested = remaining;
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		requested = 0;
		remaining = 0;
		break;
	default:
		sum_trbs_for_length = true;
		frame->status = -1;
		break;
	}

	if (sum_trbs_for_length)
		frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
			ep_trb_len - remaining;
	else
		frame->actual_length = requested;

	td->urb->actual_length += frame->actual_length;

	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
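/*
 * Worked example for sum_trbs_for_length (illustrative numbers): an isoc
 * frame queued as two 1024-byte TRBs that stops short on the second TRB
 * with remaining = 600 gives actual_length = sum_trb_lengths() (1024 for
 * the completed first TRB) + ep_trb_len (1024) - remaining (600) = 1448.
 */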
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_virt_ep *ep, int status)
{
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	ep->ring->dequeue = td->last_trb;
	ep->ring->deq_seg = td->last_trb_seg;
	inc_deq(xhci, ep->ring);

	return xhci_td_cleanup(xhci, td, ep->ring, status);
}
/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
				struct xhci_ring *ep_ring, struct xhci_td *td,
				union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
	struct xhci_slot_ctx *slot_ctx;
	u32 trb_comp_code;
	u32 remaining, requested, ep_trb_len;

	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	requested = td->urb->transfer_buffer_length;

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		ep->err_count = 0;
		/* handle success with untransferred data as short packet */
		if (ep_trb != td->last_trb || remaining) {
			xhci_warn(xhci, "WARN Successful completion on short TX\n");
			xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
				 td->urb->ep->desc.bEndpointAddress,
				 requested, remaining);
		}
		td->status = 0;
		break;
	case COMP_SHORT_PACKET:
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
			 td->urb->ep->desc.bEndpointAddress,
			 requested, remaining);
		td->status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		td->urb->actual_length = remaining;
		goto finish_td;
	case COMP_STOPPED_LENGTH_INVALID:
		/* stopped on ep trb with invalid length, exclude it */
		ep_trb_len = 0;
		remaining = 0;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
		    (ep->err_count++ > MAX_SOFT_RETRY) ||
		    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
			break;

		td->status = 0;

		xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
		return 0;
	default:
		/* do nothing */
		break;
	}

	if (ep_trb == td->last_trb)
		td->urb->actual_length = requested - remaining;
	else
		td->urb->actual_length =
			sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
finish_td:
	if (remaining > requested) {
		xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
			  remaining);
		td->urb->actual_length = 0;
	}

	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
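/*
 * Example of the mid-TD short packet math above (illustrative numbers):
 * a 16 KB bulk TD split into four 4 KB TRBs that the device shorts on
 * the third TRB with remaining = 1000 gives actual_length =
 * sum_trb_lengths() (8192) + ep_trb_len (4096) - remaining (1000) = 11288.
 */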
/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
			   struct xhci_interrupter *ir,
			   struct xhci_transfer_event *event)
{
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t ep_trb_dma;
	struct xhci_segment *ep_seg;
	union xhci_trb *ep_trb;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;
	int td_num = 0;
	bool handling_skipped_tds = false;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	ep_trb_dma = le64_to_cpu(event->buffer);

	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep) {
		xhci_err(xhci, "ERROR Invalid Transfer event\n");
		goto err_out;
	}

	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

	if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
		xhci_err(xhci,
			 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
			 slot_id, ep_index);
		goto err_out;
	}

	/* Some transfer events don't always point to a trb, see xhci 4.17.4 */
	if (!ep_ring) {
		switch (trb_comp_code) {
		case COMP_STALL_ERROR:
		case COMP_USB_TRANSACTION_ERROR:
		case COMP_INVALID_STREAM_TYPE_ERROR:
		case COMP_INVALID_STREAM_ID_ERROR:
			xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
				 ep_index);
			if (ep->err_count++ > MAX_SOFT_RETRY)
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_HARD_RESET);
			else
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_SOFT_RESET);
			goto cleanup;
		case COMP_RING_UNDERRUN:
		case COMP_RING_OVERRUN:
		case COMP_STOPPED_LENGTH_INVALID:
			goto cleanup;
		default:
			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
				 slot_id, ep_index);
			goto err_out;
		}
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip)
		td_num += list_count_nodes(&ep_ring->td_list);

	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on transfer type */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
		    ep_ring->last_td_was_short)
			trb_comp_code = COMP_SHORT_PACKET;
		else
			xhci_warn_ratelimited(xhci,
					      "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
					      slot_id, ep_index);
		break;
	case COMP_SHORT_PACKET:
		break;
	/* Completion codes for endpoint stopped state */
	case COMP_STOPPED:
		xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		xhci_dbg(xhci,
			 "Stopped on No-op or Link TRB for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STOPPED_SHORT_PACKET:
		xhci_dbg(xhci,
			 "Stopped with short packet transfer detected for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	/* Completion codes for endpoint halted state */
	case COMP_STALL_ERROR:
		xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
			 ep_index);
		status = -EPIPE;
		break;
	case COMP_SPLIT_TRANSACTION_ERROR:
		xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
			 slot_id, ep_index);
		status = -EPROTO;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		xhci_dbg(xhci, "Transfer error for slot %u ep %u\n",
			 slot_id, ep_index);
		status = -EPROTO;
		break;
	case COMP_BABBLE_DETECTED_ERROR:
		xhci_dbg(xhci, "Babble error for slot %u ep %u\n",
			 slot_id, ep_index);
		status = -EOVERFLOW;
		break;
	/* Completion codes for endpoint error state */
	case COMP_TRB_ERROR:
		xhci_warn(xhci,
			  "WARN: TRB error for slot %u ep %u\n",
			  slot_id, ep_index);
		status = -EILSEQ;
		break;
	/* completion codes not indicating endpoint state change */
	case COMP_DATA_BUFFER_ERROR:
		xhci_warn(xhci,
			  "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
			  slot_id, ep_index);
		status = -ENOSR;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		xhci_warn(xhci,
			  "WARN: bandwidth overrun event for slot %u ep %u\n",
			  slot_id, ep_index);
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
		xhci_warn(xhci,
			  "WARN: buffer overrun event for slot %u ep %u\n",
			  slot_id, ep_index);
		break;
	case COMP_RING_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_RING_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_MISSED_SERVICE_ERROR:
		/*
		 * When encounter missed service error, one or more isoc tds
		 * may be missed by xHC.
		 * Set skip flag of the ep_ring; Complete the missed tds as
		 * short transfer when process the ep_ring next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci,
			 "Miss service interval error for slot %u ep %u, set skip flag\n",
			 slot_id, ep_index);
		goto cleanup;
	case COMP_NO_PING_RESPONSE_ERROR:
		ep->skip = true;
		xhci_dbg(xhci,
			 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
			 slot_id, ep_index);
		goto cleanup;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		/* needs disable slot command to recover */
		xhci_warn(xhci,
			  "WARN: detect an incompatible device for slot %u ep %u\n",
			  slot_id, ep_index);
		status = -EPROTO;
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci,
			  "ERROR Unknown event condition %u for slot %u ep %u, HC probably busted\n",
			  trb_comp_code, slot_id, ep_index);
		goto cleanup;
	}

	do {
		/*
		 * This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * Don't print warnings if it's due to a stopped
			 * endpoint generating an extra completion event, or
			 * an event for the last TRB of a short TD we already
			 * got a short event for. The short TD is already
			 * handled.
			 */
			if (!(trb_comp_code == COMP_STOPPED ||
			      trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
			      ep_ring->last_td_was_short)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
					  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
					  ep_index);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
					 slot_id, ep_index);
			}
			if (trb_comp_code == COMP_STALL_ERROR ||
			    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
							      trb_comp_code)) {
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_HARD_RESET);
			}
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
				 slot_id, ep_index);
			goto cleanup;
		}

		td = list_first_entry(&ep_ring->td_list, struct xhci_td,
				      td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				   td->last_trb, ep_trb_dma, false);

		/*
		 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
		 * is not in the current TD pointed by ep_ring->dequeue because
		 * the hardware dequeue pointer is still at the previous TRB
		 * of the current TD. The previous TRB may be a link TRB or the
		 * last TRB of the previous TD. The command completion handler
		 * will take care of the rest.
		 */
		if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
				trb_comp_code == COMP_STOPPED_LENGTH_INVALID))
			goto cleanup;

		if (!ep_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/*
				 * Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
				    ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					 "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
					 ep_index, trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  ep_trb_dma, true);
				return -ESHUTDOWN;
			}

			skip_isoc_td(xhci, td, ep, status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_PACKET)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci,
				 "Found td. Clear skip flag for slot %u ep %u.\n",
				 slot_id, ep_index);
			ep->skip = false;
		}

		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
				       sizeof(*ep_trb)];

		/*
		 * No-op TRB could trigger interrupts in a case where
		 * a URB was killed and a STALL_ERROR happens right after
		 * the endpoint ring stopped. Reset the halted endpoint.
		 * Otherwise, the endpoint remains stalled indefinitely.
		 */
		if (trb_is_noop(ep_trb)) {
			if (trb_comp_code == COMP_STALL_ERROR ||
			    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
							      trb_comp_code))
				xhci_handle_halted_endpoint(xhci, ep, td,
							    EP_HARD_RESET);
			goto cleanup;
		}

		td->status = status;

		/* update the urb's actual_length and give back to the core */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
		else
			process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
cleanup:
		handling_skipped_tds = ep->skip &&
			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
			trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed tds.
		 */
		if (!handling_skipped_tds)
			inc_deq(xhci, ir->event_ring);

	/*
	 * If ep->skip is set, it means there are missed tds on the
	 * endpoint ring that need to be taken care of.
	 * Process them as short transfers until we reach the td pointed
	 * to by the event.
	 */
	} while (handling_skipped_tds);

	return 0;

err_out:
	xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
		 (unsigned long long) xhci_trb_virt_to_dma(
			 ir->event_ring->deq_seg,
			 ir->event_ring->dequeue),
		 lower_32_bits(le64_to_cpu(event->buffer)),
		 upper_32_bits(le64_to_cpu(event->buffer)),
		 le32_to_cpu(event->transfer_len),
		 le32_to_cpu(event->flags));
	return -ENODEV;
}
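/*
 * Layout of the transfer event TRB decoded above (per xHCI spec 6.4.2.1):
 * event->buffer holds the DMA address of the transfer TRB that generated
 * the event, EVENT_TRB_LEN() extracts bits 23:0 of transfer_len (the bytes
 * left untransferred), GET_COMP_CODE() extracts bits 31:24 (the completion
 * code), and the slot and endpoint IDs are packed into event->flags.
 */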
/*
 * This function handles all OS-owned events on the event ring. It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.
 */
static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	u32 trb_type;
	int ret;

	/* Event ring hasn't been allocated yet. */
	if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
		xhci_err(xhci, "ERROR interrupter not ready\n");
		return -ENOMEM;
	}

	event = ir->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    ir->event_ring->cycle_state)
		return 0;

	trace_xhci_handle_event(ir->event_ring, &event->generic);

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));

	switch (trb_type) {
	case TRB_COMPLETION:
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_PORT_STATUS:
		handle_port_status(xhci, ir, event);
		update_ptrs = 0;
		break;
	case TRB_TRANSFER:
		ret = handle_tx_event(xhci, ir, &event->trans_event);
		if (ret >= 0)
			update_ptrs = 0;
		break;
	case TRB_DEV_NOTE:
		handle_device_notification(xhci, event);
		break;
	default:
		if (trb_type >= TRB_VENDOR_DEFINED_LOW)
			handle_vendor_event(xhci, event, trb_type);
		else
			xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
	}
	/*
	 * Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, ir->event_ring);

	/*
	 * Are there more items on the event ring? Caller will call us again to
	 * check.
	 */
	return 1;
}
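/*
 * The > 0 return above is what makes the interrupt handler further down
 * keep calling this function in a loop: events are consumed one at a time
 * until the cycle bit of the next TRB shows it still belongs to the
 * controller.
 */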
/*
 * Update Event Ring Dequeue Pointer:
 * - When all events have finished
 * - To avoid "Event Ring Full Error" condition
 */
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
				     struct xhci_interrupter *ir,
				     union xhci_trb *event_ring_deq,
				     bool clear_ehb)
{
	u64 temp_64;
	dma_addr_t deq;

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != ir->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
					   ir->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
		/*
		 * Per 4.9.4, software writes to the ERDP register shall always
		 * advance the Event Ring Dequeue Pointer value.
		 */
		if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
		    (deq & (u64) ~ERST_PTR_MASK))
			return;

		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C) */
	if (clear_ehb)
		temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	union xhci_trb *event_ring_deq;
	struct xhci_interrupter *ir;
	irqreturn_t ret = IRQ_NONE;
	u64 temp_64;
	u32 status;
	int event_loop = 0;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == ~(u32)0) {
		xhci_hc_died(xhci);
		ret = IRQ_HANDLED;
		goto out;
	}

	if (!(status & STS_EINT))
		goto out;

	if (status & STS_HCE) {
		xhci_warn(xhci, "WARNING: Host Controller Error\n");
		goto out;
	}

	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
		ret = IRQ_HANDLED;
		goto out;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);

	/* This is the handler of the primary interrupter */
	ir = xhci->interrupter;
	if (!hcd->msi_enabled) {
		u32 irq_pending;

		irq_pending = readl(&ir->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &ir->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING ||
	    xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
		/*
		 * Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
			      &ir->ir_set->erst_dequeue);
		ret = IRQ_HANDLED;
		goto out;
	}

	event_ring_deq = ir->event_ring->dequeue;
	/*
	 * FIXME this should be a delayed service routine that clears the EHB.
	 */
	while (xhci_handle_event(xhci, ir) > 0) {
		if (event_loop++ < TRBS_PER_SEGMENT / 2)
			continue;
		xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
		event_ring_deq = ir->event_ring->dequeue;

		/* ring is half-full, force isoc trbs to interrupt more often */
		if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
			xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;

		event_loop = 0;
	}

	xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
	ret = IRQ_HANDLED;

out:
	spin_unlock(&xhci->lock);

	return ret;
}
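/*
 * Note the final xhci_update_erst_dequeue() call above passes
 * clear_ehb = true: writing the dequeue pointer with ERST_EHB set (RW1C)
 * tells the controller the handler is done, re-arming event delivery.
 * The intermediate calls inside the loop only advance the pointer, to
 * keep a busy ring from hitting the "Event Ring Full" condition.
 */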
/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		      bool more_trbs_coming,
		      u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);

	/* make sure TRB is fully written before giving it to the controller */
	wmb();
	trb->field[3] = cpu_to_le32(field4);

	inc_enq(xhci, ring, more_trbs_coming);
}
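/*
 * Ordering matters in queue_trb(): field[3] carries the cycle bit that
 * hands the TRB to the controller, so it is written last, behind a write
 * barrier. If the controller observed the cycle bit flip before the other
 * fields landed, it could execute a half-written TRB.
 */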
/*
 * Does various checks on the endpoint ring, and makes it ready to queue
 * num_trbs.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
			u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int link_trb_count = 0;
	unsigned int new_segs = 0;

	/* Make sure the endpoint has been added to xHC schedule first */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
		break;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	if (ep_ring != xhci->cmd_ring) {
		new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
	} else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) {
		xhci_err(xhci, "Command ring expansion is not supported\n");
		return -ENOMEM;
	}

	if (new_segs) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	while (trb_is_link(ep_ring->enqueue)) {
		/*
		 * If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, clear the chain bit.
		 */
		if (!xhci_link_trb_quirk(xhci) &&
		    !(ep_ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)))
			ep_ring->enqueue->link.control &=
				cpu_to_le32(~TRB_CHAIN);
		else
			ep_ring->enqueue->link.control |=
				cpu_to_le32(TRB_CHAIN);

		wmb();
		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(ep_ring->enqueue))
			ep_ring->cycle_state ^= 1;

		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;

		/* prevent infinite loop if all first trbs are link trbs */
		if (link_trb_count++ > ep_ring->num_segs) {
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
			return -EINVAL;
		}
	}

	if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
		xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
		return -EINVAL;
	}

	return 0;
}
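/*
 * prepare_ring() does two distinct jobs: transfer rings may grow (new
 * segments are spliced in behind the enqueue pointer), while the command
 * ring is fixed-size; and any link TRB sitting at the enqueue position is
 * stepped over, so callers can assume enqueue points at a usable slot.
 */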
static int prepare_transfer(struct xhci_hcd *xhci,
			    struct xhci_virt_device *xdev,
			    unsigned int ep_index,
			    unsigned int stream_id,
			    unsigned int num_trbs,
			    struct urb *urb,
			    unsigned int td_index,
			    gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
					      stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
			 stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	return 0;
}
static unsigned int count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
				TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
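/*
 * Worked example: a 70000-byte buffer whose DMA address sits 0x100 bytes
 * below a 64 KB boundary has addr & 0xFFFF = 0xFF00 (65280), so it needs
 * DIV_ROUND_UP(70000 + 65280, 65536) = 3 TRBs; fully aligned, the same
 * buffer would need only DIV_ROUND_UP(70000, 65536) = 2, since no single
 * TRB buffer may cross a 64 KB boundary.
 */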
static inline unsigned int count_trbs_needed(struct urb *urb)
{
	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
}

static unsigned int count_sg_trbs_needed(struct urb *urb)
{
	struct scatterlist *sg;
	unsigned int i, len, full_len, num_trbs = 0;

	full_len = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += count_trbs(sg_dma_address(sg), len);
		len = min_t(unsigned int, len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
{
	u64 addr, len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	len = urb->iso_frame_desc[i].length;

	return count_trbs(addr, len);
}

static void check_trb_math(struct urb *urb, int running_total)
{
	if (unlikely(running_total != urb->transfer_buffer_length))
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
			       unsigned int ep_index, unsigned int stream_id,
			       int start_cycle, struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
			   struct xhci_ep_ctx *ep_ctx)
{
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;

	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
	    urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	/*
	 * FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
		    urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
}
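/*
 * Example: a full-speed interrupt endpoint expresses urb->interval in
 * 1 ms frames. If the endpoint context reports an interval of 32
 * microframes, the URB is adjusted to 32 / 8 = 4 frames, so the value
 * handed back to the driver stays in its native units.
 */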
/*
 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		       struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx;

	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
	check_interval(xhci, urb, ep_ctx);

	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, TD size field should be the remaining bytes
 * including this TRB, right shifted by 10.
 *
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
			     int trb_buff_len, unsigned int td_total_len,
			     struct urb *urb, bool more_trbs_coming)
{
	u32 maxp, total_packet_count;

	/* MTK xHCI 0.96 contains some features from 1.0 */
	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
	if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
		trb_buff_len = 0;

	maxp = usb_endpoint_maxp(&urb->ep->desc);
	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
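/*
 * Worked example (xHCI 1.0+, illustrative numbers): maxp = 512 and a
 * 3072-byte TD give total_packet_count = 6. On the second 1024-byte TRB,
 * transferred = 1024 and trb_buff_len = 1024, so TD size =
 * 6 - (2048 / 512) = 2 max-packet units still to go. A 0.96 host would
 * instead report the remaining bytes >> 10, i.e. (3072 - 1024) >> 10 = 2.
 */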
static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
			 u32 *trb_buff_len, struct xhci_segment *seg)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;
	size_t len;

	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* we got lucky, last normal TRB data on segment is packet aligned */
	if (unalign == 0)
		return 0;

	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
		 unalign, *trb_buff_len);

	/* is the last normal TRB alignable by splitting it */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number that is
	 * divisible by the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
	 */
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
		new_buff_len = (urb->transfer_buffer_length - enqd_len);

	/* create a max max_pkt sized bounce buffer pointed to by last trb */
	if (usb_urb_dir_out(urb)) {
		if (urb->num_sgs) {
			len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
						 seg->bounce_buf, new_buff_len, enqd_len);
			if (len != new_buff_len)
				xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
					  len, new_buff_len);
		} else {
			memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
		}

		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* try without aligning. Some host controllers survive */
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}
	*trb_buff_len = new_buff_len;
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);

	return 1;
}
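/*
 * Why xhci_align_td() exists: a TD that crosses a ring segment ends in a
 * link TRB, and controllers expect the data queued before that link TRB
 * to be a multiple of the endpoint's wMaxPacketSize. Splitting the last
 * TRB is the cheap fix; copying through the per-segment bounce buffer is
 * the fallback when fewer than max_pkt bytes would remain.
 */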
/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		       struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_generic_trb *start_trb;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool first_trb = true;
	unsigned int num_trbs;
	unsigned int start_cycle, num_sgs = 0;
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	int sent_len, ret;
	u32 field, length_field, remainder;
	u64 addr, send_addr;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have scatter gather list, we use it. */
	if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			       ep_index, urb->stream_id,
			       num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
		need_zero_pkt = true;

	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);

		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/*
		 * Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->last_trb = ring->enqueue;
			td->last_trb_seg = ring->enq_seg;
			if (xhci_urb_suitable_for_idt(urb)) {
				memcpy(&send_addr, urb->transfer_buffer,
				       trb_buff_len);
				le64_to_cpus(&send_addr);
				field |= TRB_IDT;
			}
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);
		td->num_trbs++;
		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			sg = sg_next(sg);
			if (num_sgs != 0 && sg) {
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1].last_trb = ring->enqueue;
		urb_priv->td[1].last_trb_seg = ring->enq_seg;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
		urb_priv->td[1].num_trbs++;
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			   start_cycle, start_trb);
	return 0;
}
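/*
 * URB_ZERO_PACKET above: when a transfer length is an exact multiple of
 * wMaxPacketSize, some protocols expect a trailing zero-length packet to
 * terminate the transfer; it is queued as a second, one-TRB TD.
 */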
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		       struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			       ep_index, urb->stream_id,
			       num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[0];
	td->num_trbs = num_trbs;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	if (urb->transfer_buffer_length > 0) {
		u32 length_field, remainder;
		u64 addr;

		if (xhci_urb_suitable_for_idt(urb)) {
			memcpy(&addr, urb->transfer_buffer,
			       urb->transfer_buffer_length);
			le64_to_cpus(&addr);
			field |= TRB_IDT;
		} else {
			addr = (u64) urb->transfer_dma;
		}

		remainder = xhci_td_remainder(xhci, 0,
					      urb->transfer_buffer_length,
					      urb->transfer_buffer_length,
					      urb, 1);
		length_field = TRB_LEN(urb->transfer_buffer_length) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
			  lower_32_bits(addr),
			  upper_32_bits(addr),
			  length_field,
			  field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;
	td->last_trb_seg = ep_ring->enq_seg;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
		  0,
		  0,
		  TRB_INTR_TARGET(0),
		  /* Event on completion */
		  field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			   start_cycle, start_trb);
	return 0;
}
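/*
 * The setup stage above uses immediate data (TRB_IDT): the 8-byte
 * usb_ctrlrequest is packed straight into the TRB's first two data words
 * instead of being referenced by DMA address, which is why
 * urb->setup_packet must exist but needs no mapping.
 */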
/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD. Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst. Basically, for everything but SuperSpeed devices, this field will be
 * zero. Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
					 struct urb *urb,
					 unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

/*
 * Returns the number of packets in the last "burst" of packets. This field is
 * valid for all speeds of devices. USB 2.0 devices can only do one "burst",
 * so the last burst packet count is equal to the total number of packets in
 * the TD. SuperSpeed endpoints can have up to 3 bursts. All but the last
 * burst must contain (bMaxBurst + 1) packets, but the last burst can contain
 * 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
						     struct urb *urb,
						     unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/*
		 * If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}
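/*
 * Example (illustrative): a SuperSpeed isoc TD of 11 packets with
 * bMaxBurst = 3 (4 packets per burst) needs DIV_ROUND_UP(11, 4) = 3
 * bursts, encoded as TBC = 2; the last burst carries 11 % 4 = 3 packets,
 * encoded as TLBPC = 2.
 */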
/*
 * The Frame ID field of an isochronous TRB identifies the target frame
 * that the interval associated with this Isochronous Transfer Descriptor
 * will start on. Refer to 4.11.2.5 in the 1.1 spec.
 *
 * Returns the actual frame id on success, negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
				  struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
	    urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/*
	 * Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 * if bit [3] is cleared, IST[2:0] is in microframes; if set, it is
	 * in frames.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/*
	 * Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where (all in microframes, per xHCI 4.11.2.5):
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
		    start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
		     start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (ret == -EINVAL || start_frame == start_frame_id) {
		start_frame = start_frame_id + 1;
		if (urb->dev->speed == USB_SPEED_LOW ||
		    urb->dev->speed == USB_SPEED_FULL)
			urb->start_frame = start_frame;
		else
			urb->start_frame = start_frame << 3;
		ret = 0;
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
			  start_frame, current_frame_id, index,
			  start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		ret = -EINVAL;
	}

	return ret;
}
/* Check if we should generate event interrupt for a TD in an isoc URB */
static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
{
	if (xhci->hci_version < 0x100)
		return false;
	/* always generate an event interrupt for the last TD */
	if (i == num_tds - 1)
		return false;
	/*
	 * If AVOID_BEI is set the host handles full event rings poorly,
	 * generate an event at least every isoc_bei_interval TDs to clear
	 * the event ring.
	 */
	if (i && xhci->quirks & XHCI_AVOID_BEI)
		return !!(i % xhci->isoc_bei_interval);

	return true;
}
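/*
 * Example: with isoc_bei_interval = 8, TDs 8, 16, 24, ... interrupt and
 * the rest set the Block Event Interrupt (BEI) flag. xhci_irq() halves
 * isoc_bei_interval whenever the event ring runs half full, so a badly
 * backed-up ring drains progressively faster.
 */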
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
			      struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				       urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = &urb_priv->td[i];
		td->num_trbs = trbs_per_td;
		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				td->last_trb_seg = ep_ring->enq_seg;
				field |= TRB_IOC;
				if (trb_block_event_intr(xhci, num_tds, i))
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						      trb_buff_len, td_len,
						      urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				  lower_32_bits(addr),
				  upper_32_bits(addr),
				  length_field,
				  field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			   start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i].td_list);

	/*
	 * Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them. td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0].last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0].first_trb;
	ep_ring->enq_seg = urb_priv->td[0].start_seg;
	ep_ring->cycle_state = start_cycle;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}
/*
 * Check the transfer ring to guarantee there is enough room for the urb.
 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
 * Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
			       struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/*
	 * Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and consider the time before the trb
	 * really gets scheduled by hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
	    urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
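/*
 * HCC_CFC above is the Contiguous Frame ID capability: when the
 * controller supports it and the ring already has isoc TDs queued on a
 * running endpoint, new TDs continue from xep->next_frame_id instead of
 * being rescheduled from the current MFINDEX value.
 */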
/****		Command Ring Operations		****/

/*
 * Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
	    (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			   reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* if there are no other commands queued we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
		  field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
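/*
 * The wrappers below all funnel into queue_command(), encoding the
 * command type via TRB_TYPE() in field4. For example (illustrative),
 * disabling slot 3 queues a TRB whose field4 is
 * TRB_TYPE(TRB_DISABLE_SLOT) | SLOT_ID_FOR_TRB(3), with fields 1-3 zero.
 */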
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
			    u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
			      dma_addr_t in_ctx_ptr, u32 slot_id,
			      enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			      u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
			    u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
				  struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
				  u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
				dma_addr_t in_ctx_ptr, u32 slot_id,
				bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index,
			enum xhci_ep_reset_type reset_type)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
		type |= TRB_TSP;

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}