Lines Matching +full:num +full:- +full:ss +full:- +full:bits (free-text search hits; every match below is from the PLX NET228x/USB338x UDC driver, net2280.c, listed with its source line number and enclosing function)

1 // SPDX-License-Identifier: GPL-2.0+
27 * Copyright (C) 2003-2005 PLX Technology, Inc.
28 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
39 #include <linux/dma-mapping.h>
87 EP_INFO("ep-a",
89 EP_INFO("ep-b",
91 EP_INFO("ep-c",
93 EP_INFO("ep-d",
95 EP_INFO("ep-e",
97 EP_INFO("ep-f",
99 EP_INFO("ep-g",
101 EP_INFO("ep-h",
126 /* mode 0 == ep-{a,b,c,d} 1K fifo each
127 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
128 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
135 /* enable_suspend -- When enabled, the driver will respond to
138 * self-powered devices
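The two comment fragments above describe module parameters of this driver. As a hedged sketch (the exact types and permission modes used by net2280.c are assumptions here), parameters of this kind are declared roughly as follows:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Sketch only: names follow the comments above; types and modes are assumed. */
static ushort fifo_mode;        /* selects one of the FIFO layouts listed above */
module_param(fifo_mode, ushort, 0644);
MODULE_PARM_DESC(fifo_mode, "net2280 fifo mode: 0, 1 or 2");

static bool enable_suspend;     /* honor USB suspend requests only when set */
module_param(enable_suspend, bool, 0444);
MODULE_PARM_DESC(enable_suspend, "respond to USB suspend requests");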
167 /*-------------------------------------------------------------------------*/
170 u32 tmp = readl(&ep->dev->regs->pciirqenb0); in enable_pciirqenb()
172 if (ep->dev->quirks & PLX_LEGACY) in enable_pciirqenb()
173 tmp |= BIT(ep->num); in enable_pciirqenb()
175 tmp |= BIT(ep_bit[ep->num]); in enable_pciirqenb()
176 writel(tmp, &ep->dev->regs->pciirqenb0); in enable_pciirqenb()
194 if (!_ep || !desc || ep->desc || _ep->name == ep0name || in net2280_enable()
195 desc->bDescriptorType != USB_DT_ENDPOINT) { in net2280_enable()
197 return -EINVAL; in net2280_enable()
199 dev = ep->dev; in net2280_enable()
200 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) { in net2280_enable()
201 ret = -ESHUTDOWN; in net2280_enable()
206 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) { in net2280_enable()
207 ret = -EDOM; in net2280_enable()
211 if (dev->quirks & PLX_PCIE) { in net2280_enable()
212 if ((desc->bEndpointAddress & 0x0f) >= 0x0c) { in net2280_enable()
213 ret = -EDOM; in net2280_enable()
216 ep->is_in = !!usb_endpoint_dir_in(desc); in net2280_enable()
217 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) { in net2280_enable()
218 ret = -EINVAL; in net2280_enable()
223 /* sanity check ep-e/ep-f since their fifos are small */ in net2280_enable()
225 if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) { in net2280_enable()
226 ret = -ERANGE; in net2280_enable()
230 spin_lock_irqsave(&dev->lock, flags); in net2280_enable()
231 _ep->maxpacket = max; in net2280_enable()
232 ep->desc = desc; in net2280_enable()
235 ep->stopped = 0; in net2280_enable()
236 ep->wedged = 0; in net2280_enable()
237 ep->out_overflow = 0; in net2280_enable()
239 /* set speed-dependent max packet; may kick in high bandwidth */ in net2280_enable()
243 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); in net2280_enable()
245 if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) { in net2280_enable()
246 tmp = readl(&ep->cfg->ep_cfg); in net2280_enable()
249 ret = -EINVAL; in net2280_enable()
250 spin_unlock_irqrestore(&dev->lock, flags); in net2280_enable()
253 if (ep->is_in) in net2280_enable()
258 type = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); in net2280_enable()
261 if (dev->chiprev == 0100 && in net2280_enable()
262 dev->gadget.speed == USB_SPEED_HIGH && in net2280_enable()
263 !(desc->bEndpointAddress & USB_DIR_IN)) in net2280_enable()
265 &ep->regs->ep_rsp); in net2280_enable()
268 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) || in net2280_enable()
269 (dev->gadget.speed == USB_SPEED_HIGH && max != 512) || in net2280_enable()
270 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { in net2280_enable()
271 spin_unlock_irqrestore(&dev->lock, flags); in net2280_enable()
272 ret = -ERANGE; in net2280_enable()
276 ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC); in net2280_enable()
278 if (dev->quirks & PLX_LEGACY) { in net2280_enable()
280 tmp |= desc->bEndpointAddress; in net2280_enable()
284 ep->is_in = (tmp & USB_DIR_IN) != 0; in net2280_enable()
287 if (dev->enhanced_mode && ep->is_in) { in net2280_enable()
293 tmp |= (ep->is_in << ENDPOINT_DIRECTION); in net2280_enable()
297 if (!dev->enhanced_mode) in net2280_enable()
299 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE); in net2280_enable()
306 if (!ep->is_in) in net2280_enable()
307 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); in net2280_enable()
308 else if (!(dev->quirks & PLX_2280)) { in net2280_enable()
313 BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp); in net2280_enable()
316 if (dev->quirks & PLX_PCIE) in net2280_enable()
318 writel(tmp, &ep->cfg->ep_cfg); in net2280_enable()
321 if (!ep->dma) { /* pio, per-packet */ in net2280_enable()
326 if (dev->quirks & PLX_2280) in net2280_enable()
327 tmp |= readl(&ep->regs->ep_irqenb); in net2280_enable()
328 writel(tmp, &ep->regs->ep_irqenb); in net2280_enable()
329 } else { /* dma, per-request */ in net2280_enable()
330 tmp = BIT((8 + ep->num)); /* completion */ in net2280_enable()
331 tmp |= readl(&dev->regs->pciirqenb1); in net2280_enable()
332 writel(tmp, &dev->regs->pciirqenb1); in net2280_enable()
335 * advance the queue; do it pio-style, by hand. in net2280_enable()
338 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) { in net2280_enable()
340 writel(tmp, &ep->regs->ep_irqenb); in net2280_enable()
346 tmp = desc->bEndpointAddress; in net2280_enable()
347 ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n", in net2280_enable()
348 _ep->name, tmp & 0x0f, DIR_STRING(tmp), in net2280_enable()
349 type_string(desc->bmAttributes), in net2280_enable()
350 ep->dma ? "dma" : "pio", max); in net2280_enable()
353 spin_unlock_irqrestore(&dev->lock, flags); in net2280_enable()
357 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret); in net2280_enable()
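net2280_enable() above is this UDC's ->enable() endpoint op, reached through usb_ep_enable(). A hedged sketch of the gadget-function side that triggers it, assuming a composite function driver; my_func_enable_ep is an illustrative name, not part of this file:

#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>

static int my_func_enable_ep(struct usb_composite_dev *cdev,
                             struct usb_function *f, struct usb_ep *ep)
{
        int ret;

        /* pick the descriptor matching the negotiated speed (sets ep->desc) */
        ret = config_ep_by_speed(cdev->gadget, f, ep);
        if (ret)
                return ret;

        /* calls ep->ops->enable(), i.e. net2280_enable() on this controller */
        return usb_ep_enable(ep);
}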
371 return -ENODEV; in handshake()
383 ep->desc = NULL; in ep_reset_228x()
384 INIT_LIST_HEAD(&ep->queue); in ep_reset_228x()
386 usb_ep_set_maxpacket_limit(&ep->ep, ~0); in ep_reset_228x()
387 ep->ep.ops = &net2280_ep_ops; in ep_reset_228x()
390 if (ep->dma) { in ep_reset_228x()
391 writel(0, &ep->dma->dmactl); in ep_reset_228x()
395 &ep->dma->dmastat); in ep_reset_228x()
397 tmp = readl(&regs->pciirqenb0); in ep_reset_228x()
398 tmp &= ~BIT(ep->num); in ep_reset_228x()
399 writel(tmp, &regs->pciirqenb0); in ep_reset_228x()
401 tmp = readl(&regs->pciirqenb1); in ep_reset_228x()
402 tmp &= ~BIT((8 + ep->num)); /* completion */ in ep_reset_228x()
403 writel(tmp, &regs->pciirqenb1); in ep_reset_228x()
405 writel(0, &ep->regs->ep_irqenb); in ep_reset_228x()
410 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) { in ep_reset_228x()
423 if (ep->num != 0) { in ep_reset_228x()
427 writel(tmp, &ep->regs->ep_rsp); in ep_reset_228x()
429 /* scrub most status bits, and flush any fifo state */ in ep_reset_228x()
430 if (ep->dev->quirks & PLX_2280) in ep_reset_228x()
449 &ep->regs->ep_stat); in ep_reset_228x()
459 ep->desc = NULL; in ep_reset_338x()
460 INIT_LIST_HEAD(&ep->queue); in ep_reset_338x()
462 usb_ep_set_maxpacket_limit(&ep->ep, ~0); in ep_reset_338x()
463 ep->ep.ops = &net2280_ep_ops; in ep_reset_338x()
466 if (ep->dma) { in ep_reset_338x()
467 writel(0, &ep->dma->dmactl); in ep_reset_338x()
473 &ep->dma->dmastat); in ep_reset_338x()
475 dmastat = readl(&ep->dma->dmastat); in ep_reset_338x()
477 ep_warn(ep->dev, "The dmastat return = %x!!\n", in ep_reset_338x()
479 writel(0x5a, &ep->dma->dmastat); in ep_reset_338x()
482 tmp = readl(&regs->pciirqenb0); in ep_reset_338x()
483 tmp &= ~BIT(ep_bit[ep->num]); in ep_reset_338x()
484 writel(tmp, &regs->pciirqenb0); in ep_reset_338x()
486 if (ep->num < 5) { in ep_reset_338x()
487 tmp = readl(&regs->pciirqenb1); in ep_reset_338x()
488 tmp &= ~BIT((8 + ep->num)); /* completion */ in ep_reset_338x()
489 writel(tmp, &regs->pciirqenb1); in ep_reset_338x()
492 writel(0, &ep->regs->ep_irqenb); in ep_reset_338x()
500 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat); in ep_reset_338x()
502 tmp = readl(&ep->cfg->ep_cfg); in ep_reset_338x()
503 if (ep->is_in) in ep_reset_338x()
507 writel(tmp, &ep->cfg->ep_cfg); in ep_reset_338x()
518 if (!_ep || _ep->name == ep0name) { in net2280_disable()
520 return -EINVAL; in net2280_disable()
522 spin_lock_irqsave(&ep->dev->lock, flags); in net2280_disable()
525 if (ep->dev->quirks & PLX_PCIE) in net2280_disable()
526 ep_reset_338x(ep->dev->regs, ep); in net2280_disable()
528 ep_reset_228x(ep->dev->regs, ep); in net2280_disable()
530 ep_vdbg(ep->dev, "disabled %s %s\n", in net2280_disable()
531 ep->dma ? "dma" : "pio", _ep->name); in net2280_disable()
534 (void)readl(&ep->cfg->ep_cfg); in net2280_disable()
536 if (!ep->dma && ep->num >= 1 && ep->num <= 4) in net2280_disable()
537 ep->dma = &ep->dev->dma[ep->num - 1]; in net2280_disable()
539 spin_unlock_irqrestore(&ep->dev->lock, flags); in net2280_disable()
543 /*-------------------------------------------------------------------------*/
561 INIT_LIST_HEAD(&req->queue); in net2280_alloc_request()
564 if (ep->dma) { in net2280_alloc_request()
567 td = dma_pool_alloc(ep->dev->requests, gfp_flags, in net2280_alloc_request()
568 &req->td_dma); in net2280_alloc_request()
573 td->dmacount = 0; /* not VALID */ in net2280_alloc_request()
574 td->dmadesc = td->dmaaddr; in net2280_alloc_request()
575 req->td = td; in net2280_alloc_request()
577 return &req->req; in net2280_alloc_request()
587 dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n", in net2280_free_request()
593 WARN_ON(!list_empty(&req->queue)); in net2280_free_request()
594 if (req->td) in net2280_free_request()
595 dma_pool_free(ep->dev->requests, req->td, req->td_dma); in net2280_free_request()
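net2280_alloc_request() and net2280_free_request() back the generic usb_ep_alloc_request()/usb_ep_free_request() entry points, and net2280_queue() further down backs usb_ep_queue(). A hedged caller-side sketch; buffer size, GFP flags and the callback name are illustrative:

#include <linux/slab.h>
#include <linux/usb/gadget.h>

static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
        /* req->status and req->actual are final here; see done() below */
}

static struct usb_request *my_alloc_and_queue(struct usb_ep *ep, size_t len)
{
        struct usb_request *req;

        req = usb_ep_alloc_request(ep, GFP_KERNEL);   /* -> net2280_alloc_request() */
        if (!req)
                return NULL;

        req->buf = kmalloc(len, GFP_KERNEL);
        if (!req->buf)
                goto free_req;
        req->length = len;
        req->complete = my_complete;

        if (usb_ep_queue(ep, req, GFP_ATOMIC))        /* -> net2280_queue() */
                goto free_buf;
        return req;

free_buf:
        kfree(req->buf);
free_req:
        usb_ep_free_request(ep, req);                 /* -> net2280_free_request() */
        return NULL;
}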
599 /*-------------------------------------------------------------------------*/
604 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
606 * one packet. ep-a..ep-d should use dma instead.
610 struct net2280_ep_regs __iomem *regs = ep->regs; in write_fifo()
618 buf = req->buf + req->actual; in write_fifo()
620 total = req->length - req->actual; in write_fifo()
627 count = ep->ep.maxpacket; in write_fifo()
631 ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n", in write_fifo()
632 ep->ep.name, count, in write_fifo()
633 (count != ep->ep.maxpacket) ? " (short)" : "", in write_fifo()
642 writel(tmp, &regs->ep_data); in write_fifo()
644 count -= 4; in write_fifo()
651 if (count || total < ep->ep.maxpacket) { in write_fifo()
655 writel(tmp, &regs->ep_data); in write_fifo()
673 statp = &ep->regs->ep_stat; in out_flush()
677 ep_dbg(ep->dev, "%s %s %08x !NAK\n", in out_flush()
678 ep->ep.name, __func__, tmp); in out_flush()
679 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); in out_flush()
691 ep->dev->gadget.speed == USB_SPEED_FULL) { in out_flush()
705 * for ep-a..ep-d this will read multiple packets out when they
710 struct net2280_ep_regs __iomem *regs = ep->regs; in read_fifo()
711 u8 *buf = req->req.buf + req->req.actual; in read_fifo()
718 if (ep->dev->chiprev == 0x0100 && in read_fifo()
719 ep->dev->gadget.speed == USB_SPEED_FULL) { in read_fifo()
721 tmp = readl(&ep->regs->ep_stat); in read_fifo()
735 count = readl(&regs->ep_avail); in read_fifo()
738 tmp = readl(&ep->regs->ep_stat); in read_fifo()
739 count = readl(&regs->ep_avail); in read_fifo()
745 tmp = req->req.length - req->req.actual; in read_fifo()
748 if ((tmp % ep->ep.maxpacket) != 0) { in read_fifo()
749 ep_err(ep->dev, in read_fifo()
751 ep->ep.name, count, tmp); in read_fifo()
752 req->req.status = -EOVERFLOW; in read_fifo()
760 req->req.actual += count; in read_fifo()
762 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0); in read_fifo()
764 ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n", in read_fifo()
765 ep->ep.name, count, is_short ? " (short)" : "", in read_fifo()
767 req, req->req.actual, req->req.length); in read_fifo()
770 tmp = readl(&regs->ep_data); in read_fifo()
774 count -= 4; in read_fifo()
777 tmp = readl(&regs->ep_data); in read_fifo()
782 } while (--count); in read_fifo()
787 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp); in read_fifo()
788 (void) readl(&ep->regs->ep_rsp); in read_fifo()
791 return is_short || req->req.actual == req->req.length; in read_fifo()
798 struct net2280_dma *td = req->td; in fill_dma_desc()
799 u32 dmacount = req->req.length; in fill_dma_desc()
803 * in case of overruns on max-size packets, we can't in fill_dma_desc()
806 if (ep->is_in) in fill_dma_desc()
808 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) || in fill_dma_desc()
809 !(ep->dev->quirks & PLX_2280)) in fill_dma_desc()
812 req->valid = valid; in fill_dma_desc()
817 /* td->dmadesc = previously set by caller */ in fill_dma_desc()
818 td->dmaaddr = cpu_to_le32 (req->req.dma); in fill_dma_desc()
820 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */ in fill_dma_desc()
822 td->dmacount = cpu_to_le32(dmacount); in fill_dma_desc()
838 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50); in spin_stop_dma()
843 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl); in stop_dma()
849 struct net2280_dma_regs __iomem *dma = ep->dma; in start_queue()
850 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION); in start_queue()
852 if (!(ep->dev->quirks & PLX_2280)) in start_queue()
855 writel(tmp, &dma->dmacount); in start_queue()
856 writel(readl(&dma->dmastat), &dma->dmastat); in start_queue()
858 writel(td_dma, &dma->dmadesc); in start_queue()
859 if (ep->dev->quirks & PLX_PCIE) in start_queue()
861 writel(dmactl, &dma->dmactl); in start_queue()
864 (void) readl(&ep->dev->pci->pcimstctl); in start_queue()
866 writel(BIT(DMA_START), &dma->dmastat); in start_queue()
872 struct net2280_dma_regs __iomem *dma = ep->dma; in start_dma()
877 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE)); in start_dma()
878 writel(0, &ep->dma->dmactl); in start_dma()
881 if (!ep->is_in && (readl(&ep->regs->ep_stat) & in start_dma()
884 &ep->regs->ep_stat); in start_dma()
886 tmp = readl(&ep->regs->ep_avail); in start_dma()
888 writel(readl(&dma->dmastat), &dma->dmastat); in start_dma()
891 writel(req->req.dma, &dma->dmaaddr); in start_dma()
892 tmp = min(tmp, req->req.length); in start_dma()
895 req->td->dmacount = cpu_to_le32(req->req.length - tmp); in start_dma()
897 &dma->dmacount); in start_dma()
898 req->td->dmadesc = 0; in start_dma()
899 req->valid = 1; in start_dma()
901 writel(BIT(DMA_ENABLE), &dma->dmactl); in start_dma()
902 writel(BIT(DMA_START), &dma->dmastat); in start_dma()
914 if (ep->is_in) { in start_dma()
915 if (likely((req->req.length % ep->ep.maxpacket) || in start_dma()
916 req->req.zero)){ in start_dma()
918 ep->in_fifo_validate = 1; in start_dma()
920 ep->in_fifo_validate = 0; in start_dma()
923 /* init req->td, pointing to the current dummy */ in start_dma()
924 req->td->dmadesc = cpu_to_le32 (ep->td_dma); in start_dma()
927 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN)); in start_dma()
929 start_queue(ep, tmp, req->td_dma); in start_dma()
939 end = ep->dummy; in queue_dma()
940 ep->dummy = req->td; in queue_dma()
941 req->td = end; in queue_dma()
943 tmp = ep->td_dma; in queue_dma()
944 ep->td_dma = req->td_dma; in queue_dma()
945 req->td_dma = tmp; in queue_dma()
947 end->dmadesc = cpu_to_le32 (ep->td_dma); in queue_dma()
956 unsigned stopped = ep->stopped; in done()
958 list_del_init(&req->queue); in done()
960 if (req->req.status == -EINPROGRESS) in done()
961 req->req.status = status; in done()
963 status = req->req.status; in done()
965 dev = ep->dev; in done()
966 if (ep->dma) in done()
967 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); in done()
969 if (status && status != -ESHUTDOWN) in done()
971 ep->ep.name, &req->req, status, in done()
972 req->req.actual, req->req.length); in done()
975 ep->stopped = 1; in done()
976 spin_unlock(&dev->lock); in done()
977 usb_gadget_giveback_request(&ep->ep, &req->req); in done()
978 spin_lock(&dev->lock); in done()
979 ep->stopped = stopped; in done()
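done() above fixes the final status and hands the request back through usb_gadget_giveback_request(), dropping the device lock around the callback. A hedged sketch of a completion handler on the receiving end, using the status conventions visible in this file:

#include <linux/errno.h>
#include <linux/usb/gadget.h>

static void my_bulk_complete(struct usb_ep *ep, struct usb_request *req)
{
        switch (req->status) {
        case 0:                 /* success: req->actual bytes were transferred */
                break;
        case -ECONNRESET:       /* request was dequeued */
        case -ESHUTDOWN:        /* endpoint disabled or device disconnected */
        default:                /* e.g. -EOVERFLOW on the short-OUT paths above */
                break;
        }
}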
982 /*-------------------------------------------------------------------------*/
993 /* we always require a cpu-view buffer, so that we can in net2280_queue()
997 if (!_ep || (!ep->desc && ep->num != 0)) { in net2280_queue()
998 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); in net2280_queue()
999 return -EINVAL; in net2280_queue()
1002 if (!_req || !_req->complete || !_req->buf || in net2280_queue()
1003 !list_empty(&req->queue)) { in net2280_queue()
1004 ret = -EINVAL; in net2280_queue()
1007 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) { in net2280_queue()
1008 ret = -EDOM; in net2280_queue()
1011 dev = ep->dev; in net2280_queue()
1012 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) { in net2280_queue()
1013 ret = -ESHUTDOWN; in net2280_queue()
1018 if (ep->dma && _req->length == 0) { in net2280_queue()
1019 ret = -EOPNOTSUPP; in net2280_queue()
1024 if (ep->dma) { in net2280_queue()
1025 ret = usb_gadget_map_request(&dev->gadget, _req, in net2280_queue()
1026 ep->is_in); in net2280_queue()
1032 _ep->name, _req, _req->length, _req->buf); in net2280_queue()
1034 spin_lock_irqsave(&dev->lock, flags); in net2280_queue()
1036 _req->status = -EINPROGRESS; in net2280_queue()
1037 _req->actual = 0; in net2280_queue()
1040 if (list_empty(&ep->queue) && !ep->stopped && in net2280_queue()
1041 !((dev->quirks & PLX_PCIE) && ep->dma && in net2280_queue()
1042 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) { in net2280_queue()
1045 if (ep->dma) in net2280_queue()
1049 if (ep->num == 0 && _req->length == 0) { in net2280_queue()
1052 ep_vdbg(dev, "%s status ack\n", ep->ep.name); in net2280_queue()
1057 if (ep->is_in) in net2280_queue()
1063 s = readl(&ep->regs->ep_stat); in net2280_queue()
1065 /* note: _req->short_not_ok is in net2280_queue()
1068 * _req->status doesn't change for in net2280_queue()
1069 * short reads (only _req->actual) in net2280_queue()
1072 ep->num == 0) { in net2280_queue()
1078 ep->num != 0) { in net2280_queue()
1082 s = readl(&ep->regs->ep_stat); in net2280_queue()
1088 &ep->regs->ep_rsp); in net2280_queue()
1092 } else if (ep->dma) { in net2280_queue()
1095 if (ep->is_in) { in net2280_queue()
1098 /* preventing magic zlps is per-engine state, not in net2280_queue()
1099 * per-transfer; irq logic must recover hiccups. in net2280_queue()
1101 expect = likely(req->req.zero || in net2280_queue()
1102 (req->req.length % ep->ep.maxpacket)); in net2280_queue()
1103 if (expect != ep->in_fifo_validate) in net2280_queue()
1110 ep->responded = 1; in net2280_queue()
1112 list_add_tail(&req->queue, &ep->queue); in net2280_queue()
1114 spin_unlock_irqrestore(&dev->lock, flags); in net2280_queue()
1120 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret); in net2280_queue()
1128 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount); in dma_done()
1139 while (!list_empty(&ep->queue)) { in scan_dma_completions()
1143 req = list_entry(ep->queue.next, in scan_dma_completions()
1145 if (!req->valid) in scan_dma_completions()
1148 req_dma_count = le32_to_cpup(&req->td->dmacount); in scan_dma_completions()
1152 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" in scan_dma_completions()
1154 * all non-abort DMA completions. in scan_dma_completions()
1156 if (unlikely(req->td->dmadesc == 0)) { in scan_dma_completions()
1158 u32 const ep_dmacount = readl(&ep->dma->dmacount); in scan_dma_completions()
1166 } else if (!ep->is_in && in scan_dma_completions()
1167 (req->req.length % ep->ep.maxpacket) && in scan_dma_completions()
1168 !(ep->dev->quirks & PLX_PCIE)) { in scan_dma_completions()
1170 u32 const ep_stat = readl(&ep->regs->ep_stat); in scan_dma_completions()
1176 ep_warn(ep->dev, "%s lost packet sync!\n", in scan_dma_completions()
1177 ep->ep.name); in scan_dma_completions()
1178 req->req.status = -EOVERFLOW; in scan_dma_completions()
1180 u32 const ep_avail = readl(&ep->regs->ep_avail); in scan_dma_completions()
1183 ep->out_overflow = 1; in scan_dma_completions()
1184 ep_dbg(ep->dev, in scan_dma_completions()
1186 ep->ep.name, ep_avail, in scan_dma_completions()
1187 req->req.length); in scan_dma_completions()
1188 req->req.status = -EOVERFLOW; in scan_dma_completions()
1203 if (ep->stopped) in restart_dma()
1205 req = list_entry(ep->queue.next, struct net2280_request, queue); in restart_dma()
1213 if (likely(!list_empty(&ep->queue))) { in abort_dma()
1215 writel(BIT(DMA_ABORT), &ep->dma->dmastat); in abort_dma()
1216 spin_stop_dma(ep->dma); in abort_dma()
1218 stop_dma(ep->dma); in abort_dma()
1228 ep->stopped = 1; in nuke()
1229 if (ep->dma) in nuke()
1231 while (!list_empty(&ep->queue)) { in nuke()
1232 req = list_entry(ep->queue.next, in nuke()
1235 done(ep, req, -ESHUTDOWN); in nuke()
1249 if (!_ep || (!ep->desc && ep->num != 0) || !_req) { in net2280_dequeue()
1250 pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n", in net2280_dequeue()
1252 return -EINVAL; in net2280_dequeue()
1255 spin_lock_irqsave(&ep->dev->lock, flags); in net2280_dequeue()
1256 stopped = ep->stopped; in net2280_dequeue()
1260 ep->stopped = 1; in net2280_dequeue()
1261 if (ep->dma) { in net2280_dequeue()
1262 dmactl = readl(&ep->dma->dmactl); in net2280_dequeue()
1264 stop_dma(ep->dma); in net2280_dequeue()
1269 list_for_each_entry(req, &ep->queue, queue) { in net2280_dequeue()
1270 if (&req->req == _req) in net2280_dequeue()
1273 if (&req->req != _req) { in net2280_dequeue()
1274 ep->stopped = stopped; in net2280_dequeue()
1275 spin_unlock_irqrestore(&ep->dev->lock, flags); in net2280_dequeue()
1276 ep_dbg(ep->dev, "%s: Request mismatch\n", __func__); in net2280_dequeue()
1277 return -EINVAL; in net2280_dequeue()
1281 if (ep->queue.next == &req->queue) { in net2280_dequeue()
1282 if (ep->dma) { in net2280_dequeue()
1283 ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name); in net2280_dequeue()
1284 _req->status = -ECONNRESET; in net2280_dequeue()
1286 if (likely(ep->queue.next == &req->queue)) { in net2280_dequeue()
1287 /* NOTE: misreports single-transfer mode*/ in net2280_dequeue()
1288 req->td->dmacount = 0; /* invalidate */ in net2280_dequeue()
1290 readl(&ep->dma->dmacount), in net2280_dequeue()
1291 -ECONNRESET); in net2280_dequeue()
1294 ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name); in net2280_dequeue()
1295 done(ep, req, -ECONNRESET); in net2280_dequeue()
1301 done(ep, req, -ECONNRESET); in net2280_dequeue()
1302 ep->stopped = stopped; in net2280_dequeue()
1304 if (ep->dma) { in net2280_dequeue()
1306 if (list_empty(&ep->queue)) in net2280_dequeue()
1307 stop_dma(ep->dma); in net2280_dequeue()
1308 else if (!ep->stopped) { in net2280_dequeue()
1311 writel(dmactl, &ep->dma->dmactl); in net2280_dequeue()
1313 start_dma(ep, list_entry(ep->queue.next, in net2280_dequeue()
1318 spin_unlock_irqrestore(&ep->dev->lock, flags); in net2280_dequeue()
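net2280_dequeue() above implements usb_ep_dequeue(); the unlinked request completes with -ECONNRESET, as set in the code. A hedged sketch of a cancel path in a function driver:

#include <linux/printk.h>
#include <linux/usb/gadget.h>

static void my_cancel(struct usb_ep *ep, struct usb_request *req)
{
        int ret;

        ret = usb_ep_dequeue(ep, req);  /* -> net2280_dequeue() */
        if (ret)
                pr_debug("dequeue failed: %d\n", ret);
        /* the request's ->complete() then runs with status -ECONNRESET */
}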
1322 /*-------------------------------------------------------------------------*/
1334 if (!_ep || (!ep->desc && ep->num != 0)) { in net2280_set_halt_and_wedge()
1335 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); in net2280_set_halt_and_wedge()
1336 return -EINVAL; in net2280_set_halt_and_wedge()
1338 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { in net2280_set_halt_and_wedge()
1339 retval = -ESHUTDOWN; in net2280_set_halt_and_wedge()
1342 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03) in net2280_set_halt_and_wedge()
1344 retval = -EINVAL; in net2280_set_halt_and_wedge()
1348 spin_lock_irqsave(&ep->dev->lock, flags); in net2280_set_halt_and_wedge()
1349 if (!list_empty(&ep->queue)) { in net2280_set_halt_and_wedge()
1350 retval = -EAGAIN; in net2280_set_halt_and_wedge()
1352 } else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) { in net2280_set_halt_and_wedge()
1353 retval = -EAGAIN; in net2280_set_halt_and_wedge()
1356 ep_vdbg(ep->dev, "%s %s %s\n", _ep->name, in net2280_set_halt_and_wedge()
1361 if (ep->num == 0) in net2280_set_halt_and_wedge()
1362 ep->dev->protocol_stall = 1; in net2280_set_halt_and_wedge()
1366 ep->wedged = 1; in net2280_set_halt_and_wedge()
1369 if (ep->dev->quirks & PLX_PCIE && in net2280_set_halt_and_wedge()
1370 !list_empty(&ep->queue) && ep->td_dma) in net2280_set_halt_and_wedge()
1372 ep->wedged = 0; in net2280_set_halt_and_wedge()
1374 (void) readl(&ep->regs->ep_rsp); in net2280_set_halt_and_wedge()
1376 spin_unlock_irqrestore(&ep->dev->lock, flags); in net2280_set_halt_and_wedge()
1381 spin_unlock_irqrestore(&ep->dev->lock, flags); in net2280_set_halt_and_wedge()
1383 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval); in net2280_set_halt_and_wedge()
1394 if (!_ep || _ep->name == ep0name) { in net2280_set_wedge()
1396 return -EINVAL; in net2280_set_wedge()
1407 if (!_ep || (!ep->desc && ep->num != 0)) { in net2280_fifo_status()
1408 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); in net2280_fifo_status()
1409 return -ENODEV; in net2280_fifo_status()
1411 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { in net2280_fifo_status()
1412 dev_err(&ep->dev->pdev->dev, in net2280_fifo_status()
1414 __func__, ep->dev->driver, ep->dev->gadget.speed); in net2280_fifo_status()
1415 return -ESHUTDOWN; in net2280_fifo_status()
1418 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1); in net2280_fifo_status()
1419 if (avail > ep->fifo_size) { in net2280_fifo_status()
1420 dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__); in net2280_fifo_status()
1421 return -EOVERFLOW; in net2280_fifo_status()
1423 if (ep->is_in) in net2280_fifo_status()
1424 avail = ep->fifo_size - avail; in net2280_fifo_status()
1433 if (!_ep || (!ep->desc && ep->num != 0)) { in net2280_fifo_flush()
1434 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); in net2280_fifo_flush()
1437 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { in net2280_fifo_flush()
1438 dev_err(&ep->dev->pdev->dev, in net2280_fifo_flush()
1440 __func__, ep->dev->driver, ep->dev->gadget.speed); in net2280_fifo_flush()
1444 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); in net2280_fifo_flush()
1445 (void) readl(&ep->regs->ep_rsp); in net2280_fifo_flush()
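The halt, wedge, fifo_status and fifo_flush ops above serve usb_ep_set_halt(), usb_ep_set_wedge(), usb_ep_fifo_status() and usb_ep_fifo_flush(). A hedged sketch that simply exercises each entry point from a function driver:

#include <linux/usb/gadget.h>

static void my_stall_example(struct usb_ep *ep)
{
        int avail;

        if (usb_ep_set_halt(ep))        /* reaches net2280_set_halt_and_wedge() */
                return;                 /* e.g. -EAGAIN while the FIFO still holds data */
        usb_ep_set_wedge(ep);           /* host CLEAR_FEATURE(HALT) won't un-halt it */

        avail = usb_ep_fifo_status(ep); /* bytes currently in the FIFO, or -errno */
        if (avail > 0)
                usb_ep_fifo_flush(ep);  /* -> net2280_fifo_flush() */
}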
1464 /*-------------------------------------------------------------------------*/
1473 return -ENODEV; in net2280_get_frame()
1475 spin_lock_irqsave(&dev->lock, flags); in net2280_get_frame()
1476 retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff; in net2280_get_frame()
1477 spin_unlock_irqrestore(&dev->lock, flags); in net2280_get_frame()
1491 spin_lock_irqsave(&dev->lock, flags); in net2280_wakeup()
1492 tmp = readl(&dev->usb->usbctl); in net2280_wakeup()
1494 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat); in net2280_wakeup()
1495 spin_unlock_irqrestore(&dev->lock, flags); in net2280_wakeup()
1511 spin_lock_irqsave(&dev->lock, flags); in net2280_set_selfpowered()
1512 tmp = readl(&dev->usb->usbctl); in net2280_set_selfpowered()
1515 _gadget->is_selfpowered = 1; in net2280_set_selfpowered()
1518 _gadget->is_selfpowered = 0; in net2280_set_selfpowered()
1520 writel(tmp, &dev->usb->usbctl); in net2280_set_selfpowered()
1521 spin_unlock_irqrestore(&dev->lock, flags); in net2280_set_selfpowered()
1533 return -ENODEV; in net2280_pullup()
1536 spin_lock_irqsave(&dev->lock, flags); in net2280_pullup()
1537 tmp = readl(&dev->usb->usbctl); in net2280_pullup()
1538 dev->softconnect = (is_on != 0); in net2280_pullup()
1541 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); in net2280_pullup()
1543 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); in net2280_pullup()
1547 spin_unlock_irqrestore(&dev->lock, flags); in net2280_pullup()
1560 /* ep-e, ep-f are PIO with only 64 byte fifos */ in net2280_match_ep()
1561 ep = gadget_find_ep_by_name(_gadget, "ep-e"); in net2280_match_ep()
1564 ep = gadget_find_ep_by_name(_gadget, "ep-f"); in net2280_match_ep()
1633 /*-------------------------------------------------------------------------*/
1648 if (!dev->driver || !dev->driver->function || in function_show()
1649 strlen(dev->driver->function) > PAGE_SIZE) in function_show()
1651 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function); in function_show()
1669 spin_lock_irqsave(&dev->lock, flags); in registers_show()
1671 if (dev->driver) in registers_show()
1672 s = dev->driver->driver.name; in registers_show()
1682 driver_name, dev->chiprev, in registers_show()
1683 readl(&dev->regs->devinit), in registers_show()
1684 readl(&dev->regs->fifoctl), in registers_show()
1686 readl(&dev->regs->pciirqenb0), in registers_show()
1687 readl(&dev->regs->pciirqenb1), in registers_show()
1688 readl(&dev->regs->irqstat0), in registers_show()
1689 readl(&dev->regs->irqstat1)); in registers_show()
1690 size -= t; in registers_show()
1694 t1 = readl(&dev->usb->usbctl); in registers_show()
1695 t2 = readl(&dev->usb->usbstat); in registers_show()
1699 else if (dev->gadget.speed == USB_SPEED_UNKNOWN) in registers_show()
1709 readl(&dev->usb->stdrsp), t1, t2, in registers_show()
1710 readl(&dev->usb->ouraddr), s); in registers_show()
1711 size -= t; in registers_show()
1719 for (i = 0; i < dev->n_ep; i++) { in registers_show()
1722 ep = &dev->ep[i]; in registers_show()
1723 if (i && !ep->desc) in registers_show()
1726 t1 = readl(&ep->cfg->ep_cfg); in registers_show()
1727 t2 = readl(&ep->regs->ep_rsp) & 0xff; in registers_show()
1731 ep->ep.name, t1, t2, in registers_show()
1748 readl(&ep->regs->ep_irqenb)); in registers_show()
1749 size -= t; in registers_show()
1754 "(ep%d%s-%s)%s\n", in registers_show()
1755 readl(&ep->regs->ep_stat), in registers_show()
1756 readl(&ep->regs->ep_avail), in registers_show()
1759 ep->stopped ? "*" : ""); in registers_show()
1760 size -= t; in registers_show()
1763 if (!ep->dma) in registers_show()
1769 readl(&ep->dma->dmactl), in registers_show()
1770 readl(&ep->dma->dmastat), in registers_show()
1771 readl(&ep->dma->dmacount), in registers_show()
1772 readl(&ep->dma->dmaaddr), in registers_show()
1773 readl(&ep->dma->dmadesc)); in registers_show()
1774 size -= t; in registers_show()
1783 size -= t; in registers_show()
1785 for (i = 0; i < dev->n_ep; i++) { in registers_show()
1788 ep = &dev->ep[i]; in registers_show()
1789 if (i && !ep->irqs) in registers_show()
1791 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs); in registers_show()
1792 size -= t; in registers_show()
1797 size -= t; in registers_show()
1800 spin_unlock_irqrestore(&dev->lock, flags); in registers_show()
1802 return PAGE_SIZE - size; in registers_show()
1818 spin_lock_irqsave(&dev->lock, flags); in queues_show()
1820 for (i = 0; i < dev->n_ep; i++) { in queues_show()
1821 struct net2280_ep *ep = &dev->ep[i]; in queues_show()
1828 d = ep->desc; in queues_show()
1831 t = d->bEndpointAddress; in queues_show()
1833 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n", in queues_show()
1834 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK, in queues_show()
1836 type_string(d->bmAttributes), in queues_show()
1838 ep->dma ? "dma" : "pio", ep->fifo_size in queues_show()
1842 ep->is_in ? "in" : "out"); in queues_show()
1845 size -= t; in queues_show()
1848 if (list_empty(&ep->queue)) { in queues_show()
1852 size -= t; in queues_show()
1856 list_for_each_entry(req, &ep->queue, queue) { in queues_show()
1857 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc)) in queues_show()
1861 &req->req, req->req.actual, in queues_show()
1862 req->req.length, req->req.buf, in queues_show()
1863 readl(&ep->dma->dmacount)); in queues_show()
1867 &req->req, req->req.actual, in queues_show()
1868 req->req.length, req->req.buf); in queues_show()
1871 size -= t; in queues_show()
1874 if (ep->dma) { in queues_show()
1877 td = req->td; in queues_show()
1880 (u32) req->td_dma, in queues_show()
1881 le32_to_cpu(td->dmacount), in queues_show()
1882 le32_to_cpu(td->dmaaddr), in queues_show()
1883 le32_to_cpu(td->dmadesc)); in queues_show()
1886 size -= t; in queues_show()
1893 spin_unlock_irqrestore(&dev->lock, flags); in queues_show()
1894 return PAGE_SIZE - size; in queues_show()
1906 /*-------------------------------------------------------------------------*/
1908 /* another driver-specific mode might be a request type doing dma
1914 /* keeping high bits preserves BAR2 */ in set_fifo_mode()
1915 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl); in set_fifo_mode()
1917 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */ in set_fifo_mode()
1918 INIT_LIST_HEAD(&dev->gadget.ep_list); in set_fifo_mode()
1919 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); in set_fifo_mode()
1920 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); in set_fifo_mode()
1923 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); in set_fifo_mode()
1924 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list); in set_fifo_mode()
1925 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; in set_fifo_mode()
1928 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048; in set_fifo_mode()
1931 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); in set_fifo_mode()
1932 dev->ep[1].fifo_size = 2048; in set_fifo_mode()
1933 dev->ep[2].fifo_size = 1024; in set_fifo_mode()
1936 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */ in set_fifo_mode()
1937 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list); in set_fifo_mode()
1938 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list); in set_fifo_mode()
1945 * - This phase undoes the earlier phase of the Defect 7374 workaround, in defect7374_disable_data_eps()
1954 ep = &dev->ep[i]; in defect7374_disable_data_eps()
1955 writel(i, &ep->cfg->ep_cfg); in defect7374_disable_data_eps()
1960 writel(0, &dev->dep[i].dep_cfg); in defect7374_disable_data_eps()
1964 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); in defect7374_disable_data_eps()
1965 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl); in defect7374_disable_data_eps()
1972 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4); in defect7374_disable_data_eps()
1974 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4); in defect7374_disable_data_eps()
1975 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); in defect7374_disable_data_eps()
1977 writel(tmp_reg, &dev->plregs->pl_ep_ctrl); in defect7374_disable_data_eps()
1988 scratch = get_idx_reg(dev->regs, SCRATCH); in defect7374_enable_data_eps_zero()
1996 ep_warn(dev, "It will operate on cold-reboot and SS connect"); in defect7374_enable_data_eps_zero()
2001 ((dev->enhanced_mode) ? in defect7374_enable_data_eps_zero()
2006 writel(tmp, &dev->ep[i].cfg->ep_cfg); in defect7374_enable_data_eps_zero()
2010 writel(tmp, &dev->dep[1].dep_cfg); in defect7374_enable_data_eps_zero()
2011 writel(tmp, &dev->dep[3].dep_cfg); in defect7374_enable_data_eps_zero()
2012 writel(tmp, &dev->dep[4].dep_cfg); in defect7374_enable_data_eps_zero()
2013 writel(tmp, &dev->dep[5].dep_cfg); in defect7374_enable_data_eps_zero()
2019 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); in defect7374_enable_data_eps_zero()
2021 &dev->plregs->pl_ep_ctrl); in defect7374_enable_data_eps_zero()
2025 (readl(&dev->plregs->pl_ep_ctrl) | in defect7374_enable_data_eps_zero()
2027 writel(tmp, &dev->plregs->pl_ep_ctrl); in defect7374_enable_data_eps_zero()
2035 tmp = (readl(&dev->plregs->pl_ep_cfg_4) | in defect7374_enable_data_eps_zero()
2037 writel(tmp, &dev->plregs->pl_ep_cfg_4); in defect7374_enable_data_eps_zero()
2039 tmp = readl(&dev->plregs->pl_ep_ctrl) & in defect7374_enable_data_eps_zero()
2041 writel(tmp, &dev->plregs->pl_ep_ctrl); in defect7374_enable_data_eps_zero()
2046 * - Tip: Connection speed is known upon the first in defect7374_enable_data_eps_zero()
2049 set_idx_reg(dev->regs, SCRATCH, scratch); in defect7374_enable_data_eps_zero()
2054 * - one bus driver, initted first;
2055 * - one function driver, initted second
2066 dev->gadget.speed = USB_SPEED_UNKNOWN; in usb_reset_228x()
2067 (void) readl(&dev->usb->usbctl); in usb_reset_228x()
2072 writel(0, &dev->usb->stdrsp); in usb_reset_228x()
2073 writel(0, &dev->regs->pciirqenb0); in usb_reset_228x()
2074 writel(0, &dev->regs->pciirqenb1); in usb_reset_228x()
2078 struct net2280_ep *ep = &dev->ep[tmp + 1]; in usb_reset_228x()
2079 if (ep->dma) in usb_reset_228x()
2083 writel(~0, &dev->regs->irqstat0), in usb_reset_228x()
2084 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1), in usb_reset_228x()
2087 tmp = readl(&dev->regs->devinit) | in usb_reset_228x()
2092 writel(tmp, &dev->regs->devinit); in usb_reset_228x()
2102 dev->gadget.speed = USB_SPEED_UNKNOWN; in usb_reset_338x()
2103 (void)readl(&dev->usb->usbctl); in usb_reset_338x()
2107 if (dev->bug7734_patched) { in usb_reset_338x()
2109 writel(0, &dev->usb->stdrsp); in usb_reset_338x()
2110 writel(0, &dev->regs->pciirqenb0); in usb_reset_338x()
2111 writel(0, &dev->regs->pciirqenb1); in usb_reset_338x()
2116 struct net2280_ep *ep = &dev->ep[tmp + 1]; in usb_reset_338x()
2119 if (ep->dma) { in usb_reset_338x()
2122 dma = &dev->dma[tmp]; in usb_reset_338x()
2123 writel(BIT(DMA_ABORT), &dma->dmastat); in usb_reset_338x()
2124 writel(0, &dma->dmactl); in usb_reset_338x()
2128 writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1); in usb_reset_338x()
2130 if (dev->bug7734_patched) { in usb_reset_338x()
2132 tmp = readl(&dev->regs->devinit) | in usb_reset_338x()
2138 writel(tmp, &dev->regs->devinit); in usb_reset_338x()
2141 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */ in usb_reset_338x()
2142 INIT_LIST_HEAD(&dev->gadget.ep_list); in usb_reset_338x()
2144 for (tmp = 1; tmp < dev->n_ep; tmp++) in usb_reset_338x()
2145 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list); in usb_reset_338x()
2151 if (dev->quirks & PLX_LEGACY) in usb_reset()
2162 struct net2280_ep *ep = &dev->ep[tmp]; in usb_reinit_228x()
2164 ep->ep.name = ep_info_dft[tmp].name; in usb_reinit_228x()
2165 ep->ep.caps = ep_info_dft[tmp].caps; in usb_reinit_228x()
2166 ep->dev = dev; in usb_reinit_228x()
2167 ep->num = tmp; in usb_reinit_228x()
2170 ep->fifo_size = 1024; in usb_reinit_228x()
2171 ep->dma = &dev->dma[tmp - 1]; in usb_reinit_228x()
2173 ep->fifo_size = 64; in usb_reinit_228x()
2174 ep->regs = &dev->epregs[tmp]; in usb_reinit_228x()
2175 ep->cfg = &dev->epregs[tmp]; in usb_reinit_228x()
2176 ep_reset_228x(dev->regs, ep); in usb_reinit_228x()
2178 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); in usb_reinit_228x()
2179 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64); in usb_reinit_228x()
2180 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64); in usb_reinit_228x()
2182 dev->gadget.ep0 = &dev->ep[0].ep; in usb_reinit_228x()
2183 dev->ep[0].stopped = 0; in usb_reinit_228x()
2184 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); in usb_reinit_228x()
2190 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg); in usb_reinit_228x()
2202 for (i = 0; i < dev->n_ep; i++) { in usb_reinit_338x()
2203 struct net2280_ep *ep = &dev->ep[i]; in usb_reinit_338x()
2205 ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name : in usb_reinit_338x()
2207 ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps : in usb_reinit_338x()
2209 ep->dev = dev; in usb_reinit_338x()
2210 ep->num = i; in usb_reinit_338x()
2213 ep->dma = &dev->dma[i - 1]; in usb_reinit_338x()
2215 if (dev->enhanced_mode) { in usb_reinit_338x()
2216 ep->cfg = &dev->epregs[ne[i]]; in usb_reinit_338x()
2222 writel(ne[i], &ep->cfg->ep_cfg); in usb_reinit_338x()
2223 ep->regs = (struct net2280_ep_regs __iomem *) in usb_reinit_338x()
2224 (((void __iomem *)&dev->epregs[ne[i]]) + in usb_reinit_338x()
2227 ep->cfg = &dev->epregs[i]; in usb_reinit_338x()
2228 ep->regs = &dev->epregs[i]; in usb_reinit_338x()
2231 ep->fifo_size = (i != 0) ? 2048 : 512; in usb_reinit_338x()
2233 ep_reset_338x(dev->regs, ep); in usb_reinit_338x()
2235 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512); in usb_reinit_338x()
2237 dev->gadget.ep0 = &dev->ep[0].ep; in usb_reinit_338x()
2238 dev->ep[0].stopped = 0; in usb_reinit_338x()
2241 if (dev->bug7734_patched) { in usb_reinit_338x()
2242 tmp = readl(&dev->usb_ext->usbctl2) & in usb_reinit_338x()
2244 writel(tmp, &dev->usb_ext->usbctl2); in usb_reinit_338x()
2248 val = readl(&dev->llregs->ll_lfps_5); in usb_reinit_338x()
2251 writel(val, &dev->llregs->ll_lfps_5); in usb_reinit_338x()
2253 val = readl(&dev->llregs->ll_lfps_6); in usb_reinit_338x()
2256 writel(val, &dev->llregs->ll_lfps_6); in usb_reinit_338x()
2263 val = readl(&dev->llregs->ll_tsn_counters_2); in usb_reinit_338x()
2266 writel(val, &dev->llregs->ll_tsn_counters_2); in usb_reinit_338x()
2268 val = readl(&dev->llregs->ll_tsn_counters_3); in usb_reinit_338x()
2271 writel(val, &dev->llregs->ll_tsn_counters_3); in usb_reinit_338x()
2275 * Handshake Signaling for Device-Initiated U1 Exit is too short. in usb_reinit_338x()
2279 val = readl(&dev->llregs->ll_lfps_timers_2); in usb_reinit_338x()
2281 &dev->llregs->ll_lfps_timers_2); in usb_reinit_338x()
2285 * - On SS connections, setting Recovery Idle to Recover Fmw improves in usb_reinit_338x()
2287 * - It is safe to set for all connection speeds; all chip revisions. in usb_reinit_338x()
2288 * - R-M-W to leave other bits undisturbed. in usb_reinit_338x()
2289 * - Reference PLX TT-7372 in usb_reinit_338x()
2291 val = readl(&dev->llregs->ll_tsn_chicken_bit); in usb_reinit_338x()
2293 writel(val, &dev->llregs->ll_tsn_chicken_bit); in usb_reinit_338x()
2295 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); in usb_reinit_338x()
2298 writel(0x0D, &dev->dep[0].dep_cfg); in usb_reinit_338x()
2299 writel(0x0D, &dev->dep[1].dep_cfg); in usb_reinit_338x()
2300 writel(0x0E, &dev->dep[2].dep_cfg); in usb_reinit_338x()
2301 writel(0x0E, &dev->dep[3].dep_cfg); in usb_reinit_338x()
2302 writel(0x0F, &dev->dep[4].dep_cfg); in usb_reinit_338x()
2303 writel(0x0C, &dev->dep[5].dep_cfg); in usb_reinit_338x()
2308 if (dev->quirks & PLX_LEGACY) in usb_reinit()
2318 &dev->epregs[0].ep_rsp); in ep0_start_228x()
2331 &dev->usb->stdrsp); in ep0_start_228x()
2335 (dev->softconnect << USB_DETECT_ENABLE) | in ep0_start_228x()
2337 &dev->usb->usbctl); in ep0_start_228x()
2342 &dev->regs->pciirqenb0); in ep0_start_228x()
2350 &dev->regs->pciirqenb1); in ep0_start_228x()
2353 (void) readl(&dev->usb->usbctl); in ep0_start_228x()
2359 if (dev->bug7734_patched) in ep0_start_338x()
2362 &dev->epregs[0].ep_rsp); in ep0_start_338x()
2376 &dev->usb->stdrsp); in ep0_start_338x()
2377 dev->wakeup_enable = 1; in ep0_start_338x()
2379 (dev->softconnect << USB_DETECT_ENABLE) | in ep0_start_338x()
2381 &dev->usb->usbctl); in ep0_start_338x()
2386 &dev->regs->pciirqenb0); in ep0_start_338x()
2391 &dev->regs->pciirqenb1); in ep0_start_338x()
2394 (void)readl(&dev->usb->usbctl); in ep0_start_338x()
2399 if (dev->quirks & PLX_LEGACY) in ep0_start()
2406 * non-control requests. then usb traffic follows until a
2418 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) in net2280_start()
2421 if (!driver || driver->max_speed < USB_SPEED_HIGH || in net2280_start()
2422 !driver->setup) in net2280_start()
2423 return -EINVAL; in net2280_start()
2427 for (i = 0; i < dev->n_ep; i++) in net2280_start()
2428 dev->ep[i].irqs = 0; in net2280_start()
2431 driver->driver.bus = NULL; in net2280_start()
2432 dev->driver = driver; in net2280_start()
2434 retval = device_create_file(&dev->pdev->dev, &dev_attr_function); in net2280_start()
2437 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues); in net2280_start()
2446 if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched) in net2280_start()
2455 device_remove_file(&dev->pdev->dev, &dev_attr_function); in net2280_start()
2457 dev->driver = NULL; in net2280_start()
2466 if (dev->gadget.speed == USB_SPEED_UNKNOWN) in stop_activity()
2473 for (i = 0; i < dev->n_ep; i++) in stop_activity()
2474 nuke(&dev->ep[i]); in stop_activity()
2477 if (dev->async_callbacks && driver) { in stop_activity()
2478 spin_unlock(&dev->lock); in stop_activity()
2479 driver->disconnect(&dev->gadget); in stop_activity()
2480 spin_lock(&dev->lock); in stop_activity()
2493 spin_lock_irqsave(&dev->lock, flags); in net2280_stop()
2495 spin_unlock_irqrestore(&dev->lock, flags); in net2280_stop()
2499 device_remove_file(&dev->pdev->dev, &dev_attr_function); in net2280_stop()
2500 device_remove_file(&dev->pdev->dev, &dev_attr_queues); in net2280_stop()
2502 dev->driver = NULL; in net2280_stop()
2511 spin_lock_irq(&dev->lock); in net2280_async_callbacks()
2512 dev->async_callbacks = enable; in net2280_async_callbacks()
2513 spin_unlock_irq(&dev->lock); in net2280_async_callbacks()
2516 /*-------------------------------------------------------------------------*/
2518 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2519 * also works for dma-capable endpoints, in pio mode or just
2526 /* 0 error, 1 mid-data, 2 done */ in handle_ep_small()
2529 if (!list_empty(&ep->queue)) in handle_ep_small()
2530 req = list_entry(ep->queue.next, in handle_ep_small()
2536 t = readl(&ep->regs->ep_stat); in handle_ep_small()
2537 ep->irqs++; in handle_ep_small()
2539 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n", in handle_ep_small()
2540 ep->ep.name, t, req ? &req->req : NULL); in handle_ep_small()
2542 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) in handle_ep_small()
2543 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat); in handle_ep_small()
2546 writel(t, &ep->regs->ep_stat); in handle_ep_small()
2558 if (unlikely(ep->num == 0)) { in handle_ep_small()
2559 if (ep->is_in) { in handle_ep_small()
2562 if (ep->dev->protocol_stall) { in handle_ep_small()
2563 ep->stopped = 1; in handle_ep_small()
2571 if (ep->dev->protocol_stall) { in handle_ep_small()
2572 ep->stopped = 1; in handle_ep_small()
2575 } else if (ep->responded && in handle_ep_small()
2576 !req && !ep->stopped) in handle_ep_small()
2582 if (ep->dev->protocol_stall) { in handle_ep_small()
2583 ep->stopped = 1; in handle_ep_small()
2590 req->req.actual == req->req.length) || in handle_ep_small()
2591 (ep->responded && !req)) { in handle_ep_small()
2592 ep->dev->protocol_stall = 1; in handle_ep_small()
2594 ep->stopped = 1; in handle_ep_small()
2596 done(ep, req, -EOVERFLOW); in handle_ep_small()
2606 if (likely(ep->dma)) { in handle_ep_small()
2609 int stopped = ep->stopped; in handle_ep_small()
2618 ep->stopped = 1; in handle_ep_small()
2619 for (count = 0; ; t = readl(&ep->regs->ep_stat)) { in handle_ep_small()
2625 if (unlikely(list_empty(&ep->queue) || in handle_ep_small()
2626 ep->out_overflow)) { in handle_ep_small()
2630 req = list_entry(ep->queue.next, in handle_ep_small()
2637 count = readl(&ep->dma->dmacount); in handle_ep_small()
2639 if (readl(&ep->dma->dmadesc) in handle_ep_small()
2640 != req->td_dma) in handle_ep_small()
2650 readl(&ep->dma->dmadesc) != in handle_ep_small()
2651 req->td_dma && stuck++ > 5) { in handle_ep_small()
2653 &ep->dma->dmacount); in handle_ep_small()
2656 ep_dbg(ep->dev, "%s escape stuck %d, count %u\n", in handle_ep_small()
2657 ep->ep.name, stuck, in handle_ep_small()
2673 writel(BIT(DMA_ABORT), &ep->dma->dmastat); in handle_ep_small()
2674 spin_stop_dma(ep->dma); in handle_ep_small()
2677 req->td->dmacount = 0; in handle_ep_small()
2678 t = readl(&ep->regs->ep_avail); in handle_ep_small()
2680 (ep->out_overflow || t) in handle_ep_small()
2681 ? -EOVERFLOW : 0); in handle_ep_small()
2685 if (unlikely(ep->out_overflow || in handle_ep_small()
2686 (ep->dev->chiprev == 0x0100 && in handle_ep_small()
2687 ep->dev->gadget.speed in handle_ep_small()
2690 ep->out_overflow = 0; in handle_ep_small()
2694 ep->stopped = stopped; in handle_ep_small()
2695 if (!list_empty(&ep->queue)) in handle_ep_small()
2698 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n", in handle_ep_small()
2699 ep->ep.name, t); in handle_ep_small()
2704 if (read_fifo(ep, req) && ep->num != 0) in handle_ep_small()
2711 len = req->req.length - req->req.actual; in handle_ep_small()
2712 if (len > ep->ep.maxpacket) in handle_ep_small()
2713 len = ep->ep.maxpacket; in handle_ep_small()
2714 req->req.actual += len; in handle_ep_small()
2718 if ((req->req.actual == req->req.length) && in handle_ep_small()
2719 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) in handle_ep_small()
2732 if (ep->num == 0) { in handle_ep_small()
2737 if (!ep->stopped) in handle_ep_small()
2741 if (!list_empty(&ep->queue) && !ep->stopped) in handle_ep_small()
2742 req = list_entry(ep->queue.next, in handle_ep_small()
2746 if (req && !ep->is_in) in handle_ep_small()
2754 if (req && !ep->stopped) { in handle_ep_small()
2758 write_fifo(ep, &req->req); in handle_ep_small()
2767 return &dev->ep[0]; in get_ep_by_addr()
2768 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { in get_ep_by_addr()
2771 if (!ep->desc) in get_ep_by_addr()
2773 bEndpointAddress = ep->desc->bEndpointAddress; in get_ep_by_addr()
2788 scratch = get_idx_reg(dev->regs, SCRATCH); in defect7374_workaround()
2797 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) { in defect7374_workaround()
2799 * Connection is NOT SS: in defect7374_workaround()
2800 * - Connection must be FS or HS. in defect7374_workaround()
2801 * - This FSM state should allow workaround software to in defect7374_workaround()
2805 dev->bug7734_patched = 1; in defect7374_workaround()
2809 /* Connection is SS: */ in defect7374_workaround()
2814 state = readl(&dev->plregs->pl_ep_status_1) in defect7374_workaround()
2819 dev->bug7734_patched = 1; in defect7374_workaround()
2825 * - Wait and try again. in defect7374_workaround()
2833 "to detect SS host's data phase ACK."); in defect7374_workaround()
2844 * Restore data EPs to their pre-workaround settings (disabled, in defect7374_workaround()
2849 set_idx_reg(dev->regs, SCRATCH, scratch); in defect7374_workaround()
2856 struct net2280 *dev = ep->dev; in ep_clear_seqnum()
2860 val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f; in ep_clear_seqnum()
2861 val |= ep_pl[ep->num]; in ep_clear_seqnum()
2862 writel(val, &dev->plregs->pl_ep_ctrl); in ep_clear_seqnum()
2864 writel(val, &dev->plregs->pl_ep_ctrl); in ep_clear_seqnum()
2882 dev->addressed_state = !w_value; in handle_stat0_irqs_superspeed()
2888 status = dev->wakeup_enable ? 0x02 : 0x00; in handle_stat0_irqs_superspeed()
2889 if (dev->gadget.is_selfpowered) in handle_stat0_irqs_superspeed()
2891 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 | in handle_stat0_irqs_superspeed()
2892 dev->ltm_enable << 4); in handle_stat0_irqs_superspeed()
2893 writel(0, &dev->epregs[0].ep_irqenb); in handle_stat0_irqs_superspeed()
2895 writel((__force u32) status, &dev->epregs[0].ep_data); in handle_stat0_irqs_superspeed()
2903 status = readl(&e->regs->ep_rsp) & in handle_stat0_irqs_superspeed()
2905 writel(0, &dev->epregs[0].ep_irqenb); in handle_stat0_irqs_superspeed()
2907 writel((__force u32) status, &dev->epregs[0].ep_data); in handle_stat0_irqs_superspeed()
2919 if (!dev->addressed_state) { in handle_stat0_irqs_superspeed()
2922 dev->u1_enable = 0; in handle_stat0_irqs_superspeed()
2923 writel(readl(&dev->usb_ext->usbctl2) & in handle_stat0_irqs_superspeed()
2925 &dev->usb_ext->usbctl2); in handle_stat0_irqs_superspeed()
2930 dev->u2_enable = 0; in handle_stat0_irqs_superspeed()
2931 writel(readl(&dev->usb_ext->usbctl2) & in handle_stat0_irqs_superspeed()
2933 &dev->usb_ext->usbctl2); in handle_stat0_irqs_superspeed()
2938 dev->ltm_enable = 0; in handle_stat0_irqs_superspeed()
2939 writel(readl(&dev->usb_ext->usbctl2) & in handle_stat0_irqs_superspeed()
2941 &dev->usb_ext->usbctl2); in handle_stat0_irqs_superspeed()
2950 dev->wakeup_enable = 0; in handle_stat0_irqs_superspeed()
2951 writel(readl(&dev->usb->usbctl) & in handle_stat0_irqs_superspeed()
2953 &dev->usb->usbctl); in handle_stat0_irqs_superspeed()
2965 ep_vdbg(dev, "%s clear halt\n", e->ep.name); in handle_stat0_irqs_superspeed()
2967 * Workaround for SS SeqNum not cleared via in handle_stat0_irqs_superspeed()
2972 if (!list_empty(&e->queue) && e->td_dma) in handle_stat0_irqs_superspeed()
2975 ep->stopped = 1; in handle_stat0_irqs_superspeed()
2985 if (!dev->addressed_state) { in handle_stat0_irqs_superspeed()
2988 dev->u1_enable = 1; in handle_stat0_irqs_superspeed()
2989 writel(readl(&dev->usb_ext->usbctl2) | in handle_stat0_irqs_superspeed()
2991 &dev->usb_ext->usbctl2); in handle_stat0_irqs_superspeed()
2996 dev->u2_enable = 1; in handle_stat0_irqs_superspeed()
2997 writel(readl(&dev->usb_ext->usbctl2) | in handle_stat0_irqs_superspeed()
2999 &dev->usb_ext->usbctl2); in handle_stat0_irqs_superspeed()
3004 dev->ltm_enable = 1; in handle_stat0_irqs_superspeed()
3005 writel(readl(&dev->usb_ext->usbctl2) | in handle_stat0_irqs_superspeed()
3007 &dev->usb_ext->usbctl2); in handle_stat0_irqs_superspeed()
3016 dev->wakeup_enable = 1; in handle_stat0_irqs_superspeed()
3017 writel(readl(&dev->usb->usbctl) | in handle_stat0_irqs_superspeed()
3019 &dev->usb->usbctl); in handle_stat0_irqs_superspeed()
3029 ep->stopped = 1; in handle_stat0_irqs_superspeed()
3030 if (ep->num == 0) in handle_stat0_irqs_superspeed()
3031 ep->dev->protocol_stall = 1; in handle_stat0_irqs_superspeed()
3033 if (ep->dma) in handle_stat0_irqs_superspeed()
3051 readl(&ep->cfg->ep_cfg)); in handle_stat0_irqs_superspeed()
3053 ep->responded = 0; in handle_stat0_irqs_superspeed()
3054 if (dev->async_callbacks) { in handle_stat0_irqs_superspeed()
3055 spin_unlock(&dev->lock); in handle_stat0_irqs_superspeed()
3056 tmp = dev->driver->setup(&dev->gadget, &r); in handle_stat0_irqs_superspeed()
3057 spin_lock(&dev->lock); in handle_stat0_irqs_superspeed()
3064 dev->protocol_stall = 1; in handle_stat0_irqs_superspeed()
3094 handle_ep_small(&dev->ep[index]); in usb338x_handle_ep_intr()
3101 u32 num, scratch; in handle_stat0_irqs() local
3118 if (dev->gadget.speed == USB_SPEED_UNKNOWN) { in handle_stat0_irqs()
3119 u32 val = readl(&dev->usb->usbstat); in handle_stat0_irqs()
3121 dev->gadget.speed = USB_SPEED_SUPER; in handle_stat0_irqs()
3122 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, in handle_stat0_irqs()
3125 dev->gadget.speed = USB_SPEED_HIGH; in handle_stat0_irqs()
3126 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, in handle_stat0_irqs()
3129 dev->gadget.speed = USB_SPEED_FULL; in handle_stat0_irqs()
3130 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, in handle_stat0_irqs()
3133 net2280_led_speed(dev, dev->gadget.speed); in handle_stat0_irqs()
3135 usb_speed_string(dev->gadget.speed)); in handle_stat0_irqs()
3138 ep = &dev->ep[0]; in handle_stat0_irqs()
3139 ep->irqs++; in handle_stat0_irqs()
3143 while (!list_empty(&ep->queue)) { in handle_stat0_irqs()
3144 req = list_entry(ep->queue.next, in handle_stat0_irqs()
3146 done(ep, req, (req->req.actual == req->req.length) in handle_stat0_irqs()
3147 ? 0 : -EPROTO); in handle_stat0_irqs()
3149 ep->stopped = 0; in handle_stat0_irqs()
3150 dev->protocol_stall = 0; in handle_stat0_irqs()
3151 if (!(dev->quirks & PLX_PCIE)) { in handle_stat0_irqs()
3152 if (ep->dev->quirks & PLX_2280) in handle_stat0_irqs()
3170 &ep->regs->ep_stat); in handle_stat0_irqs()
3172 u.raw[0] = readl(&dev->usb->setup0123); in handle_stat0_irqs()
3173 u.raw[1] = readl(&dev->usb->setup4567); in handle_stat0_irqs()
3178 if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched) in handle_stat0_irqs()
3188 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0); in handle_stat0_irqs()
3196 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; in handle_stat0_irqs()
3197 if (ep->is_in) { in handle_stat0_irqs()
3206 writel(scratch, &dev->epregs[0].ep_irqenb); in handle_stat0_irqs()
3211 ep->responded = 1; in handle_stat0_irqs()
3213 if (dev->gadget.speed == USB_SPEED_SUPER) { in handle_stat0_irqs()
3230 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT)) in handle_stat0_irqs()
3236 writel(0, &dev->epregs[0].ep_irqenb); in handle_stat0_irqs()
3238 writel((__force u32)status, &dev->epregs[0].ep_data); in handle_stat0_irqs()
3240 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status); in handle_stat0_irqs()
3255 if (e->wedged) { in handle_stat0_irqs()
3257 ep->ep.name); in handle_stat0_irqs()
3259 ep_vdbg(dev, "%s clear halt\n", e->ep.name); in handle_stat0_irqs()
3261 if ((ep->dev->quirks & PLX_PCIE) && in handle_stat0_irqs()
3262 !list_empty(&e->queue) && e->td_dma) in handle_stat0_irqs()
3280 if (e->ep.name == ep0name) in handle_stat0_irqs()
3283 if ((dev->quirks & PLX_PCIE) && e->dma) in handle_stat0_irqs()
3286 ep_vdbg(dev, "%s set halt\n", ep->ep.name); in handle_stat0_irqs()
3296 readl(&ep->cfg->ep_cfg)); in handle_stat0_irqs()
3297 ep->responded = 0; in handle_stat0_irqs()
3298 if (dev->async_callbacks) { in handle_stat0_irqs()
3299 spin_unlock(&dev->lock); in handle_stat0_irqs()
3300 tmp = dev->driver->setup(&dev->gadget, &u.r); in handle_stat0_irqs()
3301 spin_lock(&dev->lock); in handle_stat0_irqs()
3310 dev->protocol_stall = 1; in handle_stat0_irqs()
3324 if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) { in handle_stat0_irqs()
3337 for (num = 0; scratch; num++) { in handle_stat0_irqs()
3341 t = BIT(num); in handle_stat0_irqs()
3346 ep = &dev->ep[num]; in handle_stat0_irqs()
3365 __releases(dev->lock) in handle_stat1_irqs()
3366 __acquires(dev->lock) in handle_stat1_irqs()
3369 u32 tmp, num, mask, scratch; in handle_stat1_irqs() local
3388 writel(tmp, &dev->regs->irqstat1); in handle_stat1_irqs()
3389 if (dev->gadget.speed != USB_SPEED_UNKNOWN) { in handle_stat1_irqs()
3391 (readl(&dev->usb->usbctl) & in handle_stat1_irqs()
3395 dev->driver->driver.name); in handle_stat1_irqs()
3397 (readl(&dev->usb->usbstat) & mask) in handle_stat1_irqs()
3401 dev->driver->driver.name); in handle_stat1_irqs()
3405 stop_activity(dev, dev->driver); in handle_stat1_irqs()
3407 if (dev->async_callbacks) { in handle_stat1_irqs()
3408 spin_unlock(&dev->lock); in handle_stat1_irqs()
3410 usb_gadget_udc_reset(&dev->gadget, dev->driver); in handle_stat1_irqs()
3412 (dev->driver->disconnect)(&dev->gadget); in handle_stat1_irqs()
3413 spin_lock(&dev->lock); in handle_stat1_irqs()
3432 writel(tmp, &dev->regs->irqstat1); in handle_stat1_irqs()
3433 spin_unlock(&dev->lock); in handle_stat1_irqs()
3435 if (dev->async_callbacks && dev->driver->suspend) in handle_stat1_irqs()
3436 dev->driver->suspend(&dev->gadget); in handle_stat1_irqs()
3440 if (dev->async_callbacks && dev->driver->resume) in handle_stat1_irqs()
3441 dev->driver->resume(&dev->gadget); in handle_stat1_irqs()
3444 spin_lock(&dev->lock); in handle_stat1_irqs()
3450 writel(stat, &dev->regs->irqstat1); in handle_stat1_irqs()
3453 if (dev->quirks & PLX_2280) in handle_stat1_irqs()
3468 /* DMA status, for ep-{a,b,c,d} */ in handle_stat1_irqs()
3472 for (num = 0; scratch; num++) { in handle_stat1_irqs()
3475 tmp = BIT(num); in handle_stat1_irqs()
3480 ep = &dev->ep[num + 1]; in handle_stat1_irqs()
3481 dma = ep->dma; in handle_stat1_irqs()
3487 tmp = readl(&dma->dmastat); in handle_stat1_irqs()
3488 writel(tmp, &dma->dmastat); in handle_stat1_irqs()
3491 if (dev->quirks & PLX_PCIE) { in handle_stat1_irqs()
3492 u32 r_dmacount = readl(&dma->dmacount); in handle_stat1_irqs()
3493 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) && in handle_stat1_irqs()
3499 ep_dbg(ep->dev, "%s no xact done? %08x\n", in handle_stat1_irqs()
3500 ep->ep.name, tmp); in handle_stat1_irqs()
3503 stop_dma(ep->dma); in handle_stat1_irqs()
3508 * less than req->length. NAK_OUT_PACKETS may be set, in handle_stat1_irqs()
3513 * precisely (like PIO does) needs per-packet irqs in handle_stat1_irqs()
3518 if (!list_empty(&ep->queue)) { in handle_stat1_irqs()
3519 tmp = readl(&dma->dmactl); in handle_stat1_irqs()
3522 ep->irqs++; in handle_stat1_irqs()
3534 stop_activity(dev, dev->driver); in handle_stat1_irqs()
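/*
 * Illustrative sketch, not from the driver: the DMA-completion path above
 * masks the low 24 bits of DMACOUNT (the outstanding byte count on these
 * parts) to detect a short OUT transfer; the bytes actually moved are the
 * programmed length minus that residue.  Userspace model with made-up
 * values; bytes_transferred() is a hypothetical helper.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_BYTE_COUNT_MASK 0x00FFFFFF     /* low 24 bits hold the residue */

static uint32_t bytes_transferred(uint32_t programmed, uint32_t dmacount)
{
        uint32_t residue = dmacount & DMA_BYTE_COUNT_MASK;

        return programmed - residue;       /* bytes the controller moved */
}

int main(void)
{
        /* e.g. a 512-byte OUT request that ended short after 200 bytes */
        printf("%u bytes done\n", (unsigned)bytes_transferred(512, 312));
        return 0;
}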
3548 if ((dev->quirks & PLX_LEGACY) && in net2280_irq()
3549 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED)))) in net2280_irq()
3552 spin_lock(&dev->lock); in net2280_irq()
3555 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1)); in net2280_irq()
3558 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0)); in net2280_irq()
3560 if (dev->quirks & PLX_PCIE) { in net2280_irq()
3561 /* re-enable interrupt to trigger any possible new interrupt */ in net2280_irq()
3562 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1); in net2280_irq()
3563 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1); in net2280_irq()
3564 writel(pciirqenb1, &dev->regs->pciirqenb1); in net2280_irq()
3567 spin_unlock(&dev->lock); in net2280_irq()
3572 /*-------------------------------------------------------------------------*/
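/*
 * Illustrative sketch, not from the driver: on the PCIe variants the ISR
 * above rewrites PCIIRQENB1 with its top bit masked and then restores the
 * original value, so that any interrupt which became pending while the
 * handler ran is signalled again.  Userspace model with a plain variable
 * standing in for the memory-mapped register; the value is made up.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_pciirqenb1 = 0x800000FFu;  /* made-up contents */

static void rearm_irq(volatile uint32_t *reg)
{
        uint32_t val = *reg;

        *reg = val & 0x7FFFFFFF;   /* momentarily drop the top enable bit */
        *reg = val;                /* restore, re-triggering anything pending */
}

int main(void)
{
        rearm_irq(&fake_pciirqenb1);
        printf("0x%08X\n", (unsigned)fake_pciirqenb1);
        return 0;
}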
3587 if (dev->added) in net2280_remove()
3588 usb_del_gadget(&dev->gadget); in net2280_remove()
3590 BUG_ON(dev->driver); in net2280_remove()
3593 if (dev->requests) { in net2280_remove()
3596 if (!dev->ep[i].dummy) in net2280_remove()
3598 dma_pool_free(dev->requests, dev->ep[i].dummy, in net2280_remove()
3599 dev->ep[i].td_dma); in net2280_remove()
3601 dma_pool_destroy(dev->requests); in net2280_remove()
3603 if (dev->got_irq) in net2280_remove()
3604 free_irq(pdev->irq, dev); in net2280_remove()
3605 if (dev->quirks & PLX_PCIE) in net2280_remove()
3607 if (dev->regs) { in net2280_remove()
3609 iounmap(dev->regs); in net2280_remove()
3611 if (dev->region) in net2280_remove()
3614 if (dev->enabled) in net2280_remove()
3616 device_remove_file(&pdev->dev, &dev_attr_registers); in net2280_remove()
3619 usb_put_gadget(&dev->gadget); in net2280_remove()
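/*
 * Illustrative sketch, not from the driver: net2280_remove() above only
 * undoes the probe steps that actually completed (DMA pool, irq, register
 * mapping, memory region, PCI enable), keyed off per-resource flags and
 * released in reverse order of acquisition.  Condensed userspace model;
 * the struct, flags and release helpers here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
        bool got_irq, region, enabled;
};

static void release_irq(void)    { puts("free_irq"); }
static void release_region(void) { puts("release_mem_region"); }
static void disable_dev(void)    { puts("pci_disable_device"); }

static void teardown(struct fake_dev *dev)
{
        if (dev->got_irq)        /* undo in reverse order of setup */
                release_irq();
        if (dev->region)
                release_region();
        if (dev->enabled)
                disable_dev();
}

int main(void)
{
        struct fake_dev dev = { .got_irq = true, .region = false, .enabled = true };

        teardown(&dev);          /* skips the region it never claimed */
        return 0;
}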
3636 retval = -ENOMEM; in net2280_probe()
3641 usb_initialize_gadget(&pdev->dev, &dev->gadget, gadget_release); in net2280_probe()
3642 spin_lock_init(&dev->lock); in net2280_probe()
3643 dev->quirks = id->driver_data; in net2280_probe()
3644 dev->pdev = pdev; in net2280_probe()
3645 dev->gadget.ops = &net2280_ops; in net2280_probe()
3646 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ? in net2280_probe()
3650 dev->gadget.name = driver_name; in net2280_probe()
3654 retval = -ENODEV; in net2280_probe()
3657 dev->enabled = 1; in net2280_probe()
3667 retval = -EBUSY; in net2280_probe()
3670 dev->region = 1; in net2280_probe()
3679 retval = -EFAULT; in net2280_probe()
3682 dev->regs = (struct net2280_regs __iomem *) base; in net2280_probe()
3683 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080); in net2280_probe()
3684 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100); in net2280_probe()
3685 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180); in net2280_probe()
3686 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200); in net2280_probe()
3687 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300); in net2280_probe()
3689 if (dev->quirks & PLX_PCIE) { in net2280_probe()
3692 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *) in net2280_probe()
3694 dev->llregs = (struct usb338x_ll_regs __iomem *) in net2280_probe()
3696 dev->plregs = (struct usb338x_pl_regs __iomem *) in net2280_probe()
3698 usbstat = readl(&dev->usb->usbstat); in net2280_probe()
3699 dev->enhanced_mode = !!(usbstat & BIT(11)); in net2280_probe()
3700 dev->n_ep = (dev->enhanced_mode) ? 9 : 5; in net2280_probe()
3702 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & in net2280_probe()
3706 dev->bug7734_patched = 1; in net2280_probe()
3707 writel(0, &dev->usb->usbctl); in net2280_probe()
3709 dev->bug7734_patched = 0; in net2280_probe()
3711 dev->enhanced_mode = 0; in net2280_probe()
3712 dev->n_ep = 7; in net2280_probe()
3714 writel(0, &dev->usb->usbctl); in net2280_probe()
3721 if (!pdev->irq) { in net2280_probe()
3723 retval = -ENODEV; in net2280_probe()
3727 if (dev->quirks & PLX_PCIE) in net2280_probe()
3731 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED, in net2280_probe()
3733 ep_err(dev, "request interrupt %d failed\n", pdev->irq); in net2280_probe()
3734 retval = -EBUSY; in net2280_probe()
3737 dev->got_irq = 1; in net2280_probe()
3741 dev->requests = dma_pool_create("requests", &pdev->dev, in net2280_probe()
3744 0 /* or page-crossing issues */); in net2280_probe()
3745 if (!dev->requests) { in net2280_probe()
3747 retval = -ENOMEM; in net2280_probe()
3753 td = dma_pool_alloc(dev->requests, GFP_KERNEL, in net2280_probe()
3754 &dev->ep[i].td_dma); in net2280_probe()
3757 retval = -ENOMEM; in net2280_probe()
3760 td->dmacount = 0; /* not VALID */ in net2280_probe()
3761 td->dmadesc = td->dmaaddr; in net2280_probe()
3762 dev->ep[i].dummy = td; in net2280_probe()
3765 /* enable lower-overhead pci memory bursts during DMA */ in net2280_probe()
3766 if (dev->quirks & PLX_LEGACY) in net2280_probe()
3774 &dev->pci->pcimstctl); in net2280_probe()
3780 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff; in net2280_probe()
3785 pdev->irq, base, dev->chiprev); in net2280_probe()
3787 dev->enhanced_mode ? "enhanced mode" : "legacy mode"); in net2280_probe()
3788 retval = device_create_file(&pdev->dev, &dev_attr_registers); in net2280_probe()
3792 retval = usb_add_gadget(&dev->gadget); in net2280_probe()
3795 dev->added = 1; in net2280_probe()
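/*
 * Illustrative sketch, not from the driver: probe above carves one
 * ioremap()ed BAR into per-block register structs at fixed offsets
 * (+0x0080 usb, +0x0100 pci, +0x0180 dma, +0x0200 dep, +0x0300 ep regs).
 * Userspace model of the same pointer arithmetic; the struct layouts are
 * made up and a calloc()ed buffer stands in for the mapped BAR.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct usb_regs { uint32_t usbctl, usbstat; };   /* made-up layouts */
struct ep_regs  { uint32_t ep_cfg, ep_rsp; };

int main(void)
{
        uint8_t *base = calloc(1, 0x1000);       /* stands in for ioremap() */
        if (!base)
                return 1;

        struct usb_regs *usb = (struct usb_regs *)(base + 0x0080);
        struct ep_regs  *ep  = (struct ep_regs  *)(base + 0x0300);

        printf("usb block at +0x%zx, ep block at +0x%zx\n",
               (size_t)((uint8_t *)usb - base), (size_t)((uint8_t *)ep - base));
        free(base);
        return 0;
}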
3815 writel(0, &dev->regs->pciirqenb0); in net2280_shutdown()
3816 writel(0, &dev->regs->pciirqenb1); in net2280_shutdown()
3819 writel(0, &dev->usb->usbctl); in net2280_shutdown()
3824 /*-------------------------------------------------------------------------*/