Lines Matching refs:musb — each entry gives the source line number, the matching code, and the enclosing function.

33 			struct musb *musb, struct musb_ep *musb_ep)  in map_dma_buffer()  argument
36 struct dma_controller *dma = musb->dma_controller; in map_dma_buffer()
59 musb->controller, in map_dma_buffer()
65 ret = dma_mapping_error(musb->controller, dma_addr); in map_dma_buffer()
72 dma_sync_single_for_device(musb->controller, in map_dma_buffer()
84 struct musb *musb) in unmap_dma_buffer() argument
92 dev_vdbg(musb->controller, in unmap_dma_buffer()
97 dma_unmap_single(musb->controller, in unmap_dma_buffer()
105 dma_sync_single_for_cpu(musb->controller, in unmap_dma_buffer()
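The map_dma_buffer()/unmap_dma_buffer() references above follow the standard streaming DMA API: map the request buffer against musb->controller, verify the result with dma_mapping_error(), and later unmap or sync it when handing the buffer back. A minimal sketch of that pattern, assuming a generic kernel context (the example_* helpers are hypothetical, not the driver's code):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Map a TX buffer for device access; caller falls back to PIO on failure. */
	static int example_map_for_tx(struct device *dev, void *buf, size_t len,
				      dma_addr_t *dma)
	{
		*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma))
			return -ENOMEM;
		return 0;
	}

	/* Release the mapping once the controller is done with the buffer. */
	static void example_unmap_for_tx(struct device *dev, dma_addr_t dma, size_t len)
	{
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	}

When the gadget layer already mapped the buffer, the listed code only syncs it with dma_sync_single_for_device()/dma_sync_single_for_cpu() instead of mapping and unmapping again.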
126 __releases(ep->musb->lock) in musb_g_giveback()
127 __acquires(ep->musb->lock) in musb_g_giveback()
130 struct musb *musb; in musb_g_giveback() local
138 musb = req->musb; in musb_g_giveback()
141 spin_unlock(&musb->lock); in musb_g_giveback()
143 if (!dma_mapping_error(&musb->g.dev, request->dma)) in musb_g_giveback()
144 unmap_dma_buffer(req, musb); in musb_g_giveback()
148 spin_lock(&musb->lock); in musb_g_giveback()
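musb_g_giveback() drops musb->lock around the completion callback so the gadget driver may re-queue or dequeue requests from its ->complete() handler; the __releases()/__acquires() annotations tell sparse that the imbalance inside the function is intentional. A hedged sketch of that shape, assuming the driver's struct musb with its lock field (the example_ name is illustrative):

	#include <linux/spinlock.h>
	#include <linux/usb/gadget.h>

	/* Complete a request without holding the controller lock across the callback. */
	static void example_giveback(struct musb *musb, struct usb_ep *ep,
				     struct usb_request *request, int status)
		__releases(musb->lock)
		__acquires(musb->lock)
	{
		if (request->status == -EINPROGRESS)
			request->status = status;

		spin_unlock(&musb->lock);	/* ->complete() may submit new requests */
		usb_gadget_giveback_request(ep, request);
		spin_lock(&musb->lock);
	}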
160 struct musb *musb = ep->musb; in nuke() local
162 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; in nuke()
167 struct dma_controller *c = ep->musb->dma_controller; in nuke()
188 musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value); in nuke()
208 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) in max_ep_writesize() argument
210 if (can_bulk_split(musb, ep->type)) in max_ep_writesize()
223 static void txstate(struct musb *musb, struct musb_request *req) in txstate() argument
227 void __iomem *epio = musb->endpoints[epnum].regs; in txstate()
236 musb_dbg(musb, "ep:%s disabled - ignore request", in txstate()
243 musb_dbg(musb, "dma pending..."); in txstate()
251 fifo_count = min(max_ep_writesize(musb, musb_ep), in txstate()
255 musb_dbg(musb, "%s old packet still ready , txcsr %03x", in txstate()
261 musb_dbg(musb, "%s stalling, txcsr %03x", in txstate()
266 musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x", in txstate()
272 struct dma_controller *c = musb->dma_controller; in txstate()
283 if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) { in txstate()
323 can_bulk_split(musb, in txstate()
333 if (is_cppi_enabled(musb)) { in txstate()
369 } else if (tusb_dma_omap(musb)) in txstate()
383 unmap_dma_buffer(req, musb); in txstate()
394 musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d", in txstate()
406 void musb_g_tx(struct musb *musb, u8 epnum) in musb_g_tx() argument
411 u8 __iomem *mbase = musb->mregs; in musb_g_tx()
412 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; in musb_g_tx()
413 void __iomem *epio = musb->endpoints[epnum].regs; in musb_g_tx()
421 musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr); in musb_g_tx()
441 dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", in musb_g_tx()
450 musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name); in musb_g_tx()
469 musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p", in musb_g_tx()
482 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) && in musb_g_tx()
514 musb_dbg(musb, "%s idle now", in musb_g_tx()
520 txstate(musb, req); in musb_g_tx()
529 static void rxstate(struct musb *musb, struct musb_request *req) in rxstate() argument
534 void __iomem *epio = musb->endpoints[epnum].regs; in rxstate()
538 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; in rxstate()
550 musb_dbg(musb, "ep:%s disabled - ignore request", in rxstate()
557 musb_dbg(musb, "DMA pending..."); in rxstate()
562 musb_dbg(musb, "%s stalling, RXCSR %04x", in rxstate()
567 if (is_cppi_enabled(musb) && is_buffer_mapped(req)) { in rxstate()
568 struct dma_controller *c = musb->dma_controller; in rxstate()
612 if (musb_dma_inventra(musb)) { in rxstate()
618 c = musb->dma_controller; in rxstate()
687 if ((musb_dma_ux500(musb)) && in rxstate()
694 c = musb->dma_controller; in rxstate()
737 musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d", in rxstate()
744 if (tusb_dma_omap(musb)) { in rxstate()
745 struct dma_controller *c = musb->dma_controller; in rxstate()
764 unmap_dma_buffer(req, musb); in rxstate()
798 void musb_g_rx(struct musb *musb, u8 epnum) in musb_g_rx() argument
803 void __iomem *mbase = musb->mregs; in musb_g_rx()
805 void __iomem *epio = musb->endpoints[epnum].regs; in musb_g_rx()
807 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; in musb_g_rx()
826 musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name, in musb_g_rx()
841 musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request); in musb_g_rx()
847 musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name); in musb_g_rx()
852 musb_dbg(musb, "%s busy, csr %04x", in musb_g_rx()
911 rxstate(musb, req); in musb_g_rx()
923 struct musb *musb; in musb_gadget_enable() local
936 musb = musb_ep->musb; in musb_gadget_enable()
937 mbase = musb->mregs; in musb_gadget_enable()
940 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_enable()
958 ok = musb->hb_iso_tx; in musb_gadget_enable()
960 ok = musb->hb_iso_rx; in musb_gadget_enable()
963 musb_dbg(musb, "no support for high bandwidth ISO"); in musb_gadget_enable()
986 musb_dbg(musb, "packet size beyond hardware FIFO size"); in musb_gadget_enable()
990 musb->intrtxe |= (1 << epnum); in musb_gadget_enable()
991 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); in musb_gadget_enable()
999 if (can_bulk_split(musb, musb_ep->type)) in musb_gadget_enable()
1025 musb_dbg(musb, "packet size beyond hardware FIFO size"); in musb_gadget_enable()
1029 musb->intrrxe |= (1 << epnum); in musb_gadget_enable()
1030 musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe); in musb_gadget_enable()
1062 if (is_dma_capable() && musb->dma_controller) { in musb_gadget_enable()
1063 struct dma_controller *c = musb->dma_controller; in musb_gadget_enable()
1082 schedule_delayed_work(&musb->irq_work, 0); in musb_gadget_enable()
1085 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_enable()
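musb_gadget_enable() does its register work under spin_lock_irqsave(), updating the cached interrupt mask (intrtxe/intrrxe) and then writing it back to MUSB_INTRTXE/MUSB_INTRRXE. A condensed restatement of that cache-then-write pattern, wrapped in a hypothetical helper (struct musb, musb_writew() and MUSB_INTRTXE are the driver's own symbols):

	#include <linux/spinlock.h>

	/* Enable the TX interrupt for one endpoint: update the cache, then the register. */
	static void example_enable_tx_irq(struct musb *musb, u8 epnum)
	{
		unsigned long flags;

		spin_lock_irqsave(&musb->lock, flags);
		musb->intrtxe |= 1 << epnum;
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		spin_unlock_irqrestore(&musb->lock, flags);
	}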
1095 struct musb *musb; in musb_gadget_disable() local
1102 musb = musb_ep->musb; in musb_gadget_disable()
1104 epio = musb->endpoints[epnum].regs; in musb_gadget_disable()
1106 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_disable()
1107 musb_ep_select(musb->mregs, epnum); in musb_gadget_disable()
1111 musb->intrtxe &= ~(1 << epnum); in musb_gadget_disable()
1112 musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe); in musb_gadget_disable()
1115 musb->intrrxe &= ~(1 << epnum); in musb_gadget_disable()
1116 musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe); in musb_gadget_disable()
1126 schedule_delayed_work(&musb->irq_work, 0); in musb_gadget_disable()
1128 spin_unlock_irqrestore(&(musb->lock), flags); in musb_gadget_disable()
1130 musb_dbg(musb, "%s", musb_ep->end_point.name); in musb_gadget_disable()
1180 void musb_ep_restart(struct musb *musb, struct musb_request *req) in musb_ep_restart() argument
1183 musb_ep_select(musb->mregs, req->epnum); in musb_ep_restart()
1185 txstate(musb, req); in musb_ep_restart()
1187 rxstate(musb, req); in musb_ep_restart()
1190 static int musb_ep_restart_resume_work(struct musb *musb, void *data) in musb_ep_restart_resume_work() argument
1194 musb_ep_restart(musb, req); in musb_ep_restart_resume_work()
1204 struct musb *musb; in musb_gadget_queue() local
1214 musb = musb_ep->musb; in musb_gadget_queue()
1217 request->musb = musb; in musb_gadget_queue()
1222 status = pm_runtime_get(musb->controller); in musb_gadget_queue()
1224 dev_err(musb->controller, in musb_gadget_queue()
1227 pm_runtime_put_noidle(musb->controller); in musb_gadget_queue()
1241 map_dma_buffer(request, musb, musb_ep); in musb_gadget_queue()
1243 spin_lock_irqsave(&musb->lock, lockflags); in musb_gadget_queue()
1247 musb_dbg(musb, "req %p queued to %s while ep %s", in musb_gadget_queue()
1250 unmap_dma_buffer(request, musb); in musb_gadget_queue()
1259 status = musb_queue_resume_work(musb, in musb_gadget_queue()
1263 dev_err(musb->controller, "%s resume work: %i\n", in musb_gadget_queue()
1268 spin_unlock_irqrestore(&musb->lock, lockflags); in musb_gadget_queue()
1269 pm_runtime_mark_last_busy(musb->controller); in musb_gadget_queue()
1270 pm_runtime_put_autosuspend(musb->controller); in musb_gadget_queue()
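musb_gadget_queue() brackets the hardware access with runtime PM: pm_runtime_get() before touching the endpoint, and pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() on the way out so the controller can autosuspend once idle. The generic shape of that pairing, as a sketch (dev stands in for musb->controller; the example_ helper is hypothetical):

	#include <linux/pm_runtime.h>

	static int example_touch_hw(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get(dev);		/* asynchronous resume request */
		if (ret < 0 && ret != -EINPROGRESS) {
			pm_runtime_put_noidle(dev);	/* balance the usage count */
			return ret;
		}

		/* ... program the endpoint registers here ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}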
1282 struct musb *musb = musb_ep->musb; in musb_gadget_dequeue() local
1289 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_dequeue()
1296 dev_err(musb->controller, "request %p not queued to %s\n", in musb_gadget_dequeue()
1308 struct dma_controller *c = musb->dma_controller; in musb_gadget_dequeue()
1310 musb_ep_select(musb->mregs, musb_ep->current_epnum); in musb_gadget_dequeue()
1325 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_dequeue()
1339 struct musb *musb = musb_ep->musb; in musb_gadget_set_halt() local
1340 void __iomem *epio = musb->endpoints[epnum].regs; in musb_gadget_set_halt()
1349 mbase = musb->mregs; in musb_gadget_set_halt()
1351 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_set_halt()
1363 musb_dbg(musb, "request in progress, cannot halt %s", in musb_gadget_set_halt()
1372 musb_dbg(musb, "FIFO busy, cannot halt %s", in musb_gadget_set_halt()
1382 musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear"); in musb_gadget_set_halt()
1409 musb_dbg(musb, "restarting the request"); in musb_gadget_set_halt()
1410 musb_ep_restart(musb, request); in musb_gadget_set_halt()
1414 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_set_halt()
1440 struct musb *musb = musb_ep->musb; in musb_gadget_fifo_status() local
1442 void __iomem *mbase = musb->mregs; in musb_gadget_fifo_status()
1445 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_fifo_status()
1451 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_fifo_status()
1459 struct musb *musb = musb_ep->musb; in musb_gadget_fifo_flush() local
1461 void __iomem *epio = musb->endpoints[epnum].regs; in musb_gadget_fifo_flush()
1466 mbase = musb->mregs; in musb_gadget_fifo_flush()
1468 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_fifo_flush()
1472 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum)); in musb_gadget_fifo_flush()
1496 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); in musb_gadget_fifo_flush()
1497 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_fifo_flush()
1517 struct musb *musb = gadget_to_musb(gadget); in musb_gadget_get_frame() local
1519 return (int)musb_readw(musb->mregs, MUSB_FRAME); in musb_gadget_get_frame()
1524 struct musb *musb = gadget_to_musb(gadget); in musb_gadget_wakeup() local
1525 void __iomem *mregs = musb->mregs; in musb_gadget_wakeup()
1531 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_wakeup()
1533 switch (musb->xceiv->otg->state) { in musb_gadget_wakeup()
1539 if (musb->may_wakeup && musb->is_suspended) in musb_gadget_wakeup()
1545 musb_dbg(musb, "Sending SRP: devctl: %02x", devctl); in musb_gadget_wakeup()
1562 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_wakeup()
1563 otg_start_srp(musb->xceiv->otg); in musb_gadget_wakeup()
1564 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_wakeup()
1567 musb_platform_try_idle(musb, in musb_gadget_wakeup()
1573 musb_dbg(musb, "Unhandled wake: %s", in musb_gadget_wakeup()
1574 usb_otg_state_string(musb->xceiv->otg->state)); in musb_gadget_wakeup()
1583 musb_dbg(musb, "issue wakeup"); in musb_gadget_wakeup()
1592 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_wakeup()
1603 static void musb_pullup(struct musb *musb, int is_on) in musb_pullup() argument
1607 power = musb_readb(musb->mregs, MUSB_POWER); in musb_pullup()
1615 musb_dbg(musb, "gadget D+ pullup %s", in musb_pullup()
1617 musb_writeb(musb->mregs, MUSB_POWER, power); in musb_pullup()
1623 musb_dbg(musb, "<= %s =>\n", __func__);
1636 struct musb *musb = gadget_to_musb(gadget); in musb_gadget_vbus_draw() local
1638 if (!musb->xceiv->set_power) in musb_gadget_vbus_draw()
1640 return usb_phy_set_power(musb->xceiv, mA); in musb_gadget_vbus_draw()
1645 struct musb *musb; in musb_gadget_work() local
1648 musb = container_of(work, struct musb, gadget_work.work); in musb_gadget_work()
1649 pm_runtime_get_sync(musb->controller); in musb_gadget_work()
1650 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_work()
1651 musb_pullup(musb, musb->softconnect); in musb_gadget_work()
1652 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_work()
1653 pm_runtime_mark_last_busy(musb->controller); in musb_gadget_work()
1654 pm_runtime_put_autosuspend(musb->controller); in musb_gadget_work()
1659 struct musb *musb = gadget_to_musb(gadget); in musb_gadget_pullup() local
1667 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_pullup()
1668 if (is_on != musb->softconnect) { in musb_gadget_pullup()
1669 musb->softconnect = is_on; in musb_gadget_pullup()
1670 schedule_delayed_work(&musb->gadget_work, 0); in musb_gadget_pullup()
1672 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_pullup()
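musb_gadget_pullup() may be called in atomic context, so it only records the desired state in musb->softconnect and schedules gadget_work; the work handler then resumes the controller and lets musb_pullup() toggle the MUSB_POWER soft-connect bit. A sketch of that defer-to-work split, using the field and function names from the listing inside hypothetical example_ wrappers:

	#include <linux/pm_runtime.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	/* May run in atomic context: record the state and kick the work item. */
	static int example_pullup(struct musb *musb, int is_on)
	{
		unsigned long flags;

		spin_lock_irqsave(&musb->lock, flags);
		if (is_on != musb->softconnect) {
			musb->softconnect = is_on;
			schedule_delayed_work(&musb->gadget_work, 0);
		}
		spin_unlock_irqrestore(&musb->lock, flags);
		return 0;
	}

	/* Process context: safe to wait for runtime resume before writing MUSB_POWER. */
	static void example_gadget_work(struct work_struct *work)
	{
		struct musb *musb = container_of(work, struct musb, gadget_work.work);
		unsigned long flags;

		pm_runtime_get_sync(musb->controller);
		spin_lock_irqsave(&musb->lock, flags);
		musb_pullup(musb, musb->softconnect);
		spin_unlock_irqrestore(&musb->lock, flags);
		pm_runtime_mark_last_busy(musb->controller);
		pm_runtime_put_autosuspend(musb->controller);
	}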
1702 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) in init_peripheral_ep() argument
1704 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; in init_peripheral_ep()
1709 ep->musb = musb; in init_peripheral_ep()
1724 musb->g.ep0 = &ep->end_point; in init_peripheral_ep()
1734 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); in init_peripheral_ep()
1750 static inline void musb_g_init_endpoints(struct musb *musb) in musb_g_init_endpoints() argument
1757 INIT_LIST_HEAD(&(musb->g.ep_list)); in musb_g_init_endpoints()
1759 for (epnum = 0, hw_ep = musb->endpoints; in musb_g_init_endpoints()
1760 epnum < musb->nr_endpoints; in musb_g_init_endpoints()
1763 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); in musb_g_init_endpoints()
1767 init_peripheral_ep(musb, &hw_ep->ep_in, in musb_g_init_endpoints()
1772 init_peripheral_ep(musb, &hw_ep->ep_out, in musb_g_init_endpoints()
1783 int musb_gadget_setup(struct musb *musb) in musb_gadget_setup() argument
1792 musb->g.ops = &musb_gadget_operations; in musb_gadget_setup()
1793 musb->g.max_speed = USB_SPEED_HIGH; in musb_gadget_setup()
1794 musb->g.speed = USB_SPEED_UNKNOWN; in musb_gadget_setup()
1796 MUSB_DEV_MODE(musb); in musb_gadget_setup()
1797 musb->xceiv->otg->state = OTG_STATE_B_IDLE; in musb_gadget_setup()
1800 musb->g.name = musb_driver_name; in musb_gadget_setup()
1802 musb->g.is_otg = 0; in musb_gadget_setup()
1803 INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work); in musb_gadget_setup()
1804 musb_g_init_endpoints(musb); in musb_gadget_setup()
1806 musb->is_active = 0; in musb_gadget_setup()
1807 musb_platform_try_idle(musb, 0); in musb_gadget_setup()
1809 status = usb_add_gadget_udc(musb->controller, &musb->g); in musb_gadget_setup()
1815 musb->g.dev.parent = NULL; in musb_gadget_setup()
1816 device_unregister(&musb->g.dev); in musb_gadget_setup()
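musb_gadget_setup() fills in the struct usb_gadget (ops, max_speed, name), initializes the endpoint list, and registers the UDC with usb_add_gadget_udc(), unwinding on failure. A minimal registration sketch under those assumptions (generic UDC shape only; the example_ helper and "example-udc" name are hypothetical, not the musb driver's fields):

	#include <linux/usb/gadget.h>

	static int example_udc_register(struct device *parent, struct usb_gadget *g,
					const struct usb_gadget_ops *ops)
	{
		g->ops = ops;
		g->max_speed = USB_SPEED_HIGH;
		g->speed = USB_SPEED_UNKNOWN;
		g->name = "example-udc";

		/* Registers the gadget with the UDC core; returns 0 or a negative errno. */
		return usb_add_gadget_udc(parent, g);
	}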
1820 void musb_gadget_cleanup(struct musb *musb) in musb_gadget_cleanup() argument
1822 if (musb->port_mode == MUSB_HOST) in musb_gadget_cleanup()
1825 cancel_delayed_work_sync(&musb->gadget_work); in musb_gadget_cleanup()
1826 usb_del_gadget_udc(&musb->g); in musb_gadget_cleanup()
1843 struct musb *musb = gadget_to_musb(g); in musb_gadget_start() local
1844 struct usb_otg *otg = musb->xceiv->otg; in musb_gadget_start()
1853 pm_runtime_get_sync(musb->controller); in musb_gadget_start()
1855 musb->softconnect = 0; in musb_gadget_start()
1856 musb->gadget_driver = driver; in musb_gadget_start()
1858 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_start()
1859 musb->is_active = 1; in musb_gadget_start()
1861 otg_set_peripheral(otg, &musb->g); in musb_gadget_start()
1862 musb->xceiv->otg->state = OTG_STATE_B_IDLE; in musb_gadget_start()
1863 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_start()
1865 musb_start(musb); in musb_gadget_start()
1871 if (musb->xceiv->last_event == USB_EVENT_ID) in musb_gadget_start()
1872 musb_platform_set_vbus(musb, 1); in musb_gadget_start()
1874 pm_runtime_mark_last_busy(musb->controller); in musb_gadget_start()
1875 pm_runtime_put_autosuspend(musb->controller); in musb_gadget_start()
1891 struct musb *musb = gadget_to_musb(g); in musb_gadget_stop() local
1894 pm_runtime_get_sync(musb->controller); in musb_gadget_stop()
1901 spin_lock_irqsave(&musb->lock, flags); in musb_gadget_stop()
1903 musb_hnp_stop(musb); in musb_gadget_stop()
1905 (void) musb_gadget_vbus_draw(&musb->g, 0); in musb_gadget_stop()
1907 musb->xceiv->otg->state = OTG_STATE_UNDEFINED; in musb_gadget_stop()
1908 musb_stop(musb); in musb_gadget_stop()
1909 otg_set_peripheral(musb->xceiv->otg, NULL); in musb_gadget_stop()
1911 musb->is_active = 0; in musb_gadget_stop()
1912 musb->gadget_driver = NULL; in musb_gadget_stop()
1913 musb_platform_try_idle(musb, 0); in musb_gadget_stop()
1914 spin_unlock_irqrestore(&musb->lock, flags); in musb_gadget_stop()
1923 schedule_delayed_work(&musb->irq_work, 0); in musb_gadget_stop()
1925 pm_runtime_mark_last_busy(musb->controller); in musb_gadget_stop()
1926 pm_runtime_put_autosuspend(musb->controller); in musb_gadget_stop()
1935 void musb_g_resume(struct musb *musb) in musb_g_resume() argument
1937 musb->is_suspended = 0; in musb_g_resume()
1938 switch (musb->xceiv->otg->state) { in musb_g_resume()
1943 musb->is_active = 1; in musb_g_resume()
1944 if (musb->gadget_driver && musb->gadget_driver->resume) { in musb_g_resume()
1945 spin_unlock(&musb->lock); in musb_g_resume()
1946 musb->gadget_driver->resume(&musb->g); in musb_g_resume()
1947 spin_lock(&musb->lock); in musb_g_resume()
1952 usb_otg_state_string(musb->xceiv->otg->state)); in musb_g_resume()
1957 void musb_g_suspend(struct musb *musb) in musb_g_suspend() argument
1961 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); in musb_g_suspend()
1962 musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl); in musb_g_suspend()
1964 switch (musb->xceiv->otg->state) { in musb_g_suspend()
1967 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; in musb_g_suspend()
1970 musb->is_suspended = 1; in musb_g_suspend()
1971 if (musb->gadget_driver && musb->gadget_driver->suspend) { in musb_g_suspend()
1972 spin_unlock(&musb->lock); in musb_g_suspend()
1973 musb->gadget_driver->suspend(&musb->g); in musb_g_suspend()
1974 spin_lock(&musb->lock); in musb_g_suspend()
1982 usb_otg_state_string(musb->xceiv->otg->state)); in musb_g_suspend()
1987 void musb_g_wakeup(struct musb *musb) in musb_g_wakeup() argument
1989 musb_gadget_wakeup(&musb->g); in musb_g_wakeup()
1993 void musb_g_disconnect(struct musb *musb) in musb_g_disconnect() argument
1995 void __iomem *mregs = musb->mregs; in musb_g_disconnect()
1998 musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl); in musb_g_disconnect()
2004 (void) musb_gadget_vbus_draw(&musb->g, 0); in musb_g_disconnect()
2006 musb->g.speed = USB_SPEED_UNKNOWN; in musb_g_disconnect()
2007 if (musb->gadget_driver && musb->gadget_driver->disconnect) { in musb_g_disconnect()
2008 spin_unlock(&musb->lock); in musb_g_disconnect()
2009 musb->gadget_driver->disconnect(&musb->g); in musb_g_disconnect()
2010 spin_lock(&musb->lock); in musb_g_disconnect()
2013 switch (musb->xceiv->otg->state) { in musb_g_disconnect()
2015 musb_dbg(musb, "Unhandled disconnect %s, setting a_idle", in musb_g_disconnect()
2016 usb_otg_state_string(musb->xceiv->otg->state)); in musb_g_disconnect()
2017 musb->xceiv->otg->state = OTG_STATE_A_IDLE; in musb_g_disconnect()
2018 MUSB_HST_MODE(musb); in musb_g_disconnect()
2021 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; in musb_g_disconnect()
2022 MUSB_HST_MODE(musb); in musb_g_disconnect()
2028 musb->xceiv->otg->state = OTG_STATE_B_IDLE; in musb_g_disconnect()
2034 musb->is_active = 0; in musb_g_disconnect()
2037 void musb_g_reset(struct musb *musb) in musb_g_reset() argument
2038 __releases(musb->lock) in musb_g_reset()
2039 __acquires(musb->lock) in musb_g_reset()
2041 void __iomem *mbase = musb->mregs; in musb_g_reset()
2045 musb_dbg(musb, "<== %s driver '%s'", in musb_g_reset()
2048 musb->gadget_driver in musb_g_reset()
2049 ? musb->gadget_driver->driver.name in musb_g_reset()
2054 if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) { in musb_g_reset()
2055 spin_unlock(&musb->lock); in musb_g_reset()
2056 usb_gadget_udc_reset(&musb->g, musb->gadget_driver); in musb_g_reset()
2057 spin_lock(&musb->lock); in musb_g_reset()
2067 musb->g.speed = (power & MUSB_POWER_HSMODE) in musb_g_reset()
2071 musb->is_active = 1; in musb_g_reset()
2072 musb->is_suspended = 0; in musb_g_reset()
2073 MUSB_DEV_MODE(musb); in musb_g_reset()
2074 musb->address = 0; in musb_g_reset()
2075 musb->ep0_state = MUSB_EP0_STAGE_SETUP; in musb_g_reset()
2077 musb->may_wakeup = 0; in musb_g_reset()
2078 musb->g.b_hnp_enable = 0; in musb_g_reset()
2079 musb->g.a_alt_hnp_support = 0; in musb_g_reset()
2080 musb->g.a_hnp_support = 0; in musb_g_reset()
2081 musb->g.quirk_zlp_not_supp = 1; in musb_g_reset()
2086 if (!musb->g.is_otg) { in musb_g_reset()
2092 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; in musb_g_reset()
2093 musb->g.is_a_peripheral = 0; in musb_g_reset()
2095 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; in musb_g_reset()
2096 musb->g.is_a_peripheral = 0; in musb_g_reset()
2098 musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL; in musb_g_reset()
2099 musb->g.is_a_peripheral = 1; in musb_g_reset()
2103 (void) musb_gadget_vbus_draw(&musb->g, 8); in musb_g_reset()