Lines matching references to hwep. Each entry shows the original source line number, the matching code, and the enclosing function; "argument"/"local" flags the line where hwep is declared in that function.

340 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,  in add_td_to_list()  argument
351 node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma); in add_td_to_list()
360 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) { in add_td_to_list()
361 u32 mul = hwreq->req.length / hwep->ep.maxpacket; in add_td_to_list()
364 || hwreq->req.length % hwep->ep.maxpacket) in add_td_to_list()
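
Lines 360-364 size the high-bandwidth isochronous multiplier for a TX (IN) endpoint: the number of maxpacket transactions the request needs per service interval, rounded up (the same computation recurs for RX at lines 500-504, where it lands in the queue head's MULT field). A minimal userspace sketch of the arithmetic, with stand-in names rather than the driver's types:

    #include <stdio.h>

    /* transactions per (micro)frame: length / maxpacket, rounded up;
     * a zero-length request still costs one transaction */
    static unsigned int isoc_mult(unsigned int length, unsigned int maxpacket)
    {
        unsigned int mul = length / maxpacket;

        if (length == 0 || length % maxpacket)
            mul++;
        return mul;
    }

    int main(void)
    {
        printf("%u\n", isoc_mult(3000, 1024));  /* -> 3 */
        printf("%u\n", isoc_mult(0, 1024));     /* -> 1 */
        return 0;
    }
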
410 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) in _hardware_enqueue() argument
412 struct ci_hdrc *ci = hwep->ci; in _hardware_enqueue()
425 &hwreq->req, hwep->dir); in _hardware_enqueue()
437 ret = add_td_to_list(hwep, hwreq, 0); in _hardware_enqueue()
445 ret = add_td_to_list(hwep, hwreq, count); in _hardware_enqueue()
452 if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX in _hardware_enqueue()
453 && (hwreq->req.length % hwep->ep.maxpacket == 0)) { in _hardware_enqueue()
454 ret = add_td_to_list(hwep, hwreq, 0); in _hardware_enqueue()
470 if (!list_empty(&hwep->qh.queue)) { in _hardware_enqueue()
472 int n = hw_ep_bit(hwep->num, hwep->dir); in _hardware_enqueue()
477 hwreqprev = list_entry(hwep->qh.queue.prev, in _hardware_enqueue()
496 hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma); in _hardware_enqueue()
497 hwep->qh.ptr->td.token &= in _hardware_enqueue()
500 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) { in _hardware_enqueue()
501 u32 mul = hwreq->req.length / hwep->ep.maxpacket; in _hardware_enqueue()
504 || hwreq->req.length % hwep->ep.maxpacket) in _hardware_enqueue()
506 hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT)); in _hardware_enqueue()
509 ret = hw_ep_prime(ci, hwep->num, hwep->dir, in _hardware_enqueue()
510 hwep->type == USB_ENDPOINT_XFER_CONTROL); in _hardware_enqueue()
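
The condition at lines 452-454 implements the request's "zero" flag: a non-empty TX (IN) transfer whose length is an exact multiple of maxpacket gets one extra zero-length TD appended, so the host sees a short packet that terminates the transfer. The predicate in isolation (stand-in names, not the driver's):

    /* true when a terminating zero-length packet must follow the data */
    static int needs_zlp(int zero_flag, unsigned int length,
                         unsigned int maxpacket)
    {
        return zero_flag && length && (length % maxpacket == 0);
    }
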
519 static void free_pending_td(struct ci_hw_ep *hwep) in free_pending_td() argument
521 struct td_node *pending = hwep->pending_td; in free_pending_td()
523 dma_pool_free(hwep->td_pool, pending->ptr, pending->dma); in free_pending_td()
524 hwep->pending_td = NULL; in free_pending_td()
528 static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep, in reprime_dtd() argument
531 hwep->qh.ptr->td.next = cpu_to_le32(node->dma); in reprime_dtd()
532 hwep->qh.ptr->td.token &= in reprime_dtd()
535 return hw_ep_prime(ci, hwep->num, hwep->dir, in reprime_dtd()
536 hwep->type == USB_ENDPOINT_XFER_CONTROL); in reprime_dtd()
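
Lines 496-497/509-510 and reprime_dtd() (531-536) perform the same handshake: point the endpoint queue head's transfer overlay at the first TD, clear the HALTED and ACTIVE status bits, and prime the endpoint so the controller fetches the new descriptor. A sketch of that sequence under simplified assumptions — flat uint32_t fields instead of the little-endian dQH, a stub in place of hw_ep_prime(), and illustrative bit positions:

    #include <stdint.h>

    #define TD_STATUS_ACTIVE    (1u << 7)   /* illustrative bit positions */
    #define TD_STATUS_HALTED    (1u << 6)

    struct qh_overlay {
        uint32_t td_next;   /* next-TD pointer the controller fetches */
        uint32_t td_token;  /* status/control word */
    };

    static int hw_prime_stub(int num, int dir, int is_ctrl)
    {
        (void)num; (void)dir; (void)is_ctrl;
        return 0;           /* stand-in for hw_ep_prime() */
    }

    /* mirrors the reload-then-prime shape; the driver additionally
     * byte-swaps with cpu_to_le32() on big-endian hosts */
    static int reload_and_prime(struct qh_overlay *qh, uint32_t first_td_dma,
                                int num, int dir, int is_ctrl)
    {
        qh->td_next = first_td_dma;
        qh->td_token &= ~(TD_STATUS_HALTED | TD_STATUS_ACTIVE);
        return hw_prime_stub(num, dir, is_ctrl);
    }
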
546 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) in _hardware_dequeue() argument
552 struct ci_hdrc *ci = hwep->ci; in _hardware_dequeue()
562 int n = hw_ep_bit(hwep->num, hwep->dir); in _hardware_dequeue()
566 reprime_dtd(ci, hwep, node); in _hardware_dequeue()
588 if (hwep->dir == TX) { in _hardware_dequeue()
598 if (hwep->pending_td) in _hardware_dequeue()
599 free_pending_td(hwep); in _hardware_dequeue()
601 hwep->pending_td = node; in _hardware_dequeue()
605 usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent, in _hardware_dequeue()
606 &hwreq->req, hwep->dir); in _hardware_dequeue()
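
free_pending_td() (519-524) and its call sites (598-601, 656-657, 1748-1749) implement a free-delayed-by-one scheme: the controller may still be fetching from the TD it just retired, so a completed TD is parked in hwep->pending_td and only the previously parked one is returned to the DMA pool. The shape of the pattern, with free() standing in for dma_pool_free():

    #include <stdlib.h>

    struct td { int payload; };

    struct ep_state {
        struct td *pending_td;  /* last retired TD, not yet freed */
    };

    /* park 'done'; free the TD parked on the previous completion, so
     * the hardware never observes a descriptor that was just freed */
    static void retire_td(struct ep_state *ep, struct td *done)
    {
        if (ep->pending_td)
            free(ep->pending_td);   /* dma_pool_free() in the driver */
        ep->pending_td = done;
    }
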
623 static int _ep_nuke(struct ci_hw_ep *hwep) in _ep_nuke() argument
624 __releases(hwep->lock) in _ep_nuke()
625 __acquires(hwep->lock) in _ep_nuke()
628 if (hwep == NULL) in _ep_nuke()
631 hw_ep_flush(hwep->ci, hwep->num, hwep->dir); in _ep_nuke()
633 while (!list_empty(&hwep->qh.queue)) { in _ep_nuke()
636 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next, in _ep_nuke()
640 dma_pool_free(hwep->td_pool, node->ptr, node->dma); in _ep_nuke()
650 spin_unlock(hwep->lock); in _ep_nuke()
651 usb_gadget_giveback_request(&hwep->ep, &hwreq->req); in _ep_nuke()
652 spin_lock(hwep->lock); in _ep_nuke()
656 if (hwep->pending_td) in _ep_nuke()
657 free_pending_td(hwep); in _ep_nuke()
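
_ep_nuke() drops hwep->lock around usb_gadget_giveback_request() (650-652), as do ep_dequeue (1443-1445) and isr_get_status_response (869-871, around the allocation): the gadget's completion callback may re-enter the driver, for example to queue another request, and ep_queue() takes the same lock. A pthread sketch of the pattern (hypothetical names):

    #include <pthread.h>

    struct ep {
        pthread_mutex_t lock;
        void (*complete)(struct ep *ep);    /* gadget completion callback */
    };

    /* the callback may call back into code that takes ep->lock, so it
     * runs with the lock released; the lock is re-taken afterwards */
    static void giveback(struct ep *ep)
    {
        pthread_mutex_unlock(&ep->lock);
        ep->complete(ep);
        pthread_mutex_lock(&ep->lock);
    }
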
664 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in _ep_set_halt() local
668 if (ep == NULL || hwep->ep.desc == NULL) in _ep_set_halt()
671 if (usb_endpoint_xfer_isoc(hwep->ep.desc)) in _ep_set_halt()
674 spin_lock_irqsave(hwep->lock, flags); in _ep_set_halt()
676 if (value && hwep->dir == TX && check_transfer && in _ep_set_halt()
677 !list_empty(&hwep->qh.queue) && in _ep_set_halt()
678 !usb_endpoint_xfer_control(hwep->ep.desc)) { in _ep_set_halt()
679 spin_unlock_irqrestore(hwep->lock, flags); in _ep_set_halt()
683 direction = hwep->dir; in _ep_set_halt()
685 retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value); in _ep_set_halt()
688 hwep->wedge = 0; in _ep_set_halt()
690 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) in _ep_set_halt()
691 hwep->dir = (hwep->dir == TX) ? RX : TX; in _ep_set_halt()
693 } while (hwep->dir != direction); in _ep_set_halt()
695 spin_unlock_irqrestore(hwep->lock, flags); in _ep_set_halt()
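
The do/while at 683-693 (mirrored in ep_disable, 1313-1321) accounts for ep0 being two hardware endpoints: the operation is applied once, the direction is flipped for control endpoints, and the loop stops when dir is back at its starting value — one pass for ordinary endpoints, two for control. In isolation, with stand-in types:

    enum dir { RX, TX };

    struct hwep_min {
        int is_ctrl;    /* true for USB_ENDPOINT_XFER_CONTROL */
        enum dir dir;
    };

    /* apply 'op' once, or once per direction for a control endpoint */
    static int apply_both_dirs(struct hwep_min *ep,
                               int (*op)(struct hwep_min *))
    {
        enum dir start = ep->dir;
        int ret = 0;

        do {
            ret |= op(ep);
            if (ep->is_ctrl)
                ep->dir = (ep->dir == TX) ? RX : TX;
        } while (ep->dir != start);

        return ret;
    }
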
804 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in _ep_queue() local
806 struct ci_hdrc *ci = hwep->ci; in _ep_queue()
809 if (ep == NULL || req == NULL || hwep->ep.desc == NULL) in _ep_queue()
812 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) { in _ep_queue()
814 hwep = (ci->ep0_dir == RX) ? in _ep_queue()
816 if (!list_empty(&hwep->qh.queue)) { in _ep_queue()
817 _ep_nuke(hwep); in _ep_queue()
818 dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n", in _ep_queue()
819 _usb_addr(hwep)); in _ep_queue()
823 if (usb_endpoint_xfer_isoc(hwep->ep.desc) && in _ep_queue()
824 hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) { in _ep_queue()
825 dev_err(hwep->ci->dev, "request length too big for isochronous\n"); in _ep_queue()
831 dev_err(hwep->ci->dev, "request already in queue\n"); in _ep_queue()
839 retval = _hardware_enqueue(hwep, hwreq); in _ep_queue()
844 list_add_tail(&hwreq->queue, &hwep->qh.queue); in _ep_queue()
858 __releases(hwep->lock) in isr_get_status_response()
859 __acquires(hwep->lock) in isr_get_status_response()
861 struct ci_hw_ep *hwep = ci->ep0in; in isr_get_status_response() local
866 if (hwep == NULL || setup == NULL) in isr_get_status_response()
869 spin_unlock(hwep->lock); in isr_get_status_response()
870 req = usb_ep_alloc_request(&hwep->ep, gfp_flags); in isr_get_status_response()
871 spin_lock(hwep->lock); in isr_get_status_response()
895 retval = _ep_queue(&hwep->ep, req, gfp_flags); in isr_get_status_response()
904 spin_unlock(hwep->lock); in isr_get_status_response()
905 usb_ep_free_request(&hwep->ep, req); in isr_get_status_response()
906 spin_lock(hwep->lock); in isr_get_status_response()
945 struct ci_hw_ep *hwep; in isr_setup_status_phase() local
956 hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in; in isr_setup_status_phase()
960 return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC); in isr_setup_status_phase()
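
Two sites pick which hardware endpoint backs ep0. _ep_queue (line 814) selects by data-stage direction: ep0_dir == RX means host-to-device data, handled on ep0out. isr_setup_status_phase (line 956) inverts the test because the status stage always runs opposite to the data stage: after IN data (ep0_dir == TX) the status handshake is a zero-length OUT. A self-contained sketch of both selectors:

    enum dir { RX, TX };
    struct hwep;    /* opaque here */

    struct ep0_pair {
        struct hwep *ep0in, *ep0out;
        enum dir ep0_dir;   /* direction of the current data stage */
    };

    /* data stage: use the endpoint matching ep0_dir (line 814) */
    static struct hwep *ep0_for_data(const struct ep0_pair *ci)
    {
        return (ci->ep0_dir == RX) ? ci->ep0out : ci->ep0in;
    }

    /* status stage: always the opposite direction (line 956) */
    static struct hwep *ep0_for_status(const struct ep0_pair *ci)
    {
        return (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
    }
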
970 static int isr_tr_complete_low(struct ci_hw_ep *hwep) in isr_tr_complete_low() argument
971 __releases(hwep->lock) in isr_tr_complete_low()
972 __acquires(hwep->lock) in isr_tr_complete_low()
975 struct ci_hw_ep *hweptemp = hwep; in isr_tr_complete_low()
978 list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue, in isr_tr_complete_low()
980 retval = _hardware_dequeue(hwep, hwreq); in isr_tr_complete_low()
985 spin_unlock(hwep->lock); in isr_tr_complete_low()
986 if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) && in isr_tr_complete_low()
988 hweptemp = hwep->ci->ep0in; in isr_tr_complete_low()
990 spin_lock(hwep->lock); in isr_tr_complete_low()
1017 struct ci_hw_ep *hwep = &ci->ci_hw_ep[0]; in isr_setup_packet_handler() local
1032 memcpy(&req, &hwep->qh.ptr->setup, sizeof(req)); in isr_setup_packet_handler()
1171 if (_ep_set_halt(&hwep->ep, 1, false)) in isr_setup_packet_handler()
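
isr_setup_packet_handler() copies the SETUP packet out of ep0's queue head (line 1032): the controller latches the 8 setup bytes into the dQH, and the driver snapshots them into a struct usb_ctrlrequest before acting on them. A sketch with a stand-in layout (wire values are little-endian on the bus):

    #include <stdint.h>
    #include <string.h>

    /* 8-byte SETUP packet, laid out as in ch9's usb_ctrlrequest */
    struct ctrlrequest {
        uint8_t  bRequestType;
        uint8_t  bRequest;
        uint16_t wValue;
        uint16_t wIndex;
        uint16_t wLength;
    } __attribute__((packed));

    struct qh_min {
        uint8_t setup[8];   /* latched by hardware on SETUP */
    };

    /* snapshot the packet before a new SETUP can overwrite the dQH copy */
    static void read_setup(const struct qh_min *qh, struct ctrlrequest *req)
    {
        memcpy(req, qh->setup, sizeof(*req));
    }
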
1191 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i]; in isr_tr_complete_handler() local
1193 if (hwep->ep.desc == NULL) in isr_tr_complete_handler()
1197 err = isr_tr_complete_low(hwep); in isr_tr_complete_handler()
1198 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) { in isr_tr_complete_handler()
1203 if (_ep_set_halt(&hwep->ep, 1, false)) in isr_tr_complete_handler()
1229 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in ep_enable() local
1237 spin_lock_irqsave(hwep->lock, flags); in ep_enable()
1241 if (!list_empty(&hwep->qh.queue)) { in ep_enable()
1242 dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n"); in ep_enable()
1243 spin_unlock_irqrestore(hwep->lock, flags); in ep_enable()
1247 hwep->ep.desc = desc; in ep_enable()
1249 hwep->dir = usb_endpoint_dir_in(desc) ? TX : RX; in ep_enable()
1250 hwep->num = usb_endpoint_num(desc); in ep_enable()
1251 hwep->type = usb_endpoint_type(desc); in ep_enable()
1253 hwep->ep.maxpacket = usb_endpoint_maxp(desc); in ep_enable()
1254 hwep->ep.mult = usb_endpoint_maxp_mult(desc); in ep_enable()
1256 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) in ep_enable()
1260 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; in ep_enable()
1265 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) in ep_enable()
1268 hwep->qh.ptr->cap = cpu_to_le32(cap); in ep_enable()
1270 hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */ in ep_enable()
1272 if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) { in ep_enable()
1273 dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n"); in ep_enable()
1281 if (hwep->num) in ep_enable()
1282 retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir, in ep_enable()
1283 hwep->type); in ep_enable()
1285 spin_unlock_irqrestore(hwep->lock, flags); in ep_enable()
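
ep_enable() assembles the queue-head capabilities word (1253-1268): maxpacket is shifted into the QH_MAX_PKT field using __ffs(mask) as the shift and the mask as the clamp, with further bits (ZLT, IOS, MULT) OR'd in for control and isochronous endpoints. A userspace rendering of the field-insertion idiom — the mask value below is an assumption for illustration, not quoted from the driver's header:

    #include <stdint.h>
    #include <stdio.h>

    #define QH_MAX_PKT  (0x7ffu << 16) /* assumed: 11-bit field at bit 16 */

    /* (val << __ffs(mask)) & mask, as in the driver; __builtin_ctz is
     * the userspace equivalent of __ffs for a non-zero mask */
    #define FIELD_PUT(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (uint32_t)(mask))

    int main(void)
    {
        uint32_t cap = 0;

        cap |= FIELD_PUT(QH_MAX_PKT, 512);  /* bulk maxpacket */
        printf("cap = 0x%08x\n", cap);      /* 0x02000000 */
        return 0;
    }
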
1296 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in ep_disable() local
1302 else if (hwep->ep.desc == NULL) in ep_disable()
1305 spin_lock_irqsave(hwep->lock, flags); in ep_disable()
1306 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { in ep_disable()
1307 spin_unlock_irqrestore(hwep->lock, flags); in ep_disable()
1313 direction = hwep->dir; in ep_disable()
1315 retval |= _ep_nuke(hwep); in ep_disable()
1316 retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir); in ep_disable()
1318 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) in ep_disable()
1319 hwep->dir = (hwep->dir == TX) ? RX : TX; in ep_disable()
1321 } while (hwep->dir != direction); in ep_disable()
1323 hwep->ep.desc = NULL; in ep_disable()
1325 spin_unlock_irqrestore(hwep->lock, flags); in ep_disable()
1357 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in ep_free_request() local
1365 dev_err(hwep->ci->dev, "freeing queued request\n"); in ep_free_request()
1369 spin_lock_irqsave(hwep->lock, flags); in ep_free_request()
1372 dma_pool_free(hwep->td_pool, node->ptr, node->dma); in ep_free_request()
1380 spin_unlock_irqrestore(hwep->lock, flags); in ep_free_request()
1391 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in ep_queue() local
1395 if (ep == NULL || req == NULL || hwep->ep.desc == NULL) in ep_queue()
1398 spin_lock_irqsave(hwep->lock, flags); in ep_queue()
1399 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { in ep_queue()
1400 spin_unlock_irqrestore(hwep->lock, flags); in ep_queue()
1404 spin_unlock_irqrestore(hwep->lock, flags); in ep_queue()
1415 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in ep_dequeue() local
1421 hwep->ep.desc == NULL || list_empty(&hwreq->queue) || in ep_dequeue()
1422 list_empty(&hwep->qh.queue)) in ep_dequeue()
1425 spin_lock_irqsave(hwep->lock, flags); in ep_dequeue()
1426 if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN) in ep_dequeue()
1427 hw_ep_flush(hwep->ci, hwep->num, hwep->dir); in ep_dequeue()
1430 dma_pool_free(hwep->td_pool, node->ptr, node->dma); in ep_dequeue()
1438 usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir); in ep_dequeue()
1443 spin_unlock(hwep->lock); in ep_dequeue()
1444 usb_gadget_giveback_request(&hwep->ep, &hwreq->req); in ep_dequeue()
1445 spin_lock(hwep->lock); in ep_dequeue()
1448 spin_unlock_irqrestore(hwep->lock, flags); in ep_dequeue()
1469 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in ep_set_wedge() local
1472 if (ep == NULL || hwep->ep.desc == NULL) in ep_set_wedge()
1475 spin_lock_irqsave(hwep->lock, flags); in ep_set_wedge()
1476 hwep->wedge = 1; in ep_set_wedge()
1477 spin_unlock_irqrestore(hwep->lock, flags); in ep_set_wedge()
1489 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); in ep_fifo_flush() local
1493 dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep)); in ep_fifo_flush()
1497 spin_lock_irqsave(hwep->lock, flags); in ep_fifo_flush()
1498 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { in ep_fifo_flush()
1499 spin_unlock_irqrestore(hwep->lock, flags); in ep_fifo_flush()
1503 hw_ep_flush(hwep->ci, hwep->num, hwep->dir); in ep_fifo_flush()
1505 spin_unlock_irqrestore(hwep->lock, flags); in ep_fifo_flush()
1604 struct ci_hw_ep *hwep = ci->ep0in; in ci_udc_selfpowered() local
1607 spin_lock_irqsave(hwep->lock, flags); in ci_udc_selfpowered()
1609 spin_unlock_irqrestore(hwep->lock, flags); in ci_udc_selfpowered()
1683 struct ci_hw_ep *hwep = &ci->ci_hw_ep[k]; in init_eps() local
1685 scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i, in init_eps()
1688 hwep->ci = ci; in init_eps()
1689 hwep->lock = &ci->lock; in init_eps()
1690 hwep->td_pool = ci->td_pool; in init_eps()
1692 hwep->ep.name = hwep->name; in init_eps()
1693 hwep->ep.ops = &usb_ep_ops; in init_eps()
1696 hwep->ep.caps.type_control = true; in init_eps()
1698 hwep->ep.caps.type_iso = true; in init_eps()
1699 hwep->ep.caps.type_bulk = true; in init_eps()
1700 hwep->ep.caps.type_int = true; in init_eps()
1704 hwep->ep.caps.dir_in = true; in init_eps()
1706 hwep->ep.caps.dir_out = true; in init_eps()
1713 usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0); in init_eps()
1715 INIT_LIST_HEAD(&hwep->qh.queue); in init_eps()
1716 hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL, in init_eps()
1717 &hwep->qh.dma); in init_eps()
1718 if (hwep->qh.ptr == NULL) in init_eps()
1727 ci->ep0out = hwep; in init_eps()
1729 ci->ep0in = hwep; in init_eps()
1731 usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX); in init_eps()
1735 list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list); in init_eps()
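
init_eps() (1683-1735) walks endpoints x directions: each ci_hw_ep gets a name ("ep0out", "ep0in", "ep1out", ...), the shared lock and TD pool, a queue head from the QH dma_pool, and capability flags (ep0 control-only with a CTRL_PAYLOAD_MAX packet limit, the rest iso/bulk/int). The naming part of the loop, sketched with assumed constants and direction encoding:

    #include <stdio.h>

    #define NUM_EP  8       /* assumed; the driver uses hw_ep_max / 2 */
    enum dir { RX, TX };    /* assumed encoding: the driver loops RX..TX */

    int main(void)
    {
        char name[16];
        int i, j;

        for (i = 0; i < NUM_EP; i++)
            for (j = RX; j <= TX; j++) {
                snprintf(name, sizeof(name), "ep%i%s", i,
                         (j == TX) ? "in" : "out");
                puts(name); /* ep0out, ep0in, ep1out, ... */
            }
        return 0;
    }
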
1746 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i]; in destroy_eps() local
1748 if (hwep->pending_td) in destroy_eps()
1749 free_pending_td(hwep); in destroy_eps()
1750 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma); in destroy_eps()