Lines matching refs: hwreq
(All matches are in the ChipIdea UDC gadget code; hwreq is the driver's per-request wrapper, struct ci_hw_req, around struct usb_request, as the container_of() calls below show.)
355 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq, in add_td_to_list() argument
376 u32 mul = hwreq->req.length / hwep->ep.maxpacket; in add_td_to_list()
378 if (hwreq->req.length == 0 in add_td_to_list()
379 || hwreq->req.length % hwep->ep.maxpacket) in add_td_to_list()
385 temp = (u32) (sg_dma_address(s) + hwreq->req.actual); in add_td_to_list()
388 temp = (u32) (hwreq->req.dma + hwreq->req.actual); in add_td_to_list()
400 hwreq->req.actual += length; in add_td_to_list()
402 if (!list_empty(&hwreq->tds)) { in add_td_to_list()
404 lastnode = list_entry(hwreq->tds.prev, in add_td_to_list()
410 list_add_tail(&node->td, &hwreq->tds); in add_td_to_list()
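Read together, the add_td_to_list() matches show the TD-builder flow: compute the isochronous multiplier for TX ISO endpoints, pick the DMA source (scatterlist entry or the request's own mapped buffer) offset by req.actual, advance req.actual by the chunk length, hardware-chain the new TD behind the previous one, and append it to hwreq->tds. A reconstructed sketch follows; the allocation, the token/page programming, and field names such as node->dma and node->ptr->next are assumptions filled in from context, not verbatim source:

  static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
                            unsigned int length, struct scatterlist *s)
  {
      struct td_node *lastnode, *node;
      u32 temp;

      node = kzalloc(sizeof(*node), GFP_ATOMIC);    /* assumed allocation */
      if (!node)
          return -ENOMEM;
      /* ... node->ptr, the hardware TD, comes from a dma_pool (elided) ... */

      if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
          u32 mul = hwreq->req.length / hwep->ep.maxpacket;

          if (hwreq->req.length == 0
                  || hwreq->req.length % hwep->ep.maxpacket)
              mul++;    /* a short or zero-length tail costs one extra packet */
          /* ... mul is programmed into the TD token (elided) ... */
      }

      if (s)    /* scatter-gather entry supplies the DMA address */
          temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
      else      /* linearly mapped request buffer */
          temp = (u32) (hwreq->req.dma + hwreq->req.actual);
      /* ... temp seeds the TD's page pointers (elided) ... */

      hwreq->req.actual += length;    /* actual is reused as a build cursor */

      if (!list_empty(&hwreq->tds)) {
          /* chain the new TD behind the last one queued so far */
          lastnode = list_entry(hwreq->tds.prev, struct td_node, td);
          lastnode->ptr->next = cpu_to_le32(node->dma);    /* assumed fields */
      }

      list_add_tail(&node->td, &hwreq->tds);
      return 0;
  }

Note that req.actual is not transfer progress at this point: it is a cursor into the buffer while TDs are being built, and _hardware_enqueue() resets it to 0 (line 587) once the chain is complete.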
425 struct ci_hw_req *hwreq) in prepare_td_for_non_sg() argument
427 unsigned int rest = hwreq->req.length; in prepare_td_for_non_sg()
432 ret = add_td_to_list(hwep, hwreq, 0, NULL); in prepare_td_for_non_sg()
441 if (hwreq->req.dma % PAGE_SIZE) in prepare_td_for_non_sg()
445 unsigned int count = min(hwreq->req.length - hwreq->req.actual, in prepare_td_for_non_sg()
448 ret = add_td_to_list(hwep, hwreq, count, NULL); in prepare_td_for_non_sg()
455 if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX in prepare_td_for_non_sg()
456 && (hwreq->req.length % hwep->ep.maxpacket == 0)) { in prepare_td_for_non_sg()
457 ret = add_td_to_list(hwep, hwreq, 0, NULL); in prepare_td_for_non_sg()
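prepare_td_for_non_sg() slices a linear buffer into TDs. The matches show four cases: a zero-length request still gets one TD (line 432), an unaligned DMA start reduces the first TD's usable page span (line 441), the main loop emits capacity-sized chunks (lines 445-448), and a TX transfer with req.zero set that ends exactly on a packet boundary gets a trailing zero-length TD (lines 455-457). A sketch under those assumptions; TD_PAGE_COUNT and CI_HDRC_PAGE_SIZE are assumed capacity constants from the driver's headers:

  static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
          struct ci_hw_req *hwreq)
  {
      unsigned int rest = hwreq->req.length;
      int pages = TD_PAGE_COUNT;    /* assumed per-TD page budget */
      int ret;

      if (rest == 0) {
          ret = add_td_to_list(hwep, hwreq, 0, NULL);    /* ZLP-only request */
          if (ret < 0)
              return ret;
      }

      /* an unaligned first buffer burns part of the first TD's page list */
      if (hwreq->req.dma % PAGE_SIZE)
          pages--;

      while (rest > 0) {
          unsigned int count = min(hwreq->req.length - hwreq->req.actual,
                                   (unsigned int)(pages * CI_HDRC_PAGE_SIZE));

          ret = add_td_to_list(hwep, hwreq, count, NULL);
          if (ret < 0)
              return ret;
          rest -= count;
      }

      /* explicit ZLP when a TX transfer ends exactly on a packet boundary */
      if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
              && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
          ret = add_td_to_list(hwep, hwreq, 0, NULL);
          if (ret < 0)
              return ret;
      }
      return 0;
  }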
465 static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq, in prepare_td_per_sg() argument
471 hwreq->req.actual = 0; in prepare_td_per_sg()
476 ret = add_td_to_list(hwep, hwreq, count, s); in prepare_td_per_sg()
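prepare_td_per_sg() does the same slicing for one scatterlist entry. The reset of req.actual at line 471 matters: add_td_to_list() adds req.actual to sg_dma_address(s) (line 385), so the cursor must restart at zero for every entry. A minimal sketch; CI_MAX_TD_BYTES is a hypothetical name standing in for whatever per-TD limit the driver actually uses:

  static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
          struct scatterlist *s)
  {
      unsigned int rest = sg_dma_len(s);
      int ret = 0;

      hwreq->req.actual = 0;    /* cursor restarts per sg entry */
      while (rest > 0) {
          unsigned int count = min_t(unsigned int, rest, CI_MAX_TD_BYTES);

          ret = add_td_to_list(hwep, hwreq, count, s);
          if (ret)
              return ret;
          rest -= count;
      }
      return ret;
  }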
505 static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) in prepare_td_for_sg() argument
507 struct usb_request *req = &hwreq->req; in prepare_td_for_sg()
527 ret = prepare_td_per_sg(hwep, hwreq, s); in prepare_td_for_sg()
531 node = list_entry(hwreq->tds.prev, in prepare_td_for_sg()
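prepare_td_for_sg() walks the mapped scatterlist and delegates each entry to prepare_td_per_sg(); the list_entry() at line 531 fetches the most recently appended td_node, presumably so per-entry chaining can be patched up. A sketch, with the iteration style itself an assumption:

  static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
  {
      struct usb_request *req = &hwreq->req;
      struct scatterlist *s;
      struct td_node *node;
      int ret, i;

      for_each_sg(req->sg, s, req->num_mapped_sgs, i) {   /* assumed iteration */
          /* ... alignment checks on sg_dma_address(s) (elided) ... */
          ret = prepare_td_per_sg(hwep, hwreq, s);
          if (ret)
              return ret;

          /* last TD emitted for this entry, kept for chaining fix-ups */
          node = list_entry(hwreq->tds.prev, struct td_node, td);
          (void)node;    /* fix-ups elided in this sketch */
      }
      return 0;
  }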
548 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) in _hardware_enqueue() argument
555 if (hwreq->req.status == -EALREADY) in _hardware_enqueue()
558 hwreq->req.status = -EALREADY; in _hardware_enqueue()
561 &hwreq->req, hwep->dir); in _hardware_enqueue()
565 if (hwreq->req.num_mapped_sgs) in _hardware_enqueue()
566 ret = prepare_td_for_sg(hwep, hwreq); in _hardware_enqueue()
568 ret = prepare_td_for_non_sg(hwep, hwreq); in _hardware_enqueue()
573 lastnode = list_entry(hwreq->tds.prev, in _hardware_enqueue()
577 if (!hwreq->req.no_interrupt) in _hardware_enqueue()
580 list_for_each_entry_safe(firstnode, lastnode, &hwreq->tds, td) in _hardware_enqueue()
581 trace_ci_prepare_td(hwep, hwreq, firstnode); in _hardware_enqueue()
583 firstnode = list_first_entry(&hwreq->tds, struct td_node, td); in _hardware_enqueue()
587 hwreq->req.actual = 0; in _hardware_enqueue()
619 u32 mul = hwreq->req.length / hwep->ep.maxpacket; in _hardware_enqueue()
621 if (hwreq->req.length == 0 in _hardware_enqueue()
622 || hwreq->req.length % hwep->ep.maxpacket) in _hardware_enqueue()
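The _hardware_enqueue() matches spell out the submission protocol: req.status doubles as an "owned by hardware" marker (-EALREADY, lines 555-558), the request is DMA-mapped (line 561), TDs are prepared for the sg or linear case (lines 565-568), the chain is terminated and IOC requested on the last TD (lines 573-577), every TD is traced (lines 580-581), and req.actual is reset before priming (line 587). Lines 619-622 repeat the isochronous multiplier computation when programming the queue head. A sketch; TD_TERMINATE, TD_IOC, and the ptr/token field names are taken from the driver headers as assumptions:

  static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
  {
      struct td_node *firstnode, *lastnode;
      int ret;

      if (hwreq->req.status == -EALREADY)    /* don't queue twice */
          return -EALREADY;
      hwreq->req.status = -EALREADY;         /* now owned by the hardware path */

      ret = usb_gadget_map_request_by_dev(hwep->ci->dev->parent,
                                          &hwreq->req, hwep->dir);
      if (ret)
          return ret;

      ret = hwreq->req.num_mapped_sgs ? prepare_td_for_sg(hwep, hwreq)
                                      : prepare_td_for_non_sg(hwep, hwreq);
      if (ret)
          return ret;

      lastnode = list_entry(hwreq->tds.prev, struct td_node, td);
      lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);    /* end of chain */
      if (!hwreq->req.no_interrupt)
          lastnode->ptr->token |= cpu_to_le32(TD_IOC);    /* interrupt on complete */

      list_for_each_entry_safe(firstnode, lastnode, &hwreq->tds, td)
          trace_ci_prepare_td(hwep, hwreq, firstnode);

      firstnode = list_first_entry(&hwreq->tds, struct td_node, td);

      wmb();    /* TDs must be visible in memory before the endpoint is primed */

      hwreq->req.actual = 0;    /* build cursor done; now it tracks real progress */
      /* ... prime the endpoint with firstnode (elided) ... */
      return 0;
  }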
664 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) in _hardware_dequeue() argument
669 unsigned actual = hwreq->req.length; in _hardware_dequeue()
672 if (hwreq->req.status != -EALREADY) in _hardware_dequeue()
675 hwreq->req.status = 0; in _hardware_dequeue()
677 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { in _hardware_dequeue()
679 trace_ci_complete_td(hwep, hwreq, node); in _hardware_dequeue()
686 hwreq->req.status = -EALREADY; in _hardware_dequeue()
694 hwreq->req.status = tmptoken & TD_STATUS; in _hardware_dequeue()
695 if ((TD_STATUS_HALTED & hwreq->req.status)) { in _hardware_dequeue()
696 hwreq->req.status = -EPIPE; in _hardware_dequeue()
698 } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) { in _hardware_dequeue()
699 hwreq->req.status = -EPROTO; in _hardware_dequeue()
701 } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) { in _hardware_dequeue()
702 hwreq->req.status = -EILSEQ; in _hardware_dequeue()
708 hwreq->req.status = -EPROTO; in _hardware_dequeue()
725 &hwreq->req, hwep->dir); in _hardware_dequeue()
727 hwreq->req.actual += actual; in _hardware_dequeue()
729 if (hwreq->req.status) in _hardware_dequeue()
730 return hwreq->req.status; in _hardware_dequeue()
732 return hwreq->req.actual; in _hardware_dequeue()
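The _hardware_dequeue() matches document the completion protocol: only a request marked -EALREADY can complete (line 672); an active TD means the hardware still owns the request, so the marker is restored and the walk aborts (line 686); otherwise the TD status bits map onto errnos, halted to -EPIPE, data-toggle error to -EPROTO, transaction error to -EILSEQ (lines 694-702), with -EPROTO also used for a short isochronous TX (line 708). A sketch; the token readback and the remaining-bytes arithmetic are assumptions:

  static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
  {
      struct td_node *node, *tmpnode;
      unsigned actual = hwreq->req.length;
      u32 tmptoken;

      if (hwreq->req.status != -EALREADY)    /* was never handed to hardware */
          return -EINVAL;

      hwreq->req.status = 0;

      list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
          tmptoken = le32_to_cpu(node->ptr->token);    /* assumed readback */
          trace_ci_complete_td(hwep, hwreq, node);
          if (tmptoken & TD_STATUS_ACTIVE) {
              /* hardware still owns this TD: restore the marker and bail */
              hwreq->req.status = -EALREADY;
              return -EBUSY;
          }

          /* ... subtract the TD's remaining bytes from actual (elided) ... */

          hwreq->req.status = tmptoken & TD_STATUS;
          if (TD_STATUS_HALTED & hwreq->req.status) {
              hwreq->req.status = -EPIPE;     /* endpoint stalled */
              break;
          } else if (TD_STATUS_DT_ERR & hwreq->req.status) {
              hwreq->req.status = -EPROTO;    /* data toggle error */
              break;
          } else if (TD_STATUS_TR_ERR & hwreq->req.status) {
              hwreq->req.status = -EILSEQ;    /* transaction error */
              break;
          }
          /* ... return the completed TD to its pool (elided) ... */
      }

      usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
                                      &hwreq->req, hwep->dir);
      hwreq->req.actual += actual;

      if (hwreq->req.status)
          return hwreq->req.status;
      return hwreq->req.actual;
  }

The return convention matters downstream: a non-negative value is the byte count, a negative one an error, and -EBUSY in particular tells isr_tr_complete_low() (line 1099) to stop walking the queue.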
755 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next, in _ep_nuke() local
758 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { in _ep_nuke()
765 list_del_init(&hwreq->queue); in _ep_nuke()
766 hwreq->req.status = -ESHUTDOWN; in _ep_nuke()
768 if (hwreq->req.complete != NULL) { in _ep_nuke()
770 usb_gadget_giveback_request(&hwep->ep, &hwreq->req); in _ep_nuke()
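In _ep_nuke() each queued request is torn down: its TDs are released, it is unlinked, marked -ESHUTDOWN, and given back to the gadget driver. A sketch of the loop body; the TD freeing and the lock handling around the completion callback are assumptions consistent with the usual gadget rule that completions run without the controller lock held:

  while (!list_empty(&hwep->qh.queue)) {
      struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
                                           struct ci_hw_req, queue);
      struct td_node *node, *tmpnode;

      list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
          /* ... dma_pool_free(node->ptr) (assumed) ... */
          list_del_init(&node->td);
          kfree(node);
      }

      list_del_init(&hwreq->queue);
      hwreq->req.status = -ESHUTDOWN;    /* endpoint is going away */

      if (hwreq->req.complete != NULL) {
          spin_unlock(hwep->lock);       /* assumed: callback runs unlocked */
          usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
          spin_lock(hwep->lock);
      }
  }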
924 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); in _ep_queue() local
943 hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) { in _ep_queue()
949 if (!list_empty(&hwreq->queue)) { in _ep_queue()
955 hwreq->req.status = -EINPROGRESS; in _ep_queue()
956 hwreq->req.actual = 0; in _ep_queue()
958 retval = _hardware_enqueue(hwep, hwreq); in _ep_queue()
963 list_add_tail(&hwreq->queue, &hwep->qh.queue); in _ep_queue()
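_ep_queue() shows the guards in front of submission: an isochronous request may not exceed mult * maxpacket (line 943), a request whose queue link is non-empty is already queued and is refused (line 949), and only then is the request marked -EINPROGRESS, zeroed, handed to _hardware_enqueue(), and appended to the endpoint's software queue (lines 955-963). A sketch; the errno choices on the guard paths are assumptions:

  struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
  int retval;

  if (hwep->type == USB_ENDPOINT_XFER_ISOC &&
      hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket)
      return -EMSGSIZE;    /* assumed errno: isoc request too long */

  if (!list_empty(&hwreq->queue))
      return -EBUSY;       /* assumed errno: request already queued */

  hwreq->req.status = -EINPROGRESS;
  hwreq->req.actual = 0;

  retval = _hardware_enqueue(hwep, hwreq);
  if (retval == -EALREADY)
      retval = 0;          /* assumed: already-primed hardware is not an error */
  if (!retval)
      list_add_tail(&hwreq->queue, &hwep->qh.queue);
  return retval;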
1093 struct ci_hw_req *hwreq, *hwreqtemp; in isr_tr_complete_low() local
1097 list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue, in isr_tr_complete_low()
1099 retval = _hardware_dequeue(hwep, hwreq); in isr_tr_complete_low()
1102 list_del_init(&hwreq->queue); in isr_tr_complete_low()
1103 if (hwreq->req.complete != NULL) { in isr_tr_complete_low()
1106 hwreq->req.length) in isr_tr_complete_low()
1108 usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req); in isr_tr_complete_low()
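isr_tr_complete_low() is the consumer side: it walks the endpoint queue with the _safe iterator (requests are unlinked mid-walk), stops at the first request the hardware still owns (negative return from _hardware_dequeue()), and gives completed requests back. The hweptemp indirection at line 1106 reroutes completion of a control transfer's data stage to ep0in. A sketch, with the locking pattern assumed to match _ep_nuke():

  struct ci_hw_req *hwreq, *hwreqtemp;
  struct ci_hw_ep *hweptemp = hwep;
  int retval = 0;

  list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue, queue) {
      retval = _hardware_dequeue(hwep, hwreq);
      if (retval < 0)
          break;    /* e.g. -EBUSY: the front request is still in flight */
      list_del_init(&hwreq->queue);
      if (hwreq->req.complete != NULL) {
          spin_unlock(hwep->lock);
          if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
                  hwreq->req.length)
              hweptemp = hwep->ci->ep0in;    /* data stage completes on ep0in */
          usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
          spin_lock(hwep->lock);
      }
  }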
1455 struct ci_hw_req *hwreq = NULL; in ep_alloc_request() local
1460 hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags); in ep_alloc_request()
1461 if (hwreq != NULL) { in ep_alloc_request()
1462 INIT_LIST_HEAD(&hwreq->queue); in ep_alloc_request()
1463 INIT_LIST_HEAD(&hwreq->tds); in ep_alloc_request()
1466 return (hwreq == NULL) ? NULL : &hwreq->req; in ep_alloc_request()
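ep_alloc_request() is almost fully visible in the matches; only the NULL-endpoint guard is filled in here as an assumption. Both list heads must be initialized up front because list_empty(&hwreq->queue) gates re-queueing in _ep_queue() and freeing in ep_free_request(), while hwreq->tds collects the TDs later:

  static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
  {
      struct ci_hw_req *hwreq = NULL;

      if (ep == NULL)    /* assumed guard */
          return NULL;

      hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
      if (hwreq != NULL) {
          INIT_LIST_HEAD(&hwreq->queue);
          INIT_LIST_HEAD(&hwreq->tds);
      }

      return (hwreq == NULL) ? NULL : &hwreq->req;
  }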
1477 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); in ep_free_request() local
1483 } else if (!list_empty(&hwreq->queue)) { in ep_free_request()
1490 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { in ep_free_request()
1497 kfree(hwreq); in ep_free_request()
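ep_free_request() mirrors the allocator: it refuses to free a request that is still on an endpoint queue (non-empty hwreq->queue, line 1483), releases any leftover TDs, and finally frees the wrapper. A sketch of the core; the reaction on the busy path is an assumption:

  struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
  struct td_node *node, *tmpnode;

  if (!list_empty(&hwreq->queue)) {
      /* assumed reaction: complain and refuse; freeing a queued
       * request would corrupt the endpoint queue */
      return;
  }

  list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
      /* ... dma_pool_free(node->ptr) (assumed) ... */
      list_del_init(&node->td);
      kfree(node);
  }

  kfree(hwreq);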
1535 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); in ep_dequeue() local
1539 if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || in ep_dequeue()
1540 hwep->ep.desc == NULL || list_empty(&hwreq->queue) || in ep_dequeue()
1548 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { in ep_dequeue()
1555 list_del_init(&hwreq->queue); in ep_dequeue()
1561 if (hwreq->req.complete != NULL) { in ep_dequeue()
1563 usb_gadget_giveback_request(&hwep->ep, &hwreq->req); in ep_dequeue()
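ep_dequeue() is the cancellation path. The guard at lines 1539-1540 only admits a request that is actually in flight (status still -EALREADY) and still linked on the endpoint queue; its TDs are then torn down, the request is unlinked and completed. A sketch; the -ECONNRESET completion status and the flush step are assumptions consistent with the usual usb_ep_dequeue() contract:

  struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
  struct td_node *node, *tmpnode;

  if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
      hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
      list_empty(&hwep->qh.queue))
      return -EINVAL;

  /* ... stop/flush the endpoint so the hardware drops the TDs (elided) ... */

  list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
      /* ... dma_pool_free(node->ptr) (assumed) ... */
      list_del(&node->td);
      kfree(node);
  }

  list_del_init(&hwreq->queue);
  hwreq->req.status = -ECONNRESET;    /* assumed: canceled requests end this way */

  if (hwreq->req.complete != NULL) {
      spin_unlock(hwep->lock);
      usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
      spin_lock(hwep->lock);
  }
  return 0;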