Lines Matching refs:tpd (drivers/atm/he.c, ForeRunnerHE ATM driver)
1608 struct he_tpd *tpd; in __alloc_tpd() local
1611 tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping); in __alloc_tpd()
1612 if (tpd == NULL) in __alloc_tpd()
1615 tpd->status = TPD_ADDR(mapping); in __alloc_tpd()
1616 tpd->reserved = 0; in __alloc_tpd()
1617 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0; in __alloc_tpd()
1618 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0; in __alloc_tpd()
1619 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0; in __alloc_tpd()
1621 return tpd; in __alloc_tpd()
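
The hits above are the whole of __alloc_tpd(): a transmit packet descriptor (TPD) is carved out of the driver's DMA pool, the bus address returned by dma_pool_alloc() is folded into tpd->status via TPD_ADDR() so it can be recovered at free time, and all three scatter-gather slots are cleared. For context, the pool this draws from would be set up once at probe time; a minimal sketch of that pairing (the pool name "tpd" and the TPD_ALIGNMENT constant are assumptions, not taken from the listing):

	/* Sketch: create the coherent DMA pool that __alloc_tpd() draws from.
	 * One pool entry per descriptor, aligned so that TPD_ADDR() can pack
	 * the bus address into the status word.  Names here are illustrative.
	 */
	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
					   sizeof(struct he_tpd),
					   TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL)
		return -ENOMEM;
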
1804 struct he_tpd *tpd; in he_service_tbrq() local
1818 tpd = NULL; in he_service_tbrq()
1821 tpd = __tpd; in he_service_tbrq()
1827 if (tpd == NULL) { in he_service_tbrq()
1835 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci)); in he_service_tbrq()
1836 if (tpd->vcc) in he_service_tbrq()
1837 wake_up(&HE_VCC(tpd->vcc)->tx_waitq); in he_service_tbrq()
1843 if (tpd->iovec[slot].addr) in he_service_tbrq()
1845 tpd->iovec[slot].addr, in he_service_tbrq()
1846 tpd->iovec[slot].len & TPD_LEN_MASK, in he_service_tbrq()
1848 if (tpd->iovec[slot].len & TPD_LST) in he_service_tbrq()
1853 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */ in he_service_tbrq()
1854 if (tpd->vcc && tpd->vcc->pop) in he_service_tbrq()
1855 tpd->vcc->pop(tpd->vcc, tpd->skb); in he_service_tbrq()
1857 dev_kfree_skb_any(tpd->skb); in he_service_tbrq()
1861 if (tpd) in he_service_tbrq()
1862 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); in he_service_tbrq()
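
These he_service_tbrq() hits are the completion side: tpd starts out NULL (line 1818) and is set from __tpd (line 1821) once the completed descriptor is found, the waiter on tx_waitq is woken, each mapped iovec slot is unmapped, the skb is popped or freed, and the descriptor goes back to the pool. The listing elides the loop structure around the iovec teardown; a sketch of it, assuming TPD_MAXIOV slots as implied by the three iovec entries in __alloc_tpd():

	/* Sketch: unmap every filled scatter-gather slot of a completed TPD,
	 * stopping at the slot flagged TPD_LST, then return the descriptor
	 * to the pool using the bus address stashed in its status word.
	 */
	int slot;

	for (slot = 0; slot < TPD_MAXIOV; ++slot) {
		if (!tpd->iovec[slot].addr)
			break;
		dma_unmap_single(&he_dev->pci_dev->dev,
				 tpd->iovec[slot].addr,
				 tpd->iovec[slot].len & TPD_LEN_MASK,
				 DMA_TO_DEVICE);
		if (tpd->iovec[slot].len & TPD_LST)
			break;			/* last fragment of the packet */
	}
	if (tpd->skb) {
		if (tpd->vcc && tpd->vcc->pop)
			tpd->vcc->pop(tpd->vcc, tpd->skb);	/* owner reclaims */
		else
			dev_kfree_skb_any(tpd->skb);
	}
	dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
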
2050 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) in __enqueue_tpd() argument
2055 tpd, cid, he_dev->tpdrq_tail); in __enqueue_tpd()
2084 if (tpd->iovec[slot].addr) in __enqueue_tpd()
2086 tpd->iovec[slot].addr, in __enqueue_tpd()
2087 tpd->iovec[slot].len & TPD_LEN_MASK, in __enqueue_tpd()
2090 if (tpd->skb) { in __enqueue_tpd()
2091 if (tpd->vcc->pop) in __enqueue_tpd()
2092 tpd->vcc->pop(tpd->vcc, tpd->skb); in __enqueue_tpd()
2094 dev_kfree_skb_any(tpd->skb); in __enqueue_tpd()
2095 atomic_inc(&tpd->vcc->stats->tx_err); in __enqueue_tpd()
2097 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); in __enqueue_tpd()
2103 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds); in __enqueue_tpd()
2104 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status); in __enqueue_tpd()
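
In __enqueue_tpd(), the error path (lines 2084-2097) fires when the TPD ring is full: every mapped slot is unwound with dma_unmap_single(), the skb is popped or freed, tx_err is bumped, and the descriptor is returned to the pool. On success the TPD is instead linked onto outstanding_tpds and its bus address written into the ring tail (line 2104), which is what lets he_service_tbrq() above match the completion back to its descriptor. A sketch of that lookup (TBRQ_TPD() is assumed to extract the completed descriptor's bus address from the return-queue entry; it is not shown in the listing):

	/* Sketch: match a completion entry back to its TPD by bus address.
	 * The enqueue side did list_add_tail(&tpd->entry, &outstanding_tpds).
	 */
	struct he_tpd *tpd = NULL, *__tpd;

	list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
		if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
			tpd = __tpd;
			list_del(&__tpd->entry);
			break;
		}
	}
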
2333 struct he_tpd *tpd; in he_close() local
2424 tpd = __alloc_tpd(he_dev); in he_close()
2425 if (tpd == NULL) { in he_close()
2429 tpd->status |= TPD_EOS | TPD_INT; in he_close()
2430 tpd->skb = NULL; in he_close()
2431 tpd->vcc = vcc; in he_close()
2436 __enqueue_tpd(he_dev, tpd, cid); in he_close()
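
In he_close(), the connection is flushed by queueing one last zero-payload TPD flagged TPD_EOS | TPD_INT, with tpd->skb left NULL so the completion path frees nothing; the TPD_INT completion is what fires the wake_up() on tx_waitq (line 1837 above). A sketch of that handshake using the classic wait-queue idiom; the exact wait primitive and the timeout value are assumptions, not the driver's code:

	/* Sketch: enqueue an end-of-stream descriptor, then sleep until
	 * he_service_tbrq() wakes us or the timeout expires.
	 */
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&HE_VCC(vcc)->tx_waitq, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	__enqueue_tpd(he_dev, tpd, cid);
	schedule_timeout(HZ / 2);		/* timeout is illustrative */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&HE_VCC(vcc)->tx_waitq, &wait);
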
2491 struct he_tpd *tpd; in he_send() local
2524 tpd = __alloc_tpd(he_dev); in he_send()
2525 if (tpd == NULL) { in he_send()
2536 tpd->status |= TPD_CELLTYPE(TPD_USERCELL); in he_send()
2543 tpd->status |= TPD_CELLTYPE(pti); in he_send()
2545 tpd->status |= TPD_CLP; in he_send()
2551 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data, in he_send()
2553 tpd->iovec[slot].len = skb_headlen(skb); in he_send()
2560 tpd->vcc = vcc; in he_send()
2561 tpd->skb = NULL; /* not the last fragment so dont ->push() yet */ in he_send()
2565 __enqueue_tpd(he_dev, tpd, cid); in he_send()
2566 tpd = __alloc_tpd(he_dev); in he_send()
2567 if (tpd == NULL) { in he_send()
2576 tpd->status |= TPD_USERCELL; in he_send()
2580 tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev, in he_send()
2582 tpd->iovec[slot].len = skb_frag_size(frag); in he_send()
2587 tpd->iovec[slot - 1].len |= TPD_LST; in he_send()
2589 tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE); in he_send()
2590 tpd->length0 = skb->len | TPD_LST; in he_send()
2592 tpd->status |= TPD_INT; in he_send()
2594 tpd->vcc = vcc; in he_send()
2595 tpd->skb = skb; in he_send()
2599 __enqueue_tpd(he_dev, tpd, cid); in he_send()
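
The he_send() hits show both transmit shapes: a linear skb maps straight into address0/length0 with TPD_LST set (lines 2589-2590), while a fragmented skb fills iovec slots, chaining a fresh TPD whenever all slots are used, so that only the final TPD carries the skb pointer and completion frees it exactly once. A sketch of the scatter-gather fill the listing samples from (error handling omitted; TPD_MAXIOV is an assumption):

	/* Sketch: map the linear head, then each page fragment, marking the
	 * final slot with TPD_LST so the completion loop knows where to stop.
	 */
	int i, slot = 0;

	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
					       skb_headlen(skb), DMA_TO_DEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* descriptor full: chain a new one */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* mid-packet: completion must not free skb */
			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);	/* failure handling omitted */
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
							 frag, 0,
							 skb_frag_size(frag),
							 DMA_TO_DEVICE);
		tpd->iovec[slot].len = skb_frag_size(frag);
		++slot;
	}

	tpd->iovec[slot - 1].len |= TPD_LST;	/* mark the final fragment */
	tpd->skb = skb;				/* last TPD owns the skb */
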