Lines matching refs: tpd (cross-reference hits in the he ATM driver, drivers/atm/he.c; the leading numbers are source line numbers, and the trailing "in foo()" names the enclosing function)
1611 struct he_tpd *tpd; in __alloc_tpd() local
1614 tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping); in __alloc_tpd()
1615 if (tpd == NULL) in __alloc_tpd()
1618 tpd->status = TPD_ADDR(mapping); in __alloc_tpd()
1619 tpd->reserved = 0; in __alloc_tpd()
1620 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0; in __alloc_tpd()
1621 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0; in __alloc_tpd()
1622 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0; in __alloc_tpd()
1624 return tpd; in __alloc_tpd()
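
Pieced together, the fragments above cover essentially all of __alloc_tpd(). A minimal reconstruction in C, where the dma_addr_t declaration and the early return on failure are assumptions filled in from context (the listing only shows lines that mention tpd):

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
        struct he_tpd *tpd;
        dma_addr_t mapping;     /* assumed: bus address returned by the pool */

        tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
        if (tpd == NULL)
                return NULL;    /* assumed failure path */

        /* stash the bus address in the status word; it is recovered at
         * free time via TPD_ADDR(tpd->status) (see lines 1865 and 2100) */
        tpd->status = TPD_ADDR(mapping);
        tpd->reserved = 0;
        tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
        tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
        tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

        return tpd;
}

Storing the mapping inside status means the driver never needs a separate field to remember the DMA address of the descriptor itself.
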
1807 struct he_tpd *tpd; in he_service_tbrq() local
1821 tpd = NULL; in he_service_tbrq()
1824 tpd = __tpd; in he_service_tbrq()
1830 if (tpd == NULL) { in he_service_tbrq()
1838 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci)); in he_service_tbrq()
1839 if (tpd->vcc) in he_service_tbrq()
1840 wake_up(&HE_VCC(tpd->vcc)->tx_waitq); in he_service_tbrq()
1846 if (tpd->iovec[slot].addr) in he_service_tbrq()
1848 tpd->iovec[slot].addr, in he_service_tbrq()
1849 tpd->iovec[slot].len & TPD_LEN_MASK, in he_service_tbrq()
1851 if (tpd->iovec[slot].len & TPD_LST) in he_service_tbrq()
1856 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */ in he_service_tbrq()
1857 if (tpd->vcc && tpd->vcc->pop) in he_service_tbrq()
1858 tpd->vcc->pop(tpd->vcc, tpd->skb); in he_service_tbrq()
1860 dev_kfree_skb_any(tpd->skb); in he_service_tbrq()
1864 if (tpd) in he_service_tbrq()
1865 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); in he_service_tbrq()
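
The he_service_tbrq() fragments trace one completion: look the hardware-reported descriptor up on outstanding_tpds, wake any closer on end-of-stream, unmap every iovec, release the skb, and return the descriptor to the pool. A hedged sketch of that path; the list lookup, the TBRQ_TPD/TBRQ_EOS accessors, the TPD_MAXIOV bound, and the next_tbrq_entry label are assumed from the surrounding driver source rather than shown in the listing:

        struct he_tpd *tpd = NULL, *__tpd;
        int slot;

        /* find the outstanding tpd whose bus address the hardware reported */
        list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry)
                if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
                        tpd = __tpd;
                        list_del(&__tpd->entry);
                        break;
                }

        if (tpd == NULL)
                goto next_tbrq_entry;   /* unrecognized completion, skip it */

        if (TBRQ_EOS(he_dev->tbrq_head)) {
                /* end of stream: wake whoever is sleeping in he_close() */
                if (tpd->vcc)
                        wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
                goto next_tbrq_entry;
        }

        /* undo the per-buffer mappings set up in he_send() */
        for (slot = 0; slot < TPD_MAXIOV; ++slot) {
                if (tpd->iovec[slot].addr)
                        dma_unmap_single(&he_dev->pci_dev->dev,
                                tpd->iovec[slot].addr,
                                tpd->iovec[slot].len & TPD_LEN_MASK,
                                DMA_TO_DEVICE);
                if (tpd->iovec[slot].len & TPD_LST)
                        break;
        }

        if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
                if (tpd->vcc && tpd->vcc->pop)
                        tpd->vcc->pop(tpd->vcc, tpd->skb);
                else
                        dev_kfree_skb_any(tpd->skb);
        }

next_tbrq_entry:
        if (tpd)
                dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));

Only the last descriptor of a PDU carries the skb (line 2598), so the pop/free happens exactly once per packet even when a large skb spans several chained TPDs.
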
2053 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) in __enqueue_tpd() argument
2058 tpd, cid, he_dev->tpdrq_tail); in __enqueue_tpd()
2087 if (tpd->iovec[slot].addr) in __enqueue_tpd()
2089 tpd->iovec[slot].addr, in __enqueue_tpd()
2090 tpd->iovec[slot].len & TPD_LEN_MASK, in __enqueue_tpd()
2093 if (tpd->skb) { in __enqueue_tpd()
2094 if (tpd->vcc->pop) in __enqueue_tpd()
2095 tpd->vcc->pop(tpd->vcc, tpd->skb); in __enqueue_tpd()
2097 dev_kfree_skb_any(tpd->skb); in __enqueue_tpd()
2098 atomic_inc(&tpd->vcc->stats->tx_err); in __enqueue_tpd()
2100 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); in __enqueue_tpd()
2106 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds); in __enqueue_tpd()
2107 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status); in __enqueue_tpd()
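
__enqueue_tpd() has two outcomes: if the TPDRQ ring is full, the descriptor is torn down on the spot (unmap, pop or free the skb, count a tx_err, return the tpd to the pool); otherwise it is linked onto outstanding_tpds and its bus address is handed to the hardware. A sketch, where tpdrq_is_full is a hypothetical stand-in for the driver's actual ring-full test, which the listing does not show:

        int slot;

        if (tpdrq_is_full) {    /* hypothetical: ring-full condition */
                for (slot = 0; slot < TPD_MAXIOV; ++slot)
                        if (tpd->iovec[slot].addr)
                                dma_unmap_single(&he_dev->pci_dev->dev,
                                        tpd->iovec[slot].addr,
                                        tpd->iovec[slot].len & TPD_LEN_MASK,
                                        DMA_TO_DEVICE);
                if (tpd->skb) {
                        if (tpd->vcc->pop)
                                tpd->vcc->pop(tpd->vcc, tpd->skb);
                        else
                                dev_kfree_skb_any(tpd->skb);
                        atomic_inc(&tpd->vcc->stats->tx_err);
                }
                dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
                return;
        }

        /* ring has room: remember the tpd so he_service_tbrq() can find
         * it later, and give its bus address to the hardware queue entry */
        list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
        he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);

Dropping the packet here rather than blocking keeps the function usable from atomic context, at the cost of a tx_err counter bump.
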
2336 struct he_tpd *tpd; in he_close() local
2427 tpd = __alloc_tpd(he_dev); in he_close()
2428 if (tpd == NULL) { in he_close()
2432 tpd->status |= TPD_EOS | TPD_INT; in he_close()
2433 tpd->skb = NULL; in he_close()
2434 tpd->vcc = vcc; in he_close()
2439 __enqueue_tpd(he_dev, tpd, cid); in he_close()
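
In he_close(), the lines above queue one empty descriptor flagged TPD_EOS | TPD_INT so the SAR reports end-of-stream, which he_service_tbrq() converts into a wake_up() on tx_waitq (lines 1839-1840). A short sketch; the hprintk wording and the close_tx_incomplete label are assumptions from the surrounding driver source, not shown in the listing:

        tpd = __alloc_tpd(he_dev);
        if (tpd == NULL) {
                hprintk("close tx failed to allocate tpd\n");   /* assumed wording */
                goto close_tx_incomplete;       /* label assumed from context */
        }
        tpd->status |= TPD_EOS | TPD_INT;       /* end-of-stream, interrupt on completion */
        tpd->skb = NULL;        /* nothing to free when it completes */
        tpd->vcc = vcc;         /* lets the tbrq handler find tx_waitq */

        __enqueue_tpd(he_dev, tpd, cid);

The closer then presumably sleeps on HE_VCC(vcc)->tx_waitq until the EOS completion comes back through the TBRQ.
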
2494 struct he_tpd *tpd; in he_send() local
2527 tpd = __alloc_tpd(he_dev); in he_send()
2528 if (tpd == NULL) { in he_send()
2539 tpd->status |= TPD_CELLTYPE(TPD_USERCELL); in he_send()
2546 tpd->status |= TPD_CELLTYPE(pti); in he_send()
2548 tpd->status |= TPD_CLP; in he_send()
2554 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data, in he_send()
2556 tpd->iovec[slot].len = skb_headlen(skb); in he_send()
2563 tpd->vcc = vcc; in he_send()
2564 tpd->skb = NULL; /* not the last fragment ... */ in he_send()
2568 __enqueue_tpd(he_dev, tpd, cid); in he_send()
2569 tpd = __alloc_tpd(he_dev); in he_send()
2570 if (tpd == NULL) { in he_send()
2579 tpd->status |= TPD_USERCELL; in he_send()
2583 tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev, in he_send()
2585 tpd->iovec[slot].len = skb_frag_size(frag); in he_send()
2590 tpd->iovec[slot - 1].len |= TPD_LST; in he_send()
2592 tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE); in he_send()
2593 tpd->length0 = skb->len | TPD_LST; in he_send()
2595 tpd->status |= TPD_INT; in he_send()
2597 tpd->vcc = vcc; in he_send()
2598 tpd->skb = skb; in he_send()
2602 __enqueue_tpd(he_dev, tpd, cid); in he_send()
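
Taken together, the he_send() fragments show two mapping strategies: a scatter-gather path that fills the three iovec slots per TPD and chains a fresh TPD when a large skb overflows them (lines 2554-2590), and a single-buffer path that maps the whole skb into address0/length0 (lines 2592-2593). A sketch of the flow, assuming the USE_SCATTERGATHER guard, the frag-loop bounds, and the slot/i declarations from the surrounding driver source:

        int i, slot = 0;

#ifdef USE_SCATTERGATHER
        /* map the linear head of the skb into the first iovec slot */
        tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
                        skb->data, skb_headlen(skb), DMA_TO_DEVICE);
        tpd->iovec[slot].len = skb_headlen(skb);
        ++slot;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                if (slot == TPD_MAXIOV) {       /* out of slots: chain a new tpd */
                        tpd->vcc = vcc;
                        tpd->skb = NULL;        /* not the last fragment ... */
                        __enqueue_tpd(he_dev, tpd, cid);
                        tpd = __alloc_tpd(he_dev);
                        /* NULL check elided; see lines 2570ff above */
                        tpd->status |= TPD_USERCELL;
                        slot = 0;
                }

                tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
                                frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
                tpd->iovec[slot].len = skb_frag_size(frag);
                ++slot;
        }

        /* mark the final buffer so the SAR knows the PDU ends here */
        tpd->iovec[slot - 1].len |= TPD_LST;
#else
        tpd->address0 = dma_map_single(&he_dev->pci_dev->dev,
                        skb->data, skb->len, DMA_TO_DEVICE);
        tpd->length0 = skb->len | TPD_LST;
#endif

        tpd->status |= TPD_INT; /* interrupt when the last tpd completes */
        tpd->vcc = vcc;
        tpd->skb = skb;         /* only the final tpd owns the skb */

        __enqueue_tpd(he_dev, tpd, cid);

Because intermediate chained TPDs carry skb == NULL and only the last one carries TPD_LST, TPD_INT, and the skb pointer, the completion path in he_service_tbrq() unmaps every buffer but frees the packet exactly once.
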