Lines Matching refs:tx_cb
1917 struct ql_tx_buf_cb *tx_cb; in ql_process_mac_tx_intr() local
1925 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; in ql_process_mac_tx_intr()
1936 if (tx_cb->seg_count == 0) { in ql_process_mac_tx_intr()
1945 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_process_mac_tx_intr()
1946 dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE); in ql_process_mac_tx_intr()
1947 tx_cb->seg_count--; in ql_process_mac_tx_intr()
1948 if (tx_cb->seg_count) { in ql_process_mac_tx_intr()
1949 for (i = 1; i < tx_cb->seg_count; i++) { in ql_process_mac_tx_intr()
1951 dma_unmap_addr(&tx_cb->map[i], mapaddr), in ql_process_mac_tx_intr()
1952 dma_unmap_len(&tx_cb->map[i], maplen), in ql_process_mac_tx_intr()
1957 qdev->ndev->stats.tx_bytes += tx_cb->skb->len; in ql_process_mac_tx_intr()
1960 dev_kfree_skb_irq(tx_cb->skb); in ql_process_mac_tx_intr()
1961 tx_cb->skb = NULL; in ql_process_mac_tx_intr()
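The matches above are the TX-completion side in ql_process_mac_tx_intr(): the transaction id in the MAC response indexes qdev->tx_buf[], each recorded DMA segment is unmapped, and the skb is handed back. A minimal sketch of that pattern, assuming the map[] bookkeeping that ql_send_map() fills in (declarations and error checks omitted):

	/* Sketch only: completion-side cleanup as suggested by the matched
	 * lines; qdev, mac_rsp and the loop variable i come from the
	 * surrounding driver code and are assumed here.
	 */
	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* segment 0 was mapped with dma_map_single(), the rest as pages */
	dma_unmap_single(&qdev->pdev->dev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
	for (i = 1; i < tx_cb->seg_count; i++)
		dma_unmap_page(&qdev->pdev->dev,
			       dma_unmap_addr(&tx_cb->map[i], mapaddr),
			       dma_unmap_len(&tx_cb->map[i], maplen),
			       DMA_TO_DEVICE);

	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
	dev_kfree_skb_irq(tx_cb->skb);	/* completion runs in interrupt context */
	tx_cb->skb = NULL;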
2300 struct ql_tx_buf_cb *tx_cb, in ql_send_map() argument
2312 seg_cnt = tx_cb->seg_count; in ql_send_map()
2330 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2331 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); in ql_send_map()
2339 oal = tx_cb->oal; in ql_send_map()
2370 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2371 dma_unmap_len_set(&tx_cb->map[seg], maplen, in ql_send_map()
2392 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2393 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); in ql_send_map()
2407 oal = tx_cb->oal; in ql_send_map()
2421 dma_unmap_addr(&tx_cb->map[seg], mapaddr), in ql_send_map()
2422 dma_unmap_len(&tx_cb->map[seg], maplen), in ql_send_map()
2429 dma_unmap_addr(&tx_cb->map[seg], mapaddr), in ql_send_map()
2430 dma_unmap_len(&tx_cb->map[seg], maplen), in ql_send_map()
2435 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_send_map()
2436 dma_unmap_len(&tx_cb->map[0], maplen), in ql_send_map()
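ql_send_map() is the send-side counterpart: as each piece of the skb is DMA-mapped (the linear part first, then the page fragments, spilling into tx_cb->oal when there are many), the cookie and length are stored with dma_unmap_addr_set()/dma_unmap_len_set() so that both the completion path above and the map-error rollback at the end of the function can read them back with dma_unmap_addr()/dma_unmap_len(). A sketch of that bookkeeping under those assumptions (OAL handling omitted):

	/* Sketch only: per-segment unmap bookkeeping implied by the matches;
	 * len, map, seg and the fragment loop are assumed locals/helpers.
	 */
	len = skb_headlen(skb);
	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&qdev->pdev->dev, map))
		return NETDEV_TX_BUSY;

	dma_unmap_addr_set(&tx_cb->map[0], mapaddr, map);
	dma_unmap_len_set(&tx_cb->map[0], maplen, len);

	for (seg = 1; seg < seg_cnt; seg++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[seg - 1];

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(&qdev->pdev->dev, map))
			goto map_error;	/* unwind already-mapped segments via
					 * the saved addr/len pairs (label lives
					 * in the real function) */

		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
		dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
	}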
2460 struct ql_tx_buf_cb *tx_cb; in ql3xxx_send() local
2467 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; in ql3xxx_send()
2468 tx_cb->seg_count = ql_get_seg_count(qdev, in ql3xxx_send()
2470 if (tx_cb->seg_count == -1) { in ql3xxx_send()
2475 mac_iocb_ptr = tx_cb->queue_entry; in ql3xxx_send()
2482 tx_cb->skb = skb; in ql3xxx_send()
2487 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { in ql3xxx_send()
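ql3xxx_send() is the transmit entry point tying the pieces together: it claims the tx_cb at the current producer index, records the segment count and the skb, and hands the paired IOCB to ql_send_map(). A condensed sketch of that flow (doorbell write and producer-index update omitted; names not in the matches are assumptions):

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];

	tx_cb->seg_count = ql_get_seg_count(qdev, skb_shinfo(skb)->nr_frags);
	if (tx_cb->seg_count == -1) {
		dev_kfree_skb_any(skb);		/* cannot describe this skb, drop it */
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;	/* IOCB slot paired at free-list setup */
	tx_cb->skb = skb;

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK)
		return NETDEV_TX_BUSY;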
2795 struct ql_tx_buf_cb *tx_cb; in ql_free_send_free_list() local
2798 tx_cb = &qdev->tx_buf[0]; in ql_free_send_free_list()
2800 kfree(tx_cb->oal); in ql_free_send_free_list()
2801 tx_cb->oal = NULL; in ql_free_send_free_list()
2802 tx_cb++; in ql_free_send_free_list()
2808 struct ql_tx_buf_cb *tx_cb; in ql_create_send_free_list() local
2815 tx_cb = &qdev->tx_buf[i]; in ql_create_send_free_list()
2816 tx_cb->skb = NULL; in ql_create_send_free_list()
2817 tx_cb->queue_entry = req_q_curr; in ql_create_send_free_list()
2819 tx_cb->oal = kmalloc(512, GFP_KERNEL); in ql_create_send_free_list()
2820 if (tx_cb->oal == NULL) in ql_create_send_free_list()
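ql_create_send_free_list() and ql_free_send_free_list() (matched just above) are the setup/teardown pair: at open time every tx_cb is bound to a request-queue entry and given a 512-byte OAL buffer, and on close the OAL buffers are released again. A sketch of that pairing, assuming NUM_REQ_Q_ENTRIES and req_q_curr from the surrounding driver code:

	/* setup: one request-queue slot and one OAL buffer per tx_cb */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (!tx_cb->oal)
			return -ENOMEM;		/* caller frees what was allocated */
	}

	/* teardown walks the same array and releases the OAL buffers */
	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++, tx_cb++) {
		kfree(tx_cb->oal);
		tx_cb->oal = NULL;
	}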
3610 struct ql_tx_buf_cb *tx_cb; in ql_reset_work() local
3624 tx_cb = &qdev->tx_buf[i]; in ql_reset_work()
3625 if (tx_cb->skb) { in ql_reset_work()
3629 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_reset_work()
3630 dma_unmap_len(&tx_cb->map[0], maplen), in ql_reset_work()
3632 for (j = 1; j < tx_cb->seg_count; j++) { in ql_reset_work()
3634 dma_unmap_addr(&tx_cb->map[j], mapaddr), in ql_reset_work()
3635 dma_unmap_len(&tx_cb->map[j], maplen), in ql_reset_work()
3638 dev_kfree_skb(tx_cb->skb); in ql_reset_work()
3639 tx_cb->skb = NULL; in ql_reset_work()