Lines Matching full:tx

10  * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
66 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
79 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
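
A minimal sketch of the dispatch pattern those comments describe, assuming a simplified userspace model: the shared interrupt handler services TX/RX queue pair 0, while additional pairs are serviced by a per-queue handler that receives its queue through the dev_id pointer. All names and types here are illustrative stand-ins, not the driver's real tsnep_irq()/tsnep_irq_txrx() code, and the napi_schedule() step is faked.

#include <stdio.h>

struct fake_queue {
	int index;          /* which TX/RX queue pair this is */
	int napi_scheduled; /* stand-in for NAPI being scheduled */
};

/* stand-in for napi_schedule(): mark the queue as needing poll work */
static void fake_napi_schedule(struct fake_queue *queue)
{
	queue->napi_scheduled = 1;
	printf("queue %d: poll scheduled\n", queue->index);
}

/* shared interrupt: handles global events plus TX/RX queue pair 0 */
static void fake_irq(struct fake_queue *queue0)
{
	/* handle TX/RX queue 0 interrupt */
	fake_napi_schedule(queue0);
}

/* dedicated interrupt: dev_id points at the queue pair that raised it */
static void fake_irq_txrx(void *dev_id)
{
	struct fake_queue *queue = dev_id;

	/* handle TX/RX queue interrupt */
	fake_napi_schedule(queue);
}

int main(void)
{
	struct fake_queue queues[2] = { { .index = 0 }, { .index = 1 } };

	fake_irq(&queues[0]);      /* shared handler -> queue pair 0 */
	fake_irq_txrx(&queues[1]); /* per-queue handler -> queue pair 1 */
	return 0;
}
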
217 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) in tsnep_tx_ring_cleanup() argument
219 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
222 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
225 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
226 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
227 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
228 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
229 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
234 static int tsnep_tx_ring_init(struct tsnep_tx *tx) in tsnep_tx_ring_init() argument
236 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_init()
243 tx->page[i] = in tsnep_tx_ring_init()
244 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_init()
246 if (!tx->page[i]) { in tsnep_tx_ring_init()
251 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_init()
253 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_init()
256 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_init()
260 entry = &tx->entry[i]; in tsnep_tx_ring_init()
261 next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE]; in tsnep_tx_ring_init()
268 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_init()
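
The tsnep_tx_ring_init() lines above allocate one DMA-coherent page per group of descriptors and derive every entry's CPU pointer and DMA address from its page. A standalone sketch of that arithmetic, assuming placeholder sizes rather than the real TSNEP_DESC_SIZE/TSNEP_RING_SIZE values and pretend bus addresses instead of dma_alloc_coherent():

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE        4096
#define FAKE_DESC_SIZE        128
#define FAKE_ENTRIES_PER_PAGE (FAKE_PAGE_SIZE / FAKE_DESC_SIZE)
#define FAKE_RING_SIZE        256
#define FAKE_PAGE_COUNT       (FAKE_RING_SIZE / FAKE_ENTRIES_PER_PAGE)

struct fake_entry {
	void     *desc;     /* CPU view of the descriptor */
	uint64_t  desc_dma; /* device view of the same descriptor */
};

int main(void)
{
	static uint8_t page[FAKE_PAGE_COUNT][FAKE_PAGE_SIZE]; /* stands in for dma_alloc_coherent() memory */
	uint64_t page_dma[FAKE_PAGE_COUNT];
	struct fake_entry entry[FAKE_RING_SIZE];
	int i, j;

	/* pretend bus addresses; the driver gets these from dma_alloc_coherent() */
	for (i = 0; i < FAKE_PAGE_COUNT; i++)
		page_dma[i] = 0x10000000ull + (uint64_t)i * FAKE_PAGE_SIZE;

	/* same arithmetic as the init loops above: entry k lives in page
	 * k / entries_per_page at offset (k % entries_per_page) * desc_size
	 */
	for (i = 0; i < FAKE_PAGE_COUNT; i++) {
		for (j = 0; j < FAKE_ENTRIES_PER_PAGE; j++) {
			int k = FAKE_ENTRIES_PER_PAGE * i + j;

			entry[k].desc = &page[i][FAKE_DESC_SIZE * j];
			entry[k].desc_dma = page_dma[i] + FAKE_DESC_SIZE * j;
		}
	}

	/* show the first entry of each page and the wrap that closes the ring */
	for (i = 0; i < FAKE_PAGE_COUNT; i++) {
		int k = FAKE_ENTRIES_PER_PAGE * i;

		printf("entry %3d: page %d, dma 0x%llx\n", k, i,
		       (unsigned long long)entry[k].desc_dma);
	}
	printf("entry %d is followed by entry %d\n", FAKE_RING_SIZE - 1,
	       FAKE_RING_SIZE % FAKE_RING_SIZE);
	return 0;
}

The (i + 1) % TSNEP_RING_SIZE step in the listing above is what closes the ring, so the last entry chains back to entry 0.
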
272 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
275 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
306 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
307 tx->owner_counter++; in tsnep_tx_activate()
308 if (tx->owner_counter == 4) in tsnep_tx_activate()
309 tx->owner_counter = 1; in tsnep_tx_activate()
310 tx->increment_owner_counter--; in tsnep_tx_activate()
311 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
312 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
315 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
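
A small simulation of the owner-counter bookkeeping above, with a deliberately tiny placeholder ring size: the counter cycles 1 -> 2 -> 3 each time activation reaches the trigger index, and the trigger index then steps back one slot, so the counter changes roughly once per trip around the ring. This sketches the arithmetic only; shifting the counter into the descriptor properties (as on line 315 above) follows the common generation-marker pattern that lets the consumer tell fresh descriptors from stale ones.

#include <stdio.h>

#define RING_SIZE 8 /* tiny placeholder ring so the pattern is visible */

int main(void)
{
	int owner_counter = 1;                      /* as set in tsnep_tx_open() */
	int increment_owner_counter = RING_SIZE - 1;
	int write = 0;
	int n;

	for (n = 0; n < 3 * RING_SIZE; n++) {
		int index = write;

		/* same steps as tsnep_tx_activate() */
		if (index == increment_owner_counter) {
			owner_counter++;
			if (owner_counter == 4)
				owner_counter = 1;
			increment_owner_counter--;
			if (increment_owner_counter < 0)
				increment_owner_counter = RING_SIZE - 1;
		}

		printf("activate index %d -> owner counter %d\n",
		       index, owner_counter);

		write = (write + 1) % RING_SIZE;
	}
	return 0;
}
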
330 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
332 if (tx->read <= tx->write) in tsnep_tx_desc_available()
333 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
335 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
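
The tsnep_tx_desc_available() lines above are the classic free-space computation for a circular buffer with separate read and write indices; one slot is always left unused so that read == write can only mean "empty". A self-contained sketch with a placeholder ring size:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8

static int desc_available(int read, int write)
{
	if (read <= write)
		return RING_SIZE - write + read - 1;
	else
		return read - write - 1;
}

int main(void)
{
	/* empty ring: everything but the reserved slot is available */
	assert(desc_available(0, 0) == RING_SIZE - 1);

	/* writer ahead of reader: wrapped distance minus the reserved slot */
	assert(desc_available(2, 5) == RING_SIZE - 5 + 2 - 1);

	/* writer wrapped around behind the reader */
	assert(desc_available(6, 1) == 6 - 1 - 1);

	printf("all availability checks passed\n");
	return 0;
}
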
338 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) in tsnep_tx_map() argument
340 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
348 entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; in tsnep_tx_map()
366 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
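
The __cpu_to_le64() store above is the point where a mapped buffer's DMA address is written into the descriptor in the byte order the device expects. A tiny userspace sketch of the same idea, using htole64() from <endian.h> (glibc/musl) as the analogue of the kernel helper, with an invented descriptor layout and address:

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_desc {
	uint64_t tx; /* buffer address field, little-endian on the device */
};

int main(void)
{
	struct fake_desc desc;
	uint64_t dma = 0x123456789abcull; /* pretend bus address of the buffer */

	/* device expects little-endian regardless of host byte order */
	desc.tx = htole64(dma);

	printf("stored 0x%" PRIx64 " as le64 0x%" PRIx64 "\n", dma, desc.tx);
	return 0;
}
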
374 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
376 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
382 entry = &tx->entry[(index + i) % TSNEP_RING_SIZE]; in tsnep_tx_unmap()
404 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
416 spin_lock_irqsave(&tx->lock, flags); in tsnep_xmit_frame_ring()
418 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
422 netif_stop_queue(tx->adapter->netdev); in tsnep_xmit_frame_ring()
424 spin_unlock_irqrestore(&tx->lock, flags); in tsnep_xmit_frame_ring()
429 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
432 retval = tsnep_tx_map(skb, tx, count); in tsnep_xmit_frame_ring()
434 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
438 tx->dropped++; in tsnep_xmit_frame_ring()
440 spin_unlock_irqrestore(&tx->lock, flags); in tsnep_xmit_frame_ring()
442 netdev_err(tx->adapter->netdev, "TX DMA map failed\n"); in tsnep_xmit_frame_ring()
452 tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, in tsnep_xmit_frame_ring()
454 tx->write = (tx->write + count) % TSNEP_RING_SIZE; in tsnep_xmit_frame_ring()
461 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
463 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
465 netif_stop_queue(tx->adapter->netdev); in tsnep_xmit_frame_ring()
468 spin_unlock_irqrestore(&tx->lock, flags); in tsnep_xmit_frame_ring()
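
Putting the pieces of tsnep_xmit_frame_ring() together: a frame needs one descriptor for its head plus one per fragment, the queue is stopped (and the frame retried later) when the ring cannot take it, and after a successful post the write index advances and the hardware is kicked via the control register; the queue is also stopped pre-emptively when fewer than MAX_SKB_FRAGS + 1 descriptors remain. A simplified, lock-free userspace sketch of that flow with placeholder sizes and a printf standing in for the doorbell write:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 16
#define MAX_FRAGS 4 /* stand-in for MAX_SKB_FRAGS */

struct fake_tx {
	int read;
	int write;
	bool queue_stopped;
};

static int desc_available(const struct fake_tx *tx)
{
	if (tx->read <= tx->write)
		return RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

/* returns false when the frame has to be retried later (queue stopped) */
static bool fake_xmit(struct fake_tx *tx, int nr_frags)
{
	int count = 1 + nr_frags; /* head descriptor + one per fragment */
	int i;

	if (desc_available(tx) < count) {
		/* ring is full; the poll path wakes the queue later */
		tx->queue_stopped = true;
		return false;
	}

	for (i = 0; i < count; i++)
		printf("activate descriptor %d\n", (tx->write + i) % RING_SIZE);
	tx->write = (tx->write + count) % RING_SIZE;

	printf("doorbell: tell hardware to fetch new descriptors\n");

	/* keep headroom for a maximally fragmented frame */
	if (desc_available(tx) < MAX_FRAGS + 1)
		tx->queue_stopped = true;

	return true;
}

int main(void)
{
	struct fake_tx tx = { 0 };

	/* transmit 3-fragment frames until the ring runs out of headroom */
	while (!tx.queue_stopped)
		fake_xmit(&tx, 3);

	printf("queue stopped with %d descriptors free\n", desc_available(&tx));
	return 0;
}

Stopping the queue while descriptors are still outstanding is what later allows the poll path to wake it once completions free enough space.
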
473 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
481 spin_lock_irqsave(&tx->lock, flags); in tsnep_tx_poll()
484 if (tx->read == tx->write) in tsnep_tx_poll()
487 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
502 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
527 tx->read = (tx->read + count) % TSNEP_RING_SIZE; in tsnep_tx_poll()
529 tx->packets++; in tsnep_tx_poll()
530 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
535 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
536 netif_queue_stopped(tx->adapter->netdev)) { in tsnep_tx_poll()
537 netif_wake_queue(tx->adapter->netdev); in tsnep_tx_poll()
540 spin_unlock_irqrestore(&tx->lock, flags); in tsnep_tx_poll()
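
The completion side, sketched in the same style: tsnep_tx_poll() walks from the read index towards the write index within the NAPI budget, unmaps each finished frame's descriptors, adds the frame length plus ETH_FCS_LEN to the byte counter, and wakes the stopped queue only once at least (MAX_SKB_FRAGS + 1) * 2 descriptors are free so it is not immediately stopped again. The sizes, per-frame descriptor count and frame length below are made up for the simulation:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 16
#define MAX_FRAGS  4
#define FCS_LEN    4 /* Ethernet FCS, added to the byte counters */

struct fake_tx {
	int read;
	int write;
	bool queue_stopped;
	unsigned long packets;
	unsigned long bytes;
};

static int desc_available(const struct fake_tx *tx)
{
	if (tx->read <= tx->write)
		return RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

static void fake_tx_poll(struct fake_tx *tx, int budget)
{
	while (budget-- > 0) {
		int count = 2;   /* pretend every frame used two descriptors */
		int length = 60; /* pretend frame length in bytes */

		if (tx->read == tx->write)
			break; /* nothing left to complete */

		/* real code unmaps the DMA buffers of all count entries here */
		tx->read = (tx->read + count) % RING_SIZE;

		tx->packets++;
		tx->bytes += length + FCS_LEN;
	}

	/* wake the queue only with comfortable headroom, to avoid ping-pong */
	if (desc_available(tx) >= (MAX_FRAGS + 1) * 2 && tx->queue_stopped) {
		tx->queue_stopped = false;
		printf("queue woken\n");
	}
}

int main(void)
{
	/* six frames of two descriptors each are outstanding, queue stopped */
	struct fake_tx tx = { .read = 0, .write = 12, .queue_stopped = true };

	fake_tx_poll(&tx, 64);
	printf("completed %lu packets, %lu bytes, %d descriptors free\n",
	       tx.packets, tx.bytes, desc_available(&tx));
	return 0;
}
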
545 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
551 spin_lock_irqsave(&tx->lock, flags); in tsnep_tx_pending()
553 if (tx->read != tx->write) { in tsnep_tx_pending()
554 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
561 spin_unlock_irqrestore(&tx->lock, flags); in tsnep_tx_pending()
567 int queue_index, struct tsnep_tx *tx) in tsnep_tx_open() argument
572 memset(tx, 0, sizeof(*tx)); in tsnep_tx_open()
573 tx->adapter = adapter; in tsnep_tx_open()
574 tx->addr = addr; in tsnep_tx_open()
575 tx->queue_index = queue_index; in tsnep_tx_open()
577 retval = tsnep_tx_ring_init(tx); in tsnep_tx_open()
581 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_open()
582 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_open()
583 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_open()
584 tx->owner_counter = 1; in tsnep_tx_open()
585 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_open()
587 spin_lock_init(&tx->lock); in tsnep_tx_open()
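
tsnep_tx_open() above programs the ring into the hardware by taking descriptor 0's DMA address, OR-ing TSNEP_RESET_OWNER_COUNTER into it, and writing it as two 32-bit registers. A sketch of that split, assuming a placeholder flag value and register names, since the real TSNEP_* definitions are not part of this listing:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_RESET_OWNER_COUNTER 0x1ull /* placeholder flag in the low bits */

static uint32_t dma_addr_low(uint64_t dma)
{
	return (uint32_t)(dma & 0xffffffffull);
}

static uint32_t dma_addr_high(uint64_t dma)
{
	return (uint32_t)(dma >> 32);
}

/* stand-in for iowrite32(): just show what would hit the register */
static void fake_iowrite32(uint32_t value, const char *reg)
{
	printf("%s <- 0x%08" PRIx32 "\n", reg, value);
}

int main(void)
{
	uint64_t desc0_dma = 0x0000000123456000ull; /* pretend descriptor 0 address */
	uint64_t dma = desc0_dma | FAKE_RESET_OWNER_COUNTER;

	fake_iowrite32(dma_addr_low(dma), "TX_DESC_ADDR_LOW");
	fake_iowrite32(dma_addr_high(dma), "TX_DESC_ADDR_HIGH");
	return 0;
}
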
592 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
596 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_close()
600 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
905 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
921 if (queue->tx) in tsnep_poll()
922 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
962 if (queue->tx && queue->rx) in tsnep_request_irq()
965 else if (queue->tx) in tsnep_request_irq()
966 sprintf(queue->name, "%s-tx-%d", name, in tsnep_request_irq()
967 queue->tx->queue_index); in tsnep_request_irq()
1011 if (adapter->queue[i].tx) { in tsnep_netdev_open()
1014 adapter->queue[i].tx); in tsnep_netdev_open()
1071 if (adapter->queue[i].tx) in tsnep_netdev_open()
1072 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
1095 if (adapter->queue[i].tx) in tsnep_netdev_close()
1096 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
1111 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
1149 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
1150 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
1151 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
1360 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
1371 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
1377 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
1390 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
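
A sketch of the queue wiring that tsnep_queue_init() performs, under the assumption stated in the comments above: the first TX/RX pair is mandatory, and further pairs are added only while a dedicated interrupt is available. The structures and the interrupt count are invented for illustration.

#include <stdio.h>

#define MAX_QUEUES 4

struct fake_tx {
	int queue_index;
};

struct fake_queue {
	struct fake_tx *tx;
};

int main(void)
{
	struct fake_tx tx[MAX_QUEUES];
	struct fake_queue queue[MAX_QUEUES];
	int available_irqs = 3; /* pretend the platform provides 3 interrupts */
	int num_queues = 1;
	int i;

	/* one TX/RX queue pair for the netdev is mandatory */
	tx[0].queue_index = 0;
	queue[0].tx = &tx[0];

	/* additional pairs only if a dedicated interrupt can be requested */
	for (i = 1; i < MAX_QUEUES && i < available_irqs; i++) {
		tx[i].queue_index = i;
		queue[i].tx = &tx[i];
		num_queues++;
	}

	printf("initialized %d TX/RX queue pair(s)\n", num_queues);
	for (i = 0; i < num_queues; i++)
		printf("queue %d -> tx ring %d\n", i, queue[i].tx->queue_index);
	return 0;
}
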