Lines Matching +full:rx +full:- +full:tx

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
6 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
14 * - www.embedded-experts.at/tsn
15 * - www.engleder-embedded.com
36 #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
38 /* XSK buffer shall store at least a Q-in-Q frame */
52 ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
66 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); in tsnep_enable_irq()
72 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); in tsnep_disable_irq()
78 u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE); in tsnep_irq()
82 iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE); in tsnep_irq()
86 phy_mac_interrupt(adapter->netdev->phydev); in tsnep_irq()
88 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
89 if ((active & adapter->queue[0].irq_mask) != 0) { in tsnep_irq()
90 if (napi_schedule_prep(&adapter->queue[0].napi)) { in tsnep_irq()
91 tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); in tsnep_irq()
93 __napi_schedule(&adapter->queue[0].napi); in tsnep_irq()
104 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
105 if (napi_schedule_prep(&queue->napi)) { in tsnep_irq_txrx()
106 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_irq_txrx()
108 __napi_schedule(&queue->napi); in tsnep_irq_txrx()
117 return -ERANGE; in tsnep_set_irq_coalesce()
123 queue->irq_delay &= ~ECM_INT_DELAY_MASK; in tsnep_set_irq_coalesce()
124 queue->irq_delay |= usecs; in tsnep_set_irq_coalesce()
125 iowrite8(queue->irq_delay, queue->irq_delay_addr); in tsnep_set_irq_coalesce()
134 usecs = (queue->irq_delay & ECM_INT_DELAY_MASK); in tsnep_get_irq_coalesce()
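
The two coalesce fragments above round a requested delay into a small register field and read it back the same way. A minimal standalone sketch of that pack/unpack, with stand-in values for ECM_INT_DELAY_BASE_US and ECM_INT_DELAY_MASK (the real constants are not part of this excerpt; the 8-bit field is assumed from the iowrite8() above):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DELAY_BASE_US 16u    /* assumed granularity of one register step */
#define DELAY_MASK    0xffu  /* assumed 8-bit delay field */
#define DELAY_MAX_US  (DELAY_MASK * DELAY_BASE_US + DELAY_BASE_US - 1)

static int set_coalesce(uint8_t *reg, uint32_t usecs)
{
	if (usecs > DELAY_MAX_US)
		return -ERANGE;
	/* keep other bits, replace the delay field, round down to base units */
	*reg = (uint8_t)((*reg & ~DELAY_MASK) |
			 ((usecs / DELAY_BASE_US) & DELAY_MASK));
	return 0;
}

static uint32_t get_coalesce(uint8_t reg)
{
	return (reg & DELAY_MASK) * DELAY_BASE_US;
}

int main(void)
{
	uint8_t reg = 0;

	if (!set_coalesce(&reg, 100))
		printf("asked for 100 us, effective %u us\n", get_coalesce(reg));
	return 0;
}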
143 struct tsnep_adapter *adapter = bus->priv; in tsnep_mdiobus_read()
148 if (!adapter->suppress_preamble) in tsnep_mdiobus_read()
152 iowrite32(md, adapter->addr + ECM_MD_CONTROL); in tsnep_mdiobus_read()
153 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, in tsnep_mdiobus_read()
164 struct tsnep_adapter *adapter = bus->priv; in tsnep_mdiobus_write()
169 if (!adapter->suppress_preamble) in tsnep_mdiobus_write()
174 iowrite32(md, adapter->addr + ECM_MD_CONTROL); in tsnep_mdiobus_write()
175 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, in tsnep_mdiobus_write()
187 switch (adapter->phydev->speed) { in tsnep_set_link_mode()
198 iowrite32(mode, adapter->addr + ECM_STATUS); in tsnep_set_link_mode()
204 struct phy_device *phydev = netdev->phydev; in tsnep_phy_link_status_change()
206 if (phydev->link) in tsnep_phy_link_status_change()
209 phy_print_status(netdev->phydev); in tsnep_phy_link_status_change()
216 retval = phy_loopback(adapter->phydev, enable); in tsnep_phy_loopback()
234 retval = phy_connect_direct(adapter->netdev, adapter->phydev, in tsnep_phy_open()
236 adapter->phy_mode); in tsnep_phy_open()
239 phydev = adapter->netdev->phydev; in tsnep_phy_open()
251 phy_ethtool_set_eee(adapter->phydev, &ethtool_eee); in tsnep_phy_open()
253 adapter->phydev->irq = PHY_MAC_INTERRUPT; in tsnep_phy_open()
254 phy_start(adapter->phydev); in tsnep_phy_open()
261 phy_stop(adapter->netdev->phydev); in tsnep_phy_close()
262 phy_disconnect(adapter->netdev->phydev); in tsnep_phy_close()
265 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) in tsnep_tx_ring_cleanup() argument
267 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
270 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
273 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
274 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
275 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
276 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
277 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
282 static int tsnep_tx_ring_create(struct tsnep_tx *tx) in tsnep_tx_ring_create() argument
284 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
291 tx->page[i] = in tsnep_tx_ring_create()
292 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
294 if (!tx->page[i]) { in tsnep_tx_ring_create()
295 retval = -ENOMEM; in tsnep_tx_ring_create()
299 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
300 entry->desc_wb = (struct tsnep_tx_desc_wb *) in tsnep_tx_ring_create()
301 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
302 entry->desc = (struct tsnep_tx_desc *) in tsnep_tx_ring_create()
303 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); in tsnep_tx_ring_create()
304 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
305 entry->owner_user_flag = false; in tsnep_tx_ring_create()
309 entry = &tx->entry[i]; in tsnep_tx_ring_create()
310 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
311 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); in tsnep_tx_ring_create()
317 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_create()
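
Each DMA-coherent page allocated above is carved into TSNEP_RING_ENTRIES_PER_PAGE slots: the write-back area sits at the start of a slot, the descriptor itself at TSNEP_DESC_OFFSET inside it, and desc_dma records the bus address of the slot for the next-pointer chain. A standalone sketch of that address arithmetic, with made-up sizes (the real TSNEP_DESC_* constants are not part of this excerpt):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE        4096u
#define DESC_SIZE        256u  /* assumed slot size per ring entry */
#define DESC_OFFSET      128u  /* assumed offset of the descriptor after its write-back area */
#define ENTRIES_PER_PAGE (PAGE_SIZE / DESC_SIZE)

int main(void)
{
	uint8_t page[PAGE_SIZE];            /* stands in for dma_alloc_coherent() memory */
	uint64_t page_dma = 0x40000000ULL;  /* pretend bus address of the page */

	for (unsigned int j = 0; j < ENTRIES_PER_PAGE; j++) {
		void *desc_wb = page + DESC_SIZE * j;           /* hardware writes status here */
		void *desc = (uint8_t *)desc_wb + DESC_OFFSET;  /* driver fills this part */
		uint64_t desc_dma = page_dma + (uint64_t)DESC_SIZE * j;

		printf("entry %2u: wb %p desc %p dma 0x%llx\n", j, desc_wb,
		       desc, (unsigned long long)desc_dma);
	}
	return 0;
}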
321 static void tsnep_tx_init(struct tsnep_tx *tx) in tsnep_tx_init() argument
325 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
326 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
327 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
328 tx->write = 0; in tsnep_tx_init()
329 tx->read = 0; in tsnep_tx_init()
330 tx->owner_counter = 1; in tsnep_tx_init()
331 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
334 static void tsnep_tx_enable(struct tsnep_tx *tx) in tsnep_tx_enable() argument
338 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
345 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi) in tsnep_tx_disable() argument
350 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
356 /* wait until TX is done in hardware */ in tsnep_tx_disable()
357 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
361 /* wait until TX is also done in software */ in tsnep_tx_disable()
362 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
368 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
371 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
373 entry->properties = 0; in tsnep_tx_activate()
375 if (entry->skb) { in tsnep_tx_activate()
376 entry->properties = length & TSNEP_DESC_LENGTH_MASK; in tsnep_tx_activate()
377 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; in tsnep_tx_activate()
378 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_activate()
379 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)) in tsnep_tx_activate()
380 entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; in tsnep_tx_activate()
400 entry->owner_user_flag = !entry->owner_user_flag; in tsnep_tx_activate()
403 entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG; in tsnep_tx_activate()
404 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
405 tx->owner_counter++; in tsnep_tx_activate()
406 if (tx->owner_counter == 4) in tsnep_tx_activate()
407 tx->owner_counter = 1; in tsnep_tx_activate()
408 tx->increment_owner_counter--; in tsnep_tx_activate()
409 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
410 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
412 entry->properties |= in tsnep_tx_activate()
413 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
415 if (entry->owner_user_flag) in tsnep_tx_activate()
416 entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; in tsnep_tx_activate()
417 entry->desc->more_properties = in tsnep_tx_activate()
418 __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); in tsnep_tx_activate()
425 entry->desc->properties = __cpu_to_le32(entry->properties); in tsnep_tx_activate()
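
The owner counter written into the properties word cycles through 1..3 and is bumped once per trip around the ring, with the bump point itself walking backwards one slot per lap, so a descriptor left over from an earlier lap cannot match the counter the poll routine compares against. A small model of just that advance, assuming a power-of-two TSNEP_RING_SIZE of 256 (which the & TSNEP_RING_MASK above implies):

#include <stdio.h>

#define RING_SIZE 256  /* assumed ring size */

int main(void)
{
	int owner_counter = 1;                       /* mirrors tsnep_tx_init() */
	int increment_owner_counter = RING_SIZE - 1;

	for (int n = 0; n < 3 * RING_SIZE; n++) {
		int index = n % RING_SIZE;

		if (index == increment_owner_counter) {
			if (++owner_counter == 4)
				owner_counter = 1;
			if (--increment_owner_counter < 0)
				increment_owner_counter = RING_SIZE - 1;
		}
		if (index == 0)
			printf("descriptor %d written with owner counter %d\n",
			       n, owner_counter);
	}
	return 0;
}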
428 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
430 if (tx->read <= tx->write) in tsnep_tx_desc_available()
431 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
433 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
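
This is the usual ring-buffer free count that sacrifices one slot so that read == write can only mean an empty ring, never a full one. A few worked cases, again assuming a 256-entry ring:

#include <assert.h>

#define RING_SIZE 256

static int desc_available(int read, int write)
{
	if (read <= write)
		return RING_SIZE - write + read - 1;
	return read - write - 1;
}

int main(void)
{
	assert(desc_available(0, 0) == RING_SIZE - 1);  /* empty: all but the spare slot */
	assert(desc_available(10, 9) == 0);             /* full: write is one behind read */
	assert(desc_available(10, 100) == 165);         /* 90 descriptors in flight */
	assert(desc_available(100, 10) == 89);          /* writer wrapped: 166 in flight */
	return 0;
}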
436 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) in tsnep_tx_map() argument
438 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
446 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
450 dma = dma_map_single(dmadev, skb->data, len, in tsnep_tx_map()
453 entry->type = TSNEP_TX_TYPE_SKB; in tsnep_tx_map()
455 len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); in tsnep_tx_map()
457 &skb_shinfo(skb)->frags[i - 1], in tsnep_tx_map()
460 entry->type = TSNEP_TX_TYPE_SKB_FRAG; in tsnep_tx_map()
463 return -ENOMEM; in tsnep_tx_map()
465 entry->len = len; in tsnep_tx_map()
468 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
476 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
478 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
484 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
486 if (entry->len) { in tsnep_tx_unmap()
487 if (entry->type & TSNEP_TX_TYPE_SKB) in tsnep_tx_unmap()
492 else if (entry->type & in tsnep_tx_unmap()
498 map_len += entry->len; in tsnep_tx_unmap()
499 entry->len = 0; in tsnep_tx_unmap()
507 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
515 if (skb_shinfo(skb)->nr_frags > 0) in tsnep_xmit_frame_ring()
516 count += skb_shinfo(skb)->nr_frags; in tsnep_xmit_frame_ring()
518 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
522 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
527 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
528 entry->skb = skb; in tsnep_xmit_frame_ring()
530 retval = tsnep_tx_map(skb, tx, count); in tsnep_xmit_frame_ring()
532 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
533 dev_kfree_skb_any(entry->skb); in tsnep_xmit_frame_ring()
534 entry->skb = NULL; in tsnep_xmit_frame_ring()
536 tx->dropped++; in tsnep_xmit_frame_ring()
542 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) in tsnep_xmit_frame_ring()
543 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in tsnep_xmit_frame_ring()
546 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
547 i == count - 1); in tsnep_xmit_frame_ring()
548 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
555 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
557 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
559 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
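
The transmit path stops the subqueue once fewer than MAX_SKB_FRAGS + 1 descriptors remain, since a worst-case skb needs that many, and the completion path in tsnep_tx_poll() further below wakes it only at twice that level, so the queue does not flap at the boundary. A toy model of the two thresholds (MAX_SKB_FRAGS is assumed to be 17 here):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS  17
#define STOP_LEVEL     (MAX_SKB_FRAGS + 1)        /* worst-case frame no longer fits */
#define WAKE_LEVEL     ((MAX_SKB_FRAGS + 1) * 2)  /* wake only with real headroom */

int main(void)
{
	bool stopped = false;

	/* xmit consumes descriptors ... */
	for (int avail = 60; avail >= 0; avail -= 20) {
		if (!stopped && avail < STOP_LEVEL)
			stopped = true;
		printf("xmit: %2d free -> %s\n", avail, stopped ? "stopped" : "running");
	}
	/* ... completion frees them again */
	for (int avail = 0; avail <= 60; avail += 20) {
		if (stopped && avail >= WAKE_LEVEL)
			stopped = false;
		printf("poll: %2d free -> %s\n", avail, stopped ? "stopped" : "running");
	}
	return 0;
}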
565 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, in tsnep_xdp_tx_map() argument
568 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
579 len = xdpf->len; in tsnep_xdp_tx_map()
581 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
584 xdpf->data; in tsnep_xdp_tx_map()
587 return -ENOMEM; in tsnep_xdp_tx_map()
589 entry->type = TSNEP_TX_TYPE_XDP_NDO; in tsnep_xdp_tx_map()
592 virt_to_page(xdpf->data); in tsnep_xdp_tx_map()
597 dma += sizeof(*xdpf) + xdpf->headroom; in tsnep_xdp_tx_map()
601 entry->type = TSNEP_TX_TYPE_XDP_TX; in tsnep_xdp_tx_map()
604 entry->len = len; in tsnep_xdp_tx_map()
607 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
612 frag = &shinfo->frags[i]; in tsnep_xdp_tx_map()
622 struct tsnep_tx *tx, u32 type) in tsnep_xdp_xmit_frame_ring() argument
630 count += shinfo->nr_frags; in tsnep_xdp_xmit_frame_ring()
632 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_frame_ring()
633 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_frame_ring()
636 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) in tsnep_xdp_xmit_frame_ring()
639 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
640 entry->xdpf = xdpf; in tsnep_xdp_xmit_frame_ring()
642 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); in tsnep_xdp_xmit_frame_ring()
644 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
645 entry->xdpf = NULL; in tsnep_xdp_xmit_frame_ring()
647 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
654 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
655 i == count - 1); in tsnep_xdp_xmit_frame_ring()
656 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
664 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) in tsnep_xdp_xmit_flush() argument
666 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
671 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_xmit_back() argument
681 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX); in tsnep_xdp_xmit_back()
692 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) in tsnep_xdp_tx_map_zc() argument
697 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
698 entry->zc = true; in tsnep_xdp_tx_map_zc()
700 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
701 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
703 entry->type = TSNEP_TX_TYPE_XSK; in tsnep_xdp_tx_map_zc()
704 entry->len = xdpd->len; in tsnep_xdp_tx_map_zc()
706 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
708 return xdpd->len; in tsnep_xdp_tx_map_zc()
712 struct tsnep_tx *tx) in tsnep_xdp_xmit_frame_ring_zc() argument
716 length = tsnep_xdp_tx_map_zc(xdpd, tx); in tsnep_xdp_xmit_frame_ring_zc()
718 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
719 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
722 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) in tsnep_xdp_xmit_zc() argument
724 int desc_available = tsnep_tx_desc_available(tx); in tsnep_xdp_xmit_zc()
725 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
728 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_zc()
729 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_zc()
734 desc_available -= MAX_SKB_FRAGS + 1; in tsnep_xdp_xmit_zc()
736 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
738 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); in tsnep_xdp_xmit_zc()
746 tsnep_xdp_xmit_flush(tx); in tsnep_xdp_xmit_zc()
750 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
759 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
763 if (tx->read == tx->write) in tsnep_tx_poll()
766 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
767 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_poll()
769 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) in tsnep_tx_poll()
778 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_poll()
779 skb_shinfo(entry->skb)->nr_frags > 0) in tsnep_tx_poll()
780 count += skb_shinfo(entry->skb)->nr_frags; in tsnep_tx_poll()
781 else if ((entry->type & TSNEP_TX_TYPE_XDP) && in tsnep_tx_poll()
782 xdp_frame_has_frags(entry->xdpf)) in tsnep_tx_poll()
783 count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags; in tsnep_tx_poll()
785 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
787 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_poll()
788 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && in tsnep_tx_poll()
789 (__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_poll()
794 if (skb_shinfo(entry->skb)->tx_flags & in tsnep_tx_poll()
797 __le64_to_cpu(entry->desc_wb->counter); in tsnep_tx_poll()
800 __le64_to_cpu(entry->desc_wb->timestamp); in tsnep_tx_poll()
805 skb_tstamp_tx(entry->skb, &hwtstamps); in tsnep_tx_poll()
808 if (entry->type & TSNEP_TX_TYPE_SKB) in tsnep_tx_poll()
809 napi_consume_skb(entry->skb, napi_budget); in tsnep_tx_poll()
810 else if (entry->type & TSNEP_TX_TYPE_XDP) in tsnep_tx_poll()
811 xdp_return_frame_rx_napi(entry->xdpf); in tsnep_tx_poll()
815 entry->skb = NULL; in tsnep_tx_poll()
817 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
819 tx->packets++; in tsnep_tx_poll()
820 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
822 budget--; in tsnep_tx_poll()
825 if (tx->xsk_pool) { in tsnep_tx_poll()
827 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
828 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
829 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
830 tsnep_xdp_xmit_zc(tx); in tsnep_tx_poll()
833 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
843 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
849 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
852 if (tx->read != tx->write) { in tsnep_tx_pending()
853 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
854 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_pending()
856 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) in tsnep_tx_pending()
865 static int tsnep_tx_open(struct tsnep_tx *tx) in tsnep_tx_open() argument
869 retval = tsnep_tx_ring_create(tx); in tsnep_tx_open()
873 tsnep_tx_init(tx); in tsnep_tx_open()
878 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
880 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
883 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) in tsnep_rx_ring_cleanup() argument
885 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_cleanup()
890 entry = &rx->entry[i]; in tsnep_rx_ring_cleanup()
891 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
892 page_pool_put_full_page(rx->page_pool, entry->page, in tsnep_rx_ring_cleanup()
894 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
895 xsk_buff_free(entry->xdp); in tsnep_rx_ring_cleanup()
897 entry->page = NULL; in tsnep_rx_ring_cleanup()
900 if (rx->page_pool) in tsnep_rx_ring_cleanup()
901 page_pool_destroy(rx->page_pool); in tsnep_rx_ring_cleanup()
903 memset(rx->entry, 0, sizeof(rx->entry)); in tsnep_rx_ring_cleanup()
906 if (rx->page[i]) { in tsnep_rx_ring_cleanup()
907 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], in tsnep_rx_ring_cleanup()
908 rx->page_dma[i]); in tsnep_rx_ring_cleanup()
909 rx->page[i] = NULL; in tsnep_rx_ring_cleanup()
910 rx->page_dma[i] = 0; in tsnep_rx_ring_cleanup()
915 static int tsnep_rx_ring_create(struct tsnep_rx *rx) in tsnep_rx_ring_create() argument
917 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_create()
925 rx->page[i] = in tsnep_rx_ring_create()
926 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], in tsnep_rx_ring_create()
928 if (!rx->page[i]) { in tsnep_rx_ring_create()
929 retval = -ENOMEM; in tsnep_rx_ring_create()
933 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_rx_ring_create()
934 entry->desc_wb = (struct tsnep_rx_desc_wb *) in tsnep_rx_ring_create()
935 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_rx_ring_create()
936 entry->desc = (struct tsnep_rx_desc *) in tsnep_rx_ring_create()
937 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); in tsnep_rx_ring_create()
938 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_rx_ring_create()
950 rx->page_pool = page_pool_create(&pp_params); in tsnep_rx_ring_create()
951 if (IS_ERR(rx->page_pool)) { in tsnep_rx_ring_create()
952 retval = PTR_ERR(rx->page_pool); in tsnep_rx_ring_create()
953 rx->page_pool = NULL; in tsnep_rx_ring_create()
958 entry = &rx->entry[i]; in tsnep_rx_ring_create()
959 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_rx_ring_create()
960 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); in tsnep_rx_ring_create()
966 tsnep_rx_ring_cleanup(rx); in tsnep_rx_ring_create()
970 static void tsnep_rx_init(struct tsnep_rx *rx) in tsnep_rx_init() argument
974 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_rx_init()
975 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); in tsnep_rx_init()
976 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); in tsnep_rx_init()
977 rx->write = 0; in tsnep_rx_init()
978 rx->read = 0; in tsnep_rx_init()
979 rx->owner_counter = 1; in tsnep_rx_init()
980 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_init()
983 static void tsnep_rx_enable(struct tsnep_rx *rx) in tsnep_rx_enable() argument
988 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_enable()
991 static void tsnep_rx_disable(struct tsnep_rx *rx) in tsnep_rx_disable() argument
995 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_disable()
996 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, in tsnep_rx_disable()
1001 static int tsnep_rx_desc_available(struct tsnep_rx *rx) in tsnep_rx_desc_available() argument
1003 if (rx->read <= rx->write) in tsnep_rx_desc_available()
1004 return TSNEP_RING_SIZE - rx->write + rx->read - 1; in tsnep_rx_desc_available()
1006 return rx->read - rx->write - 1; in tsnep_rx_desc_available()
1009 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) in tsnep_rx_free_page_buffer() argument
1016 page = rx->page_buffer; in tsnep_rx_free_page_buffer()
1018 page_pool_put_full_page(rx->page_pool, *page, false); in tsnep_rx_free_page_buffer()
1024 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) in tsnep_rx_alloc_page_buffer() argument
1031 for (i = 0; i < TSNEP_RING_SIZE - 1; i++) { in tsnep_rx_alloc_page_buffer()
1032 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_page_buffer()
1033 if (!rx->page_buffer[i]) { in tsnep_rx_alloc_page_buffer()
1034 tsnep_rx_free_page_buffer(rx); in tsnep_rx_alloc_page_buffer()
1036 return -ENOMEM; in tsnep_rx_alloc_page_buffer()
1043 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_page() argument
1046 entry->page = page; in tsnep_rx_set_page()
1047 entry->len = TSNEP_MAX_RX_BUF_SIZE; in tsnep_rx_set_page()
1048 entry->dma = page_pool_get_dma_addr(entry->page); in tsnep_rx_set_page()
1049 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); in tsnep_rx_set_page()
1052 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_alloc_buffer() argument
1054 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_buffer()
1057 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_buffer()
1059 return -ENOMEM; in tsnep_rx_alloc_buffer()
1060 tsnep_rx_set_page(rx, entry, page); in tsnep_rx_alloc_buffer()
1065 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer() argument
1067 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer()
1068 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer()
1070 tsnep_rx_set_page(rx, entry, read->page); in tsnep_rx_reuse_buffer()
1071 read->page = NULL; in tsnep_rx_reuse_buffer()
1074 static void tsnep_rx_activate(struct tsnep_rx *rx, int index) in tsnep_rx_activate() argument
1076 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_activate()
1079 entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; in tsnep_rx_activate()
1080 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; in tsnep_rx_activate()
1081 if (index == rx->increment_owner_counter) { in tsnep_rx_activate()
1082 rx->owner_counter++; in tsnep_rx_activate()
1083 if (rx->owner_counter == 4) in tsnep_rx_activate()
1084 rx->owner_counter = 1; in tsnep_rx_activate()
1085 rx->increment_owner_counter--; in tsnep_rx_activate()
1086 if (rx->increment_owner_counter < 0) in tsnep_rx_activate()
1087 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_activate()
1089 entry->properties |= in tsnep_rx_activate()
1090 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_rx_activate()
1098 entry->desc->properties = __cpu_to_le32(entry->properties); in tsnep_rx_activate()
1101 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc() argument
1107 index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1109 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { in tsnep_rx_alloc()
1110 rx->alloc_failed++; in tsnep_rx_alloc()
1115 tsnep_rx_reuse_buffer(rx, index); in tsnep_rx_alloc()
1120 tsnep_rx_activate(rx, index); in tsnep_rx_alloc()
1124 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1129 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill() argument
1133 desc_refilled = tsnep_rx_alloc(rx, count, reuse); in tsnep_rx_refill()
1135 tsnep_rx_enable(rx); in tsnep_rx_refill()
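
When page allocation fails during refill, the reuse path above recycles the oldest filled buffer (the one at rx->read) into the slot being refilled: its frame is sacrificed, but hardware is never left with an empty ring it cannot write into. A standalone sketch of that fallback; the exact policy around `reuse` is elided from this excerpt, so the sketch simply assumes one reuse is allowed per call:

#include <stdbool.h>
#include <stddef.h>

#define RING_SIZE 256
#define RING_MASK (RING_SIZE - 1)

struct entry { void *page; };

/* stand-in allocator; returns NULL to simulate memory pressure */
static void *alloc_page_stub(void) { return NULL; }

static int refill(struct entry *ring, int *read, int *write, int count, bool reuse)
{
	int i;

	for (i = 0; i < count; i++) {
		int index = (*write + i) & RING_MASK;

		ring[index].page = alloc_page_stub();
		if (!ring[index].page) {
			if (!reuse)
				break;
			/* steal the oldest buffer so hardware keeps one descriptor */
			ring[index].page = ring[*read].page;
			ring[*read].page = NULL;
			reuse = false;
			if (!ring[index].page)
				break;
		}
	}
	*write = (*write + i) & RING_MASK;
	return i;
}

int main(void)
{
	static struct entry ring[RING_SIZE];
	int read = 0, write = 1;
	int dummy;

	ring[read].page = &dummy;  /* pretend the oldest slot holds a filled buffer */
	return refill(ring, &read, &write, 4, true) == 1 ? 0 : 1;
}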
1140 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_xdp() argument
1143 entry->xdp = xdp; in tsnep_rx_set_xdp()
1144 entry->len = TSNEP_XSK_RX_BUF_SIZE; in tsnep_rx_set_xdp()
1145 entry->dma = xsk_buff_xdp_get_dma(entry->xdp); in tsnep_rx_set_xdp()
1146 entry->desc->rx = __cpu_to_le64(entry->dma); in tsnep_rx_set_xdp()
1149 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer_zc() argument
1151 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer_zc()
1152 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer_zc()
1154 tsnep_rx_set_xdp(rx, entry, read->xdp); in tsnep_rx_reuse_buffer_zc()
1155 read->xdp = NULL; in tsnep_rx_reuse_buffer_zc()
1158 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc_zc() argument
1163 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); in tsnep_rx_alloc_zc()
1165 int index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1166 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_zc()
1168 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); in tsnep_rx_alloc_zc()
1169 tsnep_rx_activate(rx, index); in tsnep_rx_alloc_zc()
1172 rx->alloc_failed++; in tsnep_rx_alloc_zc()
1175 tsnep_rx_reuse_buffer_zc(rx, rx->write); in tsnep_rx_alloc_zc()
1176 tsnep_rx_activate(rx, rx->write); in tsnep_rx_alloc_zc()
1181 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1186 static void tsnep_rx_free_zc(struct tsnep_rx *rx) in tsnep_rx_free_zc() argument
1191 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_free_zc()
1193 if (entry->xdp) in tsnep_rx_free_zc()
1194 xsk_buff_free(entry->xdp); in tsnep_rx_free_zc()
1195 entry->xdp = NULL; in tsnep_rx_free_zc()
1199 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill_zc() argument
1203 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); in tsnep_rx_refill_zc()
1205 tsnep_rx_enable(rx); in tsnep_rx_refill_zc()
1210 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog() argument
1212 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_run_prog() argument
1218 length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; in tsnep_xdp_run_prog()
1225 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) in tsnep_xdp_run_prog()
1230 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog()
1235 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1239 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1245 sync = xdp->data_end - xdp->data_hard_start - in tsnep_xdp_run_prog()
1248 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), in tsnep_xdp_run_prog()
1254 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog_zc() argument
1257 struct tsnep_tx *tx) in tsnep_xdp_run_prog_zc() argument
1263 /* XDP_REDIRECT is the main action for zero-copy */ in tsnep_xdp_run_prog_zc()
1265 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog_zc()
1275 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) in tsnep_xdp_run_prog_zc()
1280 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1284 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1293 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_finalize_xdp() argument
1297 tsnep_xdp_xmit_flush(tx); in tsnep_finalize_xdp()
1305 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, in tsnep_build_skb() argument
1316 __skb_put(skb, length - ETH_FCS_LEN); in tsnep_build_skb()
1318 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { in tsnep_build_skb()
1324 skb_shinfo(skb)->tx_flags |= in tsnep_build_skb()
1327 hwtstamps->netdev_data = rx_inline; in tsnep_build_skb()
1330 skb_record_rx_queue(skb, rx->queue_index); in tsnep_build_skb()
1331 skb->protocol = eth_type_trans(skb, rx->adapter->netdev); in tsnep_build_skb()
1336 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_page() argument
1341 skb = tsnep_build_skb(rx, page, length); in tsnep_rx_page()
1345 rx->packets++; in tsnep_rx_page()
1346 rx->bytes += length; in tsnep_rx_page()
1347 if (skb->pkt_type == PACKET_MULTICAST) in tsnep_rx_page()
1348 rx->multicast++; in tsnep_rx_page()
1352 page_pool_recycle_direct(rx->page_pool, page); in tsnep_rx_page()
1354 rx->dropped++; in tsnep_rx_page()
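
As the comment inside tsnep_rx_poll() below notes, hardware places inline metadata (a free-running counter and a nanosecond timestamp) in front of every received frame, and the length it writes back covers metadata, frame and FCS, so tsnep_build_skb() above strips both. A sketch of the resulting buffer layout; the metadata size and field layout here are assumptions for illustration, not taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define ETH_FCS_LEN    4
#define METADATA_SIZE  16  /* assumed: counter + timestamp, 8 bytes each */

struct rx_inline {         /* assumed layout of the inline metadata */
	uint64_t counter;
	uint64_t timestamp;
};

int main(void)
{
	uint64_t raw[256] = { 0 };  /* 8-byte aligned stand-in for a receive buffer */
	const uint8_t *buffer = (const uint8_t *)raw;
	uint32_t hw_len = 80;       /* length written back by hardware */

	const struct rx_inline *meta = (const struct rx_inline *)raw;
	const uint8_t *frame = buffer + METADATA_SIZE;  /* Ethernet header starts here */
	uint32_t skb_len = hw_len - METADATA_SIZE - ETH_FCS_LEN;

	printf("counter %llu, ts %llu ns, frame at offset %u, %u bytes on the skb\n",
	       (unsigned long long)meta->counter,
	       (unsigned long long)meta->timestamp,
	       (unsigned int)(frame - buffer), (unsigned int)skb_len);
	return 0;
}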
1358 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll() argument
1361 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_poll()
1367 struct tsnep_tx *tx; in tsnep_rx_poll() local
1373 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll()
1374 dma_dir = page_pool_get_dma_dir(rx->page_pool); in tsnep_rx_poll()
1375 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll()
1377 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll()
1378 rx->tx_queue_index); in tsnep_rx_poll()
1379 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1381 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); in tsnep_rx_poll()
1384 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll()
1385 entry = &rx->entry[rx->read]; in tsnep_rx_poll()
1386 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll()
1388 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_poll()
1395 desc_available -= tsnep_rx_refill(rx, desc_available, in tsnep_rx_poll()
1397 if (!entry->page) { in tsnep_rx_poll()
1399 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll()
1400 * RX processing in tsnep_rx_poll()
1402 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1405 rx->dropped++; in tsnep_rx_poll()
1416 prefetch(page_address(entry->page) + TSNEP_RX_OFFSET); in tsnep_rx_poll()
1417 length = __le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll()
1419 dma_sync_single_range_for_cpu(dmadev, entry->dma, in tsnep_rx_poll()
1422 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll()
1424 * consider metadata size as offset of actual data during RX in tsnep_rx_poll()
1427 length -= TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll()
1429 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1435 xdp_prepare_buff(&xdp, page_address(entry->page), in tsnep_rx_poll()
1439 consume = tsnep_xdp_run_prog(rx, prog, &xdp, in tsnep_rx_poll()
1440 &xdp_status, tx_nq, tx); in tsnep_rx_poll()
1442 rx->packets++; in tsnep_rx_poll()
1443 rx->bytes += length; in tsnep_rx_poll()
1445 entry->page = NULL; in tsnep_rx_poll()
1451 tsnep_rx_page(rx, napi, entry->page, length); in tsnep_rx_poll()
1452 entry->page = NULL; in tsnep_rx_poll()
1456 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1459 tsnep_rx_refill(rx, desc_available, false); in tsnep_rx_poll()
1464 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll_zc() argument
1470 struct tsnep_tx *tx; in tsnep_rx_poll_zc() local
1477 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll_zc()
1478 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll_zc()
1480 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll_zc()
1481 rx->tx_queue_index); in tsnep_rx_poll_zc()
1482 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1485 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll_zc()
1486 entry = &rx->entry[rx->read]; in tsnep_rx_poll_zc()
1487 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll_zc()
1489 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_poll_zc()
1496 desc_available -= tsnep_rx_refill_zc(rx, desc_available, in tsnep_rx_poll_zc()
1498 if (!entry->xdp) { in tsnep_rx_poll_zc()
1500 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll_zc()
1501 * RX processing in tsnep_rx_poll_zc()
1503 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1506 rx->dropped++; in tsnep_rx_poll_zc()
1517 prefetch(entry->xdp->data); in tsnep_rx_poll_zc()
1518 length = __le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll_zc()
1520 xsk_buff_set_size(entry->xdp, length); in tsnep_rx_poll_zc()
1521 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool); in tsnep_rx_poll_zc()
1523 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll_zc()
1525 * consider metadata size as offset of actual data during RX in tsnep_rx_poll_zc()
1528 length -= TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1530 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1536 entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1537 entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1539 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, in tsnep_rx_poll_zc()
1540 &xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1542 rx->packets++; in tsnep_rx_poll_zc()
1543 rx->bytes += length; in tsnep_rx_poll_zc()
1545 entry->xdp = NULL; in tsnep_rx_poll_zc()
1551 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_poll_zc()
1554 entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE, in tsnep_rx_poll_zc()
1556 tsnep_rx_page(rx, napi, page, length); in tsnep_rx_poll_zc()
1558 rx->dropped++; in tsnep_rx_poll_zc()
1560 xsk_buff_free(entry->xdp); in tsnep_rx_poll_zc()
1561 entry->xdp = NULL; in tsnep_rx_poll_zc()
1565 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1568 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); in tsnep_rx_poll_zc()
1570 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_poll_zc()
1572 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_poll_zc()
1574 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_poll_zc()
1582 static bool tsnep_rx_pending(struct tsnep_rx *rx) in tsnep_rx_pending() argument
1586 if (rx->read != rx->write) { in tsnep_rx_pending()
1587 entry = &rx->entry[rx->read]; in tsnep_rx_pending()
1588 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_pending()
1590 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_pending()
1597 static int tsnep_rx_open(struct tsnep_rx *rx) in tsnep_rx_open() argument
1602 retval = tsnep_rx_ring_create(rx); in tsnep_rx_open()
1606 tsnep_rx_init(rx); in tsnep_rx_open()
1608 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_open()
1609 if (rx->xsk_pool) in tsnep_rx_open()
1610 retval = tsnep_rx_alloc_zc(rx, desc_available, false); in tsnep_rx_open()
1612 retval = tsnep_rx_alloc(rx, desc_available, false); in tsnep_rx_open()
1614 retval = -ENOMEM; in tsnep_rx_open()
1622 if (rx->xsk_pool) { in tsnep_rx_open()
1623 retval = tsnep_rx_alloc_page_buffer(rx); in tsnep_rx_open()
1631 tsnep_rx_ring_cleanup(rx); in tsnep_rx_open()
1635 static void tsnep_rx_close(struct tsnep_rx *rx) in tsnep_rx_close() argument
1637 if (rx->xsk_pool) in tsnep_rx_close()
1638 tsnep_rx_free_page_buffer(rx); in tsnep_rx_close()
1640 tsnep_rx_ring_cleanup(rx); in tsnep_rx_close()
1643 static void tsnep_rx_reopen(struct tsnep_rx *rx) in tsnep_rx_reopen() argument
1645 struct page **page = rx->page_buffer; in tsnep_rx_reopen()
1648 tsnep_rx_init(rx); in tsnep_rx_reopen()
1651 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen()
1656 entry->desc->properties = 0; in tsnep_rx_reopen()
1657 entry->desc_wb->properties = 0; in tsnep_rx_reopen()
1661 tsnep_rx_set_page(rx, entry, *page); in tsnep_rx_reopen()
1662 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen()
1663 rx->write++; in tsnep_rx_reopen()
1671 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) in tsnep_rx_reopen_xsk() argument
1673 struct page **page = rx->page_buffer; in tsnep_rx_reopen_xsk()
1677 tsnep_rx_init(rx); in tsnep_rx_reopen_xsk()
1683 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, in tsnep_rx_reopen_xsk()
1684 TSNEP_RING_SIZE - 1); in tsnep_rx_reopen_xsk()
1687 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen_xsk()
1692 if (entry->page) { in tsnep_rx_reopen_xsk()
1693 *page = entry->page; in tsnep_rx_reopen_xsk()
1694 entry->page = NULL; in tsnep_rx_reopen_xsk()
1702 entry->desc->properties = 0; in tsnep_rx_reopen_xsk()
1703 entry->desc_wb->properties = 0; in tsnep_rx_reopen_xsk()
1706 tsnep_rx_set_xdp(rx, entry, in tsnep_rx_reopen_xsk()
1707 rx->xdp_batch[allocated - 1]); in tsnep_rx_reopen_xsk()
1708 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen_xsk()
1709 rx->write++; in tsnep_rx_reopen_xsk()
1711 allocated--; in tsnep_rx_reopen_xsk()
1718 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1721 if (queue->rx && tsnep_rx_pending(queue->rx)) in tsnep_pending()
1734 if (queue->tx) in tsnep_poll()
1735 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1741 if (queue->rx) { in tsnep_poll()
1742 done = queue->rx->xsk_pool ? in tsnep_poll()
1743 tsnep_rx_poll_zc(queue->rx, napi, budget) : in tsnep_poll()
1744 tsnep_rx_poll(queue->rx, napi, budget); in tsnep_poll()
1754 tsnep_enable_irq(queue->adapter, queue->irq_mask); in tsnep_poll()
1761 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_poll()
1766 return min(done, budget - 1); in tsnep_poll()
1771 const char *name = netdev_name(queue->adapter->netdev); in tsnep_request_irq()
1777 sprintf(queue->name, "%s-mac", name); in tsnep_request_irq()
1779 dev = queue->adapter; in tsnep_request_irq()
1781 if (queue->tx && queue->rx) in tsnep_request_irq()
1782 sprintf(queue->name, "%s-txrx-%d", name, in tsnep_request_irq()
1783 queue->rx->queue_index); in tsnep_request_irq()
1784 else if (queue->tx) in tsnep_request_irq()
1785 sprintf(queue->name, "%s-tx-%d", name, in tsnep_request_irq()
1786 queue->tx->queue_index); in tsnep_request_irq()
1788 sprintf(queue->name, "%s-rx-%d", name, in tsnep_request_irq()
1789 queue->rx->queue_index); in tsnep_request_irq()
1794 retval = request_irq(queue->irq, handler, 0, queue->name, dev); in tsnep_request_irq()
1797 memset(queue->name, 0, sizeof(queue->name)); in tsnep_request_irq()
1807 if (!strlen(queue->name)) in tsnep_free_irq()
1811 dev = queue->adapter; in tsnep_free_irq()
1815 free_irq(queue->irq, dev); in tsnep_free_irq()
1816 memset(queue->name, 0, sizeof(queue->name)); in tsnep_free_irq()
1821 struct tsnep_rx *rx = queue->rx; in tsnep_queue_close() local
1825 if (rx) { in tsnep_queue_close()
1826 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in tsnep_queue_close()
1827 xdp_rxq_info_unreg(&rx->xdp_rxq); in tsnep_queue_close()
1828 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) in tsnep_queue_close()
1829 xdp_rxq_info_unreg(&rx->xdp_rxq_zc); in tsnep_queue_close()
1832 netif_napi_del(&queue->napi); in tsnep_queue_close()
1838 struct tsnep_rx *rx = queue->rx; in tsnep_queue_open() local
1839 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open() local
1842 netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll); in tsnep_queue_open()
1844 if (rx) { in tsnep_queue_open()
1845 /* choose TX queue for XDP_TX */ in tsnep_queue_open()
1846 if (tx) in tsnep_queue_open()
1847 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1848 else if (rx->queue_index < adapter->num_tx_queues) in tsnep_queue_open()
1849 rx->tx_queue_index = rx->queue_index; in tsnep_queue_open()
1851 rx->tx_queue_index = 0; in tsnep_queue_open()
1857 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, in tsnep_queue_open()
1858 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1861 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in tsnep_queue_open()
1863 rx->page_pool); in tsnep_queue_open()
1866 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, in tsnep_queue_open()
1867 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1870 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, in tsnep_queue_open()
1875 if (rx->xsk_pool) in tsnep_queue_open()
1876 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); in tsnep_queue_open()
1881 netif_err(adapter, drv, adapter->netdev, in tsnep_queue_open()
1882 "can't get assigned irq %d.\n", queue->irq); in tsnep_queue_open()
1896 napi_enable(&queue->napi); in tsnep_queue_enable()
1897 tsnep_enable_irq(queue->adapter, queue->irq_mask); in tsnep_queue_enable()
1899 if (queue->tx) in tsnep_queue_enable()
1900 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
1902 if (queue->rx) in tsnep_queue_enable()
1903 tsnep_rx_enable(queue->rx); in tsnep_queue_enable()
1908 if (queue->tx) in tsnep_queue_disable()
1909 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
1911 napi_disable(&queue->napi); in tsnep_queue_disable()
1912 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_queue_disable()
1914 /* disable RX after NAPI polling has been disabled, because RX can be in tsnep_queue_disable()
1917 if (queue->rx) in tsnep_queue_disable()
1918 tsnep_rx_disable(queue->rx); in tsnep_queue_disable()
1926 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_open()
1927 if (adapter->queue[i].tx) { in tsnep_netdev_open()
1928 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
1932 if (adapter->queue[i].rx) { in tsnep_netdev_open()
1933 retval = tsnep_rx_open(adapter->queue[i].rx); in tsnep_netdev_open()
1938 retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0); in tsnep_netdev_open()
1943 retval = netif_set_real_num_tx_queues(adapter->netdev, in tsnep_netdev_open()
1944 adapter->num_tx_queues); in tsnep_netdev_open()
1947 retval = netif_set_real_num_rx_queues(adapter->netdev, in tsnep_netdev_open()
1948 adapter->num_rx_queues); in tsnep_netdev_open()
1957 for (i = 0; i < adapter->num_queues; i++) in tsnep_netdev_open()
1958 tsnep_queue_enable(&adapter->queue[i]); in tsnep_netdev_open()
1965 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_open()
1966 tsnep_queue_close(&adapter->queue[i], i == 0); in tsnep_netdev_open()
1968 if (adapter->queue[i].rx) in tsnep_netdev_open()
1969 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_open()
1970 if (adapter->queue[i].tx) in tsnep_netdev_open()
1971 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
1984 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_close()
1985 tsnep_queue_disable(&adapter->queue[i]); in tsnep_netdev_close()
1987 tsnep_queue_close(&adapter->queue[i], i == 0); in tsnep_netdev_close()
1989 if (adapter->queue[i].rx) in tsnep_netdev_close()
1990 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_close()
1991 if (adapter->queue[i].tx) in tsnep_netdev_close()
1992 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2000 bool running = netif_running(queue->adapter->netdev); in tsnep_enable_xsk()
2005 return -EOPNOTSUPP; in tsnep_enable_xsk()
2007 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2008 sizeof(*queue->rx->page_buffer), in tsnep_enable_xsk()
2010 if (!queue->rx->page_buffer) in tsnep_enable_xsk()
2011 return -ENOMEM; in tsnep_enable_xsk()
2012 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2013 sizeof(*queue->rx->xdp_batch), in tsnep_enable_xsk()
2015 if (!queue->rx->xdp_batch) { in tsnep_enable_xsk()
2016 kfree(queue->rx->page_buffer); in tsnep_enable_xsk()
2017 queue->rx->page_buffer = NULL; in tsnep_enable_xsk()
2019 return -ENOMEM; in tsnep_enable_xsk()
2022 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc); in tsnep_enable_xsk()
2027 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2028 queue->rx->xsk_pool = pool; in tsnep_enable_xsk()
2031 tsnep_rx_reopen_xsk(queue->rx); in tsnep_enable_xsk()
2040 bool running = netif_running(queue->adapter->netdev); in tsnep_disable_xsk()
2045 tsnep_rx_free_zc(queue->rx); in tsnep_disable_xsk()
2047 queue->rx->xsk_pool = NULL; in tsnep_disable_xsk()
2048 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2051 tsnep_rx_reopen(queue->rx); in tsnep_disable_xsk()
2055 kfree(queue->rx->xdp_batch); in tsnep_disable_xsk()
2056 queue->rx->xdp_batch = NULL; in tsnep_disable_xsk()
2057 kfree(queue->rx->page_buffer); in tsnep_disable_xsk()
2058 queue->rx->page_buffer = NULL; in tsnep_disable_xsk()
2067 if (queue_mapping >= adapter->num_tx_queues) in tsnep_netdev_xmit_frame()
2070 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2077 return -EINVAL; in tsnep_netdev_ioctl()
2080 return phy_mii_ioctl(netdev->phydev, ifr, cmd); in tsnep_netdev_ioctl()
2090 if (netdev->flags & IFF_PROMISC) { in tsnep_netdev_set_multicast()
2093 } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { in tsnep_netdev_set_multicast()
2096 iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER); in tsnep_netdev_set_multicast()
2107 for (i = 0; i < adapter->num_tx_queues; i++) { in tsnep_netdev_get_stats64()
2108 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2109 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2110 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
2112 for (i = 0; i < adapter->num_rx_queues; i++) { in tsnep_netdev_get_stats64()
2113 stats->rx_packets += adapter->rx[i].packets; in tsnep_netdev_get_stats64()
2114 stats->rx_bytes += adapter->rx[i].bytes; in tsnep_netdev_get_stats64()
2115 stats->rx_dropped += adapter->rx[i].dropped; in tsnep_netdev_get_stats64()
2116 stats->multicast += adapter->rx[i].multicast; in tsnep_netdev_get_stats64()
2118 reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + in tsnep_netdev_get_stats64()
2122 stats->rx_dropped += val; in tsnep_netdev_get_stats64()
2125 stats->rx_dropped += val; in tsnep_netdev_get_stats64()
2128 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2129 stats->rx_fifo_errors += val; in tsnep_netdev_get_stats64()
2132 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2133 stats->rx_frame_errors += val; in tsnep_netdev_get_stats64()
2136 reg = ioread32(adapter->addr + ECM_STAT); in tsnep_netdev_get_stats64()
2138 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2140 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2141 stats->rx_crc_errors += val; in tsnep_netdev_get_stats64()
2143 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2148 iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW); in tsnep_mac_set_address()
2150 adapter->addr + TSNEP_MAC_ADDRESS_HIGH); in tsnep_mac_set_address()
2152 ether_addr_copy(adapter->mac_address, addr); in tsnep_mac_set_address()
2153 netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n", in tsnep_mac_set_address()
2166 eth_hw_addr_set(netdev, sock_addr->sa_data); in tsnep_netdev_set_mac_address()
2167 tsnep_mac_set_address(adapter, sock_addr->sa_data); in tsnep_netdev_set_mac_address()
2176 netdev_features_t changed = netdev->features ^ features; in tsnep_netdev_set_features()
2192 struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data; in tsnep_netdev_get_tstamp()
2196 timestamp = __le64_to_cpu(rx_inline->counter); in tsnep_netdev_get_tstamp()
2198 timestamp = __le64_to_cpu(rx_inline->timestamp); in tsnep_netdev_get_tstamp()
2207 switch (bpf->command) { in tsnep_netdev_bpf()
2209 return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack); in tsnep_netdev_bpf()
2211 return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool, in tsnep_netdev_bpf()
2212 bpf->xsk.queue_id); in tsnep_netdev_bpf()
2214 return -EOPNOTSUPP; in tsnep_netdev_bpf()
2221 cpu &= TSNEP_MAX_QUEUES - 1; in tsnep_xdp_get_tx()
2223 while (cpu >= adapter->num_tx_queues) in tsnep_xdp_get_tx()
2224 cpu -= adapter->num_tx_queues; in tsnep_xdp_get_tx()
2226 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
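
tsnep_xdp_get_tx() spreads redirected frames across TX queues by CPU: the mask bounds the id by the maximum queue count (a power of two) and the subtraction loop folds it into the configured count without a division. The same logic as a standalone function, assuming TSNEP_MAX_QUEUES is 8:

#include <stdio.h>

#define MAX_QUEUES 8  /* assumed value of TSNEP_MAX_QUEUES; must be a power of two */

static int tx_queue_for_cpu(int cpu, int num_tx_queues)
{
	cpu &= MAX_QUEUES - 1;
	while (cpu >= num_tx_queues)
		cpu -= num_tx_queues;
	return cpu;
}

int main(void)
{
	for (int cpu = 0; cpu < 10; cpu++)
		printf("cpu %d -> tx queue %d\n", cpu, tx_queue_for_cpu(cpu, 3));
	return 0;
}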
2235 struct tsnep_tx *tx; in tsnep_netdev_xdp_xmit() local
2240 return -EINVAL; in tsnep_netdev_xdp_xmit()
2242 tx = tsnep_xdp_get_tx(adapter, cpu); in tsnep_netdev_xdp_xmit()
2243 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2248 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, in tsnep_netdev_xdp_xmit()
2260 tsnep_xdp_xmit_flush(tx); in tsnep_netdev_xdp_xmit()
2273 if (queue_id >= adapter->num_rx_queues || in tsnep_netdev_xsk_wakeup()
2274 queue_id >= adapter->num_tx_queues) in tsnep_netdev_xsk_wakeup()
2275 return -EINVAL; in tsnep_netdev_xsk_wakeup()
2277 queue = &adapter->queue[queue_id]; in tsnep_netdev_xsk_wakeup()
2279 if (!napi_if_scheduled_mark_missed(&queue->napi)) in tsnep_netdev_xsk_wakeup()
2280 napi_schedule(&queue->napi); in tsnep_netdev_xsk_wakeup()
2305 /* initialize RX filtering, at least configured MAC address and in tsnep_mac_init()
2308 iowrite16(0, adapter->addr + TSNEP_RX_FILTER); in tsnep_mac_init()
2311 * - device tree in tsnep_mac_init()
2312 * - valid MAC address already set in tsnep_mac_init()
2313 * - MAC address register if valid in tsnep_mac_init()
2314 * - random MAC address in tsnep_mac_init()
2316 retval = of_get_mac_address(adapter->pdev->dev.of_node, in tsnep_mac_init()
2317 adapter->mac_address); in tsnep_mac_init()
2318 if (retval == -EPROBE_DEFER) in tsnep_mac_init()
2320 if (retval && !is_valid_ether_addr(adapter->mac_address)) { in tsnep_mac_init()
2321 *(u32 *)adapter->mac_address = in tsnep_mac_init()
2322 ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW); in tsnep_mac_init()
2323 *(u16 *)(adapter->mac_address + sizeof(u32)) = in tsnep_mac_init()
2324 ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH); in tsnep_mac_init()
2325 if (!is_valid_ether_addr(adapter->mac_address)) in tsnep_mac_init()
2326 eth_random_addr(adapter->mac_address); in tsnep_mac_init()
2329 tsnep_mac_set_address(adapter, adapter->mac_address); in tsnep_mac_init()
2330 eth_hw_addr_set(adapter->netdev, adapter->mac_address); in tsnep_mac_init()
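
tsnep_mac_set_address() above and the register fallback in tsnep_mac_init() treat the 6-byte address as a 32-bit LOW register holding the first four bytes plus a 16-bit HIGH register holding the last two. A sketch of that split and its inverse:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t readback[6];
	uint32_t low;
	uint16_t high;

	/* write path: same byte split the driver uses for the two registers */
	memcpy(&low, mac, sizeof(low));
	memcpy(&high, mac + sizeof(low), sizeof(high));

	/* read path: reassemble the address from the register values */
	memcpy(readback, &low, sizeof(low));
	memcpy(readback + sizeof(low), &high, sizeof(high));

	printf("LOW=0x%08x HIGH=0x%04x roundtrip %s\n", low, high,
	       memcmp(mac, readback, sizeof(mac)) ? "mismatch" : "ok");
	return 0;
}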
2337 struct device_node *np = adapter->pdev->dev.of_node; in tsnep_mdio_init()
2345 adapter->suppress_preamble = in tsnep_mdio_init()
2346 of_property_read_bool(np, "suppress-preamble"); in tsnep_mdio_init()
2349 adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); in tsnep_mdio_init()
2350 if (!adapter->mdiobus) { in tsnep_mdio_init()
2351 retval = -ENOMEM; in tsnep_mdio_init()
2356 adapter->mdiobus->priv = (void *)adapter; in tsnep_mdio_init()
2357 adapter->mdiobus->parent = &adapter->pdev->dev; in tsnep_mdio_init()
2358 adapter->mdiobus->read = tsnep_mdiobus_read; in tsnep_mdio_init()
2359 adapter->mdiobus->write = tsnep_mdiobus_write; in tsnep_mdio_init()
2360 adapter->mdiobus->name = TSNEP "-mdiobus"; in tsnep_mdio_init()
2361 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s", in tsnep_mdio_init()
2362 adapter->pdev->name); in tsnep_mdio_init()
2365 adapter->mdiobus->phy_mask = 0x0000001; in tsnep_mdio_init()
2367 retval = of_mdiobus_register(adapter->mdiobus, np); in tsnep_mdio_init()
2380 retval = of_get_phy_mode(adapter->pdev->dev.of_node, in tsnep_phy_init()
2381 &adapter->phy_mode); in tsnep_phy_init()
2383 adapter->phy_mode = PHY_INTERFACE_MODE_GMII; in tsnep_phy_init()
2385 phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle", in tsnep_phy_init()
2387 adapter->phydev = of_phy_find_device(phy_node); in tsnep_phy_init()
2389 if (!adapter->phydev && adapter->mdiobus) in tsnep_phy_init()
2390 adapter->phydev = phy_find_first(adapter->mdiobus); in tsnep_phy_init()
2391 if (!adapter->phydev) in tsnep_phy_init()
2392 return -EIO; in tsnep_phy_init()
2404 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2405 if (platform_irq_count(adapter->pdev) == 1) in tsnep_queue_init()
2406 retval = platform_get_irq(adapter->pdev, 0); in tsnep_queue_init()
2408 retval = platform_get_irq_byname(adapter->pdev, "mac"); in tsnep_queue_init()
2411 adapter->num_tx_queues = 1; in tsnep_queue_init()
2412 adapter->num_rx_queues = 1; in tsnep_queue_init()
2413 adapter->num_queues = 1; in tsnep_queue_init()
2414 adapter->queue[0].adapter = adapter; in tsnep_queue_init()
2415 adapter->queue[0].irq = retval; in tsnep_queue_init()
2416 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2417 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2418 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2419 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2420 adapter->queue[0].rx = &adapter->rx[0]; in tsnep_queue_init()
2421 adapter->queue[0].rx->adapter = adapter; in tsnep_queue_init()
2422 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2423 adapter->queue[0].rx->queue_index = 0; in tsnep_queue_init()
2424 adapter->queue[0].irq_mask = irq_mask; in tsnep_queue_init()
2425 adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY; in tsnep_queue_init()
2426 retval = tsnep_set_irq_coalesce(&adapter->queue[0], in tsnep_queue_init()
2431 adapter->netdev->irq = adapter->queue[0].irq; in tsnep_queue_init()
2433 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2437 sprintf(name, "txrx-%d", i); in tsnep_queue_init()
2438 retval = platform_get_irq_byname_optional(adapter->pdev, name); in tsnep_queue_init()
2442 adapter->num_tx_queues++; in tsnep_queue_init()
2443 adapter->num_rx_queues++; in tsnep_queue_init()
2444 adapter->num_queues++; in tsnep_queue_init()
2445 adapter->queue[i].adapter = adapter; in tsnep_queue_init()
2446 adapter->queue[i].irq = retval; in tsnep_queue_init()
2447 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2448 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2449 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2450 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()
2451 adapter->queue[i].rx = &adapter->rx[i]; in tsnep_queue_init()
2452 adapter->queue[i].rx->adapter = adapter; in tsnep_queue_init()
2453 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2454 adapter->queue[i].rx->queue_index = i; in tsnep_queue_init()
2455 adapter->queue[i].irq_mask = in tsnep_queue_init()
2457 adapter->queue[i].irq_delay_addr = in tsnep_queue_init()
2458 adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i; in tsnep_queue_init()
2459 retval = tsnep_set_irq_coalesce(&adapter->queue[i], in tsnep_queue_init()
2479 netdev = devm_alloc_etherdev_mqs(&pdev->dev, in tsnep_probe()
2483 return -ENODEV; in tsnep_probe()
2484 SET_NETDEV_DEV(netdev, &pdev->dev); in tsnep_probe()
2487 adapter->pdev = pdev; in tsnep_probe()
2488 adapter->dmadev = &pdev->dev; in tsnep_probe()
2489 adapter->netdev = netdev; in tsnep_probe()
2490 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | in tsnep_probe()
2494 netdev->min_mtu = ETH_MIN_MTU; in tsnep_probe()
2495 netdev->max_mtu = TSNEP_MAX_FRAME_SIZE; in tsnep_probe()
2497 mutex_init(&adapter->gate_control_lock); in tsnep_probe()
2498 mutex_init(&adapter->rxnfc_lock); in tsnep_probe()
2499 INIT_LIST_HEAD(&adapter->rxnfc_rules); in tsnep_probe()
2502 adapter->addr = devm_ioremap_resource(&pdev->dev, io); in tsnep_probe()
2503 if (IS_ERR(adapter->addr)) in tsnep_probe()
2504 return PTR_ERR(adapter->addr); in tsnep_probe()
2505 netdev->mem_start = io->start; in tsnep_probe()
2506 netdev->mem_end = io->end; in tsnep_probe()
2508 type = ioread32(adapter->addr + ECM_TYPE); in tsnep_probe()
2512 adapter->gate_control = type & ECM_GATE_CONTROL; in tsnep_probe()
2513 adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT; in tsnep_probe()
2521 retval = dma_set_mask_and_coherent(&adapter->pdev->dev, in tsnep_probe()
2524 dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n"); in tsnep_probe()
2552 netdev->netdev_ops = &tsnep_netdev_ops; in tsnep_probe()
2553 netdev->ethtool_ops = &tsnep_ethtool_ops; in tsnep_probe()
2554 netdev->features = NETIF_F_SG; in tsnep_probe()
2555 netdev->hw_features = netdev->features | NETIF_F_LOOPBACK; in tsnep_probe()
2557 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in tsnep_probe()
2569 dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version, in tsnep_probe()
2571 if (adapter->gate_control) in tsnep_probe()
2572 dev_info(&adapter->pdev->dev, "gate control detected\n"); in tsnep_probe()
2584 if (adapter->mdiobus) in tsnep_probe()
2585 mdiobus_unregister(adapter->mdiobus); in tsnep_probe()
2594 unregister_netdev(adapter->netdev); in tsnep_remove()
2602 if (adapter->mdiobus) in tsnep_remove()
2603 mdiobus_unregister(adapter->mdiobus); in tsnep_remove()
2626 MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");