
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * for all types of frames.
 *
 * More information:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0)
		phy_mac_interrupt(adapter->netdev->phydev);

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
		napi_schedule(&adapter->queue[0].napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
{
	struct tsnep_queue *queue = arg;

	/* handle TX/RX queue interrupt */
	tsnep_disable_irq(queue->adapter, queue->irq_mask);
	napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}
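
/* MDIO access below: a transaction is started by writing the command word to
 * ECM_MD_CONTROL, and completion is busy-polled via ECM_MD_STATUS.
 */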
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	...
		return -EOPNOTSUPP;
	...
	if (!adapter->suppress_preamble)
	...
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
	...
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	...
		return -EOPNOTSUPP;
	...
	if (!adapter->suppress_preamble)
	...
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
	...
}
static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
	switch (adapter->phydev->speed) {
	...
	}
	iowrite32(mode, adapter->addr + ECM_STATUS);
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link)
		tsnep_set_link_mode(adapter);

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
	retval = phy_loopback(adapter->phydev, enable);
	...
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;
	...
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}
static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	...
	memset(tx->entry, 0, sizeof(tx->entry));
	...
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	...
}
static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
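	/* Ring layout: each DMA-coherent page holds TSNEP_RING_ENTRIES_PER_PAGE
	 * descriptors; every entry is a write-back area (desc_wb) followed by
	 * the descriptor itself at TSNEP_DESC_OFFSET. The descriptors are then
	 * chained into a circular list via desc->next below.
	 */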
	...
	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}
static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
	...
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
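	/* Ownership is tracked with a counter that cycles through 1..3: it is
	 * bumped whenever the ring index passes increment_owner_counter, which
	 * itself walks backwards through the ring. Hardware writes the counter
	 * back on completion, so tsnep_tx_poll() can detect finished
	 * descriptors by comparing the owner bits.
	 */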
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
	...
	entry->desc->properties = __cpu_to_le32(entry->properties);
}
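
/* Free descriptors are counted with one slot kept in reserve, so that a full
 * ring (write one behind read) is distinguishable from an empty one
 * (read == write).
 */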
static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}
static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	...
	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (i == 0) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
	...
		entry->desc->tx = __cpu_to_le64(dma);
	}
static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	...
	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];

		if (entry->len) {
	...
			map_len += entry->len;
			entry->len = 0;
		}
	}

	return map_len;
}
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	...
	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full, shall not happen because queue is stopped if full
		 * below
		 */
		netif_stop_queue(tx->adapter->netdev);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval != 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);

		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
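
	/* SKBTX_IN_PROGRESS also selects the extended write-back format in
	 * tsnep_tx_activate(), whose timestamp is later delivered to
	 * skb_tstamp_tx() by tsnep_tx_poll().
	 */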
	...
	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
				  i == (count - 1));
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;
	...
	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	...
	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;
	...
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		length = tsnep_tx_unmap(tx, tx->read, count);

		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
	...
			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HWTSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);
	...
			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		tx->packets++;
		tx->bytes += length + ETH_FCS_LEN;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_queue_stopped(tx->adapter->netdev)) {
		netif_wake_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);
	...
}
static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
	...
	spin_lock_irqsave(&tx->lock, flags);

	if (tx->read != tx->write) {
		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) ==
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			pending = true;
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return pending;
}
static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_tx *tx)
{
	...
	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;
	tx->queue_index = queue_index;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;
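	/* TSNEP_RESET_OWNER_COUNTER is OR'ed into the descriptor base address
	 * below; presumably a flag in low address bits that the descriptor
	 * alignment guarantees to be zero.
	 */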
	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}
static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
	...
	tsnep_tx_ring_cleanup(tx);
}
static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	...
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (entry->page)
			page_pool_put_full_page(rx->page_pool, entry->page,
						false);
		entry->page = NULL;
	}

	if (rx->page_pool)
		page_pool_destroy(rx->page_pool);

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}
static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
				 struct tsnep_rx_entry *entry)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (!page)
		return -ENOMEM;

	entry->page = page;
	entry->len = TSNEP_MAX_RX_BUF_SIZE;
	entry->dma = page_pool_get_dma_addr(entry->page);
	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);

	return 0;
}
static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	...
	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	...
	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool)) {
		retval = PTR_ERR(rx->page_pool);
		rx->page_pool = NULL;
		goto failed;
	}

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);

		retval = tsnep_rx_alloc_buffer(rx, entry);
		if (retval)
			goto failed;
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}
static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	...
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
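	/* same owner counter scheme as on the TX side, see tsnep_tx_activate() */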
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	...
	entry->desc->properties = __cpu_to_le32(entry->properties);
}
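
/* Received frames carry inline metadata (struct tsnep_rx_inline with hardware
 * timestamp and free-running counter) at the head of the buffer; it is
 * stripped off together with the FCS before the skb is handed up the stack.
 */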
static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
				       int length)
{
	...
	__skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);

	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
	...
		skb_shinfo(skb)->tx_flags |=
			SKBTX_HW_TSTAMP_NETDEV;
		hwtstamps->netdev_data = rx_inline;
	}

	skb_record_rx_queue(skb, rx->queue_index);
	skb->protocol = eth_type_trans(skb, rx->adapter->netdev);

	return skb;
}
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	...
	dma_dir = page_pool_get_dma_dir(rx->page_pool);

	while (likely(done < budget)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;
	...
		prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
		length = __le32_to_cpu(entry->desc_wb->properties) &
			 TSNEP_DESC_LENGTH_MASK;
		dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
					      length, dma_dir);
		page = entry->page;
	...
		retval = tsnep_rx_alloc_buffer(rx, entry);
		if (!retval) {
			skb = tsnep_build_skb(rx, page, length);
			if (skb) {
				page_pool_release_page(rx->page_pool, page);

				rx->packets++;
				rx->bytes += length -
					     TSNEP_RX_INLINE_METADATA_SIZE;
				if (skb->pkt_type == PACKET_MULTICAST)
					rx->multicast++;

				napi_gro_receive(napi, skb);
			} else {
				page_pool_recycle_direct(rx->page_pool, page);

				rx->dropped++;
			}
		} else {
			rx->dropped++;
		}

		tsnep_rx_activate(rx, rx->read);
	...
		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
	...
		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return done;
}
static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
	...
	entry = &rx->entry[rx->read];
	if ((__le32_to_cpu(entry->desc_wb->properties) &
	     TSNEP_DESC_OWNER_COUNTER_MASK) ==
	    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
		return true;

	return false;
}
static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_rx *rx)
{
	...
	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;
	rx->queue_index = queue_index;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	for (i = 0; i < TSNEP_RING_SIZE; i++)
		tsnep_rx_activate(rx, i);
	...
	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);

	return 0;
}
static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
	...
	tsnep_rx_ring_cleanup(rx);
}
/* tsnep_pending() */
	if (queue->tx && tsnep_tx_pending(queue->tx))
		return true;

	if (queue->rx && tsnep_rx_pending(queue->rx))
		return true;

	return false;
/* tsnep_poll() */
	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
	...
	}
	...
	if (likely(napi_complete_done(napi, done))) {
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

		/* reschedule if work is already pending */
		if (tsnep_pending(queue)) {
			tsnep_disable_irq(queue->adapter, queue->irq_mask);
			napi_schedule(&queue->napi);
		}
	}
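
	/* NAPI contract: a poll that completed must return strictly less than
	 * budget, hence the clamp below.
	 */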
	return min(done, budget - 1);
static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
{
	const char *name = netdev_name(queue->adapter->netdev);
	...
	if (first) {
		sprintf(queue->name, "%s-mac", name);
		handler = tsnep_irq;
		dev = queue->adapter;
	} else {
		if (queue->tx && queue->rx)
			sprintf(queue->name, "%s-txrx-%d", name,
				queue->rx->queue_index);
		else if (queue->tx)
			sprintf(queue->name, "%s-tx-%d", name,
				queue->tx->queue_index);
		else
			sprintf(queue->name, "%s-rx-%d", name,
				queue->rx->queue_index);
		handler = tsnep_irq_txrx;
		dev = queue;
	}

	retval = request_irq(queue->irq, handler, 0, queue->name, dev);
	if (retval)
		memset(queue->name, 0, sizeof(queue->name));

	return retval;
}
static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
{
	if (!strlen(queue->name))
		return;

	if (first)
		dev = queue->adapter;
	else
		dev = queue;

	free_irq(queue->irq, dev);
	memset(queue->name, 0, sizeof(queue->name));
}
/* tsnep_netdev_open() */
	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
					       adapter->queue[i].tx);
	...
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr, rx_queue_index,
					       adapter->queue[i].rx);
	...
		}

		retval = tsnep_request_irq(&adapter->queue[i], i == 0);
		if (retval) {
			netif_err(adapter, drv, adapter->netdev,
				  "can't get assigned irq %d.\n",
				  adapter->queue[i].irq);
	...
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	...
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	...
	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;
	...
	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
/* tsnep_netdev_close() */
	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
/* tsnep_netdev_xmit_frame() */
	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);

/* tsnep_netdev_ioctl() */
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, ifr, cmd);

/* tsnep_netdev_set_multicast() */
	if (netdev->flags & IFF_PROMISC) {
	...
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
	...
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
/* tsnep_netdev_get_stats64() */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
	...
		stats->rx_dropped += val;
	...
		stats->rx_dropped += val;
	...
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
	...
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	...
	stats->rx_errors += val;
	...
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	...
	stats->rx_errors += val;
/* tsnep_mac_set_address() */
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   adapter->mac_address);

/* tsnep_netdev_set_mac_address() */
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

/* tsnep_netdev_set_features() */
	netdev_features_t changed = netdev->features ^ features;
	...

/* tsnep_netdev_get_tstamp() */
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);
/* tsnep_mac_init() */
	/* initialize RX filtering, at least configured MAC address and
	 * broadcast are not filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
/* tsnep_mdio_init() */
	struct device_node *np = adapter->pdev->dev.of_node;
	...
		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	...
	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;
	...
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan PHY address 0 */
	adapter->mdiobus->phy_mask = 0x00000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);
/* tsnep_phy_init() */
	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;
/* tsnep_queue_init() */
	/* one TX/RX queue pair for netdev is mandatory */
	if (platform_irq_count(adapter->pdev) == 1)
		retval = platform_get_irq(adapter->pdev, 0);
	else
		retval = platform_get_irq_byname(adapter->pdev, "mac");
	if (retval < 0)
		return retval;
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;
	adapter->num_queues = 1;
	adapter->queue[0].irq = retval;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = irq_mask;

	adapter->netdev->irq = adapter->queue[0].irq;

	/* add additional TX/RX queue pairs only if dedicated interrupt is
	 * available
	 */
	for (i = 1; i < TSNEP_MAX_QUEUES; i++) {
		sprintf(name, "txrx-%d", i);
		retval = platform_get_irq_byname_optional(adapter->pdev, name);
		if (retval < 0)
			break;

		adapter->num_tx_queues++;
		adapter->num_rx_queues++;
		adapter->num_queues++;
		adapter->queue[i].irq = retval;
		adapter->queue[i].tx = &adapter->tx[i];
		adapter->queue[i].rx = &adapter->rx[i];
		adapter->queue[i].irq_mask =
	...
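
/* Illustrative device tree snippet (an assumption, not from the driver) for
 * the interrupt naming probed above, with two additional queue pairs:
 *
 *	interrupt-names = "mac", "txrx-1", "txrx-2";
 *
 * A single unnamed interrupt is also accepted and is then used for the
 * mandatory first TX/RX queue pair only.
 */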
/* tsnep_probe() */
	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);

	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
	...
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);
	mutex_init(&adapter->rxnfc_lock);
	INIT_LIST_HEAD(&adapter->rxnfc_rules);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;

	type = ioread32(adapter->addr + ECM_TYPE);
	...
	adapter->gate_control = type & ECM_GATE_CONTROL;
	adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
	...
	retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
					   DMA_BIT_MASK(64));
	if (retval) {
		dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
		return retval;
	}
	...
	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
	...
	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;
	...
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	return retval;
/* tsnep_remove() */
	unregister_netdev(adapter->netdev);
	...
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");