Lines Matching +full:ethernet +full:- +full:pse
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
30 static int mtk_msg_level = -1;
32 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
178 __raw_writel(val, eth->base + reg); in mtk_w32()
183 return __raw_readl(eth->base + reg); in mtk_r32()
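
The two accessors above (lines 178/183) funnel every register access through eth->base plus a byte offset, so the rest of the driver never touches raw pointers. A minimal userspace-style sketch of the same wrapper pattern; the struct and helper names here are illustrative, not the driver's:

    #include <stdint.h>

    /* Illustrative model of the mtk_w32()/mtk_r32() pattern: one mapped
     * register window per device, addressed by byte offset. */
    struct eth_hw {
            volatile uint32_t *base;        /* start of the MMIO window */
    };

    static inline void hw_w32(struct eth_hw *hw, uint32_t val, unsigned int reg)
    {
            hw->base[reg / 4] = val;        /* reg is a byte offset */
    }

    static inline uint32_t hw_r32(struct eth_hw *hw, unsigned int reg)
    {
            return hw->base[reg / 4];
    }
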
209 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
210 return -ETIMEDOUT; in mtk_mdio_busy_wait()
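
mtk_mdio_busy_wait() (lines 209-210) spins on a status register until the MDIO engine goes idle and gives up with -ETIMEDOUT. A hedged sketch of that poll-with-deadline shape; the callback signature and the microsecond budget are assumptions made purely to keep the example self-contained:

    #include <errno.h>
    #include <stdbool.h>

    /* Illustrative: poll is_idle() until it reports true or the budget runs
     * out; -ETIMEDOUT matches the driver's error code. */
    static int busy_wait(bool (*is_idle)(void *), void *ctx,
                         unsigned int budget_us, unsigned int step_us,
                         void (*delay_us)(unsigned int))
    {
            unsigned int waited = 0;

            while (!is_idle(ctx)) {
                    if (waited >= budget_us)
                            return -ETIMEDOUT;
                    delay_us(step_us);
                    waited += step_us;
            }
            return 0;
    }
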
305 struct mtk_eth *eth = bus->priv; in mtk_mdio_write()
312 struct mtk_eth *eth = bus->priv; in mtk_mdio_read()
325 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mt7621_gmac0_rgmii_adjust()
328 dev_err(eth->dev, in mt7621_gmac0_rgmii_adjust()
330 return -EOPNOTSUPP; in mt7621_gmac0_rgmii_adjust()
336 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
351 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); in mtk_gmac0_rgmii_adjust()
353 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
361 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mtk_gmac0_rgmii_adjust()
366 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); in mtk_gmac0_rgmii_adjust()
368 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
384 struct mtk_eth *eth = mac->hw; in mtk_mac_select_pcs()
389 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_select_pcs()
390 0 : mac->id; in mtk_mac_select_pcs()
392 return mtk_sgmii_select_pcs(eth->sgmii, sid); in mtk_mac_select_pcs()
403 struct mtk_eth *eth = mac->hw; in mtk_mac_config()
408 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
409 mac->interface != state->interface) { in mtk_mac_config()
411 switch (state->interface) { in mtk_mac_config()
413 if (mac->id) in mtk_mac_config()
415 if (!MTK_HAS_CAPS(mac->hw->soc->caps, in mtk_mac_config()
426 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
427 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
435 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_mac_config()
436 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
442 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
443 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
453 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII && in mtk_mac_config()
454 !phy_interface_mode_is_8023z(state->interface) && in mtk_mac_config()
455 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) { in mtk_mac_config()
456 if (MTK_HAS_CAPS(mac->hw->soc->caps, in mtk_mac_config()
458 if (mt7621_gmac0_rgmii_adjust(mac->hw, in mtk_mac_config()
459 state->interface)) in mtk_mac_config()
463 * use state->speed (which is not guaranteed in mtk_mac_config()
470 mtk_gmac0_rgmii_adjust(mac->hw, in mtk_mac_config()
471 state->interface, in mtk_mac_config()
472 state->speed); in mtk_mac_config()
476 mtk_w32(mac->hw, in mtk_mac_config()
481 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL, in mtk_mac_config()
483 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL); in mtk_mac_config()
488 switch (state->interface) { in mtk_mac_config()
497 if (mac->id) in mtk_mac_config()
506 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
507 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); in mtk_mac_config()
508 val |= SYSCFG0_GE_MODE(ge_mode, mac->id); in mtk_mac_config()
509 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
511 mac->interface = state->interface; in mtk_mac_config()
515 if (state->interface == PHY_INTERFACE_MODE_SGMII || in mtk_mac_config()
516 phy_interface_mode_is_8023z(state->interface)) { in mtk_mac_config()
520 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
522 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
527 mac->syscfg0 = val; in mtk_mac_config()
529 dev_err(eth->dev, in mtk_mac_config()
530 "In-band mode not supported in non SGMII mode!\n"); in mtk_mac_config()
537 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
538 mac->id, phy_modes(state->interface)); in mtk_mac_config()
542 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
543 mac->id, phy_modes(state->interface), err); in mtk_mac_config()
551 struct mtk_eth *eth = mac->hw; in mtk_mac_finish()
557 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_finish()
558 SYSCFG0_SGMII_MASK, mac->syscfg0); in mtk_mac_finish()
561 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_mac_finish()
568 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); in mtk_mac_finish()
578 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id)); in mtk_mac_pcs_get_state()
580 state->link = (pmsr & MAC_MSR_LINK); in mtk_mac_pcs_get_state()
581 state->duplex = (pmsr & MAC_MSR_DPX) >> 1; in mtk_mac_pcs_get_state()
585 state->speed = SPEED_10; in mtk_mac_pcs_get_state()
588 state->speed = SPEED_100; in mtk_mac_pcs_get_state()
591 state->speed = SPEED_1000; in mtk_mac_pcs_get_state()
594 state->speed = SPEED_UNKNOWN; in mtk_mac_pcs_get_state()
598 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); in mtk_mac_pcs_get_state()
600 state->pause |= MLO_PAUSE_RX; in mtk_mac_pcs_get_state()
602 state->pause |= MLO_PAUSE_TX; in mtk_mac_pcs_get_state()
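
mtk_mac_pcs_get_state() folds one MAC status word into link, duplex, speed and pause. A compact model of that bitfield decode; the bit positions below are invented for illustration (the real MAC_MSR_* masks live in the driver's header):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_LINK        (1u << 0)       /* invented bit layout */
    #define MSR_DUPLEX      (1u << 1)
    #define MSR_SPEED       (3u << 2)       /* 0=10, 1=100, 2=1000 */

    struct link_state {
            bool link, full_duplex;
            int speed;
    };

    static void decode_msr(uint32_t msr, struct link_state *st)
    {
            st->link = msr & MSR_LINK;
            st->full_duplex = msr & MSR_DUPLEX;
            switch ((msr & MSR_SPEED) >> 2) {
            case 0: st->speed = 10; break;
            case 1: st->speed = 100; break;
            case 2: st->speed = 1000; break;
            default: st->speed = -1; break; /* SPEED_UNKNOWN */
            }
    }
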
610 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_mac_link_down()
613 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); in mtk_mac_link_down()
625 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_mac_link_up()
645 /* Configure pause modes - phylink will avoid these for half duplex */ in mtk_mac_link_up()
652 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); in mtk_mac_link_up()
670 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
672 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
673 return -ENODEV; in mtk_mdio_init()
677 ret = -ENODEV; in mtk_mdio_init()
681 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
682 if (!eth->mii_bus) { in mtk_mdio_init()
683 ret = -ENOMEM; in mtk_mdio_init()
687 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
688 eth->mii_bus->read = mtk_mdio_read; in mtk_mdio_init()
689 eth->mii_bus->write = mtk_mdio_write; in mtk_mdio_init()
690 eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45; in mtk_mdio_init()
691 eth->mii_bus->priv = eth; in mtk_mdio_init()
692 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
694 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
695 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
704 if (!eth->mii_bus) in mtk_mdio_cleanup()
707 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
715 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
716 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
717 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
718 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
726 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
727 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
728 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
729 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
737 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
738 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
739 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
740 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
748 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
749 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
750 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
751 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
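
The four helpers above are the classic read-modify-write of a shared interrupt-mask register, serialized so the TX and RX paths cannot clobber each other's bits. A self-contained sketch of the pattern, with a pthread mutex standing in for the kernel's spin_lock_irqsave() (an assumption made only so the example runs outside the kernel):

    #include <pthread.h>
    #include <stdint.h>

    struct irq_ctl {
            pthread_mutex_t lock;           /* stands in for spin_lock_irqsave() */
            volatile uint32_t *mask_reg;    /* shared interrupt mask register */
    };

    static void irq_disable_bits(struct irq_ctl *c, uint32_t bits)
    {
            pthread_mutex_lock(&c->lock);
            *c->mask_reg &= ~bits;          /* read-modify-write under the lock */
            pthread_mutex_unlock(&c->lock);
    }

    static void irq_enable_bits(struct irq_ctl *c, uint32_t bits)
    {
            pthread_mutex_lock(&c->lock);
            *c->mask_reg |= bits;
            pthread_mutex_unlock(&c->lock);
    }
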
758 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address()
759 const char *macaddr = dev->dev_addr; in mtk_set_mac_address()
764 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_set_mac_address()
765 return -EBUSY; in mtk_set_mac_address()
767 spin_lock_bh(&mac->hw->page_lock); in mtk_set_mac_address()
768 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
769 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], in mtk_set_mac_address()
771 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | in mtk_set_mac_address()
775 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], in mtk_set_mac_address()
776 MTK_GDMA_MAC_ADRH(mac->id)); in mtk_set_mac_address()
777 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | in mtk_set_mac_address()
779 MTK_GDMA_MAC_ADRL(mac->id)); in mtk_set_mac_address()
781 spin_unlock_bh(&mac->hw->page_lock); in mtk_set_mac_address()
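
mtk_set_mac_address() splits the six address bytes across a 16-bit "high" register and a 32-bit "low" register. The listing truncates the low-word expression, so the tail of the sketch below (the b[4]/b[5] placement) is an assumption:

    #include <stdint.h>

    /* Pack a 6-byte MAC into ADRH (top two bytes) and ADRL (bottom four).
     * The b[4]/b[5] placement is assumed; the listing truncates that line. */
    static void mac_to_regs(const uint8_t b[6], uint32_t *adrh, uint32_t *adrl)
    {
            *adrh = ((uint32_t)b[0] << 8) | b[1];
            *adrl = ((uint32_t)b[2] << 24) | ((uint32_t)b[3] << 16) |
                    ((uint32_t)b[4] << 8) | b[5];
    }
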
788 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_stats_update_mac()
789 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac()
791 u64_stats_update_begin(&hw_stats->syncp); in mtk_stats_update_mac()
793 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
794 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT); in mtk_stats_update_mac()
795 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT); in mtk_stats_update_mac()
796 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT); in mtk_stats_update_mac()
797 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT); in mtk_stats_update_mac()
798 hw_stats->rx_checksum_errors += in mtk_stats_update_mac()
799 mtk_r32(mac->hw, MT7628_SDM_CS_ERR); in mtk_stats_update_mac()
801 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_stats_update_mac()
802 unsigned int offs = hw_stats->reg_offset; in mtk_stats_update_mac()
805 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs); in mtk_stats_update_mac()
806 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs); in mtk_stats_update_mac()
808 hw_stats->rx_bytes += (stats << 32); in mtk_stats_update_mac()
809 hw_stats->rx_packets += in mtk_stats_update_mac()
810 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs); in mtk_stats_update_mac()
811 hw_stats->rx_overflow += in mtk_stats_update_mac()
812 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs); in mtk_stats_update_mac()
813 hw_stats->rx_fcs_errors += in mtk_stats_update_mac()
814 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs); in mtk_stats_update_mac()
815 hw_stats->rx_short_errors += in mtk_stats_update_mac()
816 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs); in mtk_stats_update_mac()
817 hw_stats->rx_long_errors += in mtk_stats_update_mac()
818 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs); in mtk_stats_update_mac()
819 hw_stats->rx_checksum_errors += in mtk_stats_update_mac()
820 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs); in mtk_stats_update_mac()
821 hw_stats->rx_flow_control_packets += in mtk_stats_update_mac()
822 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs); in mtk_stats_update_mac()
823 hw_stats->tx_skip += in mtk_stats_update_mac()
824 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs); in mtk_stats_update_mac()
825 hw_stats->tx_collisions += in mtk_stats_update_mac()
826 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs); in mtk_stats_update_mac()
827 hw_stats->tx_bytes += in mtk_stats_update_mac()
828 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs); in mtk_stats_update_mac()
829 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs); in mtk_stats_update_mac()
831 hw_stats->tx_bytes += (stats << 32); in mtk_stats_update_mac()
832 hw_stats->tx_packets += in mtk_stats_update_mac()
833 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs); in mtk_stats_update_mac()
836 u64_stats_update_end(&hw_stats->syncp); in mtk_stats_update_mac()
844 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
846 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
847 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
848 spin_unlock(&eth->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
857 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_get_stats64()
861 if (spin_trylock_bh(&hw_stats->stats_lock)) { in mtk_get_stats64()
863 spin_unlock_bh(&hw_stats->stats_lock); in mtk_get_stats64()
868 start = u64_stats_fetch_begin_irq(&hw_stats->syncp); in mtk_get_stats64()
869 storage->rx_packets = hw_stats->rx_packets; in mtk_get_stats64()
870 storage->tx_packets = hw_stats->tx_packets; in mtk_get_stats64()
871 storage->rx_bytes = hw_stats->rx_bytes; in mtk_get_stats64()
872 storage->tx_bytes = hw_stats->tx_bytes; in mtk_get_stats64()
873 storage->collisions = hw_stats->tx_collisions; in mtk_get_stats64()
874 storage->rx_length_errors = hw_stats->rx_short_errors + in mtk_get_stats64()
875 hw_stats->rx_long_errors; in mtk_get_stats64()
876 storage->rx_over_errors = hw_stats->rx_overflow; in mtk_get_stats64()
877 storage->rx_crc_errors = hw_stats->rx_fcs_errors; in mtk_get_stats64()
878 storage->rx_errors = hw_stats->rx_checksum_errors; in mtk_get_stats64()
879 storage->tx_aborted_errors = hw_stats->tx_skip; in mtk_get_stats64()
880 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); in mtk_get_stats64()
882 storage->tx_errors = dev->stats.tx_errors; in mtk_get_stats64()
883 storage->rx_dropped = dev->stats.rx_dropped; in mtk_get_stats64()
884 storage->tx_dropped = dev->stats.tx_dropped; in mtk_get_stats64()
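
The do/while above is the reader side of the kernel's u64_stats seqcount: snapshot the counters, then retry if a writer raced in. A standalone model of the protocol; it is deliberately simplified (the plain 64-bit loads are not formally race-free C, which is exactly why the kernel wraps this in dedicated u64_stats_* helpers with the right barriers):

    #include <stdatomic.h>
    #include <stdint.h>

    struct stats {
            atomic_uint seq;                /* even = stable, odd = write in flight */
            uint64_t rx_packets, tx_packets;
    };

    static void writer_update(struct stats *s, uint64_t rx, uint64_t tx)
    {
            atomic_fetch_add(&s->seq, 1);   /* odd: update in progress */
            s->rx_packets += rx;
            s->tx_packets += tx;
            atomic_fetch_add(&s->seq, 1);   /* even: consistent again */
    }

    static void reader_snapshot(struct stats *s, uint64_t *rx, uint64_t *tx)
    {
            unsigned int start;

            do {
                    while ((start = atomic_load(&s->seq)) & 1)
                            ;               /* writer active, spin */
                    *rx = s->rx_packets;
                    *tx = s->tx_packets;
            } while (atomic_load(&s->seq) != start);
    }
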
891 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_max_frag_size()
899 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - in mtk_max_buf_size()
910 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); in mtk_rx_get_desc()
911 if (!(rxd->rxd2 & RX_DMA_DONE)) in mtk_rx_get_desc()
914 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); in mtk_rx_get_desc()
915 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); in mtk_rx_get_desc()
916 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); in mtk_rx_get_desc()
917 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_rx_get_desc()
918 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); in mtk_rx_get_desc()
919 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); in mtk_rx_get_desc()
939 const struct mtk_soc_data *soc = eth->soc; in mtk_init_fq_dma()
945 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, in mtk_init_fq_dma()
946 cnt * soc->txrx.txd_size, in mtk_init_fq_dma()
947 &eth->phy_scratch_ring, in mtk_init_fq_dma()
949 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
950 return -ENOMEM; in mtk_init_fq_dma()
952 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); in mtk_init_fq_dma()
953 if (unlikely(!eth->scratch_head)) in mtk_init_fq_dma()
954 return -ENOMEM; in mtk_init_fq_dma()
956 dma_addr = dma_map_single(eth->dma_dev, in mtk_init_fq_dma()
957 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
959 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) in mtk_init_fq_dma()
960 return -ENOMEM; in mtk_init_fq_dma()
962 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); in mtk_init_fq_dma()
967 txd = eth->scratch_ring + i * soc->txrx.txd_size; in mtk_init_fq_dma()
968 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; in mtk_init_fq_dma()
969 if (i < cnt - 1) in mtk_init_fq_dma()
970 txd->txd2 = eth->phy_scratch_ring + in mtk_init_fq_dma()
971 (i + 1) * soc->txrx.txd_size; in mtk_init_fq_dma()
973 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); in mtk_init_fq_dma()
974 txd->txd4 = 0; in mtk_init_fq_dma()
975 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { in mtk_init_fq_dma()
976 txd->txd5 = 0; in mtk_init_fq_dma()
977 txd->txd6 = 0; in mtk_init_fq_dma()
978 txd->txd7 = 0; in mtk_init_fq_dma()
979 txd->txd8 = 0; in mtk_init_fq_dma()
983 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); in mtk_init_fq_dma()
984 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); in mtk_init_fq_dma()
985 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); in mtk_init_fq_dma()
986 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); in mtk_init_fq_dma()
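
mtk_init_fq_dma() threads one coherent allocation into a hardware free queue: each descriptor's txd2 carries the DMA address of the next descriptor, and the head/tail/count registers hand the finished chain to the engine. A sketch of just the chaining arithmetic, with illustrative field names:

    #include <stdint.h>

    struct fq_desc {
            uint32_t buf_addr;      /* txd1: scratch page for this slot */
            uint32_t next_addr;     /* txd2: DMA address of the next descriptor */
    };

    /* ring_virt/ring_dma describe one coherent block of cnt descriptors;
     * page_dma is a contiguous scratch area of cnt pages of page_sz bytes. */
    static void fq_chain(struct fq_desc *ring_virt, uint32_t ring_dma,
                         uint32_t page_dma, uint32_t page_sz, int cnt)
    {
            for (int i = 0; i < cnt; i++) {
                    ring_virt[i].buf_addr = page_dma + i * page_sz;
                    ring_virt[i].next_addr = (i < cnt - 1)
                            ? ring_dma + (i + 1) * (uint32_t)sizeof(struct fq_desc)
                            : 0;    /* last slot terminates the chain */
            }
    }
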
993 return ring->dma + (desc - ring->phys); in mtk_qdma_phys_to_virt()
999 int idx = (txd - ring->dma) / txd_size; in mtk_desc_to_tx_buf()
1001 return &ring->buf[idx]; in mtk_desc_to_tx_buf()
1007 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma; in qdma_to_pdma()
1012 return (dma - ring->dma) / txd_size; in txd_to_idx()
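
The helpers above convert between a descriptor's virtual address, its DMA address and its ring index, relying on the ring being one contiguous array in both views. The same arithmetic, spelled out:

    #include <stdint.h>

    struct ring_view {
            uint8_t *virt;          /* CPU mapping of the descriptor array */
            uint32_t phys;          /* DMA address of the same array */
            uint32_t desc_sz;
    };

    static void *ring_phys_to_virt(struct ring_view *r, uint32_t desc_phys)
    {
            return r->virt + (desc_phys - r->phys); /* same offset in both views */
    }

    static int ring_virt_to_idx(struct ring_view *r, void *desc)
    {
            return ((uint8_t *)desc - r->virt) / r->desc_sz;
    }
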
1018 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
1019 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { in mtk_tx_unmap()
1020 dma_unmap_single(eth->dma_dev, in mtk_tx_unmap()
1024 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { in mtk_tx_unmap()
1025 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1032 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1039 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1046 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_tx_unmap()
1047 if (tx_buf->type == MTK_TYPE_SKB) { in mtk_tx_unmap()
1048 struct sk_buff *skb = tx_buf->data; in mtk_tx_unmap()
1055 struct xdp_frame *xdpf = tx_buf->data; in mtk_tx_unmap()
1057 if (napi && tx_buf->type == MTK_TYPE_XDP_TX) in mtk_tx_unmap()
1065 tx_buf->flags = 0; in mtk_tx_unmap()
1066 tx_buf->data = NULL; in mtk_tx_unmap()
1073 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
1078 txd->txd3 = mapped_addr; in setup_tx_buf()
1079 txd->txd2 |= TX_DMA_PLEN1(size); in setup_tx_buf()
1083 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in setup_tx_buf()
1084 txd->txd1 = mapped_addr; in setup_tx_buf()
1085 txd->txd2 = TX_DMA_PLEN0(size); in setup_tx_buf()
1096 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v1()
1100 WRITE_ONCE(desc->txd1, info->addr); in mtk_tx_set_dma_desc_v1()
1102 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size); in mtk_tx_set_dma_desc_v1()
1103 if (info->last) in mtk_tx_set_dma_desc_v1()
1105 WRITE_ONCE(desc->txd3, data); in mtk_tx_set_dma_desc_v1()
1107 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */ in mtk_tx_set_dma_desc_v1()
1108 if (info->first) { in mtk_tx_set_dma_desc_v1()
1109 if (info->gso) in mtk_tx_set_dma_desc_v1()
1112 if (info->csum) in mtk_tx_set_dma_desc_v1()
1115 if (info->vlan) in mtk_tx_set_dma_desc_v1()
1116 data |= TX_DMA_INS_VLAN | info->vlan_tci; in mtk_tx_set_dma_desc_v1()
1118 WRITE_ONCE(desc->txd4, data); in mtk_tx_set_dma_desc_v1()
1126 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v2()
1129 WRITE_ONCE(desc->txd1, info->addr); in mtk_tx_set_dma_desc_v2()
1131 data = TX_DMA_PLEN0(info->size); in mtk_tx_set_dma_desc_v2()
1132 if (info->last) in mtk_tx_set_dma_desc_v2()
1134 WRITE_ONCE(desc->txd3, data); in mtk_tx_set_dma_desc_v2()
1136 if (!info->qid && mac->id) in mtk_tx_set_dma_desc_v2()
1137 info->qid = MTK_QDMA_GMAC2_QID; in mtk_tx_set_dma_desc_v2()
1139 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */ in mtk_tx_set_dma_desc_v2()
1140 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); in mtk_tx_set_dma_desc_v2()
1141 WRITE_ONCE(desc->txd4, data); in mtk_tx_set_dma_desc_v2()
1144 if (info->first) { in mtk_tx_set_dma_desc_v2()
1145 if (info->gso) in mtk_tx_set_dma_desc_v2()
1148 if (info->csum) in mtk_tx_set_dma_desc_v2()
1151 WRITE_ONCE(desc->txd5, data); in mtk_tx_set_dma_desc_v2()
1154 if (info->first && info->vlan) in mtk_tx_set_dma_desc_v2()
1155 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci; in mtk_tx_set_dma_desc_v2()
1156 WRITE_ONCE(desc->txd6, data); in mtk_tx_set_dma_desc_v2()
1158 WRITE_ONCE(desc->txd7, 0); in mtk_tx_set_dma_desc_v2()
1159 WRITE_ONCE(desc->txd8, 0); in mtk_tx_set_dma_desc_v2()
1166 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc()
1168 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_tx_set_dma_desc()
1180 .csum = skb->ip_summed == CHECKSUM_PARTIAL, in mtk_tx_map()
1182 .qid = skb->mark & MTK_QDMA_TX_MASK, in mtk_tx_map()
1188 struct mtk_eth *eth = mac->hw; in mtk_tx_map()
1189 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_map()
1196 itxd = ring->next_free; in mtk_tx_map()
1198 if (itxd == ring->last_free) in mtk_tx_map()
1199 return -ENOMEM; in mtk_tx_map()
1201 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); in mtk_tx_map()
1204 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, in mtk_tx_map()
1206 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1207 return -ENOMEM; in mtk_tx_map()
1211 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; in mtk_tx_map()
1212 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : in mtk_tx_map()
1221 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mtk_tx_map()
1222 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in mtk_tx_map()
1229 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || in mtk_tx_map()
1231 txd = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_tx_map()
1233 if (txd == ring->last_free) in mtk_tx_map()
1243 soc->txrx.dma_max_len); in mtk_tx_map()
1244 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK; in mtk_tx_map()
1245 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && in mtk_tx_map()
1246 !(frag_size - txd_info.size); in mtk_tx_map()
1247 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, in mtk_tx_map()
1250 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1256 soc->txrx.txd_size); in mtk_tx_map()
1259 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in mtk_tx_map()
1260 tx_buf->flags |= MTK_TX_FLAGS_PAGE0; in mtk_tx_map()
1261 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : in mtk_tx_map()
1267 frag_size -= txd_info.size; in mtk_tx_map()
1273 itx_buf->type = MTK_TYPE_SKB; in mtk_tx_map()
1274 itx_buf->data = skb; in mtk_tx_map()
1276 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_map()
1278 txd_pdma->txd2 |= TX_DMA_LS0; in mtk_tx_map()
1280 txd_pdma->txd2 |= TX_DMA_LS1; in mtk_tx_map()
1283 netdev_sent_queue(dev, skb->len); in mtk_tx_map()
1286 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_tx_map()
1287 atomic_sub(n_desc, &ring->free_count); in mtk_tx_map()
1294 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_map()
1297 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_tx_map()
1301 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size), in mtk_tx_map()
1302 ring->dma_size); in mtk_tx_map()
1310 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); in mtk_tx_map()
1315 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_tx_map()
1316 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) in mtk_tx_map()
1317 itxd_pdma->txd2 = TX_DMA_DESP2_DEF; in mtk_tx_map()
1319 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); in mtk_tx_map()
1323 return -ENOMEM; in mtk_tx_map()
1332 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mtk_cal_txd_req()
1333 frag = &skb_shinfo(skb)->frags[i]; in mtk_cal_txd_req()
1335 eth->soc->txrx.dma_max_len); in mtk_cal_txd_req()
1338 nfrags += skb_shinfo(skb)->nr_frags; in mtk_cal_txd_req()
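
mtk_cal_txd_req() counts the descriptors a frame needs: the linear head and every fragment may have to be split into chunks of at most dma_max_len bytes. A minimal version of the chunked branch under the same DIV_ROUND_UP semantics (the other branch, line 1338 above, simply adds nr_frags):

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    /* head_len: linear part of the frame; frag_len[]: per-fragment sizes. */
    static int txd_required(unsigned int head_len, const unsigned int *frag_len,
                            int nr_frags, unsigned int dma_max_len)
    {
            int n = DIV_ROUND_UP(head_len, dma_max_len);

            for (int i = 0; i < nr_frags; i++)
                    n += DIV_ROUND_UP(frag_len[i], dma_max_len);
            return n;
    }
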
1349 if (!eth->netdev[i]) in mtk_queue_stopped()
1351 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1363 if (!eth->netdev[i]) in mtk_wake_queue()
1365 netif_wake_queue(eth->netdev[i]); in mtk_wake_queue()
1372 struct mtk_eth *eth = mac->hw; in mtk_start_xmit()
1373 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_start_xmit()
1374 struct net_device_stats *stats = &dev->stats; in mtk_start_xmit()
1382 spin_lock(&eth->page_lock); in mtk_start_xmit()
1384 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_start_xmit()
1388 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { in mtk_start_xmit()
1392 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1404 if (skb_shinfo(skb)->gso_type & in mtk_start_xmit()
1407 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); in mtk_start_xmit()
1414 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) in mtk_start_xmit()
1417 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1422 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1423 stats->tx_dropped++; in mtk_start_xmit()
1434 if (!eth->hwlro) in mtk_get_rx_ring()
1435 return &eth->rx_ring[0]; in mtk_get_rx_ring()
1440 ring = &eth->rx_ring[i]; in mtk_get_rx_ring()
1441 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); in mtk_get_rx_ring()
1442 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_get_rx_ring()
1443 if (rxd->rxd2 & RX_DMA_DONE) { in mtk_get_rx_ring()
1444 ring->calc_idx_update = true; in mtk_get_rx_ring()
1457 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1458 ring = &eth->rx_ring[0]; in mtk_update_rx_cpu_idx()
1459 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1462 ring = &eth->rx_ring[i]; in mtk_update_rx_cpu_idx()
1463 if (ring->calc_idx_update) { in mtk_update_rx_cpu_idx()
1464 ring->calc_idx_update = false; in mtk_update_rx_cpu_idx()
1465 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1473 return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2); in mtk_page_pool_enabled()
1485 .dev = eth->dma_dev, in mtk_create_page_pool()
1492 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL in mtk_create_page_pool()
1498 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id, in mtk_create_page_pool()
1532 if (ring->page_pool) in mtk_rx_put_buff()
1533 page_pool_put_full_page(ring->page_pool, in mtk_rx_put_buff()
1544 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_frame_map()
1549 txd_info->addr = dma_map_single(eth->dma_dev, data, in mtk_xdp_frame_map()
1550 txd_info->size, DMA_TO_DEVICE); in mtk_xdp_frame_map()
1551 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) in mtk_xdp_frame_map()
1552 return -ENOMEM; in mtk_xdp_frame_map()
1554 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; in mtk_xdp_frame_map()
1558 txd_info->addr = page_pool_get_dma_addr(page) + in mtk_xdp_frame_map()
1560 dma_sync_single_for_device(eth->dma_dev, txd_info->addr, in mtk_xdp_frame_map()
1561 txd_info->size, DMA_BIDIRECTIONAL); in mtk_xdp_frame_map()
1565 tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; in mtk_xdp_frame_map()
1566 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; in mtk_xdp_frame_map()
1567 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in mtk_xdp_frame_map()
1570 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, in mtk_xdp_frame_map()
1580 const struct mtk_soc_data *soc = eth->soc; in mtk_xdp_submit_frame()
1581 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_submit_frame()
1583 .size = xdpf->len, in mtk_xdp_submit_frame()
1590 void *data = xdpf->data; in mtk_xdp_submit_frame()
1592 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_xdp_submit_frame()
1593 return -EBUSY; in mtk_xdp_submit_frame()
1595 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in mtk_xdp_submit_frame()
1596 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags)) in mtk_xdp_submit_frame()
1597 return -EBUSY; in mtk_xdp_submit_frame()
1599 spin_lock(&eth->page_lock); in mtk_xdp_submit_frame()
1601 txd = ring->next_free; in mtk_xdp_submit_frame()
1602 if (txd == ring->last_free) { in mtk_xdp_submit_frame()
1603 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1604 return -ENOMEM; in mtk_xdp_submit_frame()
1608 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size); in mtk_xdp_submit_frame()
1614 data, xdpf->headroom, index, dma_map); in mtk_xdp_submit_frame()
1621 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) { in mtk_xdp_submit_frame()
1622 txd = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_xdp_submit_frame()
1623 if (txd == ring->last_free) in mtk_xdp_submit_frame()
1627 soc->txrx.txd_size); in mtk_xdp_submit_frame()
1633 txd_info.size = skb_frag_size(&sinfo->frags[index]); in mtk_xdp_submit_frame()
1635 data = skb_frag_address(&sinfo->frags[index]); in mtk_xdp_submit_frame()
1640 htx_buf->data = xdpf; in mtk_xdp_submit_frame()
1642 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
1646 txd_pdma->txd2 |= TX_DMA_LS0; in mtk_xdp_submit_frame()
1648 txd_pdma->txd2 |= TX_DMA_LS1; in mtk_xdp_submit_frame()
1651 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_xdp_submit_frame()
1652 atomic_sub(n_desc, &ring->free_count); in mtk_xdp_submit_frame()
1659 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
1660 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_xdp_submit_frame()
1664 idx = txd_to_idx(ring, txd, soc->txrx.txd_size); in mtk_xdp_submit_frame()
1665 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), in mtk_xdp_submit_frame()
1669 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1675 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size); in mtk_xdp_submit_frame()
1678 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_xdp_submit_frame()
1679 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
1682 txd_pdma->txd2 = TX_DMA_DESP2_DEF; in mtk_xdp_submit_frame()
1685 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2); in mtk_xdp_submit_frame()
1688 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1697 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_xdp_xmit()
1698 struct mtk_eth *eth = mac->hw; in mtk_xdp_xmit()
1702 return -EINVAL; in mtk_xdp_xmit()
1710 u64_stats_update_begin(&hw_stats->syncp); in mtk_xdp_xmit()
1711 hw_stats->xdp_stats.tx_xdp_xmit += nxmit; in mtk_xdp_xmit()
1712 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit; in mtk_xdp_xmit()
1713 u64_stats_update_end(&hw_stats->syncp); in mtk_xdp_xmit()
1722 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_xdp_run()
1723 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop; in mtk_xdp_run()
1729 prog = rcu_dereference(eth->prog); in mtk_xdp_run()
1736 count = &hw_stats->xdp_stats.rx_xdp_pass; in mtk_xdp_run()
1744 count = &hw_stats->xdp_stats.rx_xdp_redirect; in mtk_xdp_run()
1750 count = &hw_stats->xdp_stats.rx_xdp_tx_errors; in mtk_xdp_run()
1755 count = &hw_stats->xdp_stats.rx_xdp_tx; in mtk_xdp_run()
1768 page_pool_put_full_page(ring->page_pool, in mtk_xdp_run()
1769 virt_to_head_page(xdp->data), true); in mtk_xdp_run()
1772 u64_stats_update_begin(&hw_stats->syncp); in mtk_xdp_run()
1774 u64_stats_update_end(&hw_stats->syncp); in mtk_xdp_run()
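
mtk_xdp_run() funnels each XDP verdict into its own counter before a single stats update. A reduced model of that dispatch; the enum mirrors the uapi verdict names but is defined locally so the example stands alone:

    #include <stdint.h>

    enum xdp_act { ACT_DROP, ACT_PASS, ACT_TX, ACT_REDIRECT };

    struct xdp_counters {
            uint64_t drop, pass, tx, tx_errors, redirect;
    };

    /* Pick the counter to bump for one completed verdict. */
    static uint64_t *verdict_counter(struct xdp_counters *c,
                                     enum xdp_act act, int err)
    {
            switch (act) {
            case ACT_PASS:          return &c->pass;
            case ACT_TX:            return err ? &c->tx_errors : &c->tx;
            case ACT_REDIRECT:      return &c->redirect;
            default:                return &c->drop; /* aborted/unknown count as drops */
            }
    }
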
1804 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); in mtk_poll_rx()
1805 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_poll_rx()
1806 data = ring->data[idx]; in mtk_poll_rx()
1812 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_poll_rx()
1813 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1; in mtk_poll_rx()
1814 else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_poll_rx()
1816 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1; in mtk_poll_rx()
1819 !eth->netdev[mac])) in mtk_poll_rx()
1822 netdev = eth->netdev[mac]; in mtk_poll_rx()
1824 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_poll_rx()
1830 if (ring->page_pool) { in mtk_poll_rx()
1835 new_data = mtk_page_pool_get_buff(ring->page_pool, in mtk_poll_rx()
1839 netdev->stats.rx_dropped++; in mtk_poll_rx()
1843 dma_sync_single_for_cpu(eth->dma_dev, in mtk_poll_rx()
1845 pktlen, page_pool_get_dma_dir(ring->page_pool)); in mtk_poll_rx()
1847 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q); in mtk_poll_rx()
1861 page_pool_put_full_page(ring->page_pool, in mtk_poll_rx()
1863 netdev->stats.rx_dropped++; in mtk_poll_rx()
1867 skb_reserve(skb, xdp.data - xdp.data_hard_start); in mtk_poll_rx()
1868 skb_put(skb, xdp.data_end - xdp.data); in mtk_poll_rx()
1871 if (ring->frag_size <= PAGE_SIZE) in mtk_poll_rx()
1872 new_data = napi_alloc_frag(ring->frag_size); in mtk_poll_rx()
1877 netdev->stats.rx_dropped++; in mtk_poll_rx()
1881 dma_addr = dma_map_single(eth->dma_dev, in mtk_poll_rx()
1882 new_data + NET_SKB_PAD + eth->ip_align, in mtk_poll_rx()
1883 ring->buf_size, DMA_FROM_DEVICE); in mtk_poll_rx()
1884 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_poll_rx()
1887 netdev->stats.rx_dropped++; in mtk_poll_rx()
1891 dma_unmap_single(eth->dma_dev, trxd.rxd1, in mtk_poll_rx()
1892 ring->buf_size, DMA_FROM_DEVICE); in mtk_poll_rx()
1894 skb = build_skb(data, ring->frag_size); in mtk_poll_rx()
1896 netdev->stats.rx_dropped++; in mtk_poll_rx()
1905 skb->dev = netdev; in mtk_poll_rx()
1906 bytes += skb->len; in mtk_poll_rx()
1908 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_poll_rx()
1924 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) in mtk_poll_rx()
1925 skb->ip_summed = CHECKSUM_UNNECESSARY; in mtk_poll_rx()
1928 skb->protocol = eth_type_trans(skb, netdev); in mtk_poll_rx()
1931 mtk_ppe_check_skb(eth->ppe[0], skb, hash); in mtk_poll_rx()
1933 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { in mtk_poll_rx()
1934 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_poll_rx()
1956 ring->data[idx] = new_data; in mtk_poll_rx()
1957 rxd->rxd1 = (unsigned int)dma_addr; in mtk_poll_rx()
1959 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
1960 rxd->rxd2 = RX_DMA_LSO; in mtk_poll_rx()
1962 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); in mtk_poll_rx()
1964 ring->calc_idx = idx; in mtk_poll_rx()
1977 eth->rx_packets += done; in mtk_poll_rx()
1978 eth->rx_bytes += bytes; in mtk_poll_rx()
1979 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
1981 net_dim(&eth->rx_dim, dim_sample); in mtk_poll_rx()
1992 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_poll_tx_qdma()
1993 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_qdma()
1999 cpu = ring->last_free_ptr; in mtk_poll_tx_qdma()
2000 dma = mtk_r32(eth, reg_map->qdma.drx_ptr); in mtk_poll_tx_qdma()
2006 u32 next_cpu = desc->txd2; in mtk_poll_tx_qdma()
2009 desc = mtk_qdma_phys_to_virt(ring, desc->txd2); in mtk_poll_tx_qdma()
2010 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) in mtk_poll_tx_qdma()
2014 eth->soc->txrx.txd_size); in mtk_poll_tx_qdma()
2015 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) in mtk_poll_tx_qdma()
2018 if (!tx_buf->data) in mtk_poll_tx_qdma()
2021 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_poll_tx_qdma()
2022 if (tx_buf->type == MTK_TYPE_SKB) { in mtk_poll_tx_qdma()
2023 struct sk_buff *skb = tx_buf->data; in mtk_poll_tx_qdma()
2025 bytes[mac] += skb->len; in mtk_poll_tx_qdma()
2028 budget--; in mtk_poll_tx_qdma()
2032 ring->last_free = desc; in mtk_poll_tx_qdma()
2033 atomic_inc(&ring->free_count); in mtk_poll_tx_qdma()
2039 ring->last_free_ptr = cpu; in mtk_poll_tx_qdma()
2040 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); in mtk_poll_tx_qdma()
2048 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_pdma()
2054 cpu = ring->cpu_idx; in mtk_poll_tx_pdma()
2059 tx_buf = &ring->buf[cpu]; in mtk_poll_tx_pdma()
2060 if (!tx_buf->data) in mtk_poll_tx_pdma()
2063 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_poll_tx_pdma()
2064 if (tx_buf->type == MTK_TYPE_SKB) { in mtk_poll_tx_pdma()
2065 struct sk_buff *skb = tx_buf->data; in mtk_poll_tx_pdma()
2067 bytes[0] += skb->len; in mtk_poll_tx_pdma()
2070 budget--; in mtk_poll_tx_pdma()
2074 desc = ring->dma + cpu * eth->soc->txrx.txd_size; in mtk_poll_tx_pdma()
2075 ring->last_free = desc; in mtk_poll_tx_pdma()
2076 atomic_inc(&ring->free_count); in mtk_poll_tx_pdma()
2078 cpu = NEXT_DESP_IDX(cpu, ring->dma_size); in mtk_poll_tx_pdma()
2082 ring->cpu_idx = cpu; in mtk_poll_tx_pdma()
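
mtk_poll_tx_pdma() is an index-based completion walk: advance the software cpu index toward the hardware's dma index, releasing one buffer per step, with NEXT_DESP_IDX wrapping the ring. A skeleton of that loop, assuming the driver's power-of-two ring size:

    #include <stdint.h>

    #define NEXT_IDX(x, size)       (((x) + 1) & ((size) - 1))      /* size = 2^n */

    struct tx_slot { void *data; };

    /* Walk [cpu, dma) releasing completed slots; returns the new cpu index. */
    static uint32_t reclaim_tx(struct tx_slot *slots, uint32_t ring_size,
                               uint32_t cpu, uint32_t dma, int *budget)
    {
            while (cpu != dma && *budget > 0) {
                    if (!slots[cpu].data)
                            break;          /* nothing mapped here, stop early */
                    slots[cpu].data = NULL; /* stands in for dma_unmap + free */
                    (*budget)--;
                    cpu = NEXT_IDX(cpu, ring_size);
            }
            return cpu;
    }
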
2089 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx()
2098 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
2104 if (!eth->netdev[i] || !done[i]) in mtk_poll_tx()
2106 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); in mtk_poll_tx()
2108 eth->tx_packets += done[i]; in mtk_poll_tx()
2109 eth->tx_bytes += bytes[i]; in mtk_poll_tx()
2112 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
2114 net_dim(&eth->tx_dim, dim_sample); in mtk_poll_tx()
2117 (atomic_read(&ring->free_count) > ring->thresh)) in mtk_poll_tx()
2137 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_tx()
2140 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
2142 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); in mtk_napi_tx()
2146 dev_info(eth->dev, in mtk_napi_tx()
2148 mtk_r32(eth, reg_map->tx_irq_status), in mtk_napi_tx()
2149 mtk_r32(eth, reg_map->tx_irq_mask)); in mtk_napi_tx()
2155 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_napi_tx()
2167 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_rx()
2175 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, in mtk_napi_rx()
2176 reg_map->pdma.irq_status); in mtk_napi_rx()
2177 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
2181 dev_info(eth->dev, in mtk_napi_rx()
2183 mtk_r32(eth, reg_map->pdma.irq_status), in mtk_napi_rx()
2184 mtk_r32(eth, reg_map->pdma.irq_mask)); in mtk_napi_rx()
2190 } while (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_napi_rx()
2191 eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
2194 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
2201 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_alloc()
2202 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_alloc()
2203 int i, sz = soc->txrx.txd_size; in mtk_tx_alloc()
2206 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), in mtk_tx_alloc()
2208 if (!ring->buf) in mtk_tx_alloc()
2211 ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, in mtk_tx_alloc()
2212 &ring->phys, GFP_KERNEL); in mtk_tx_alloc()
2213 if (!ring->dma) in mtk_tx_alloc()
2218 u32 next_ptr = ring->phys + next * sz; in mtk_tx_alloc()
2220 txd = ring->dma + i * sz; in mtk_tx_alloc()
2221 txd->txd2 = next_ptr; in mtk_tx_alloc()
2222 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_tx_alloc()
2223 txd->txd4 = 0; in mtk_tx_alloc()
2224 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { in mtk_tx_alloc()
2225 txd->txd5 = 0; in mtk_tx_alloc()
2226 txd->txd6 = 0; in mtk_tx_alloc()
2227 txd->txd7 = 0; in mtk_tx_alloc()
2228 txd->txd8 = 0; in mtk_tx_alloc()
2232 /* On MT7688 (PDMA only) this driver uses the ring->dma structs in mtk_tx_alloc()
2234 * descriptors in ring->dma_pdma. in mtk_tx_alloc()
2236 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_alloc()
2237 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, in mtk_tx_alloc()
2238 &ring->phys_pdma, GFP_KERNEL); in mtk_tx_alloc()
2239 if (!ring->dma_pdma) in mtk_tx_alloc()
2243 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; in mtk_tx_alloc()
2244 ring->dma_pdma[i].txd4 = 0; in mtk_tx_alloc()
2248 ring->dma_size = MTK_DMA_SIZE; in mtk_tx_alloc()
2249 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); in mtk_tx_alloc()
2250 ring->next_free = ring->dma; in mtk_tx_alloc()
2251 ring->last_free = (void *)txd; in mtk_tx_alloc()
2252 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); in mtk_tx_alloc()
2253 ring->thresh = MAX_SKB_FRAGS; in mtk_tx_alloc()
2260 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_alloc()
2261 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); in mtk_tx_alloc()
2262 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); in mtk_tx_alloc()
2264 ring->phys + ((MTK_DMA_SIZE - 1) * sz), in mtk_tx_alloc()
2265 soc->reg_map->qdma.crx_ptr); in mtk_tx_alloc()
2266 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); in mtk_tx_alloc()
2268 soc->reg_map->qdma.qtx_cfg); in mtk_tx_alloc()
2270 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
2273 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); in mtk_tx_alloc()
2279 return -ENOMEM; in mtk_tx_alloc()
2284 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_clean()
2285 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_clean()
2288 if (ring->buf) { in mtk_tx_clean()
2290 mtk_tx_unmap(eth, &ring->buf[i], NULL, false); in mtk_tx_clean()
2291 kfree(ring->buf); in mtk_tx_clean()
2292 ring->buf = NULL; in mtk_tx_clean()
2295 if (ring->dma) { in mtk_tx_clean()
2296 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2297 MTK_DMA_SIZE * soc->txrx.txd_size, in mtk_tx_clean()
2298 ring->dma, ring->phys); in mtk_tx_clean()
2299 ring->dma = NULL; in mtk_tx_clean()
2302 if (ring->dma_pdma) { in mtk_tx_clean()
2303 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2304 MTK_DMA_SIZE * soc->txrx.txd_size, in mtk_tx_clean()
2305 ring->dma_pdma, ring->phys_pdma); in mtk_tx_clean()
2306 ring->dma_pdma = NULL; in mtk_tx_clean()
2312 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_rx_alloc()
2319 return -EINVAL; in mtk_rx_alloc()
2320 ring = &eth->rx_ring_qdma; in mtk_rx_alloc()
2322 ring = &eth->rx_ring[ring_no]; in mtk_rx_alloc()
2333 ring->frag_size = mtk_max_frag_size(rx_data_len); in mtk_rx_alloc()
2334 ring->buf_size = mtk_max_buf_size(ring->frag_size); in mtk_rx_alloc()
2335 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data), in mtk_rx_alloc()
2337 if (!ring->data) in mtk_rx_alloc()
2338 return -ENOMEM; in mtk_rx_alloc()
2343 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, in mtk_rx_alloc()
2348 ring->page_pool = pp; in mtk_rx_alloc()
2351 ring->dma = dma_alloc_coherent(eth->dma_dev, in mtk_rx_alloc()
2352 rx_dma_size * eth->soc->txrx.rxd_size, in mtk_rx_alloc()
2353 &ring->phys, GFP_KERNEL); in mtk_rx_alloc()
2354 if (!ring->dma) in mtk_rx_alloc()
2355 return -ENOMEM; in mtk_rx_alloc()
2362 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_alloc()
2363 if (ring->page_pool) { in mtk_rx_alloc()
2364 data = mtk_page_pool_get_buff(ring->page_pool, in mtk_rx_alloc()
2367 return -ENOMEM; in mtk_rx_alloc()
2369 if (ring->frag_size <= PAGE_SIZE) in mtk_rx_alloc()
2370 data = netdev_alloc_frag(ring->frag_size); in mtk_rx_alloc()
2375 return -ENOMEM; in mtk_rx_alloc()
2377 dma_addr = dma_map_single(eth->dma_dev, in mtk_rx_alloc()
2378 data + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
2379 ring->buf_size, DMA_FROM_DEVICE); in mtk_rx_alloc()
2380 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_rx_alloc()
2383 return -ENOMEM; in mtk_rx_alloc()
2386 rxd->rxd1 = (unsigned int)dma_addr; in mtk_rx_alloc()
2387 ring->data[i] = data; in mtk_rx_alloc()
2389 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
2390 rxd->rxd2 = RX_DMA_LSO; in mtk_rx_alloc()
2392 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); in mtk_rx_alloc()
2394 rxd->rxd3 = 0; in mtk_rx_alloc()
2395 rxd->rxd4 = 0; in mtk_rx_alloc()
2396 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_rx_alloc()
2397 rxd->rxd5 = 0; in mtk_rx_alloc()
2398 rxd->rxd6 = 0; in mtk_rx_alloc()
2399 rxd->rxd7 = 0; in mtk_rx_alloc()
2400 rxd->rxd8 = 0; in mtk_rx_alloc()
2404 ring->dma_size = rx_dma_size; in mtk_rx_alloc()
2405 ring->calc_idx_update = false; in mtk_rx_alloc()
2406 ring->calc_idx = rx_dma_size - 1; in mtk_rx_alloc()
2408 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr + in mtk_rx_alloc()
2411 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + in mtk_rx_alloc()
2419 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2420 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2422 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2424 reg_map->qdma.rst_idx); in mtk_rx_alloc()
2426 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2427 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2429 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2431 reg_map->pdma.rst_idx); in mtk_rx_alloc()
2433 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_rx_alloc()
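
Each RX descriptor is armed by writing the buffer's DMA address into rxd1 and its usable length into rxd2 via RX_DMA_PREP_PLEN0(); on completion the hardware writes the received length back into the same field. A plausible encode/decode pair; the shift and mask below are assumptions, not the driver's real macros:

    #include <stdint.h>

    #define PLEN0_SHIFT     16                      /* assumed field position */
    #define PLEN0_MASK      (0x3fffu << PLEN0_SHIFT)

    static inline uint32_t rx_prep_plen0(uint32_t buf_len)
    {
            return (buf_len << PLEN0_SHIFT) & PLEN0_MASK;
    }

    static inline uint32_t rx_get_plen0(uint32_t rxd2)
    {
            return (rxd2 & PLEN0_MASK) >> PLEN0_SHIFT;
    }
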
2442 if (ring->data && ring->dma) { in mtk_rx_clean()
2443 for (i = 0; i < ring->dma_size; i++) { in mtk_rx_clean()
2446 if (!ring->data[i]) in mtk_rx_clean()
2449 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_clean()
2450 if (!rxd->rxd1) in mtk_rx_clean()
2453 dma_unmap_single(eth->dma_dev, rxd->rxd1, in mtk_rx_clean()
2454 ring->buf_size, DMA_FROM_DEVICE); in mtk_rx_clean()
2455 mtk_rx_put_buff(ring, ring->data[i], false); in mtk_rx_clean()
2457 kfree(ring->data); in mtk_rx_clean()
2458 ring->data = NULL; in mtk_rx_clean()
2461 if (ring->dma) { in mtk_rx_clean()
2462 dma_free_coherent(eth->dma_dev, in mtk_rx_clean()
2463 ring->dma_size * eth->soc->txrx.rxd_size, in mtk_rx_clean()
2464 ring->dma, ring->phys); in mtk_rx_clean()
2465 ring->dma = NULL; in mtk_rx_clean()
2468 if (ring->page_pool) { in mtk_rx_clean()
2469 if (xdp_rxq_info_is_reg(&ring->xdp_q)) in mtk_rx_clean()
2470 xdp_rxq_info_unreg(&ring->xdp_q); in mtk_rx_clean()
2471 page_pool_destroy(ring->page_pool); in mtk_rx_clean()
2472 ring->page_pool = NULL; in mtk_rx_clean()
2482 /* set LRO rings to auto-learn modes */ in mtk_hwlro_rx_init()
2514 /* auto-learn score delta setting */ in mtk_hwlro_rx_init()
2595 if (mac->hwlro_ip[i]) in mtk_hwlro_get_ip_cnt()
2606 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_add_ipaddr()
2608 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr()
2611 if ((fsp->flow_type != TCP_V4_FLOW) || in mtk_hwlro_add_ipaddr()
2612 (!fsp->h_u.tcp_ip4_spec.ip4dst) || in mtk_hwlro_add_ipaddr()
2613 (fsp->location > 1)) in mtk_hwlro_add_ipaddr()
2614 return -EINVAL; in mtk_hwlro_add_ipaddr()
2616 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst); in mtk_hwlro_add_ipaddr()
2617 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; in mtk_hwlro_add_ipaddr()
2619 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); in mtk_hwlro_add_ipaddr()
2621 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
2630 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_del_ipaddr()
2632 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr()
2635 if (fsp->location > 1) in mtk_hwlro_del_ipaddr()
2636 return -EINVAL; in mtk_hwlro_del_ipaddr()
2638 mac->hwlro_ip[fsp->location] = 0; in mtk_hwlro_del_ipaddr()
2639 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; in mtk_hwlro_del_ipaddr()
2641 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); in mtk_hwlro_del_ipaddr()
2651 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable()
2655 mac->hwlro_ip[i] = 0; in mtk_hwlro_netdev_disable()
2656 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i; in mtk_hwlro_netdev_disable()
2661 mac->hwlro_ip_cnt = 0; in mtk_hwlro_netdev_disable()
2669 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_get_fdir_entry()
2671 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip)) in mtk_hwlro_get_fdir_entry()
2672 return -EINVAL; in mtk_hwlro_get_fdir_entry()
2675 fsp->flow_type = TCP_V4_FLOW; in mtk_hwlro_get_fdir_entry()
2676 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); in mtk_hwlro_get_fdir_entry()
2677 fsp->m_u.tcp_ip4_spec.ip4dst = 0; in mtk_hwlro_get_fdir_entry()
2679 fsp->h_u.tcp_ip4_spec.ip4src = 0; in mtk_hwlro_get_fdir_entry()
2680 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; in mtk_hwlro_get_fdir_entry()
2681 fsp->h_u.tcp_ip4_spec.psrc = 0; in mtk_hwlro_get_fdir_entry()
2682 fsp->m_u.tcp_ip4_spec.psrc = 0xffff; in mtk_hwlro_get_fdir_entry()
2683 fsp->h_u.tcp_ip4_spec.pdst = 0; in mtk_hwlro_get_fdir_entry()
2684 fsp->m_u.tcp_ip4_spec.pdst = 0xffff; in mtk_hwlro_get_fdir_entry()
2685 fsp->h_u.tcp_ip4_spec.tos = 0; in mtk_hwlro_get_fdir_entry()
2686 fsp->m_u.tcp_ip4_spec.tos = 0xff; in mtk_hwlro_get_fdir_entry()
2700 if (mac->hwlro_ip[i]) { in mtk_hwlro_get_fdir_all()
2706 cmd->rule_cnt = cnt; in mtk_hwlro_get_fdir_all()
2732 if (!((dev->features ^ features) & NETIF_F_LRO)) in mtk_set_features()
2748 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
2749 reg = eth->soc->reg_map->qdma.glo_cfg; in mtk_dma_busy_wait()
2751 reg = eth->soc->reg_map->pdma.glo_cfg; in mtk_dma_busy_wait()
2753 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
2757 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
2768 return -EBUSY; in mtk_dma_init()
2770 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
2783 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
2793 if (eth->hwlro) { in mtk_dma_init()
2804 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
2809 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); in mtk_dma_init()
2810 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); in mtk_dma_init()
2818 const struct mtk_soc_data *soc = eth->soc; in mtk_dma_free()
2822 if (eth->netdev[i]) in mtk_dma_free()
2823 netdev_reset_queue(eth->netdev[i]); in mtk_dma_free()
2824 if (eth->scratch_ring) { in mtk_dma_free()
2825 dma_free_coherent(eth->dma_dev, in mtk_dma_free()
2826 MTK_DMA_SIZE * soc->txrx.txd_size, in mtk_dma_free()
2827 eth->scratch_ring, eth->phy_scratch_ring); in mtk_dma_free()
2828 eth->scratch_ring = NULL; in mtk_dma_free()
2829 eth->phy_scratch_ring = 0; in mtk_dma_free()
2832 mtk_rx_clean(eth, &eth->rx_ring[0]); in mtk_dma_free()
2833 mtk_rx_clean(eth, &eth->rx_ring_qdma); in mtk_dma_free()
2835 if (eth->hwlro) { in mtk_dma_free()
2838 mtk_rx_clean(eth, &eth->rx_ring[i]); in mtk_dma_free()
2841 kfree(eth->scratch_head); in mtk_dma_free()
2847 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout()
2849 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
2852 schedule_work(&eth->pending_work); in mtk_tx_timeout()
2859 eth->rx_events++; in mtk_handle_irq_rx()
2860 if (likely(napi_schedule_prep(&eth->rx_napi))) { in mtk_handle_irq_rx()
2861 __napi_schedule(&eth->rx_napi); in mtk_handle_irq_rx()
2862 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_handle_irq_rx()
2872 eth->tx_events++; in mtk_handle_irq_tx()
2873 if (likely(napi_schedule_prep(&eth->tx_napi))) { in mtk_handle_irq_tx()
2874 __napi_schedule(&eth->tx_napi); in mtk_handle_irq_tx()
2884 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_handle_irq()
2886 if (mtk_r32(eth, reg_map->pdma.irq_mask) & in mtk_handle_irq()
2887 eth->soc->txrx.rx_irq_done_mask) { in mtk_handle_irq()
2888 if (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_handle_irq()
2889 eth->soc->txrx.rx_irq_done_mask) in mtk_handle_irq()
2892 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { in mtk_handle_irq()
2893 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_handle_irq()
2904 struct mtk_eth *eth = mac->hw; in mtk_poll_controller()
2907 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
2908 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
2910 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
2917 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_start_dma()
2926 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
2927 val = mtk_r32(eth, reg_map->qdma.glo_cfg); in mtk_start_dma()
2932 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_start_dma()
2938 mtk_w32(eth, val, reg_map->qdma.glo_cfg); in mtk_start_dma()
2943 reg_map->pdma.glo_cfg); in mtk_start_dma()
2947 reg_map->pdma.glo_cfg); in mtk_start_dma()
2957 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
2971 if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0])) in mtk_gdm_config()
2976 /* Reset and enable PSE */ in mtk_gdm_config()
2984 struct mtk_eth *eth = mac->hw; in mtk_open()
2987 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); in mtk_open()
2995 if (!refcount_read(&eth->dma_refcnt)) { in mtk_open()
2996 const struct mtk_soc_data *soc = eth->soc; in mtk_open()
3002 phylink_disconnect_phy(mac->phylink); in mtk_open()
3006 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_open()
3007 mtk_ppe_start(eth->ppe[i]); in mtk_open()
3009 gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe in mtk_open()
3013 napi_enable(&eth->tx_napi); in mtk_open()
3014 napi_enable(&eth->rx_napi); in mtk_open()
3016 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); in mtk_open()
3017 refcount_set(&eth->dma_refcnt, 1); in mtk_open()
3020 refcount_inc(&eth->dma_refcnt); in mtk_open()
3022 phylink_start(mac->phylink); in mtk_open()
3033 spin_lock_bh(&eth->page_lock); in mtk_stop_dma()
3037 spin_unlock_bh(&eth->page_lock); in mtk_stop_dma()
3053 struct mtk_eth *eth = mac->hw; in mtk_stop()
3056 phylink_stop(mac->phylink); in mtk_stop()
3060 phylink_disconnect_phy(mac->phylink); in mtk_stop()
3063 if (!refcount_dec_and_test(&eth->dma_refcnt)) in mtk_stop()
3069 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_stop()
3070 napi_disable(&eth->tx_napi); in mtk_stop()
3071 napi_disable(&eth->rx_napi); in mtk_stop()
3073 cancel_work_sync(&eth->rx_dim.work); in mtk_stop()
3074 cancel_work_sync(&eth->tx_dim.work); in mtk_stop()
3076 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
3077 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); in mtk_stop()
3078 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); in mtk_stop()
3082 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_stop()
3083 mtk_ppe_stop(eth->ppe[i]); in mtk_stop()
3092 struct mtk_eth *eth = mac->hw; in mtk_xdp_setup()
3096 if (eth->hwlro) { in mtk_xdp_setup()
3098 return -EOPNOTSUPP; in mtk_xdp_setup()
3101 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) { in mtk_xdp_setup()
3103 return -EOPNOTSUPP; in mtk_xdp_setup()
3106 need_update = !!eth->prog != !!prog; in mtk_xdp_setup()
3110 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); in mtk_xdp_setup()
3122 switch (xdp->command) { in mtk_xdp()
3124 return mtk_xdp_setup(dev, xdp->prog, xdp->extack); in mtk_xdp()
3126 return -EINVAL; in mtk_xdp()
3132 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3137 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3147 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) in mtk_clk_disable()
3148 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
3156 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
3164 while (--clk >= 0) in mtk_clk_enable()
3165 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
3174 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_rx()
3178 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
3179 dim->profile_ix); in mtk_dim_rx()
3180 spin_lock_bh(&eth->dim_lock); in mtk_dim_rx()
3182 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_rx()
3192 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_rx()
3193 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
3194 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_rx()
3196 spin_unlock_bh(&eth->dim_lock); in mtk_dim_rx()
3198 dim->state = DIM_START_MEASURE; in mtk_dim_rx()
3205 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_tx()
3209 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
3210 dim->profile_ix); in mtk_dim_tx()
3211 spin_lock_bh(&eth->dim_lock); in mtk_dim_tx()
3213 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_tx()
3223 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_tx()
3224 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
3225 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_tx()
3227 spin_unlock_bh(&eth->dim_lock); in mtk_dim_tx()
3229 dim->state = DIM_START_MEASURE; in mtk_dim_tx()
3236 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_init()
3239 if (test_and_set_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_init()
3242 pm_runtime_enable(eth->dev); in mtk_hw_init()
3243 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
3249 if (eth->ethsys) in mtk_hw_init()
3250 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, in mtk_hw_init()
3251 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); in mtk_hw_init()
3253 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
3254 ret = device_reset(eth->dev); in mtk_hw_init()
3256 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
3261 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3262 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3272 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_hw_init()
3273 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); in mtk_hw_init()
3276 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_init()
3282 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_hw_init()
3283 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_init()
3291 if (eth->pctl) { in mtk_hw_init()
3293 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
3296 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
3299 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
3319 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3320 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3327 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); in mtk_hw_init()
3328 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); in mtk_hw_init()
3329 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); in mtk_hw_init()
3330 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); in mtk_hw_init()
3333 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_hw_init()
3334 /* PSE should not drop port8 and port9 packets */ in mtk_hw_init()
3337 /* PSE Free Queue Flow Control */ in mtk_hw_init()
3340 /* PSE config input queue threshold */ in mtk_hw_init()
3350 /* PSE config output queue threshold */ in mtk_hw_init()
3372 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
3373 pm_runtime_disable(eth->dev); in mtk_hw_init()
3380 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_deinit()
3385 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
3386 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
3394 struct mtk_eth *eth = mac->hw; in mtk_init()
3397 ret = of_get_ethdev_address(mac->of_node, dev); in mtk_init()
3401 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_init()
3402 dev->dev_addr); in mtk_init()
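/*
 * Editor's note: the dev_err() above is the tail of a fallback that is
 * elided from this listing; when of_get_ethdev_address() finds no valid
 * "mac-address" in the device tree, the usual pattern is
 *
 *	eth_hw_addr_random(dev);
 *
 * so the port still comes up with a valid, locally administered
 * address, and the message reports which one was generated.
 */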
3411 struct mtk_eth *eth = mac->hw; in mtk_uninit()
3413 phylink_disconnect_phy(mac->phylink); in mtk_uninit()
3422 struct mtk_eth *eth = mac->hw; in mtk_change_mtu()
3425 if (rcu_access_pointer(eth->prog) && in mtk_change_mtu()
3428 return -EINVAL; in mtk_change_mtu()
3431 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_change_mtu()
3432 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_change_mtu()
3445 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); in mtk_change_mtu()
3448 dev->mtu = new_mtu; in mtk_change_mtu()
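/*
 * Editor's note: the rcu_access_pointer(eth->prog) test above is the
 * XDP guard. With a BPF program attached every frame must fit a single
 * page-pool buffer, so an MTU whose total frame length (new_mtu plus
 * the Ethernet/VLAN overhead) would exceed that buffer is rejected with
 * -EINVAL instead of being accepted and then truncated.
 */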
3461 return phylink_mii_ioctl(mac->phylink, ifr, cmd); in mtk_do_ioctl()
3466 return -EOPNOTSUPP; in mtk_do_ioctl()
3477 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); in mtk_pending_work()
3479 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state)) in mtk_pending_work()
3482 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); in mtk_pending_work()
3485 if (!eth->netdev[i]) in mtk_pending_work()
3487 mtk_stop(eth->netdev[i]); in mtk_pending_work()
3490 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); in mtk_pending_work()
3497 if (eth->dev->pins) in mtk_pending_work()
3498 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
3499 eth->dev->pins->default_state); in mtk_pending_work()
3506 err = mtk_open(eth->netdev[i]); in mtk_pending_work()
3508 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
3510 dev_close(eth->netdev[i]); in mtk_pending_work()
3514 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); in mtk_pending_work()
3516 clear_bit_unlock(MTK_RESETTING, &eth->state); in mtk_pending_work()
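/*
 * Editor's sketch of the producer side: the driver's ndo_tx_timeout
 * hook (elided from this listing) is what queues eth->pending_work,
 * roughly as follows:
 */
static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev, "transmit timed out\n");
	schedule_work(&eth->pending_work);
}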
3526 if (!eth->netdev[i]) in mtk_free_dev()
3528 free_netdev(eth->netdev[i]); in mtk_free_dev()
3539 if (!eth->netdev[i]) in mtk_unreg_dev()
3541 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
3551 cancel_work_sync(&eth->pending_work); in mtk_cleanup()
3561 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_get_link_ksettings()
3562 return -EBUSY; in mtk_get_link_ksettings()
3564 return phylink_ethtool_ksettings_get(mac->phylink, cmd); in mtk_get_link_ksettings()
3572 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_set_link_ksettings()
3573 return -EBUSY; in mtk_set_link_ksettings()
3575 return phylink_ethtool_ksettings_set(mac->phylink, cmd); in mtk_set_link_ksettings()
3583 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); in mtk_get_drvinfo()
3584 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); in mtk_get_drvinfo()
3585 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); in mtk_get_drvinfo()
3592 return mac->hw->msg_enable; in mtk_get_msglevel()
3599 mac->hw->msg_enable = value; in mtk_set_msglevel()
3606 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_nway_reset()
3607 return -EBUSY; in mtk_nway_reset()
3609 if (!mac->phylink) in mtk_nway_reset()
3610 return -ENOTSUPP; in mtk_nway_reset()
3612 return phylink_ethtool_nway_reset(mac->phylink); in mtk_nway_reset()
3627 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_strings()
3643 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_sset_count()
3648 return -EOPNOTSUPP; in mtk_get_sset_count()
3657 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { in mtk_ethtool_pp_stats()
3658 struct mtk_rx_ring *ring = &eth->rx_ring[i]; in mtk_ethtool_pp_stats()
3660 if (!ring->page_pool) in mtk_ethtool_pp_stats()
3663 page_pool_get_stats(ring->page_pool, &stats); in mtk_ethtool_pp_stats()
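	/*
	 * Editor's note: page_pool_get_stats() accumulates into 'stats'
	 * across every ring that owns a pool; the elided tail of this
	 * function presumably hands the totals to
	 * page_pool_ethtool_stats_get(), which serializes them into the
	 * ethtool data array.
	 */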
3672 struct mtk_hw_stats *hwstats = mac->hw_stats; in mtk_get_ethtool_stats()
3677 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_get_ethtool_stats()
3681 if (spin_trylock_bh(&hwstats->stats_lock)) { in mtk_get_ethtool_stats()
3683 spin_unlock_bh(&hwstats->stats_lock); in mtk_get_ethtool_stats()
3691 start = u64_stats_fetch_begin_irq(&hwstats->syncp); in mtk_get_ethtool_stats()
3695 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_ethtool_stats()
3696 mtk_ethtool_pp_stats(mac->hw, data_dst); in mtk_get_ethtool_stats()
3697 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); in mtk_get_ethtool_stats()
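/*
 * Editor's note: the fetch_begin/fetch_retry pair above is the reader
 * half of the u64_stats seqcount protocol; the writer half brackets its
 * updates so that 32-bit readers never observe a torn 64-bit counter:
 *
 *	u64_stats_update_begin(&hwstats->syncp);
 *	hwstats->rx_packets += packets;
 *	hwstats->rx_bytes += bytes;
 *	u64_stats_update_end(&hwstats->syncp);
 */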
3703 int ret = -EOPNOTSUPP; in mtk_get_rxnfc()
3705 switch (cmd->cmd) { in mtk_get_rxnfc()
3707 if (dev->hw_features & NETIF_F_LRO) { in mtk_get_rxnfc()
3708 cmd->data = MTK_MAX_RX_RING_NUM; in mtk_get_rxnfc()
3713 if (dev->hw_features & NETIF_F_LRO) { in mtk_get_rxnfc()
3716 cmd->rule_cnt = mac->hwlro_ip_cnt; in mtk_get_rxnfc()
3721 if (dev->hw_features & NETIF_F_LRO) in mtk_get_rxnfc()
3725 if (dev->hw_features & NETIF_F_LRO) in mtk_get_rxnfc()
3738 int ret = -EOPNOTSUPP; in mtk_set_rxnfc()
3740 switch (cmd->cmd) { in mtk_set_rxnfc()
3742 if (dev->hw_features & NETIF_F_LRO) in mtk_set_rxnfc()
3746 if (dev->hw_features & NETIF_F_LRO) in mtk_set_rxnfc()
3802 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
3803 return -EINVAL; in mtk_add_mac()
3808 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
3809 return -EINVAL; in mtk_add_mac()
3812 if (eth->netdev[id]) { in mtk_add_mac()
3813 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
3814 return -EINVAL; in mtk_add_mac()
3817 eth->netdev[id] = alloc_etherdev(sizeof(*mac)); in mtk_add_mac()
3818 if (!eth->netdev[id]) { in mtk_add_mac()
3819 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
3820 return -ENOMEM; in mtk_add_mac()
3822 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
3823 eth->mac[id] = mac; in mtk_add_mac()
3824 mac->id = id; in mtk_add_mac()
3825 mac->hw = eth; in mtk_add_mac()
3826 mac->of_node = np; in mtk_add_mac()
3828 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); in mtk_add_mac()
3829 mac->hwlro_ip_cnt = 0; in mtk_add_mac()
3831 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
3832 sizeof(*mac->hw_stats), in mtk_add_mac()
3834 if (!mac->hw_stats) { in mtk_add_mac()
3835 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
3836 err = -ENOMEM; in mtk_add_mac()
3839 spin_lock_init(&mac->hw_stats->stats_lock); in mtk_add_mac()
3840 u64_stats_init(&mac->hw_stats->syncp); in mtk_add_mac()
3841 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; in mtk_add_mac()
3846 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
3851 mac->interface = PHY_INTERFACE_MODE_NA; in mtk_add_mac()
3852 mac->speed = SPEED_UNKNOWN; in mtk_add_mac()
3854 mac->phylink_config.dev = &eth->netdev[id]->dev; in mtk_add_mac()
3855 mac->phylink_config.type = PHYLINK_NETDEV; in mtk_add_mac()
3856 /* This driver makes use of state->speed in mac_config */ in mtk_add_mac()
3857 mac->phylink_config.legacy_pre_march2020 = true; in mtk_add_mac()
3858 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | in mtk_add_mac()
3862 mac->phylink_config.supported_interfaces); in mtk_add_mac()
3864 mac->phylink_config.supported_interfaces); in mtk_add_mac()
3866 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) in mtk_add_mac()
3867 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces); in mtk_add_mac()
3869 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id) in mtk_add_mac()
3871 mac->phylink_config.supported_interfaces); in mtk_add_mac()
3873 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { in mtk_add_mac()
3875 mac->phylink_config.supported_interfaces); in mtk_add_mac()
3877 mac->phylink_config.supported_interfaces); in mtk_add_mac()
3879 mac->phylink_config.supported_interfaces); in mtk_add_mac()
3882 phylink = phylink_create(&mac->phylink_config, in mtk_add_mac()
3883 of_fwnode_handle(mac->of_node), in mtk_add_mac()
3890 mac->phylink = phylink; in mtk_add_mac()
3892 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
3893 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
3894 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
3895 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
3897 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
3898 if (eth->hwlro) in mtk_add_mac()
3899 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
3901 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
3903 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
3904 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
3906 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
3907 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
3909 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
3910 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
3912 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
3917 free_netdev(eth->netdev[id]); in mtk_add_mac()
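/*
 * Editor's note: the "incorrect phy-mode" error earlier in this
 * function is the failure path of an elided of_get_phy_mode() call,
 * which parses the child node's "phy-mode" string, roughly:
 *
 *	phy_interface_t phy_mode;
 *
 *	err = of_get_phy_mode(np, &phy_mode);
 *	if (err)
 *		goto free_netdev;
 *
 * phy_mode is then handed to phylink_create() together with the fwnode.
 */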
3930 dev = eth->netdev[i]; in mtk_eth_set_dma_device()
3932 if (!dev || !(dev->flags & IFF_UP)) in mtk_eth_set_dma_device()
3935 list_add_tail(&dev->close_list, &dev_list); in mtk_eth_set_dma_device()
3940 eth->dma_dev = dma_dev; in mtk_eth_set_dma_device()
3943 list_del_init(&dev->close_list); in mtk_eth_set_dma_device()
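/*
 * Editor's note: the elided parts of mtk_eth_set_dma_device() hold
 * rtnl_lock(), close every running port in one batch with
 * dev_close_many(&dev_list, false), swap eth->dma_dev as shown, then
 * walk the saved list and dev_open() each device again so its DMA
 * buffers are remapped against the new struct device. The caller is
 * the WED offload path, which retargets DMA at attach time.
 */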
3957 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
3959 return -ENOMEM; in mtk_probe()
3961 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
3963 eth->dev = &pdev->dev; in mtk_probe()
3964 eth->dma_dev = &pdev->dev; in mtk_probe()
3965 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
3966 if (IS_ERR(eth->base)) in mtk_probe()
3967 return PTR_ERR(eth->base); in mtk_probe()
3969 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_probe()
3970 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
3972 spin_lock_init(&eth->page_lock); in mtk_probe()
3973 spin_lock_init(&eth->tx_irq_lock); in mtk_probe()
3974 spin_lock_init(&eth->rx_irq_lock); in mtk_probe()
3975 spin_lock_init(&eth->dim_lock); in mtk_probe()
3977 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
3978 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx); in mtk_probe()
3980 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
3981 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx); in mtk_probe()
3983 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
3984 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
3986 if (IS_ERR(eth->ethsys)) { in mtk_probe()
3987 dev_err(&pdev->dev, "no ethsys regmap found\n"); in mtk_probe()
3988 return PTR_ERR(eth->ethsys); in mtk_probe()
3992 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
3993 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
3995 if (IS_ERR(eth->infra)) { in mtk_probe()
3996 dev_err(&pdev->dev, "no infracfg regmap found\n"); in mtk_probe()
3997 return PTR_ERR(eth->infra); in mtk_probe()
4001 if (of_dma_is_coherent(pdev->dev.of_node)) { in mtk_probe()
4004 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4005 "cci-control-port"); in mtk_probe()
4011 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
4012 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), in mtk_probe()
4014 if (!eth->sgmii) in mtk_probe()
4015 return -ENOMEM; in mtk_probe()
4017 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node, in mtk_probe()
4018 eth->soc->ana_rgc3); in mtk_probe()
4024 if (eth->soc->required_pctl) { in mtk_probe()
4025 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4027 if (IS_ERR(eth->pctl)) { in mtk_probe()
4028 dev_err(&pdev->dev, "no pctl regmap found\n"); in mtk_probe()
4029 return PTR_ERR(eth->pctl); in mtk_probe()
4033 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_probe()
4036 return -EINVAL; in mtk_probe()
4039 if (eth->soc->offload_version) { in mtk_probe()
4045 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) in mtk_probe()
4048 np = of_parse_phandle(pdev->dev.of_node, in mtk_probe()
4053 wdma_base = eth->soc->reg_map->wdma_base[i]; in mtk_probe()
4054 wdma_phy = res ? res->start + wdma_base : 0; in mtk_probe()
4055 mtk_wed_add_hw(np, eth, eth->base + wdma_base, in mtk_probe()
4061 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) in mtk_probe()
4062 eth->irq[i] = eth->irq[0]; in mtk_probe()
4064 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
4065 if (eth->irq[i] < 0) { in mtk_probe()
4066 dev_err(&pdev->dev, "no IRQ%d resource found\n", i); in mtk_probe()
4067 err = -ENXIO; in mtk_probe()
4071 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
4072 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
4074 if (IS_ERR(eth->clks[i])) { in mtk_probe()
4075 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { in mtk_probe()
4076 err = -EPROBE_DEFER; in mtk_probe()
4079 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
4080 dev_err(&pdev->dev, "clock %s not found\n", in mtk_probe()
4082 err = -EINVAL; in mtk_probe()
4085 eth->clks[i] = NULL; in mtk_probe()
4089 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
4090 INIT_WORK(&eth->pending_work, mtk_pending_work); in mtk_probe()
4096 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
4098 for_each_child_of_node(pdev->dev.of_node, mac_np) { in mtk_probe()
4100 "mediatek,eth-mac")) in mtk_probe()
4113 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
4114 err = devm_request_irq(eth->dev, eth->irq[0], in mtk_probe()
4116 dev_name(eth->dev), eth); in mtk_probe()
4118 err = devm_request_irq(eth->dev, eth->irq[1], in mtk_probe()
4120 dev_name(eth->dev), eth); in mtk_probe()
4124 err = devm_request_irq(eth->dev, eth->irq[2], in mtk_probe()
4126 dev_name(eth->dev), eth); in mtk_probe()
4132 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4138 if (eth->soc->offload_version) { in mtk_probe()
4141 num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; in mtk_probe()
4142 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); in mtk_probe()
4144 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; in mtk_probe()
4146 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, in mtk_probe()
4147 eth->soc->offload_version, i); in mtk_probe()
4148 if (!eth->ppe[i]) { in mtk_probe()
4149 err = -ENOMEM; in mtk_probe()
4160 if (!eth->netdev[i]) in mtk_probe()
4163 err = register_netdev(eth->netdev[i]); in mtk_probe()
4165 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
4168 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
4170 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
4176 init_dummy_netdev(&eth->dummy_dev); in mtk_probe()
4177 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx); in mtk_probe()
4178 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx); in mtk_probe()
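/*
 * Editor's note: both NAPI contexts hang off a single dummy net_device
 * because the PDMA/QDMA rings are shared by all MACs; tying the pollers
 * to one real port would couple their lifetime to that port's state.
 * init_dummy_netdev() initializes just enough of a struct net_device
 * for the NAPI core to operate on.
 */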
4205 if (!eth->netdev[i]) in mtk_remove()
4207 mtk_stop(eth->netdev[i]); in mtk_remove()
4208 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
4209 phylink_disconnect_phy(mac->phylink); in mtk_remove()
4215 netif_napi_del(&eth->tx_napi); in mtk_remove()
4216 netif_napi_del(&eth->rx_napi); in mtk_remove()
4247 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4267 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4286 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
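/*
 * Editor's note: on these SoCs the hardware flow-offload (FOE) entry
 * appears to lack the trailing 16-byte block of the full struct
 * mtk_foe_entry (assumed here to describe the larger NETSYS v2 layout),
 * hence the sizeof() minus 16.
 */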
4350 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4351 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4352 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4353 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4354 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4355 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4356 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
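/*
 * Editor's note: of_device_get_match_data(&pdev->dev) in mtk_probe()
 * returns the .data pointer (a struct mtk_soc_data) from whichever
 * entry in this table matched the platform's compatible string.
 */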
4374 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");