Lines Matching refs:eth
61 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) in mtk_w32() argument
63 __raw_writel(val, eth->base + reg); in mtk_w32()
66 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) in mtk_r32() argument
68 return __raw_readl(eth->base + reg); in mtk_r32()
71 static int mtk_mdio_busy_wait(struct mtk_eth *eth) in mtk_mdio_busy_wait() argument
76 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) in mtk_mdio_busy_wait()
83 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
87 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_write() argument
90 if (mtk_mdio_busy_wait(eth)) in _mtk_mdio_write()
95 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE | in _mtk_mdio_write()
100 if (mtk_mdio_busy_wait(eth)) in _mtk_mdio_write()
106 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) in _mtk_mdio_read() argument
110 if (mtk_mdio_busy_wait(eth)) in _mtk_mdio_read()
113 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ | in _mtk_mdio_read()
118 if (mtk_mdio_busy_wait(eth)) in _mtk_mdio_read()
121 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff; in _mtk_mdio_read()
129 struct mtk_eth *eth = bus->priv; in mtk_mdio_write() local
131 return _mtk_mdio_write(eth, phy_addr, phy_reg, val); in mtk_mdio_write()
136 struct mtk_eth *eth = bus->priv; in mtk_mdio_read() local
138 return _mtk_mdio_read(eth, phy_addr, phy_reg); in mtk_mdio_read()
141 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) in mtk_gmac0_rgmii_adjust() argument
148 mtk_w32(eth, val, INTF_MODE); in mtk_gmac0_rgmii_adjust()
150 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mtk_gmac0_rgmii_adjust()
155 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); in mtk_gmac0_rgmii_adjust()
157 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
161 mtk_w32(eth, val, TRGMII_RCK_CTRL); in mtk_gmac0_rgmii_adjust()
165 mtk_w32(eth, val, TRGMII_TCK_CTRL); in mtk_gmac0_rgmii_adjust()
168 static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id) in mtk_gmac_sgmii_hw_setup() argument
173 regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER, in mtk_gmac_sgmii_hw_setup()
176 regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val); in mtk_gmac_sgmii_hw_setup()
178 regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val); in mtk_gmac_sgmii_hw_setup()
180 regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val); in mtk_gmac_sgmii_hw_setup()
182 regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val); in mtk_gmac_sgmii_hw_setup()
184 regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val); in mtk_gmac_sgmii_hw_setup()
186 regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val); in mtk_gmac_sgmii_hw_setup()
189 if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) { in mtk_gmac_sgmii_hw_setup()
190 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_gmac_sgmii_hw_setup()
193 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_gmac_sgmii_hw_setup()
195 dev_info(eth->dev, "setup shared sgmii for gmac=%d\n", in mtk_gmac_sgmii_hw_setup()
202 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) && in mtk_gmac_sgmii_hw_setup()
204 mtk_w32(eth, 0, MTK_MAC_MISC); in mtk_gmac_sgmii_hw_setup()
205 dev_info(eth->dev, "setup gmac1 going through sgmii"); in mtk_gmac_sgmii_hw_setup()
274 static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac, in mtk_phy_connect_node() argument
282 dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); in mtk_phy_connect_node()
286 phydev = of_phy_connect(eth->netdev[mac->id], phy_node, in mtk_phy_connect_node()
289 dev_err(eth->dev, "could not connect to PHY\n"); in mtk_phy_connect_node()
293 dev_info(eth->dev, in mtk_phy_connect_node()
304 struct mtk_eth *eth; in mtk_phy_connect() local
308 eth = mac->hw; in mtk_phy_connect()
326 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) in mtk_phy_connect()
327 mtk_gmac_sgmii_hw_setup(eth, mac->id); in mtk_phy_connect()
345 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_phy_connect()
348 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_phy_connect()
351 if (mtk_phy_connect_node(eth, mac, np)) in mtk_phy_connect()
376 dev_err(eth->dev, "%s: invalid phy\n", __func__); in mtk_phy_connect()
380 static int mtk_mdio_init(struct mtk_eth *eth) in mtk_mdio_init() argument
385 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
387 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
396 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
397 if (!eth->mii_bus) { in mtk_mdio_init()
402 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
403 eth->mii_bus->read = mtk_mdio_read; in mtk_mdio_init()
404 eth->mii_bus->write = mtk_mdio_write; in mtk_mdio_init()
405 eth->mii_bus->priv = eth; in mtk_mdio_init()
406 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
408 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); in mtk_mdio_init()
409 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
416 static void mtk_mdio_cleanup(struct mtk_eth *eth) in mtk_mdio_cleanup() argument
418 if (!eth->mii_bus) in mtk_mdio_cleanup()
421 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
424 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_disable() argument
429 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
430 val = mtk_r32(eth, MTK_QDMA_INT_MASK); in mtk_tx_irq_disable()
431 mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); in mtk_tx_irq_disable()
432 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
435 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_enable() argument
440 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
441 val = mtk_r32(eth, MTK_QDMA_INT_MASK); in mtk_tx_irq_enable()
442 mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); in mtk_tx_irq_enable()
443 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
446 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_disable() argument
451 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
452 val = mtk_r32(eth, MTK_PDMA_INT_MASK); in mtk_rx_irq_disable()
453 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK); in mtk_rx_irq_disable()
454 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
457 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_enable() argument
462 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
463 val = mtk_r32(eth, MTK_PDMA_INT_MASK); in mtk_rx_irq_enable()
464 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK); in mtk_rx_irq_enable()
465 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
523 static void mtk_stats_update(struct mtk_eth *eth) in mtk_stats_update() argument
528 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
530 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
531 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
532 spin_unlock(&eth->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
601 static int mtk_init_fq_dma(struct mtk_eth *eth) in mtk_init_fq_dma() argument
608 eth->scratch_ring = dma_zalloc_coherent(eth->dev, in mtk_init_fq_dma()
610 &eth->phy_scratch_ring, in mtk_init_fq_dma()
612 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
615 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
617 if (unlikely(!eth->scratch_head)) in mtk_init_fq_dma()
620 dma_addr = dma_map_single(eth->dev, in mtk_init_fq_dma()
621 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
623 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) in mtk_init_fq_dma()
626 phy_ring_tail = eth->phy_scratch_ring + in mtk_init_fq_dma()
630 eth->scratch_ring[i].txd1 = in mtk_init_fq_dma()
633 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring + in mtk_init_fq_dma()
635 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); in mtk_init_fq_dma()
638 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); in mtk_init_fq_dma()
639 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); in mtk_init_fq_dma()
640 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); in mtk_init_fq_dma()
641 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); in mtk_init_fq_dma()
661 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) in mtk_tx_unmap() argument
664 dma_unmap_single(eth->dev, in mtk_tx_unmap()
669 dma_unmap_page(eth->dev, in mtk_tx_unmap()
685 struct mtk_eth *eth = mac->hw; in mtk_tx_map() local
715 mapped_addr = dma_map_single(eth->dev, skb->data, in mtk_tx_map()
717 if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) in mtk_tx_map()
745 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, in mtk_tx_map()
748 if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) in mtk_tx_map()
794 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); in mtk_tx_map()
803 mtk_tx_unmap(eth, tx_buf); in mtk_tx_map()
830 static int mtk_queue_stopped(struct mtk_eth *eth) in mtk_queue_stopped() argument
835 if (!eth->netdev[i]) in mtk_queue_stopped()
837 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
844 static void mtk_wake_queue(struct mtk_eth *eth) in mtk_wake_queue() argument
849 if (!eth->netdev[i]) in mtk_wake_queue()
851 netif_wake_queue(eth->netdev[i]); in mtk_wake_queue()
855 static void mtk_stop_queue(struct mtk_eth *eth) in mtk_stop_queue() argument
860 if (!eth->netdev[i]) in mtk_stop_queue()
862 netif_stop_queue(eth->netdev[i]); in mtk_stop_queue()
869 struct mtk_eth *eth = mac->hw; in mtk_start_xmit() local
870 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_start_xmit()
879 spin_lock(&eth->page_lock); in mtk_start_xmit()
881 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_start_xmit()
886 mtk_stop_queue(eth); in mtk_start_xmit()
887 netif_err(eth, tx_queued, dev, in mtk_start_xmit()
889 spin_unlock(&eth->page_lock); in mtk_start_xmit()
896 netif_warn(eth, tx_err, dev, in mtk_start_xmit()
912 mtk_stop_queue(eth); in mtk_start_xmit()
914 spin_unlock(&eth->page_lock); in mtk_start_xmit()
919 spin_unlock(&eth->page_lock); in mtk_start_xmit()
925 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) in mtk_get_rx_ring() argument
931 if (!eth->hwlro) in mtk_get_rx_ring()
932 return &eth->rx_ring[0]; in mtk_get_rx_ring()
935 ring = &eth->rx_ring[i]; in mtk_get_rx_ring()
946 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) in mtk_update_rx_cpu_idx() argument
951 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
952 ring = &eth->rx_ring[0]; in mtk_update_rx_cpu_idx()
953 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
956 ring = &eth->rx_ring[i]; in mtk_update_rx_cpu_idx()
959 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
966 struct mtk_eth *eth) in mtk_poll_rx() argument
981 ring = mtk_get_rx_ring(eth); in mtk_poll_rx()
999 !eth->netdev[mac])) in mtk_poll_rx()
1002 netdev = eth->netdev[mac]; in mtk_poll_rx()
1004 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_poll_rx()
1013 dma_addr = dma_map_single(eth->dev, in mtk_poll_rx()
1017 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { in mtk_poll_rx()
1032 dma_unmap_single(eth->dev, trxd.rxd1, in mtk_poll_rx()
1067 mtk_update_rx_cpu_idx(eth); in mtk_poll_rx()
1073 static int mtk_poll_tx(struct mtk_eth *eth, int budget) in mtk_poll_tx() argument
1075 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx()
1087 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); in mtk_poll_tx()
1088 dma = mtk_r32(eth, MTK_QTX_DRX_PTR); in mtk_poll_tx()
1113 mtk_tx_unmap(eth, tx_buf); in mtk_poll_tx()
1121 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); in mtk_poll_tx()
1124 if (!eth->netdev[i] || !done[i]) in mtk_poll_tx()
1126 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); in mtk_poll_tx()
1130 if (mtk_queue_stopped(eth) && in mtk_poll_tx()
1132 mtk_wake_queue(eth); in mtk_poll_tx()
1137 static void mtk_handle_status_irq(struct mtk_eth *eth) in mtk_handle_status_irq() argument
1139 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); in mtk_handle_status_irq()
1142 mtk_stats_update(eth); in mtk_handle_status_irq()
1143 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), in mtk_handle_status_irq()
1150 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); in mtk_napi_tx() local
1154 mtk_handle_status_irq(eth); in mtk_napi_tx()
1155 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); in mtk_napi_tx()
1156 tx_done = mtk_poll_tx(eth, budget); in mtk_napi_tx()
1158 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_tx()
1159 status = mtk_r32(eth, MTK_QMTK_INT_STATUS); in mtk_napi_tx()
1160 mask = mtk_r32(eth, MTK_QDMA_INT_MASK); in mtk_napi_tx()
1161 dev_info(eth->dev, in mtk_napi_tx()
1169 status = mtk_r32(eth, MTK_QMTK_INT_STATUS); in mtk_napi_tx()
1174 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_napi_tx()
1181 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); in mtk_napi_rx() local
1186 mtk_handle_status_irq(eth); in mtk_napi_rx()
1189 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); in mtk_napi_rx()
1190 rx_done = mtk_poll_rx(napi, remain_budget, eth); in mtk_napi_rx()
1192 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_rx()
1193 status = mtk_r32(eth, MTK_PDMA_INT_STATUS); in mtk_napi_rx()
1194 mask = mtk_r32(eth, MTK_PDMA_INT_MASK); in mtk_napi_rx()
1195 dev_info(eth->dev, in mtk_napi_rx()
1202 status = mtk_r32(eth, MTK_PDMA_INT_STATUS); in mtk_napi_rx()
1208 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); in mtk_napi_rx()
1213 static int mtk_tx_alloc(struct mtk_eth *eth) in mtk_tx_alloc() argument
1215 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_alloc()
1223 ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, in mtk_tx_alloc()
1246 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); in mtk_tx_alloc()
1247 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); in mtk_tx_alloc()
1248 mtk_w32(eth, in mtk_tx_alloc()
1251 mtk_w32(eth, in mtk_tx_alloc()
1254 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); in mtk_tx_alloc()
1262 static void mtk_tx_clean(struct mtk_eth *eth) in mtk_tx_clean() argument
1264 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_clean()
1269 mtk_tx_unmap(eth, &ring->buf[i]); in mtk_tx_clean()
1275 dma_free_coherent(eth->dev, in mtk_tx_clean()
1283 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) in mtk_rx_alloc() argument
1293 ring = &eth->rx_ring_qdma; in mtk_rx_alloc()
1296 ring = &eth->rx_ring[ring_no]; in mtk_rx_alloc()
1320 ring->dma = dma_zalloc_coherent(eth->dev, in mtk_rx_alloc()
1327 dma_addr_t dma_addr = dma_map_single(eth->dev, in mtk_rx_alloc()
1331 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) in mtk_rx_alloc()
1346 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset); in mtk_rx_alloc()
1347 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset); in mtk_rx_alloc()
1348 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); in mtk_rx_alloc()
1349 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset); in mtk_rx_alloc()
1354 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) in mtk_rx_clean() argument
1364 dma_unmap_single(eth->dev, in mtk_rx_clean()
1375 dma_free_coherent(eth->dev, in mtk_rx_clean()
1383 static int mtk_hwlro_rx_init(struct mtk_eth *eth) in mtk_hwlro_rx_init() argument
1407 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); in mtk_hwlro_rx_init()
1408 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_init()
1409 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); in mtk_hwlro_rx_init()
1419 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); in mtk_hwlro_rx_init()
1422 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); in mtk_hwlro_rx_init()
1425 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, in mtk_hwlro_rx_init()
1437 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); in mtk_hwlro_rx_init()
1438 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_init()
1443 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) in mtk_hwlro_rx_uninit() argument
1449 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
1453 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
1463 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_uninit()
1466 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
1469 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) in mtk_hwlro_val_ipaddr() argument
1473 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
1476 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
1478 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_val_ipaddr()
1481 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
1484 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) in mtk_hwlro_inval_ipaddr() argument
1488 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
1491 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
1493 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_inval_ipaddr()
1515 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr() local
1528 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
1539 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr() local
1550 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_del_ipaddr()
1558 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable() local
1565 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_netdev_disable()
1646 static int mtk_dma_busy_wait(struct mtk_eth *eth) in mtk_dma_busy_wait() argument
1651 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & in mtk_dma_busy_wait()
1658 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
1662 static int mtk_dma_init(struct mtk_eth *eth) in mtk_dma_init() argument
1667 if (mtk_dma_busy_wait(eth)) in mtk_dma_init()
1673 err = mtk_init_fq_dma(eth); in mtk_dma_init()
1677 err = mtk_tx_alloc(eth); in mtk_dma_init()
1681 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); in mtk_dma_init()
1685 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); in mtk_dma_init()
1689 if (eth->hwlro) { in mtk_dma_init()
1691 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); in mtk_dma_init()
1695 err = mtk_hwlro_rx_init(eth); in mtk_dma_init()
1701 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN, in mtk_dma_init()
1703 mtk_w32(eth, 0x0, MTK_QDMA_HRED2); in mtk_dma_init()
1708 static void mtk_dma_free(struct mtk_eth *eth) in mtk_dma_free() argument
1713 if (eth->netdev[i]) in mtk_dma_free()
1714 netdev_reset_queue(eth->netdev[i]); in mtk_dma_free()
1715 if (eth->scratch_ring) { in mtk_dma_free()
1716 dma_free_coherent(eth->dev, in mtk_dma_free()
1718 eth->scratch_ring, in mtk_dma_free()
1719 eth->phy_scratch_ring); in mtk_dma_free()
1720 eth->scratch_ring = NULL; in mtk_dma_free()
1721 eth->phy_scratch_ring = 0; in mtk_dma_free()
1723 mtk_tx_clean(eth); in mtk_dma_free()
1724 mtk_rx_clean(eth, &eth->rx_ring[0]); in mtk_dma_free()
1725 mtk_rx_clean(eth, &eth->rx_ring_qdma); in mtk_dma_free()
1727 if (eth->hwlro) { in mtk_dma_free()
1728 mtk_hwlro_rx_uninit(eth); in mtk_dma_free()
1730 mtk_rx_clean(eth, &eth->rx_ring[i]); in mtk_dma_free()
1733 kfree(eth->scratch_head); in mtk_dma_free()
1739 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout() local
1741 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
1742 netif_err(eth, tx_err, dev, in mtk_tx_timeout()
1744 schedule_work(&eth->pending_work); in mtk_tx_timeout()
1749 struct mtk_eth *eth = _eth; in mtk_handle_irq_rx() local
1751 if (likely(napi_schedule_prep(&eth->rx_napi))) { in mtk_handle_irq_rx()
1752 __napi_schedule(&eth->rx_napi); in mtk_handle_irq_rx()
1753 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); in mtk_handle_irq_rx()
1761 struct mtk_eth *eth = _eth; in mtk_handle_irq_tx() local
1763 if (likely(napi_schedule_prep(&eth->tx_napi))) { in mtk_handle_irq_tx()
1764 __napi_schedule(&eth->tx_napi); in mtk_handle_irq_tx()
1765 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_handle_irq_tx()
1775 struct mtk_eth *eth = mac->hw; in mtk_poll_controller() local
1777 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
1778 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); in mtk_poll_controller()
1779 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
1780 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
1781 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); in mtk_poll_controller()
1785 static int mtk_start_dma(struct mtk_eth *eth) in mtk_start_dma() argument
1789 err = mtk_dma_init(eth); in mtk_start_dma()
1791 mtk_dma_free(eth); in mtk_start_dma()
1795 mtk_w32(eth, in mtk_start_dma()
1802 mtk_w32(eth, in mtk_start_dma()
1813 struct mtk_eth *eth = mac->hw; in mtk_open() local
1816 if (!refcount_read(&eth->dma_refcnt)) { in mtk_open()
1817 int err = mtk_start_dma(eth); in mtk_open()
1822 napi_enable(&eth->tx_napi); in mtk_open()
1823 napi_enable(&eth->rx_napi); in mtk_open()
1824 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_open()
1825 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); in mtk_open()
1826 refcount_set(&eth->dma_refcnt, 1); in mtk_open()
1829 refcount_inc(&eth->dma_refcnt); in mtk_open()
1837 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) in mtk_stop_dma() argument
1843 spin_lock_bh(&eth->page_lock); in mtk_stop_dma()
1844 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
1845 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), in mtk_stop_dma()
1847 spin_unlock_bh(&eth->page_lock); in mtk_stop_dma()
1851 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
1863 struct mtk_eth *eth = mac->hw; in mtk_stop() local
1869 if (!refcount_dec_and_test(&eth->dma_refcnt)) in mtk_stop()
1872 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_stop()
1873 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); in mtk_stop()
1874 napi_disable(&eth->tx_napi); in mtk_stop()
1875 napi_disable(&eth->rx_napi); in mtk_stop()
1877 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); in mtk_stop()
1878 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); in mtk_stop()
1880 mtk_dma_free(eth); in mtk_stop()
1885 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) in ethsys_reset() argument
1887 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
1892 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
1898 static void mtk_clk_disable(struct mtk_eth *eth) in mtk_clk_disable() argument
1903 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
1906 static int mtk_clk_enable(struct mtk_eth *eth) in mtk_clk_enable() argument
1911 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
1920 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
1925 static int mtk_hw_init(struct mtk_eth *eth) in mtk_hw_init() argument
1929 if (test_and_set_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_init()
1932 pm_runtime_enable(eth->dev); in mtk_hw_init()
1933 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
1935 ret = mtk_clk_enable(eth); in mtk_hw_init()
1939 ethsys_reset(eth, RSTCTRL_FE); in mtk_hw_init()
1940 ethsys_reset(eth, RSTCTRL_PPE); in mtk_hw_init()
1942 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_hw_init()
1944 if (!eth->mac[i]) in mtk_hw_init()
1946 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id); in mtk_hw_init()
1947 val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id); in mtk_hw_init()
1949 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_hw_init()
1951 if (eth->pctl) { in mtk_hw_init()
1953 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
1956 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
1959 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
1967 mtk_w32(eth, 0, MTK_MAC_MCR(i)); in mtk_hw_init()
1972 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
1973 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
1976 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); in mtk_hw_init()
1979 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); in mtk_hw_init()
1982 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); in mtk_hw_init()
1983 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
1984 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
1985 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); in mtk_hw_init()
1986 mtk_w32(eth, 0, MTK_RST_GL); in mtk_hw_init()
1989 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); in mtk_hw_init()
1990 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); in mtk_hw_init()
1991 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); in mtk_hw_init()
1992 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); in mtk_hw_init()
1993 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); in mtk_hw_init()
1996 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); in mtk_hw_init()
2005 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); in mtk_hw_init()
2011 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
2012 pm_runtime_disable(eth->dev); in mtk_hw_init()
2017 static int mtk_hw_deinit(struct mtk_eth *eth) in mtk_hw_deinit() argument
2019 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_deinit()
2022 mtk_clk_disable(eth); in mtk_hw_deinit()
2024 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
2025 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
2033 struct mtk_eth *eth = mac->hw; in mtk_init() local
2043 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_init()
2053 struct mtk_eth *eth = mac->hw; in mtk_uninit() local
2058 mtk_tx_irq_disable(eth, ~0); in mtk_uninit()
2059 mtk_rx_irq_disable(eth, ~0); in mtk_uninit()
2078 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); in mtk_pending_work() local
2084 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); in mtk_pending_work()
2086 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state)) in mtk_pending_work()
2089 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); in mtk_pending_work()
2092 if (!eth->netdev[i]) in mtk_pending_work()
2094 mtk_stop(eth->netdev[i]); in mtk_pending_work()
2097 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); in mtk_pending_work()
2102 mtk_hw_deinit(eth); in mtk_pending_work()
2104 if (eth->dev->pins) in mtk_pending_work()
2105 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
2106 eth->dev->pins->default_state); in mtk_pending_work()
2107 mtk_hw_init(eth); in mtk_pending_work()
2110 if (!eth->mac[i] || in mtk_pending_work()
2111 of_phy_is_fixed_link(eth->mac[i]->of_node)) in mtk_pending_work()
2113 err = phy_init_hw(eth->netdev[i]->phydev); in mtk_pending_work()
2115 dev_err(eth->dev, "%s: PHY init failed.\n", in mtk_pending_work()
2116 eth->netdev[i]->name); in mtk_pending_work()
2123 err = mtk_open(eth->netdev[i]); in mtk_pending_work()
2125 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
2127 dev_close(eth->netdev[i]); in mtk_pending_work()
2131 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); in mtk_pending_work()
2133 clear_bit_unlock(MTK_RESETTING, &eth->state); in mtk_pending_work()
2138 static int mtk_free_dev(struct mtk_eth *eth) in mtk_free_dev() argument
2143 if (!eth->netdev[i]) in mtk_free_dev()
2145 free_netdev(eth->netdev[i]); in mtk_free_dev()
2151 static int mtk_unreg_dev(struct mtk_eth *eth) in mtk_unreg_dev() argument
2156 if (!eth->netdev[i]) in mtk_unreg_dev()
2158 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
2164 static int mtk_cleanup(struct mtk_eth *eth) in mtk_cleanup() argument
2166 mtk_unreg_dev(eth); in mtk_cleanup()
2167 mtk_free_dev(eth); in mtk_cleanup()
2168 cancel_work_sync(&eth->pending_work); in mtk_cleanup()
2389 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) in mtk_add_mac() argument
2396 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
2402 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
2406 if (eth->netdev[id]) { in mtk_add_mac()
2407 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
2411 eth->netdev[id] = alloc_etherdev(sizeof(*mac)); in mtk_add_mac()
2412 if (!eth->netdev[id]) { in mtk_add_mac()
2413 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
2416 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
2417 eth->mac[id] = mac; in mtk_add_mac()
2419 mac->hw = eth; in mtk_add_mac()
2425 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
2429 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
2437 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
2438 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
2439 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
2440 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
2442 eth->netdev[id]->hw_features = MTK_HW_FEATURES; in mtk_add_mac()
2443 if (eth->hwlro) in mtk_add_mac()
2444 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
2446 eth->netdev[id]->vlan_features = MTK_HW_FEATURES & in mtk_add_mac()
2448 eth->netdev[id]->features |= MTK_HW_FEATURES; in mtk_add_mac()
2449 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
2451 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
2452 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
2457 free_netdev(eth->netdev[id]); in mtk_add_mac()
2465 struct mtk_eth *eth; in mtk_probe() local
2469 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
2470 if (!eth) in mtk_probe()
2473 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
2475 eth->dev = &pdev->dev; in mtk_probe()
2476 eth->base = devm_ioremap_resource(&pdev->dev, res); in mtk_probe()
2477 if (IS_ERR(eth->base)) in mtk_probe()
2478 return PTR_ERR(eth->base); in mtk_probe()
2480 spin_lock_init(&eth->page_lock); in mtk_probe()
2481 spin_lock_init(&eth->tx_irq_lock); in mtk_probe()
2482 spin_lock_init(&eth->rx_irq_lock); in mtk_probe()
2484 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
2486 if (IS_ERR(eth->ethsys)) { in mtk_probe()
2488 return PTR_ERR(eth->ethsys); in mtk_probe()
2491 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
2492 eth->sgmiisys = in mtk_probe()
2495 if (IS_ERR(eth->sgmiisys)) { in mtk_probe()
2497 return PTR_ERR(eth->sgmiisys); in mtk_probe()
2501 if (eth->soc->required_pctl) { in mtk_probe()
2502 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
2504 if (IS_ERR(eth->pctl)) { in mtk_probe()
2506 return PTR_ERR(eth->pctl); in mtk_probe()
2511 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
2512 if (eth->irq[i] < 0) { in mtk_probe()
2517 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
2518 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
2520 if (IS_ERR(eth->clks[i])) { in mtk_probe()
2521 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) in mtk_probe()
2523 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
2528 eth->clks[i] = NULL; in mtk_probe()
2532 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
2533 INIT_WORK(&eth->pending_work, mtk_pending_work); in mtk_probe()
2535 err = mtk_hw_init(eth); in mtk_probe()
2539 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
2549 err = mtk_add_mac(eth, mac_np); in mtk_probe()
2554 err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, in mtk_probe()
2555 dev_name(eth->dev), eth); in mtk_probe()
2559 err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, in mtk_probe()
2560 dev_name(eth->dev), eth); in mtk_probe()
2564 err = mtk_mdio_init(eth); in mtk_probe()
2569 if (!eth->netdev[i]) in mtk_probe()
2572 err = register_netdev(eth->netdev[i]); in mtk_probe()
2574 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
2577 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
2579 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
2585 init_dummy_netdev(&eth->dummy_dev); in mtk_probe()
2586 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx, in mtk_probe()
2588 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx, in mtk_probe()
2591 platform_set_drvdata(pdev, eth); in mtk_probe()
2596 mtk_mdio_cleanup(eth); in mtk_probe()
2598 mtk_free_dev(eth); in mtk_probe()
2600 mtk_hw_deinit(eth); in mtk_probe()
2607 struct mtk_eth *eth = platform_get_drvdata(pdev); in mtk_remove() local
2612 if (!eth->netdev[i]) in mtk_remove()
2614 mtk_stop(eth->netdev[i]); in mtk_remove()
2617 mtk_hw_deinit(eth); in mtk_remove()
2619 netif_napi_del(&eth->tx_napi); in mtk_remove()
2620 netif_napi_del(&eth->rx_napi); in mtk_remove()
2621 mtk_cleanup(eth); in mtk_remove()
2622 mtk_mdio_cleanup(eth); in mtk_remove()
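
Note: the accessors mtk_r32()/mtk_w32() listed above wrap __raw_readl()/__raw_writel() on eth->base, and the interrupt-mask helpers (mtk_tx_irq_enable()/mtk_tx_irq_disable(), mtk_rx_irq_enable()/mtk_rx_irq_disable()) build a read-modify-write sequence on top of them while holding the per-direction spinlock, so concurrent callers cannot lose each other's mask updates. The following is a minimal sketch of that pattern only, not the driver's code; the struct, the DEMO_INT_MASK offset and the demo_* names are hypothetical stand-ins.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical device context mirroring the eth->base + lock layout. */
struct demo_eth {
	void __iomem *base;
	spinlock_t irq_lock;		/* protects the interrupt mask register */
};

#define DEMO_INT_MASK	0x001c		/* hypothetical register offset */

static void demo_irq_enable(struct demo_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	/* read current mask, set the requested bits, write it back */
	val = __raw_readl(eth->base + DEMO_INT_MASK);
	__raw_writel(val | mask, eth->base + DEMO_INT_MASK);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static void demo_irq_disable(struct demo_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	/* read current mask, clear the requested bits, write it back */
	val = __raw_readl(eth->base + DEMO_INT_MASK);
	__raw_writel(val & ~mask, eth->base + DEMO_INT_MASK);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}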