Lines matching refs: fep (the struct fec_enet_private pointer in the Freescale FEC Ethernet driver, drivers/net/ethernet/freescale/fec_main.c)

331 	struct fec_enet_private *fep = netdev_priv(ndev);  in fec_dump()  local
339 txq = fep->tx_queue[0]; in fec_dump()
383 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_frag_skb() local
409 if (fep->bufdesc_ex) { in fec_enet_txq_submit_frag_skb()
412 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) in fec_enet_txq_submit_frag_skb()
417 if (fep->bufdesc_ex) { in fec_enet_txq_submit_frag_skb()
418 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_submit_frag_skb()
429 if (((unsigned long) bufaddr) & fep->tx_align || in fec_enet_txq_submit_frag_skb()
430 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_submit_frag_skb()
434 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_submit_frag_skb()
438 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, in fec_enet_txq_submit_frag_skb()
440 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_submit_frag_skb()
460 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), in fec_enet_txq_submit_frag_skb()
469 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_skb() local
505 if (((unsigned long) bufaddr) & fep->tx_align || in fec_enet_txq_submit_skb()
506 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_submit_skb()
510 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_submit_skb()
515 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); in fec_enet_txq_submit_skb()
516 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_submit_skb()
526 dma_unmap_single(&fep->pdev->dev, addr, in fec_enet_txq_submit_skb()
533 if (fep->bufdesc_ex) { in fec_enet_txq_submit_skb()
536 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) in fec_enet_txq_submit_skb()
543 if (fep->bufdesc_ex) { in fec_enet_txq_submit_skb()
548 fep->hwts_tx_en)) in fec_enet_txq_submit_skb()
551 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_submit_skb()
599 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_put_data_tso() local
610 if (((unsigned long) data) & fep->tx_align || in fec_enet_txq_put_data_tso()
611 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_put_data_tso()
615 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_put_data_tso()
619 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); in fec_enet_txq_put_data_tso()
620 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_put_data_tso()
630 if (fep->bufdesc_ex) { in fec_enet_txq_put_data_tso()
631 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_put_data_tso()
644 if (fep->bufdesc_ex) in fec_enet_txq_put_data_tso()
658 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_put_hdr_tso() local
672 if (((unsigned long)bufaddr) & fep->tx_align || in fec_enet_txq_put_hdr_tso()
673 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_put_hdr_tso()
677 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_put_hdr_tso()
680 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, in fec_enet_txq_put_hdr_tso()
682 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { in fec_enet_txq_put_hdr_tso()
693 if (fep->bufdesc_ex) { in fec_enet_txq_put_hdr_tso()
694 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_put_hdr_tso()
711 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_tso() local
777 if (!(fep->quirks & FEC_QUIRK_ERR007885) || in fec_enet_txq_submit_tso()
794 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_start_xmit() local
802 txq = fep->tx_queue[queue]; in fec_enet_start_xmit()
823 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_bd_init() local
830 for (q = 0; q < fep->num_rx_queues; q++) { in fec_enet_bd_init()
832 rxq = fep->rx_queue[q]; in fec_enet_bd_init()
852 for (q = 0; q < fep->num_tx_queues; q++) { in fec_enet_bd_init()
854 txq = fep->tx_queue[q]; in fec_enet_bd_init()
863 dma_unmap_single(&fep->pdev->dev, in fec_enet_bd_init()
884 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_active_rxring() local
887 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_active_rxring()
888 writel(0, fep->rx_queue[i]->bd.reg_desc_active); in fec_enet_active_rxring()
893 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_enable_ring() local
898 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_enable_ring()
899 rxq = fep->rx_queue[i]; in fec_enet_enable_ring()
900 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); in fec_enet_enable_ring()
901 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); in fec_enet_enable_ring()
906 fep->hwp + FEC_RCMR(i)); in fec_enet_enable_ring()
909 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_enable_ring()
910 txq = fep->tx_queue[i]; in fec_enet_enable_ring()
911 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); in fec_enet_enable_ring()
916 fep->hwp + FEC_DMA_CFG(i)); in fec_enet_enable_ring()
922 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_reset_skb() local
926 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_reset_skb()
927 txq = fep->tx_queue[i]; in fec_enet_reset_skb()
946 struct fec_enet_private *fep = netdev_priv(ndev); in fec_restart() local
956 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_restart()
957 writel(0, fep->hwp + FEC_ECNTRL); in fec_restart()
959 writel(1, fep->hwp + FEC_ECNTRL); in fec_restart()
969 fep->hwp + FEC_ADDR_LOW); in fec_restart()
971 fep->hwp + FEC_ADDR_HIGH); in fec_restart()
974 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT); in fec_restart()
984 if (fep->full_duplex == DUPLEX_FULL) { in fec_restart()
986 writel(0x04, fep->hwp + FEC_X_CNTRL); in fec_restart()
990 writel(0x0, fep->hwp + FEC_X_CNTRL); in fec_restart()
994 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_restart()
997 if (fep->quirks & FEC_QUIRK_HAS_RACC) { in fec_restart()
998 val = readl(fep->hwp + FEC_RACC); in fec_restart()
1001 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) in fec_restart()
1006 writel(val, fep->hwp + FEC_RACC); in fec_restart()
1007 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); in fec_restart()
1015 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
1020 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || in fec_restart()
1021 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in fec_restart()
1022 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || in fec_restart()
1023 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) in fec_restart()
1025 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) in fec_restart()
1041 if (fep->quirks & FEC_QUIRK_USE_GASKET) { in fec_restart()
1044 writel(0, fep->hwp + FEC_MIIGSK_ENR); in fec_restart()
1045 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) in fec_restart()
1053 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) in fec_restart()
1057 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); in fec_restart()
1060 writel(2, fep->hwp + FEC_MIIGSK_ENR); in fec_restart()
1067 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || in fec_restart()
1068 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && in fec_restart()
1073 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); in fec_restart()
1074 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); in fec_restart()
1075 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); in fec_restart()
1076 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); in fec_restart()
1079 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); in fec_restart()
1085 writel(rcntl, fep->hwp + FEC_R_CNTRL); in fec_restart()
1090 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); in fec_restart()
1091 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); in fec_restart()
1094 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
1098 writel(1 << 8, fep->hwp + FEC_X_WMRK); in fec_restart()
1101 if (fep->bufdesc_ex) in fec_restart()
1106 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); in fec_restart()
1110 writel(ecntl, fep->hwp + FEC_ECNTRL); in fec_restart()
1113 if (fep->bufdesc_ex) in fec_restart()
1117 if (fep->link) in fec_restart()
1118 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_restart()
1120 writel(0, fep->hwp + FEC_IMASK); in fec_restart()
1127 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) in fec_enet_stop_mode() argument
1129 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; in fec_enet_stop_mode()
1130 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; in fec_enet_stop_mode()
1148 struct fec_enet_private *fep = netdev_priv(ndev); in fec_stop() local
1149 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); in fec_stop()
1153 if (fep->link) { in fec_stop()
1154 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ in fec_stop()
1156 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) in fec_stop()
1164 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { in fec_stop()
1165 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_stop()
1166 writel(0, fep->hwp + FEC_ECNTRL); in fec_stop()
1168 writel(1, fep->hwp + FEC_ECNTRL); in fec_stop()
1171 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_stop()
1173 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); in fec_stop()
1174 val = readl(fep->hwp + FEC_ECNTRL); in fec_stop()
1176 writel(val, fep->hwp + FEC_ECNTRL); in fec_stop()
1177 fec_enet_stop_mode(fep, true); in fec_stop()
1179 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_stop()
1182 if (fep->quirks & FEC_QUIRK_ENET_MAC && in fec_stop()
1183 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { in fec_stop()
1184 writel(2, fep->hwp + FEC_ECNTRL); in fec_stop()
1185 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); in fec_stop()
1193 struct fec_enet_private *fep = netdev_priv(ndev); in fec_timeout() local
1199 schedule_work(&fep->tx_timeout_work); in fec_timeout()
1204 struct fec_enet_private *fep = in fec_enet_timeout_work() local
1206 struct net_device *ndev = fep->netdev; in fec_enet_timeout_work()
1210 napi_disable(&fep->napi); in fec_enet_timeout_work()
1215 napi_enable(&fep->napi); in fec_enet_timeout_work()
1221 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, in fec_enet_hwtstamp() argument
1227 spin_lock_irqsave(&fep->tmreg_lock, flags); in fec_enet_hwtstamp()
1228 ns = timecounter_cyc2time(&fep->tc, ts); in fec_enet_hwtstamp()
1229 spin_unlock_irqrestore(&fep->tmreg_lock, flags); in fec_enet_hwtstamp()
1238 struct fec_enet_private *fep; in fec_enet_tx_queue() local
1247 fep = netdev_priv(ndev); in fec_enet_tx_queue()
1249 txq = fep->tx_queue[queue_id]; in fec_enet_tx_queue()
1269 dma_unmap_single(&fep->pdev->dev, in fec_enet_tx_queue()
1302 fep->hwts_tx_en) && in fec_enet_tx_queue()
1303 fep->bufdesc_ex) { in fec_enet_tx_queue()
1307 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); in fec_enet_tx_queue()
1346 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_tx() local
1350 for (i = fep->num_tx_queues - 1; i >= 0; i--) in fec_enet_tx()
1357 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_new_rxbdp() local
1360 off = ((unsigned long)skb->data) & fep->rx_align; in fec_enet_new_rxbdp()
1362 skb_reserve(skb, fep->rx_align + 1 - off); in fec_enet_new_rxbdp()
1364 …bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fe… in fec_enet_new_rxbdp()
1365 if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { in fec_enet_new_rxbdp()
1377 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_copybreak() local
1380 if (length > fep->rx_copybreak) in fec_enet_copybreak()
1387 dma_sync_single_for_cpu(&fep->pdev->dev, in fec_enet_copybreak()
1389 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_copybreak()
1408 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx_queue() local
1422 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; in fec_enet_rx_queue()
1427 rxq = fep->rx_queue[queue_id]; in fec_enet_rx_queue()
1440 writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); in fec_enet_rx_queue()
1488 dma_unmap_single(&fep->pdev->dev, in fec_enet_rx_queue()
1490 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_rx_queue()
1502 if (fep->quirks & FEC_QUIRK_HAS_RACC) in fec_enet_rx_queue()
1508 if (fep->bufdesc_ex) in fec_enet_rx_queue()
1514 fep->bufdesc_ex && in fec_enet_rx_queue()
1530 if (fep->hwts_rx_en && fep->bufdesc_ex) in fec_enet_rx_queue()
1531 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), in fec_enet_rx_queue()
1534 if (fep->bufdesc_ex && in fec_enet_rx_queue()
1535 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { in fec_enet_rx_queue()
1551 napi_gro_receive(&fep->napi, skb); in fec_enet_rx_queue()
1554 dma_sync_single_for_device(&fep->pdev->dev, in fec_enet_rx_queue()
1556 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_rx_queue()
1570 if (fep->bufdesc_ex) { in fec_enet_rx_queue()
1598 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx() local
1602 for (i = fep->num_rx_queues - 1; i >= 0; i--) in fec_enet_rx()
1608 static bool fec_enet_collect_events(struct fec_enet_private *fep) in fec_enet_collect_events() argument
1612 int_events = readl(fep->hwp + FEC_IEVENT); in fec_enet_collect_events()
1617 writel(int_events, fep->hwp + FEC_IEVENT); in fec_enet_collect_events()
1626 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_interrupt() local
1629 if (fec_enet_collect_events(fep) && fep->link) { in fec_enet_interrupt()
1632 if (napi_schedule_prep(&fep->napi)) { in fec_enet_interrupt()
1634 writel(0, fep->hwp + FEC_IMASK); in fec_enet_interrupt()
1635 __napi_schedule(&fep->napi); in fec_enet_interrupt()
1645 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx_napi() local
1651 } while ((done < budget) && fec_enet_collect_events(fep)); in fec_enet_rx_napi()
1655 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_enet_rx_napi()
1664 struct fec_enet_private *fep = netdev_priv(ndev); in fec_get_mac() local
1665 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); in fec_get_mac()
1680 struct device_node *np = fep->pdev->dev.of_node; in fec_get_mac()
1706 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); in fec_get_mac()
1708 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); in fec_get_mac()
1717 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); in fec_get_mac()
1719 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", in fec_get_mac()
1728 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; in fec_get_mac()
1738 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_adjust_link() local
1748 fep->link = 0; in fec_enet_adjust_link()
1750 if (!fep->link) { in fec_enet_adjust_link()
1751 fep->link = phy_dev->link; in fec_enet_adjust_link()
1755 if (fep->full_duplex != phy_dev->duplex) { in fec_enet_adjust_link()
1756 fep->full_duplex = phy_dev->duplex; in fec_enet_adjust_link()
1760 if (phy_dev->speed != fep->speed) { in fec_enet_adjust_link()
1761 fep->speed = phy_dev->speed; in fec_enet_adjust_link()
1767 napi_disable(&fep->napi); in fec_enet_adjust_link()
1772 napi_enable(&fep->napi); in fec_enet_adjust_link()
1775 if (fep->link) { in fec_enet_adjust_link()
1776 napi_disable(&fep->napi); in fec_enet_adjust_link()
1780 napi_enable(&fep->napi); in fec_enet_adjust_link()
1781 fep->link = phy_dev->link; in fec_enet_adjust_link()
1790 static int fec_enet_mdio_wait(struct fec_enet_private *fep) in fec_enet_mdio_wait() argument
1795 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, in fec_enet_mdio_wait()
1799 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); in fec_enet_mdio_wait()
1806 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_read() local
1807 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_read()
1823 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_read()
1826 ret = fec_enet_mdio_wait(fep); in fec_enet_mdio_read()
1828 netdev_err(fep->netdev, "MDIO address write timeout\n"); in fec_enet_mdio_read()
1844 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); in fec_enet_mdio_read()
1847 ret = fec_enet_mdio_wait(fep); in fec_enet_mdio_read()
1849 netdev_err(fep->netdev, "MDIO read timeout\n"); in fec_enet_mdio_read()
1853 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); in fec_enet_mdio_read()
1865 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_write() local
1866 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_write()
1882 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_write()
1885 ret = fec_enet_mdio_wait(fep); in fec_enet_mdio_write()
1887 netdev_err(fep->netdev, "MDIO address write timeout\n"); in fec_enet_mdio_write()
1900 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_write()
1903 ret = fec_enet_mdio_wait(fep); in fec_enet_mdio_write()
1905 netdev_err(fep->netdev, "MDIO write timeout\n"); in fec_enet_mdio_write()
1916 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_phy_reset_after_clk_enable() local
1921 } else if (fep->phy_node) { in fec_enet_phy_reset_after_clk_enable()
1929 phy_dev = of_phy_find_device(fep->phy_node); in fec_enet_phy_reset_after_clk_enable()
1937 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_clk_enable() local
1941 ret = clk_prepare_enable(fep->clk_enet_out); in fec_enet_clk_enable()
1945 if (fep->clk_ptp) { in fec_enet_clk_enable()
1946 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1947 ret = clk_prepare_enable(fep->clk_ptp); in fec_enet_clk_enable()
1949 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1952 fep->ptp_clk_on = true; in fec_enet_clk_enable()
1954 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1957 ret = clk_prepare_enable(fep->clk_ref); in fec_enet_clk_enable()
1963 clk_disable_unprepare(fep->clk_enet_out); in fec_enet_clk_enable()
1964 if (fep->clk_ptp) { in fec_enet_clk_enable()
1965 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1966 clk_disable_unprepare(fep->clk_ptp); in fec_enet_clk_enable()
1967 fep->ptp_clk_on = false; in fec_enet_clk_enable()
1968 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1970 clk_disable_unprepare(fep->clk_ref); in fec_enet_clk_enable()
1976 if (fep->clk_ptp) { in fec_enet_clk_enable()
1977 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1978 clk_disable_unprepare(fep->clk_ptp); in fec_enet_clk_enable()
1979 fep->ptp_clk_on = false; in fec_enet_clk_enable()
1980 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1983 clk_disable_unprepare(fep->clk_enet_out); in fec_enet_clk_enable()
1990 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_mii_probe() local
1995 int dev_id = fep->dev_id; in fec_enet_mii_probe()
1997 if (fep->phy_node) { in fec_enet_mii_probe()
1998 phy_dev = of_phy_connect(ndev, fep->phy_node, in fec_enet_mii_probe()
2000 fep->phy_interface); in fec_enet_mii_probe()
2008 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) in fec_enet_mii_probe()
2012 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); in fec_enet_mii_probe()
2025 fep->phy_interface); in fec_enet_mii_probe()
2034 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { in fec_enet_mii_probe()
2045 fep->link = 0; in fec_enet_mii_probe()
2046 fep->full_duplex = 0; in fec_enet_mii_probe()
2057 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_mii_init() local
2080 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { in fec_enet_mii_init()
2083 fep->mii_bus = fec0_mii_bus; in fec_enet_mii_init()
2106 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); in fec_enet_mii_init()
2107 if (fep->quirks & FEC_QUIRK_ENET_MAC) in fec_enet_mii_init()
2112 clk_get_rate(fep->clk_ipg)); in fec_enet_mii_init()
2129 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; in fec_enet_mii_init()
2131 fep->phy_speed = mii_speed << 1 | holdtime << 8; in fec_enet_mii_init()
2134 fep->phy_speed |= BIT(7); in fec_enet_mii_init()
2136 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { in fec_enet_mii_init()
2145 writel(0, fep->hwp + FEC_MII_DATA); in fec_enet_mii_init()
2148 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_enet_mii_init()
2151 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); in fec_enet_mii_init()
2153 fep->mii_bus = mdiobus_alloc(); in fec_enet_mii_init()
2154 if (fep->mii_bus == NULL) { in fec_enet_mii_init()
2159 fep->mii_bus->name = "fec_enet_mii_bus"; in fec_enet_mii_init()
2160 fep->mii_bus->read = fec_enet_mdio_read; in fec_enet_mii_init()
2161 fep->mii_bus->write = fec_enet_mdio_write; in fec_enet_mii_init()
2162 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in fec_enet_mii_init()
2163 pdev->name, fep->dev_id + 1); in fec_enet_mii_init()
2164 fep->mii_bus->priv = fep; in fec_enet_mii_init()
2165 fep->mii_bus->parent = &pdev->dev; in fec_enet_mii_init()
2167 err = of_mdiobus_register(fep->mii_bus, node); in fec_enet_mii_init()
2175 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) in fec_enet_mii_init()
2176 fec0_mii_bus = fep->mii_bus; in fec_enet_mii_init()
2181 mdiobus_free(fep->mii_bus); in fec_enet_mii_init()
2186 static void fec_enet_mii_remove(struct fec_enet_private *fep) in fec_enet_mii_remove() argument
2189 mdiobus_unregister(fep->mii_bus); in fec_enet_mii_remove()
2190 mdiobus_free(fep->mii_bus); in fec_enet_mii_remove()
2197 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_drvinfo() local
2199 strlcpy(info->driver, fep->pdev->dev.driver->name, in fec_enet_get_drvinfo()
2206 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_regs_len() local
2210 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); in fec_enet_get_regs_len()
2269 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_regs() local
2270 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; in fec_enet_get_regs()
2271 struct device *dev = &fep->pdev->dev; in fec_enet_get_regs()
2288 !(fep->quirks & FEC_QUIRK_HAS_FRREG)) in fec_enet_get_regs()
2302 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_ts_info() local
2304 if (fep->bufdesc_ex) { in fec_enet_get_ts_info()
2312 if (fep->ptp_clock) in fec_enet_get_ts_info()
2313 info->phc_index = ptp_clock_index(fep->ptp_clock); in fec_enet_get_ts_info()
2333 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_pauseparam() local
2335 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; in fec_enet_get_pauseparam()
2336 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; in fec_enet_get_pauseparam()
2343 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_pauseparam() local
2354 fep->pause_flag = 0; in fec_enet_set_pauseparam()
2357 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; in fec_enet_set_pauseparam()
2358 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; in fec_enet_set_pauseparam()
2369 napi_disable(&fep->napi); in fec_enet_set_pauseparam()
2374 napi_enable(&fep->napi); in fec_enet_set_pauseparam()
2450 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_update_ethtool_stats() local
2454 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); in fec_enet_update_ethtool_stats()
2460 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_get_ethtool_stats() local
2465 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); in fec_enet_get_ethtool_stats()
2493 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_clear_ethtool_stats() local
2497 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); in fec_enet_clear_ethtool_stats()
2500 writel(0, fep->hwp + fec_stats[i].offset); in fec_enet_clear_ethtool_stats()
2503 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); in fec_enet_clear_ethtool_stats()
2523 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_us_to_itr_clock() local
2525 return us * (fep->itr_clk_rate / 64000) / 1000; in fec_enet_us_to_itr_clock()
2531 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_itr_coal_set() local
2535 if (!fep->rx_time_itr || !fep->rx_pkts_itr || in fec_enet_itr_coal_set()
2536 !fep->tx_time_itr || !fep->tx_pkts_itr) in fec_enet_itr_coal_set()
2546 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); in fec_enet_itr_coal_set()
2547 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); in fec_enet_itr_coal_set()
2548 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); in fec_enet_itr_coal_set()
2549 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); in fec_enet_itr_coal_set()
2554 writel(tx_itr, fep->hwp + FEC_TXIC0); in fec_enet_itr_coal_set()
2555 writel(rx_itr, fep->hwp + FEC_RXIC0); in fec_enet_itr_coal_set()
2556 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_enet_itr_coal_set()
2557 writel(tx_itr, fep->hwp + FEC_TXIC1); in fec_enet_itr_coal_set()
2558 writel(rx_itr, fep->hwp + FEC_RXIC1); in fec_enet_itr_coal_set()
2559 writel(tx_itr, fep->hwp + FEC_TXIC2); in fec_enet_itr_coal_set()
2560 writel(rx_itr, fep->hwp + FEC_RXIC2); in fec_enet_itr_coal_set()
2567 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_coalesce() local
2569 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) in fec_enet_get_coalesce()
2572 ec->rx_coalesce_usecs = fep->rx_time_itr; in fec_enet_get_coalesce()
2573 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; in fec_enet_get_coalesce()
2575 ec->tx_coalesce_usecs = fep->tx_time_itr; in fec_enet_get_coalesce()
2576 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; in fec_enet_get_coalesce()
2584 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_coalesce() local
2585 struct device *dev = &fep->pdev->dev; in fec_enet_set_coalesce()
2588 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) in fec_enet_set_coalesce()
2613 fep->rx_time_itr = ec->rx_coalesce_usecs; in fec_enet_set_coalesce()
2614 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; in fec_enet_set_coalesce()
2616 fep->tx_time_itr = ec->tx_coalesce_usecs; in fec_enet_set_coalesce()
2617 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; in fec_enet_set_coalesce()
2641 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_get_tunable() local
2646 *(u32 *)data = fep->rx_copybreak; in fec_enet_get_tunable()
2660 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_set_tunable() local
2665 fep->rx_copybreak = *(u32 *)data; in fec_enet_set_tunable()
2678 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_wol() local
2680 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { in fec_enet_get_wol()
2682 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; in fec_enet_get_wol()
2691 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_wol() local
2693 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) in fec_enet_set_wol()
2701 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; in fec_enet_set_wol()
2702 if (fep->irq[0] > 0) in fec_enet_set_wol()
2703 enable_irq_wake(fep->irq[0]); in fec_enet_set_wol()
2705 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); in fec_enet_set_wol()
2706 if (fep->irq[0] > 0) in fec_enet_set_wol()
2707 disable_irq_wake(fep->irq[0]); in fec_enet_set_wol()
2741 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_ioctl() local
2750 if (fep->bufdesc_ex) { in fec_enet_ioctl()
2768 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_free_buffers() local
2776 for (q = 0; q < fep->num_rx_queues; q++) { in fec_enet_free_buffers()
2777 rxq = fep->rx_queue[q]; in fec_enet_free_buffers()
2783 dma_unmap_single(&fep->pdev->dev, in fec_enet_free_buffers()
2785 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_free_buffers()
2793 for (q = 0; q < fep->num_tx_queues; q++) { in fec_enet_free_buffers()
2794 txq = fep->tx_queue[q]; in fec_enet_free_buffers()
2807 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_free_queue() local
2811 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_free_queue()
2812 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { in fec_enet_free_queue()
2813 txq = fep->tx_queue[i]; in fec_enet_free_queue()
2814 dma_free_coherent(&fep->pdev->dev, in fec_enet_free_queue()
2820 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_free_queue()
2821 kfree(fep->rx_queue[i]); in fec_enet_free_queue()
2822 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_free_queue()
2823 kfree(fep->tx_queue[i]); in fec_enet_free_queue()
2828 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_queue() local
2833 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_alloc_queue()
2840 fep->tx_queue[i] = txq; in fec_enet_alloc_queue()
2842 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; in fec_enet_alloc_queue()
2848 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, in fec_enet_alloc_queue()
2858 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_alloc_queue()
2859 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), in fec_enet_alloc_queue()
2861 if (!fep->rx_queue[i]) { in fec_enet_alloc_queue()
2866 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; in fec_enet_alloc_queue()
2867 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; in fec_enet_alloc_queue()
2879 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_rxq_buffers() local
2885 rxq = fep->rx_queue[queue]; in fec_enet_alloc_rxq_buffers()
2900 if (fep->bufdesc_ex) { in fec_enet_alloc_rxq_buffers()
2921 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_txq_buffers() local
2926 txq = fep->tx_queue[queue]; in fec_enet_alloc_txq_buffers()
2936 if (fep->bufdesc_ex) { in fec_enet_alloc_txq_buffers()
2957 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_buffers() local
2960 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_alloc_buffers()
2964 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_alloc_buffers()
2973 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_open() local
2977 ret = pm_runtime_resume_and_get(&fep->pdev->dev); in fec_enet_open()
2981 pinctrl_pm_select_default_state(&fep->pdev->dev); in fec_enet_open()
3019 if (fep->quirks & FEC_QUIRK_ERR006687) in fec_enet_open()
3022 napi_enable(&fep->napi); in fec_enet_open()
3026 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & in fec_enet_open()
3036 pm_runtime_mark_last_busy(&fep->pdev->dev); in fec_enet_open()
3037 pm_runtime_put_autosuspend(&fep->pdev->dev); in fec_enet_open()
3038 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_enet_open()
3045 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_close() local
3050 napi_disable(&fep->napi); in fec_enet_close()
3057 if (fep->quirks & FEC_QUIRK_ERR006687) in fec_enet_close()
3063 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_enet_close()
3064 pm_runtime_mark_last_busy(&fep->pdev->dev); in fec_enet_close()
3065 pm_runtime_put_autosuspend(&fep->pdev->dev); in fec_enet_close()
3086 struct fec_enet_private *fep = netdev_priv(ndev); in set_multicast_list() local
3093 tmp = readl(fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3095 writel(tmp, fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3099 tmp = readl(fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3101 writel(tmp, fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3107 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
3108 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
3129 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
3130 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
3137 struct fec_enet_private *fep = netdev_priv(ndev); in fec_set_mac_address() local
3156 fep->hwp + FEC_ADDR_LOW); in fec_set_mac_address()
3158 fep->hwp + FEC_ADDR_HIGH); in fec_set_mac_address()
3173 struct fec_enet_private *fep = netdev_priv(dev); in fec_poll_controller() local
3176 if (fep->irq[i] > 0) { in fec_poll_controller()
3177 disable_irq(fep->irq[i]); in fec_poll_controller()
3178 fec_enet_interrupt(fep->irq[i], dev); in fec_poll_controller()
3179 enable_irq(fep->irq[i]); in fec_poll_controller()
3188 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_set_netdev_features() local
3196 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; in fec_enet_set_netdev_features()
3198 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; in fec_enet_set_netdev_features()
3205 struct fec_enet_private *fep = netdev_priv(netdev); in fec_set_features() local
3209 napi_disable(&fep->napi); in fec_set_features()
3216 napi_enable(&fep->napi); in fec_set_features()
3253 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_init() local
3258 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : in fec_enet_init()
3265 fep->rx_align = 0xf; in fec_enet_init()
3266 fep->tx_align = 0xf; in fec_enet_init()
3268 fep->rx_align = 0x3; in fec_enet_init()
3269 fep->tx_align = 0x3; in fec_enet_init()
3273 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); in fec_enet_init()
3275 dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); in fec_enet_init()
3281 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; in fec_enet_init()
3284 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, in fec_enet_init()
3296 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_init()
3297 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; in fec_enet_init()
3306 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; in fec_enet_init()
3312 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_init()
3313 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; in fec_enet_init()
3322 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; in fec_enet_init()
3334 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); in fec_enet_init()
3335 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); in fec_enet_init()
3337 if (fep->quirks & FEC_QUIRK_HAS_VLAN) in fec_enet_init()
3341 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { in fec_enet_init()
3347 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; in fec_enet_init()
3350 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_enet_init()
3351 fep->tx_align = 0; in fec_enet_init()
3352 fep->rx_align = 0x3f; in fec_enet_init()
3359 if (fep->quirks & FEC_QUIRK_MIB_CLEAR) in fec_enet_init()
3477 static int fec_enet_init_stop_mode(struct fec_enet_private *fep, in fec_enet_init_stop_mode() argument
3491 dev_dbg(&fep->pdev->dev, "no stop mode property\n"); in fec_enet_init_stop_mode()
3495 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); in fec_enet_init_stop_mode()
3496 if (IS_ERR(fep->stop_gpr.gpr)) { in fec_enet_init_stop_mode()
3497 dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); in fec_enet_init_stop_mode()
3498 ret = PTR_ERR(fep->stop_gpr.gpr); in fec_enet_init_stop_mode()
3499 fep->stop_gpr.gpr = NULL; in fec_enet_init_stop_mode()
3503 fep->stop_gpr.reg = out_val[1]; in fec_enet_init_stop_mode()
3504 fep->stop_gpr.bit = out_val[2]; in fec_enet_init_stop_mode()
3515 struct fec_enet_private *fep; in fec_probe() local
3540 fep = netdev_priv(ndev); in fec_probe()
3547 fep->quirks = dev_info->quirks; in fec_probe()
3549 fep->netdev = ndev; in fec_probe()
3550 fep->num_rx_queues = num_rx_qs; in fec_probe()
3551 fep->num_tx_queues = num_tx_qs; in fec_probe()
3555 if (fep->quirks & FEC_QUIRK_HAS_GBIT) in fec_probe()
3556 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; in fec_probe()
3562 fep->hwp = devm_platform_ioremap_resource(pdev, 0); in fec_probe()
3563 if (IS_ERR(fep->hwp)) { in fec_probe()
3564 ret = PTR_ERR(fep->hwp); in fec_probe()
3568 fep->pdev = pdev; in fec_probe()
3569 fep->dev_id = dev_id++; in fec_probe()
3576 fep->quirks |= FEC_QUIRK_ERR006687; in fec_probe()
3579 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; in fec_probe()
3581 ret = fec_enet_init_stop_mode(fep, np); in fec_probe()
3595 fep->phy_node = phy_node; in fec_probe()
3601 fep->phy_interface = pdata->phy; in fec_probe()
3603 fep->phy_interface = PHY_INTERFACE_MODE_MII; in fec_probe()
3605 fep->phy_interface = interface; in fec_probe()
3608 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); in fec_probe()
3609 if (IS_ERR(fep->clk_ipg)) { in fec_probe()
3610 ret = PTR_ERR(fep->clk_ipg); in fec_probe()
3614 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); in fec_probe()
3615 if (IS_ERR(fep->clk_ahb)) { in fec_probe()
3616 ret = PTR_ERR(fep->clk_ahb); in fec_probe()
3620 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); in fec_probe()
3623 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); in fec_probe()
3624 if (IS_ERR(fep->clk_enet_out)) in fec_probe()
3625 fep->clk_enet_out = NULL; in fec_probe()
3627 fep->ptp_clk_on = false; in fec_probe()
3628 mutex_init(&fep->ptp_clk_mutex); in fec_probe()
3631 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); in fec_probe()
3632 if (IS_ERR(fep->clk_ref)) in fec_probe()
3633 fep->clk_ref = NULL; in fec_probe()
3635 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; in fec_probe()
3636 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); in fec_probe()
3637 if (IS_ERR(fep->clk_ptp)) { in fec_probe()
3638 fep->clk_ptp = NULL; in fec_probe()
3639 fep->bufdesc_ex = false; in fec_probe()
3646 ret = clk_prepare_enable(fep->clk_ipg); in fec_probe()
3649 ret = clk_prepare_enable(fep->clk_ahb); in fec_probe()
3653 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); in fec_probe()
3654 if (!IS_ERR(fep->reg_phy)) { in fec_probe()
3655 ret = regulator_enable(fep->reg_phy); in fec_probe()
3662 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { in fec_probe()
3666 fep->reg_phy = NULL; in fec_probe()
3680 if (fep->bufdesc_ex) in fec_probe()
3701 fep->irq[i] = irq; in fec_probe()
3719 device_init_wakeup(&ndev->dev, fep->wol_flag & in fec_probe()
3722 if (fep->bufdesc_ex && fep->ptp_clock) in fec_probe()
3723 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); in fec_probe()
3725 fep->rx_copybreak = COPYBREAK_DEFAULT; in fec_probe()
3726 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); in fec_probe()
3734 fec_enet_mii_remove(fep); in fec_probe()
3742 if (fep->reg_phy) in fec_probe()
3743 regulator_disable(fep->reg_phy); in fec_probe()
3745 clk_disable_unprepare(fep->clk_ahb); in fec_probe()
3747 clk_disable_unprepare(fep->clk_ipg); in fec_probe()
3767 struct fec_enet_private *fep = netdev_priv(ndev); in fec_drv_remove() local
3775 cancel_work_sync(&fep->tx_timeout_work); in fec_drv_remove()
3778 fec_enet_mii_remove(fep); in fec_drv_remove()
3779 if (fep->reg_phy) in fec_drv_remove()
3780 regulator_disable(fep->reg_phy); in fec_drv_remove()
3784 of_node_put(fep->phy_node); in fec_drv_remove()
3787 clk_disable_unprepare(fep->clk_ahb); in fec_drv_remove()
3788 clk_disable_unprepare(fep->clk_ipg); in fec_drv_remove()
3798 struct fec_enet_private *fep = netdev_priv(ndev); in fec_suspend() local
3802 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) in fec_suspend()
3803 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; in fec_suspend()
3805 napi_disable(&fep->napi); in fec_suspend()
3811 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) in fec_suspend()
3812 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_suspend()
3816 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) in fec_suspend()
3817 regulator_disable(fep->reg_phy); in fec_suspend()
3822 if (fep->clk_enet_out || fep->reg_phy) in fec_suspend()
3823 fep->link = 0; in fec_suspend()
3831 struct fec_enet_private *fep = netdev_priv(ndev); in fec_resume() local
3835 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { in fec_resume()
3836 ret = regulator_enable(fep->reg_phy); in fec_resume()
3848 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { in fec_resume()
3849 fec_enet_stop_mode(fep, false); in fec_resume()
3851 val = readl(fep->hwp + FEC_ECNTRL); in fec_resume()
3853 writel(val, fep->hwp + FEC_ECNTRL); in fec_resume()
3854 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; in fec_resume()
3856 pinctrl_pm_select_default_state(&fep->pdev->dev); in fec_resume()
3862 napi_enable(&fep->napi); in fec_resume()
3870 if (fep->reg_phy) in fec_resume()
3871 regulator_disable(fep->reg_phy); in fec_resume()
3878 struct fec_enet_private *fep = netdev_priv(ndev); in fec_runtime_suspend() local
3880 clk_disable_unprepare(fep->clk_ahb); in fec_runtime_suspend()
3881 clk_disable_unprepare(fep->clk_ipg); in fec_runtime_suspend()
3889 struct fec_enet_private *fep = netdev_priv(ndev); in fec_runtime_resume() local
3892 ret = clk_prepare_enable(fep->clk_ahb); in fec_runtime_resume()
3895 ret = clk_prepare_enable(fep->clk_ipg); in fec_runtime_resume()
3902 clk_disable_unprepare(fep->clk_ahb); in fec_runtime_resume()