Lines Matching +full:mac +full:- +full:clk +full:- +full:rx

1 // SPDX-License-Identifier: GPL-2.0
2 /* Atheros AR71xx built-in ethernet mac driver
11 * David Bauer <mail@david-bauer.net>
14 * Hauke Mehrtens <hauke@hauke-m.de>
15 * Johann Neuhauser <johann@it-neuhauser.de>
17 * Jo-Philipp Wich <jo@mein.io>
38 #include <linux/clk.h>
42 /* For our NAPI weight bigger does *NOT* mean better - it means more
43 * D-cache misses and lots more wasted cycles than we'll ever
72 #define MAC_CFG1_RXE BIT(2) /* Rx Enable */
73 #define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */
75 #define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */
126 #define FIFO_CFG0_RXS BIT(1) /* Rx System Module */
127 #define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */
203 #define RX_CTRL_RXE BIT(0) /* Rx Enable */
211 #define RX_STATUS_OF BIT(2) /* Rx Overflow */
244 { 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
245 { 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
246 { 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
247 { 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
248 { 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
249 { 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
250 { 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
251 { 0x009C, GENMASK(23, 0), "Rx Byte", },
252 { 0x00A0, GENMASK(17, 0), "Rx Packet", },
253 { 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
254 { 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
255 { 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
256 { 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
257 { 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
258 { 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
259 { 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
260 { 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
261 { 0x00C4, GENMASK(11, 0), "Rx Code Error", },
262 { 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
263 { 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
264 { 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
265 { 0x00D4, GENMASK(11, 0), "Rx Fragments", },
266 { 0x00D8, GENMASK(11, 0), "Rx Jabber", },
267 { 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
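
Each hardware counter above is only as wide as its GENMASK(h, l) entry, so a raw register read has to be masked down to the stated width before it is accumulated. A self-contained sketch of that masking arithmetic, with the kernel's GENMASK() from <linux/bits.h> reimplemented for user space and a made-up raw value:

    #include <stdint.h>
    #include <stdio.h>

    /* user-space stand-in for the kernel's GENMASK(h, l) */
    #define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

    int main(void)
    {
        uint32_t raw = 0xdeadb123;            /* hypothetical counter read */
        uint32_t fcs = raw & GENMASK(11, 0);  /* "Rx FCS Error": 12 bits */
        uint32_t pkts = raw & GENMASK(17, 0); /* "Rx Packet": 18 bits */

        printf("fcs=0x%x pkts=0x%x\n", fcs, pkts); /* fcs=0x123 pkts=0x1b123 */
        return 0;
    }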
312 } rx; member
321 /* "Cold" fields - not used in the data path. */
348 /* Critical data related to the per-packet data path are clustered
349 * early in this structure to help improve the D-cache footprint.
363 /* From this point onwards we're not looking at per-packet fields. */
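
A minimal illustration of the hot/cold split these comments describe, using hypothetical field names rather than the driver's actual members:

    #include <stdint.h>

    struct example_priv {
        /* hot: touched on every packet, clustered for D-cache locality */
        void *rx_ring;
        void *tx_ring;
        uint32_t rx_buf_size;

        /* cold: probe- and control-path state, kept off the hot cache lines */
        void *clk_eth;
        void *mac_reset;
        uint32_t msg_enable;
    };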
383 struct clk *clk_mdio;
384 struct clk *clk_eth;
389 return (desc->ctrl & DESC_EMPTY) != 0; in ag71xx_desc_empty()
394 return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE]; in ag71xx_ring_desc()
399 return fls(size - 1); in ag71xx_ring_size_order()
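
fls() returns the one-based index of the most significant set bit, so fls(size - 1) maps a power-of-two ring size to its order. A user-space check of that identity, emulating fls() with GCC/Clang's __builtin_clz():

    #include <stdio.h>

    /* fls(x): one-based index of the highest set bit; 0 when x == 0 */
    static int fls(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        printf("%d\n", fls(256 - 1)); /* 8: a 256-entry ring has order 8 */
        printf("%d\n", fls(64 - 1));  /* 6 */
        return 0;
    }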
404 return ag->dcfg->type == type; in ag71xx_is()
409 iowrite32(value, ag->mac_base + reg); in ag71xx_wr()
411 (void)ioread32(ag->mac_base + reg); in ag71xx_wr()
416 return ioread32(ag->mac_base + reg); in ag71xx_rr()
423 r = ag->mac_base + reg; in ag71xx_sb()
433 r = ag->mac_base + reg; in ag71xx_cb()
454 strscpy(info->driver, "ag71xx", sizeof(info->driver)); in ag71xx_get_drvinfo()
455 strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node), in ag71xx_get_drvinfo()
456 sizeof(info->bus_info)); in ag71xx_get_drvinfo()
464 return phylink_ethtool_ksettings_get(ag->phylink, kset); in ag71xx_get_link_ksettings()
472 return phylink_ethtool_ksettings_set(ag->phylink, kset); in ag71xx_set_link_ksettings()
479 return phylink_ethtool_nway_reset(ag->phylink); in ag71xx_ethtool_nway_reset()
487 phylink_ethtool_get_pauseparam(ag->phylink, pause); in ag71xx_ethtool_get_pauseparam()
495 return phylink_ethtool_set_pauseparam(ag->phylink, pause); in ag71xx_ethtool_set_pauseparam()
534 return -EOPNOTSUPP; in ag71xx_ethtool_get_sset_count()
555 struct net_device *ndev = ag->ndev; in ag71xx_mdio_wait_busy()
572 return -ETIMEDOUT; in ag71xx_mdio_wait_busy()
577 struct ag71xx *ag = bus->priv; in ag71xx_mdio_mii_read()
597 netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n", in ag71xx_mdio_mii_read()
606 struct ag71xx *ag = bus->priv; in ag71xx_mdio_mii_write()
608 netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n", in ag71xx_mdio_mii_write()
636 ref_clock = clk_get_rate(ag->clk_mdio); in ag71xx_mdio_get_divider()
638 return -EINVAL; in ag71xx_mdio_get_divider()
661 return -ENOENT; in ag71xx_mdio_get_divider()
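
The divider lookup walks a table of candidate dividers against the reference clock from clk_mdio and returns -ENOENT when none keeps the bus slow enough. A standalone sketch of that selection; the table entries and the 2.5 MHz ceiling are assumptions for illustration, not values taken from the matched lines:

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned int div_table[] = { 4, 6, 8, 10, 14, 20, 28 }; /* assumed */
        unsigned long ref_clock = 25000000; /* hypothetical 25 MHz reference */
        unsigned long max_clock = 2500000;  /* assumed MDIO ceiling */
        int div = -ENOENT;

        for (size_t i = 0; i < sizeof(div_table) / sizeof(div_table[0]); i++) {
            if (ref_clock / div_table[i] <= max_clock) {
                div = div_table[i];
                break;
            }
        }
        printf("divider: %d\n", div); /* 10: 25 MHz / 10 = 2.5 MHz */
        return 0;
    }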
666 struct ag71xx *ag = bus->priv; in ag71xx_mdio_reset()
685 struct device *dev = &ag->pdev->dev; in ag71xx_mdio_probe()
686 struct net_device *ndev = ag->ndev; in ag71xx_mdio_probe()
691 np = dev->of_node; in ag71xx_mdio_probe()
692 ag->mii_bus = NULL; in ag71xx_mdio_probe()
694 ag->clk_mdio = devm_clk_get(dev, "mdio"); in ag71xx_mdio_probe()
695 if (IS_ERR(ag->clk_mdio)) { in ag71xx_mdio_probe()
696 netif_err(ag, probe, ndev, "Failed to get mdio clk.\n"); in ag71xx_mdio_probe()
697 return PTR_ERR(ag->clk_mdio); in ag71xx_mdio_probe()
700 err = clk_prepare_enable(ag->clk_mdio); in ag71xx_mdio_probe()
702 netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n"); in ag71xx_mdio_probe()
708 err = -ENOMEM; in ag71xx_mdio_probe()
712 ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); in ag71xx_mdio_probe()
713 if (IS_ERR(ag->mdio_reset)) { in ag71xx_mdio_probe()
715 err = PTR_ERR(ag->mdio_reset); in ag71xx_mdio_probe()
719 mii_bus->name = "ag71xx_mdio"; in ag71xx_mdio_probe()
720 mii_bus->read = ag71xx_mdio_mii_read; in ag71xx_mdio_probe()
721 mii_bus->write = ag71xx_mdio_mii_write; in ag71xx_mdio_probe()
722 mii_bus->reset = ag71xx_mdio_reset; in ag71xx_mdio_probe()
723 mii_bus->priv = ag; in ag71xx_mdio_probe()
724 mii_bus->parent = dev; in ag71xx_mdio_probe()
725 snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx); in ag71xx_mdio_probe()
727 if (!IS_ERR(ag->mdio_reset)) { in ag71xx_mdio_probe()
728 reset_control_assert(ag->mdio_reset); in ag71xx_mdio_probe()
730 reset_control_deassert(ag->mdio_reset); in ag71xx_mdio_probe()
740 ag->mii_bus = mii_bus; in ag71xx_mdio_probe()
745 clk_disable_unprepare(ag->clk_mdio); in ag71xx_mdio_probe()
751 if (ag->mii_bus) in ag71xx_mdio_remove()
752 mdiobus_unregister(ag->mii_bus); in ag71xx_mdio_remove()
753 clk_disable_unprepare(ag->clk_mdio); in ag71xx_mdio_remove()
758 /* disable all interrupts and stop the rx/tx engine */ in ag71xx_hw_stop()
769 timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start); in ag71xx_check_dma_stuck()
773 if (!netif_carrier_ok(ag->ndev)) in ag71xx_check_dma_stuck()
791 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_tx_packets()
793 struct net_device *ndev = ag->ndev; in ag71xx_tx_packets()
797 ring_mask = BIT(ring->order) - 1; in ag71xx_tx_packets()
798 ring_size = BIT(ring->order); in ag71xx_tx_packets()
802 while (ring->dirty + n != ring->curr) { in ag71xx_tx_packets()
807 i = (ring->dirty + n) & ring_mask; in ag71xx_tx_packets()
809 skb = ring->buf[i].tx.skb; in ag71xx_tx_packets()
812 if (ag->dcfg->tx_hang_workaround && in ag71xx_tx_packets()
814 schedule_delayed_work(&ag->restart_work, in ag71xx_tx_packets()
822 desc->ctrl |= DESC_EMPTY; in ag71xx_tx_packets()
829 ring->buf[i].tx.skb = NULL; in ag71xx_tx_packets()
831 bytes_compl += ring->buf[i].tx.len; in ag71xx_tx_packets()
834 ring->dirty += n; in ag71xx_tx_packets()
838 n--; in ag71xx_tx_packets()
847 ag->ndev->stats.tx_bytes += bytes_compl; in ag71xx_tx_packets()
848 ag->ndev->stats.tx_packets += sent; in ag71xx_tx_packets()
850 netdev_completed_queue(ag->ndev, sent, bytes_compl); in ag71xx_tx_packets()
851 if ((ring->curr - ring->dirty) < (ring_size * 3) / 4) in ag71xx_tx_packets()
852 netif_wake_queue(ag->ndev); in ag71xx_tx_packets()
855 cancel_delayed_work(&ag->restart_work); in ag71xx_tx_packets()
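
The reclaim loop never wraps curr and dirty explicitly: both run freely as unsigned counters, and (dirty + n) & ring_mask recovers the slot index because the ring size is a power of two. A standalone sketch of that indexing scheme, with the ring order and counter values made up:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        unsigned int order = 4;                  /* hypothetical 16-entry ring */
        unsigned int ring_mask = BIT(order) - 1; /* 0xf */
        unsigned int curr = 18, dirty = 15;      /* free-running counters */

        for (unsigned int n = 0; dirty + n != curr; n++)
            printf("reclaim slot %u\n", (dirty + n) & ring_mask); /* 15, 0, 1 */
        return 0;
    }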
862 struct net_device *ndev = ag->ndev; in ag71xx_dma_wait_stop()
866 u32 rx, tx; in ag71xx_dma_wait_stop() local
870 rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE; in ag71xx_dma_wait_stop()
872 if (!rx && !tx) in ag71xx_dma_wait_stop()
881 struct net_device *ndev = ag->ndev; in ag71xx_dma_reset()
885 /* stop RX and TX */ in ag71xx_dma_reset()
889 /* give the hardware some time to really stop all rx/tx activity in ag71xx_dma_reset()
895 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma); in ag71xx_dma_reset()
896 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma); in ag71xx_dma_reset()
898 /* clear pending RX/TX interrupts */ in ag71xx_dma_reset()
910 netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n", in ag71xx_dma_reset()
927 /* setup MAC configuration registers */ in ag71xx_hw_setup()
938 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]); in ag71xx_hw_setup()
939 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]); in ag71xx_hw_setup()
949 static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac) in ag71xx_hw_set_macaddr() argument
953 t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16) in ag71xx_hw_set_macaddr()
954 | (((u32)mac[3]) << 8) | ((u32)mac[2]); in ag71xx_hw_set_macaddr()
958 t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16); in ag71xx_hw_set_macaddr()
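
The six address bytes are split across two 32-bit registers, with mac[5] in the top byte of the first word and mac[0]/mac[1] in the top half of the second. A quick user-space check of the packing; the address itself is an example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint32_t addr1 = ((uint32_t)mac[5] << 24) | ((uint32_t)mac[4] << 16) |
                         ((uint32_t)mac[3] << 8)  |  (uint32_t)mac[2];
        uint32_t addr2 = ((uint32_t)mac[1] << 24) | ((uint32_t)mac[0] << 16);

        printf("%08x %08x\n", addr1, addr2); /* 55443322 11000000 */
        return 0;
    }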
964 struct net_device *dev = ag->ndev; in ag71xx_fast_reset()
975 reset_control_assert(ag->mac_reset); in ag71xx_fast_reset()
977 reset_control_deassert(ag->mac_reset); in ag71xx_fast_reset()
982 ag->tx_ring.curr = 0; in ag71xx_fast_reset()
983 ag->tx_ring.dirty = 0; in ag71xx_fast_reset()
984 netdev_reset_queue(ag->ndev); in ag71xx_fast_reset()
988 ag71xx_max_frame_len(ag->ndev->mtu)); in ag71xx_fast_reset()
991 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); in ag71xx_fast_reset()
994 ag71xx_hw_set_macaddr(ag, dev->dev_addr); in ag71xx_fast_reset()
999 /* start RX engine */ in ag71xx_hw_start()
1005 netif_wake_queue(ag->ndev); in ag71xx_hw_start()
1011 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_config()
1019 if (ag->tx_ring.desc_split) { in ag71xx_mac_config()
1020 ag->fifodata[2] &= 0xffff; in ag71xx_mac_config()
1021 ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16; in ag71xx_mac_config()
1024 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]); in ag71xx_mac_config()
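
When descriptor splitting is active, the low halfword of fifodata[2] is preserved and the high halfword is recomputed from the split threshold. A quick check of that arithmetic; the 512-byte split and the preset register value are assumptions:

    #include <stdio.h>

    int main(void)
    {
        unsigned int desc_split = 512;       /* assumed split threshold */
        unsigned int fifodata2 = 0x01f00140; /* hypothetical preset value */

        fifodata2 &= 0xffff;                          /* keep low halfword */
        fifodata2 |= ((2048 - desc_split) / 4) << 16; /* 0x180 << 16 */

        printf("%08x\n", fifodata2); /* 01800140 */
        return 0;
    }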
1030 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_link_down()
1041 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_link_up()
1099 ag->phylink_config.dev = &ag->ndev->dev; in ag71xx_phylink_setup()
1100 ag->phylink_config.type = PHYLINK_NETDEV; in ag71xx_phylink_setup()
1101 ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | in ag71xx_phylink_setup()
1104 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) || in ag71xx_phylink_setup()
1107 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) in ag71xx_phylink_setup()
1109 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1111 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) || in ag71xx_phylink_setup()
1112 (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) || in ag71xx_phylink_setup()
1113 (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1)) in ag71xx_phylink_setup()
1115 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1117 if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0) in ag71xx_phylink_setup()
1119 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1121 if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0) in ag71xx_phylink_setup()
1123 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1125 if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) || in ag71xx_phylink_setup()
1126 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) in ag71xx_phylink_setup()
1128 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1130 phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode, in ag71xx_phylink_setup()
1131 ag->phy_if_mode, &ag71xx_phylink_mac_ops); in ag71xx_phylink_setup()
1135 ag->phylink = phylink; in ag71xx_phylink_setup()
1141 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_ring_tx_clean()
1142 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_tx_clean()
1144 struct net_device *ndev = ag->ndev; in ag71xx_ring_tx_clean()
1146 while (ring->curr != ring->dirty) { in ag71xx_ring_tx_clean()
1148 u32 i = ring->dirty & ring_mask; in ag71xx_ring_tx_clean()
1152 desc->ctrl = 0; in ag71xx_ring_tx_clean()
1153 ndev->stats.tx_errors++; in ag71xx_ring_tx_clean()
1156 if (ring->buf[i].tx.skb) { in ag71xx_ring_tx_clean()
1157 bytes_compl += ring->buf[i].tx.len; in ag71xx_ring_tx_clean()
1159 dev_kfree_skb_any(ring->buf[i].tx.skb); in ag71xx_ring_tx_clean()
1161 ring->buf[i].tx.skb = NULL; in ag71xx_ring_tx_clean()
1162 ring->dirty++; in ag71xx_ring_tx_clean()
1173 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_ring_tx_init()
1174 int ring_size = BIT(ring->order); in ag71xx_ring_tx_init()
1175 int ring_mask = ring_size - 1; in ag71xx_ring_tx_init()
1181 desc->next = (u32)(ring->descs_dma + in ag71xx_ring_tx_init()
1184 desc->ctrl = DESC_EMPTY; in ag71xx_ring_tx_init()
1185 ring->buf[i].tx.skb = NULL; in ag71xx_ring_tx_init()
1191 ring->curr = 0; in ag71xx_ring_tx_init()
1192 ring->dirty = 0; in ag71xx_ring_tx_init()
1193 netdev_reset_queue(ag->ndev); in ag71xx_ring_tx_init()
1198 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_clean()
1199 int ring_size = BIT(ring->order); in ag71xx_ring_rx_clean()
1202 if (!ring->buf) in ag71xx_ring_rx_clean()
1206 if (ring->buf[i].rx.rx_buf) { in ag71xx_ring_rx_clean()
1207 dma_unmap_single(&ag->pdev->dev, in ag71xx_ring_rx_clean()
1208 ring->buf[i].rx.dma_addr, in ag71xx_ring_rx_clean()
1209 ag->rx_buf_size, DMA_FROM_DEVICE); in ag71xx_ring_rx_clean()
1210 skb_free_frag(ring->buf[i].rx.rx_buf); in ag71xx_ring_rx_clean()
1216 return ag->rx_buf_size + in ag71xx_buffer_size()
1224 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_fill_rx_buf()
1228 desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]); in ag71xx_fill_rx_buf()
1234 buf->rx.rx_buf = data; in ag71xx_fill_rx_buf()
1235 buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size, in ag71xx_fill_rx_buf()
1237 desc->data = (u32)buf->rx.dma_addr + offset; in ag71xx_fill_rx_buf()
1243 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_init()
1244 struct net_device *ndev = ag->ndev; in ag71xx_ring_rx_init()
1245 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_rx_init()
1246 int ring_size = BIT(ring->order); in ag71xx_ring_rx_init()
1254 desc->next = (u32)(ring->descs_dma + in ag71xx_ring_rx_init()
1257 netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n", in ag71xx_ring_rx_init()
1258 desc, desc->next); in ag71xx_ring_rx_init()
1264 if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset, in ag71xx_ring_rx_init()
1266 ret = -ENOMEM; in ag71xx_ring_rx_init()
1270 desc->ctrl = DESC_EMPTY; in ag71xx_ring_rx_init()
1276 ring->curr = 0; in ag71xx_ring_rx_init()
1277 ring->dirty = 0; in ag71xx_ring_rx_init()
1284 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_refill()
1285 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_rx_refill()
1286 int offset = ag->rx_buf_offset; in ag71xx_ring_rx_refill()
1290 for (; ring->curr - ring->dirty > 0; ring->dirty++) { in ag71xx_ring_rx_refill()
1294 i = ring->dirty & ring_mask; in ag71xx_ring_rx_refill()
1297 if (!ring->buf[i].rx.rx_buf && in ag71xx_ring_rx_refill()
1298 !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset, in ag71xx_ring_rx_refill()
1302 desc->ctrl = DESC_EMPTY; in ag71xx_ring_rx_refill()
1309 netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n", in ag71xx_ring_rx_refill()
1317 struct ag71xx_ring *tx = &ag->tx_ring; in ag71xx_rings_init()
1318 struct ag71xx_ring *rx = &ag->rx_ring; in ag71xx_rings_init() local
1321 ring_size = BIT(tx->order) + BIT(rx->order); in ag71xx_rings_init()
1322 tx_size = BIT(tx->order); in ag71xx_rings_init()
1324 tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL); in ag71xx_rings_init()
1325 if (!tx->buf) in ag71xx_rings_init()
1326 return -ENOMEM; in ag71xx_rings_init()
1328 tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, in ag71xx_rings_init()
1330 &tx->descs_dma, GFP_KERNEL); in ag71xx_rings_init()
1331 if (!tx->descs_cpu) { in ag71xx_rings_init()
1332 kfree(tx->buf); in ag71xx_rings_init()
1333 tx->buf = NULL; in ag71xx_rings_init()
1334 return -ENOMEM; in ag71xx_rings_init()
1337 rx->buf = &tx->buf[tx_size]; in ag71xx_rings_init()
1338 rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE; in ag71xx_rings_init()
1339 rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE; in ag71xx_rings_init()
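
Both rings come out of a single buf array and a single coherent descriptor block; the RX halves are just offsets tx_size entries in, so one allocation failure path covers everything. A user-space sketch of the same carve-up, with calloc() standing in for dma_alloc_coherent() and an assumed descriptor stride:

    #include <stdlib.h>

    #define DESC_SIZE 16 /* assumed per-descriptor stride, not the driver's */

    int main(void)
    {
        size_t tx_size = 64, rx_size = 256;         /* hypothetical ring sizes */
        size_t ring_size = tx_size + rx_size;

        void *descs = calloc(ring_size, DESC_SIZE); /* one backing block */
        void *tx_descs = descs;
        void *rx_descs = (char *)descs + tx_size * DESC_SIZE;

        (void)tx_descs; (void)rx_descs; /* TX region first, RX region after it */
        free(descs);
        return 0;
    }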
1347 struct ag71xx_ring *tx = &ag->tx_ring; in ag71xx_rings_free()
1348 struct ag71xx_ring *rx = &ag->rx_ring; in ag71xx_rings_free() local
1351 ring_size = BIT(tx->order) + BIT(rx->order); in ag71xx_rings_free()
1353 if (tx->descs_cpu) in ag71xx_rings_free()
1354 dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE, in ag71xx_rings_free()
1355 tx->descs_cpu, tx->descs_dma); in ag71xx_rings_free()
1357 kfree(tx->buf); in ag71xx_rings_free()
1359 tx->descs_cpu = NULL; in ag71xx_rings_free()
1360 rx->descs_cpu = NULL; in ag71xx_rings_free()
1361 tx->buf = NULL; in ag71xx_rings_free()
1362 rx->buf = NULL; in ag71xx_rings_free()
1371 netdev_reset_queue(ag->ndev); in ag71xx_rings_cleanup()
1381 reset_control_assert(ag->mac_reset); in ag71xx_hw_init()
1383 reset_control_deassert(ag->mac_reset); in ag71xx_hw_init()
1399 napi_enable(&ag->napi); in ag71xx_hw_enable()
1400 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); in ag71xx_hw_enable()
1401 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma); in ag71xx_hw_enable()
1402 netif_start_queue(ag->ndev); in ag71xx_hw_enable()
1409 netif_stop_queue(ag->ndev); in ag71xx_hw_disable()
1414 napi_disable(&ag->napi); in ag71xx_hw_disable()
1415 del_timer_sync(&ag->oom_timer); in ag71xx_hw_disable()
1426 ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0); in ag71xx_open()
1433 max_frame_len = ag71xx_max_frame_len(ndev->mtu); in ag71xx_open()
1434 ag->rx_buf_size = in ag71xx_open()
1439 ag71xx_hw_set_macaddr(ag, ndev->dev_addr); in ag71xx_open()
1445 phylink_start(ag->phylink); in ag71xx_open()
1451 phylink_disconnect_phy(ag->phylink); in ag71xx_open()
1459 phylink_stop(ag->phylink); in ag71xx_stop()
1460 phylink_disconnect_phy(ag->phylink); in ag71xx_stop()
1471 ring_mask = BIT(ring->order) - 1; in ag71xx_fill_dma_desc()
1473 split = ring->desc_split; in ag71xx_fill_dma_desc()
1481 i = (ring->curr + ndesc) & ring_mask; in ag71xx_fill_dma_desc()
1485 return -1; in ag71xx_fill_dma_desc()
1494 cur_len -= 4; in ag71xx_fill_dma_desc()
1497 desc->data = addr; in ag71xx_fill_dma_desc()
1499 len -= cur_len; in ag71xx_fill_dma_desc()
1508 desc->ctrl = cur_len; in ag71xx_fill_dma_desc()
1524 ring = &ag->tx_ring; in ag71xx_hard_start_xmit()
1525 ring_mask = BIT(ring->order) - 1; in ag71xx_hard_start_xmit()
1526 ring_size = BIT(ring->order); in ag71xx_hard_start_xmit()
1528 if (skb->len <= 4) { in ag71xx_hard_start_xmit()
1533 dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len, in ag71xx_hard_start_xmit()
1536 i = ring->curr & ring_mask; in ag71xx_hard_start_xmit()
1541 skb->len & ag->dcfg->desc_pktlen_mask); in ag71xx_hard_start_xmit()
1545 i = (ring->curr + n - 1) & ring_mask; in ag71xx_hard_start_xmit()
1546 ring->buf[i].tx.len = skb->len; in ag71xx_hard_start_xmit()
1547 ring->buf[i].tx.skb = skb; in ag71xx_hard_start_xmit()
1549 netdev_sent_queue(ndev, skb->len); in ag71xx_hard_start_xmit()
1553 desc->ctrl &= ~DESC_EMPTY; in ag71xx_hard_start_xmit()
1554 ring->curr += n; in ag71xx_hard_start_xmit()
1560 if (ring->desc_split) in ag71xx_hard_start_xmit()
1563 if (ring->curr - ring->dirty >= ring_size - ring_min) { in ag71xx_hard_start_xmit()
1576 dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE); in ag71xx_hard_start_xmit()
1579 ndev->stats.tx_dropped++; in ag71xx_hard_start_xmit()
1589 napi_schedule(&ag->napi); in ag71xx_oom_timer_handler()
1598 schedule_delayed_work(&ag->restart_work, 1); in ag71xx_tx_timeout()
1610 phylink_stop(ag->phylink); in ag71xx_restart_work_func()
1611 phylink_start(ag->phylink); in ag71xx_restart_work_func()
1618 struct net_device *ndev = ag->ndev; in ag71xx_rx_packets()
1625 ring = &ag->rx_ring; in ag71xx_rx_packets()
1626 pktlen_mask = ag->dcfg->desc_pktlen_mask; in ag71xx_rx_packets()
1627 offset = ag->rx_buf_offset; in ag71xx_rx_packets()
1628 ring_mask = BIT(ring->order) - 1; in ag71xx_rx_packets()
1629 ring_size = BIT(ring->order); in ag71xx_rx_packets()
1631 netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n", in ag71xx_rx_packets()
1632 limit, ring->curr, ring->dirty); in ag71xx_rx_packets()
1637 unsigned int i = ring->curr & ring_mask; in ag71xx_rx_packets()
1645 if ((ring->dirty + ring_size) == ring->curr) { in ag71xx_rx_packets()
1646 WARN_ONCE(1, "RX out of ring"); in ag71xx_rx_packets()
1652 pktlen = desc->ctrl & pktlen_mask; in ag71xx_rx_packets()
1653 pktlen -= ETH_FCS_LEN; in ag71xx_rx_packets()
1655 dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr, in ag71xx_rx_packets()
1656 ag->rx_buf_size, DMA_FROM_DEVICE); in ag71xx_rx_packets()
1658 ndev->stats.rx_packets++; in ag71xx_rx_packets()
1659 ndev->stats.rx_bytes += pktlen; in ag71xx_rx_packets()
1661 skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag)); in ag71xx_rx_packets()
1663 skb_free_frag(ring->buf[i].rx.rx_buf); in ag71xx_rx_packets()
1671 ndev->stats.rx_dropped++; in ag71xx_rx_packets()
1674 skb->dev = ndev; in ag71xx_rx_packets()
1675 skb->ip_summed = CHECKSUM_NONE; in ag71xx_rx_packets()
1676 list_add_tail(&skb->list, &rx_list); in ag71xx_rx_packets()
1680 ring->buf[i].rx.rx_buf = NULL; in ag71xx_rx_packets()
1683 ring->curr++; in ag71xx_rx_packets()
1689 skb->protocol = eth_type_trans(skb, ndev); in ag71xx_rx_packets()
1692 netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n", in ag71xx_rx_packets()
1693 ring->curr, ring->dirty, done); in ag71xx_rx_packets()
1701 struct ag71xx_ring *rx_ring = &ag->rx_ring; in ag71xx_poll()
1702 int rx_ring_size = BIT(rx_ring->order); in ag71xx_poll()
1703 struct net_device *ndev = ag->ndev; in ag71xx_poll()
1709 netif_dbg(ag, rx_status, ndev, "processing RX ring\n"); in ag71xx_poll()
1712 if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf) in ag71xx_poll()
1718 ndev->stats.rx_fifo_errors++; in ag71xx_poll()
1720 /* restart RX */ in ag71xx_poll()
1732 netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n", in ag71xx_poll()
1743 netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n", in ag71xx_poll()
1750 mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL); in ag71xx_poll()
1774 netif_err(ag, intr, ndev, "RX BUS error\n"); in ag71xx_interrupt()
1781 napi_schedule(&ag->napi); in ag71xx_interrupt()
1791 ndev->mtu = new_mtu; in ag71xx_change_mtu()
1793 ag71xx_max_frame_len(ndev->mtu)); in ag71xx_change_mtu()
1815 struct device_node *np = pdev->dev.of_node; in ag71xx_probe()
1823 return -ENODEV; in ag71xx_probe()
1825 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag)); in ag71xx_probe()
1827 return -ENOMEM; in ag71xx_probe()
1831 return -EINVAL; in ag71xx_probe()
1833 dcfg = of_device_get_match_data(&pdev->dev); in ag71xx_probe()
1835 return -EINVAL; in ag71xx_probe()
1838 ag->mac_idx = -1; in ag71xx_probe()
1840 if (ar71xx_addr_ar7100[i] == res->start) in ag71xx_probe()
1841 ag->mac_idx = i; in ag71xx_probe()
1844 if (ag->mac_idx < 0) { in ag71xx_probe()
1845 netif_err(ag, probe, ndev, "unknown mac idx\n"); in ag71xx_probe()
1846 return -EINVAL; in ag71xx_probe()
1849 ag->clk_eth = devm_clk_get(&pdev->dev, "eth"); in ag71xx_probe()
1850 if (IS_ERR(ag->clk_eth)) { in ag71xx_probe()
1851 netif_err(ag, probe, ndev, "Failed to get eth clk.\n"); in ag71xx_probe()
1852 return PTR_ERR(ag->clk_eth); in ag71xx_probe()
1855 SET_NETDEV_DEV(ndev, &pdev->dev); in ag71xx_probe()
1857 ag->pdev = pdev; in ag71xx_probe()
1858 ag->ndev = ndev; in ag71xx_probe()
1859 ag->dcfg = dcfg; in ag71xx_probe()
1860 ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE); in ag71xx_probe()
1861 memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata)); in ag71xx_probe()
1863 ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac"); in ag71xx_probe()
1864 if (IS_ERR(ag->mac_reset)) { in ag71xx_probe()
1865 netif_err(ag, probe, ndev, "missing mac reset\n"); in ag71xx_probe()
1866 return PTR_ERR(ag->mac_reset); in ag71xx_probe()
1869 ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); in ag71xx_probe()
1870 if (!ag->mac_base) in ag71xx_probe()
1871 return -ENOMEM; in ag71xx_probe()
1873 ndev->irq = platform_get_irq(pdev, 0); in ag71xx_probe()
1874 err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt, in ag71xx_probe()
1875 0x0, dev_name(&pdev->dev), ndev); in ag71xx_probe()
1878 ndev->irq); in ag71xx_probe()
1882 ndev->netdev_ops = &ag71xx_netdev_ops; in ag71xx_probe()
1883 ndev->ethtool_ops = &ag71xx_ethtool_ops; in ag71xx_probe()
1885 INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func); in ag71xx_probe()
1886 timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0); in ag71xx_probe()
1889 ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT); in ag71xx_probe()
1891 ndev->min_mtu = 68; in ag71xx_probe()
1892 ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0); in ag71xx_probe()
1894 ag->rx_buf_offset = NET_SKB_PAD; in ag71xx_probe()
1896 ag->rx_buf_offset += NET_IP_ALIGN; in ag71xx_probe()
1899 ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT; in ag71xx_probe()
1902 ag->tx_ring.order = ag71xx_ring_size_order(tx_size); in ag71xx_probe()
1904 ag->stop_desc = dmam_alloc_coherent(&pdev->dev, in ag71xx_probe()
1906 &ag->stop_desc_dma, GFP_KERNEL); in ag71xx_probe()
1907 if (!ag->stop_desc) in ag71xx_probe()
1908 return -ENOMEM; in ag71xx_probe()
1910 ag->stop_desc->data = 0; in ag71xx_probe()
1911 ag->stop_desc->ctrl = 0; in ag71xx_probe()
1912 ag->stop_desc->next = (u32)ag->stop_desc_dma; in ag71xx_probe()
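
The stop descriptor's next field holds its own DMA address, so a halted engine parks on one permanently empty descriptor instead of running off the end of a ring. A minimal sketch of that self-loop; the three-word layout is abbreviated from the driver's descriptor:

    #include <stdint.h>

    struct desc {
        uint32_t data; /* buffer address */
        uint32_t ctrl; /* length and flags */
        uint32_t next; /* DMA address of the next descriptor */
    };

    static void init_stop_desc(struct desc *d, uint32_t d_dma)
    {
        d->data = 0;
        d->ctrl = 0;     /* nothing to transfer */
        d->next = d_dma; /* points back at itself: the engine idles here */
    }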
1916 netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); in ag71xx_probe()
1920 err = of_get_phy_mode(np, &ag->phy_if_mode); in ag71xx_probe()
1922 netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); in ag71xx_probe()
1926 netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll, in ag71xx_probe()
1929 err = clk_prepare_enable(ag->clk_eth); in ag71xx_probe()
1931 netif_err(ag, probe, ndev, "Failed to enable eth clk.\n"); in ag71xx_probe()
1959 (unsigned long)ag->mac_base, ndev->irq, in ag71xx_probe()
1960 phy_modes(ag->phy_if_mode)); in ag71xx_probe()
1967 clk_disable_unprepare(ag->clk_eth); in ag71xx_probe()
1982 clk_disable_unprepare(ag->clk_eth); in ag71xx_remove()
2004 .desc_pktlen_mask = SZ_4K - 1,
2012 .desc_pktlen_mask = SZ_4K - 1,
2020 .desc_pktlen_mask = SZ_4K - 1,
2028 .desc_pktlen_mask = SZ_4K - 1,
2035 .max_frame_len = SZ_16K - 1,
2036 .desc_pktlen_mask = SZ_16K - 1,
2043 .max_frame_len = SZ_16K - 1,
2044 .desc_pktlen_mask = SZ_16K - 1,
2052 .desc_pktlen_mask = SZ_16K - 1,
2057 { .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
2058 { .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
2059 { .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
2060 { .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
2061 { .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
2062 { .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
2063 { .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
2064 { .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
2065 { .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
2066 { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },