Lines matching "zynq", "gpio", "1"
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2004-2006 Atmel Corporation
10 #include <linux/clk-provider.h>
20 #include <linux/gpio.h>
21 #include <linux/gpio/consumer.h>
25 #include <linux/dma-mapping.h>
54 * (bp)->rx_ring_size)
60 * (bp)->tx_ring_size)
63 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
74 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
85 #define MACB_WOL_ENABLED (0x1 << 1)
88 #define MACB_SERDES_RATE_10G 1
91 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
102 * 1. dma address width 32 bits:
103 * word 1: 32 bit address of Data Buffer
107 * word 1: 32 bit address of Data Buffer
113 * word 1: 32 bit address of Data Buffer
115 * word 3: timestamp word 1
119 * word 1: 32 bit address of Data Buffer
123 * word 5: timestamp word 1
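The layout comment above enumerates four descriptor shapes. A minimal sketch of them as C structs, for orientation only; the struct and field names are illustrative assumptions, not the driver's actual type definitions:

#include <stdint.h>

struct desc32 {                 /* 1. 32-bit DMA address width      */
	uint32_t addr;          /* word 1: buffer address           */
	uint32_t ctrl;          /* word 2: control/status           */
};

struct desc64 {                 /* 2. 64-bit DMA address width      */
	uint32_t addr;          /* word 1: low 32 bits of address   */
	uint32_t ctrl;          /* word 2: control/status           */
	uint32_t addrh;         /* word 3: high 32 bits of address  */
	uint32_t unused;        /* word 4: padding                  */
};

struct desc32_ptp {             /* 3. 32-bit address + timestamping */
	uint32_t addr, ctrl;
	uint32_t ts[2];         /* words 3-4: timestamp             */
};

struct desc64_ptp {             /* 4. 64-bit address + timestamping */
	uint32_t addr, ctrl, addrh, unused;
	uint32_t ts[2];         /* words 5-6: timestamp             */
};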
131 switch (bp->hw_dma_cap) { in macb_dma_desc_get_size()
156 switch (bp->hw_dma_cap) { in macb_adj_dma_desc_idx()
159 desc_idx <<= 1; in macb_adj_dma_desc_idx()
182 return index & (bp->tx_ring_size - 1); in macb_tx_ring_wrap()
188 index = macb_tx_ring_wrap(queue->bp, index); in macb_tx_desc()
189 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_tx_desc()
190 return &queue->tx_ring[index]; in macb_tx_desc()
196 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; in macb_tx_skb()
203 offset = macb_tx_ring_wrap(queue->bp, index) * in macb_tx_dma()
204 macb_dma_desc_get_size(queue->bp); in macb_tx_dma()
206 return queue->tx_ring_dma + offset; in macb_tx_dma()
211 return index & (bp->rx_ring_size - 1); in macb_rx_ring_wrap()
216 index = macb_rx_ring_wrap(queue->bp, index); in macb_rx_desc()
217 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_rx_desc()
218 return &queue->rx_ring[index]; in macb_rx_desc()
223 return queue->rx_buffers + queue->bp->rx_buffer_size * in macb_rx_buffer()
224 macb_rx_ring_wrap(queue->bp, index); in macb_rx_buffer()
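Both wrap helpers above work because the ring sizes are powers of two, so a modulo collapses to a bit mask, and a buffer address is just base + index * element size. A standalone sketch of the idiom (ring size chosen arbitrarily):

#include <stdio.h>

#define RING_SIZE 512u	/* must be a power of two, as the driver assumes */

static unsigned int ring_wrap(unsigned int index)
{
	return index & (RING_SIZE - 1);	/* equivalent to index % RING_SIZE */
}

int main(void)
{
	/* head/tail indices run free and are folded back by the mask */
	printf("%u %u %u\n", ring_wrap(511), ring_wrap(512), ring_wrap(515));
	/* prints: 511 0 3 */
	return 0;
}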
230 return __raw_readl(bp->regs + offset); in hw_readl_native()
235 __raw_writel(value, bp->regs + offset); in hw_writel_native()
240 return readl_relaxed(bp->regs + offset); in hw_readl()
245 writel_relaxed(value, bp->regs + offset); in hw_writel()
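Two accessor families coexist above: the __raw_* pair for controllers wired up in native (CPU) endianness, and the *_relaxed pair for the usual little-endian register file. The driver selects one pair and calls it through bp->macb_reg_readl (see macb_update_stats and gem_update_stats below, and the bp->native_io test in macb_configure_dma). A userspace model of that dispatch; the function names here are stand-ins, not kernel APIs:

#include <stdint.h>

typedef uint32_t (*reg_read_fn)(const volatile void *addr);

static uint32_t read_native(const volatile void *addr)
{
	/* stands in for __raw_readl(): no byte swapping at all */
	return *(const volatile uint32_t *)addr;
}

static uint32_t read_le(const volatile void *addr)
{
	/* stands in for readl_relaxed(): register is little-endian */
	const volatile uint8_t *p = addr;

	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static reg_read_fn pick_reg_read(int native_io)
{
	return native_io ? read_native : read_le;	/* cf. bp->native_io */
}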
282 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); in macb_set_hwaddr()
284 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); in macb_set_hwaddr()
309 addr[1] = (bottom >> 8) & 0xff; in macb_get_hwaddr()
316 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); in macb_get_hwaddr()
321 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
322 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
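macb_set_hwaddr and macb_get_hwaddr above split the six address bytes across a 32-bit bottom register and a 16-bit top register, first byte in the least significant position. A standalone round-trip sketch of that packing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void mac_to_regs(const uint8_t mac[6], uint32_t *bottom, uint16_t *top)
{
	/* byte 0 lands in the LSB, mirroring the cpu_to_le32() packing above */
	*bottom = (uint32_t)mac[0] | (uint32_t)mac[1] << 8 |
		  (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24;
	*top = (uint16_t)(mac[4] | (mac[5] << 8));
}

static void regs_to_mac(uint32_t bottom, uint16_t top, uint8_t mac[6])
{
	for (int i = 0; i < 4; i++)
		mac[i] = (bottom >> (8 * i)) & 0xff;	/* cf. addr[1] above */
	mac[4] = top & 0xff;
	mac[5] = (top >> 8) & 0xff;
}

int main(void)
{
	const uint8_t in[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
	uint8_t out[6];
	uint32_t bottom;
	uint16_t top;

	mac_to_regs(in, &bottom, &top);
	regs_to_mac(bottom, top, out);
	printf("%s\n", memcmp(in, out, 6) ? "mismatch" : "round-trip ok");
	return 0;
}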
330 1, MACB_MDIO_TIMEOUT); in macb_mdio_wait_for_idle()
335 struct macb *bp = bus->priv; in macb_mdio_read()
338 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_read()
340 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_read()
380 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read()
381 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read()
389 struct macb *bp = bus->priv; in macb_mdio_write()
392 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_write()
394 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_write()
434 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write()
435 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write()
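Both MDIO accessors bracket the bus access with the same runtime-PM pattern: pm_runtime_get_sync() on entry, pm_runtime_put_noidle() if the resume failed (get_sync keeps its reference even on error), then mark-last-busy plus autosuspend-put on exit. A condensed sketch of that bracket, assuming kernel context; the mdio_op() wrapper is hypothetical and the register access itself is elided:

#include <linux/pm_runtime.h>

static int mdio_op(struct device *dev)
{
	int status = pm_runtime_get_sync(dev);

	if (status < 0) {
		/* drop the reference get_sync took despite the failure */
		pm_runtime_put_noidle(dev);
		return status;
	}

	/* ... perform the MDIO register access here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}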
445 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
446 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); in macb_init_buffers()
448 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
450 upper_32_bits(queue->rx_ring_dma)); in macb_init_buffers()
452 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); in macb_init_buffers()
454 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
456 upper_32_bits(queue->tx_ring_dma)); in macb_init_buffers()
462 * macb_set_tx_clk() - Set a clock to a new frequency
470 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG)) in macb_set_tx_clk()
474 if (bp->phy_interface == PHY_INTERFACE_MODE_MII) in macb_set_tx_clk()
491 rate_rounded = clk_round_rate(bp->tx_clk, rate); in macb_set_tx_clk()
498 ferr = abs(rate_rounded - rate); in macb_set_tx_clk()
501 netdev_warn(bp->dev, in macb_set_tx_clk()
505 if (clk_set_rate(bp->tx_clk, rate_rounded)) in macb_set_tx_clk()
506 netdev_err(bp->dev, "adjusting tx_clk failed.\n"); in macb_set_tx_clk()
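macb_set_tx_clk leaves the clock alone in MII mode (the PHY sources the TX clock there) and otherwise asks clk_round_rate() what is achievable before committing with clk_set_rate(), warning if the rounded rate is far off. The rates requested are the conventional (R)GMII values; a standalone sketch of the speed-to-rate mapping (the clk calls themselves are shown above):

#include <stdio.h>

/* Conventional (R)GMII TX clock per link speed, as this driver requests. */
static long tx_clk_rate(int speed_mbps)
{
	switch (speed_mbps) {
	case 10:	return 2500000;		/* 2.5 MHz */
	case 100:	return 25000000;	/* 25 MHz  */
	case 1000:	return 125000000;	/* 125 MHz */
	default:	return -1;		/* leave the clock untouched */
	}
}

int main(void)
{
	printf("%ld\n", tx_clk_rate(1000));	/* 125000000 */
	return 0;
}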
513 struct net_device *ndev = to_net_dev(config->dev); in macb_validate()
518 if (state->interface != PHY_INTERFACE_MODE_NA && in macb_validate()
519 state->interface != PHY_INTERFACE_MODE_MII && in macb_validate()
520 state->interface != PHY_INTERFACE_MODE_RMII && in macb_validate()
521 state->interface != PHY_INTERFACE_MODE_GMII && in macb_validate()
522 state->interface != PHY_INTERFACE_MODE_SGMII && in macb_validate()
523 state->interface != PHY_INTERFACE_MODE_10GBASER && in macb_validate()
524 !phy_interface_mode_is_rgmii(state->interface)) { in macb_validate()
530 (state->interface == PHY_INTERFACE_MODE_GMII || in macb_validate()
531 phy_interface_mode_is_rgmii(state->interface))) { in macb_validate()
536 if (state->interface == PHY_INTERFACE_MODE_10GBASER && in macb_validate()
537 !(bp->caps & MACB_CAPS_HIGH_SPEED && in macb_validate()
538 bp->caps & MACB_CAPS_PCS)) { in macb_validate()
547 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && in macb_validate()
548 (state->interface == PHY_INTERFACE_MODE_NA || in macb_validate()
549 state->interface == PHY_INTERFACE_MODE_10GBASER)) { in macb_validate()
557 if (state->interface != PHY_INTERFACE_MODE_NA) in macb_validate()
566 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && in macb_validate()
567 (state->interface == PHY_INTERFACE_MODE_NA || in macb_validate()
568 state->interface == PHY_INTERFACE_MODE_GMII || in macb_validate()
569 state->interface == PHY_INTERFACE_MODE_SGMII || in macb_validate()
570 phy_interface_mode_is_rgmii(state->interface))) { in macb_validate()
574 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) in macb_validate()
579 bitmap_and(state->advertising, state->advertising, mask, in macb_validate()
604 state->speed = SPEED_10000; in macb_usx_pcs_get_state()
605 state->duplex = 1; in macb_usx_pcs_get_state()
606 state->an_complete = 1; in macb_usx_pcs_get_state()
609 state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK)); in macb_usx_pcs_get_state()
612 state->pause = MLO_PAUSE_RX; in macb_usx_pcs_get_state()
632 state->link = 0; in macb_pcs_get_state()
664 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_config()
670 spin_lock_irqsave(&bp->lock, flags); in macb_mac_config()
675 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { in macb_mac_config()
676 if (state->interface == PHY_INTERFACE_MODE_RMII) in macb_mac_config()
682 if (state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
684 } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) { in macb_mac_config()
701 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
713 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_config()
719 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_link_down()
725 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_down()
726 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
728 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_down()
743 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_link_up()
750 spin_lock_irqsave(&bp->lock, flags); in macb_mac_link_up()
762 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { in macb_mac_link_up()
779 bp->macbgem_ops.mog_init_rings(bp); in macb_mac_link_up()
782 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
784 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_up()
789 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER) in macb_mac_link_up()
793 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_link_up()
804 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_prepare()
808 bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops; in macb_mac_prepare()
810 bp->phylink_pcs.ops = &macb_phylink_pcs_ops; in macb_mac_prepare()
812 bp->phylink_pcs.ops = NULL; in macb_mac_prepare()
814 if (bp->phylink_pcs.ops) in macb_mac_prepare()
815 phylink_set_pcs(bp->phylink, &bp->phylink_pcs); in macb_mac_prepare()
830 dn = of_parse_phandle(dn, "phy-handle", 0); in macb_phy_handle_exists()
837 struct device_node *dn = bp->pdev->dev.of_node; in macb_phylink_connect()
838 struct net_device *dev = bp->dev; in macb_phylink_connect()
843 ret = phylink_of_phy_connect(bp->phylink, dn, 0); in macb_phylink_connect()
846 phydev = phy_find_first(bp->mii_bus); in macb_phylink_connect()
849 return -ENXIO; in macb_phylink_connect()
853 ret = phylink_connect_phy(bp->phylink, phydev); in macb_phylink_connect()
861 phylink_start(bp->phylink); in macb_phylink_connect()
869 struct net_device *ndev = to_net_dev(config->dev); in macb_get_pcs_fixed_state()
872 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0; in macb_get_pcs_fixed_state()
880 bp->phylink_config.dev = &dev->dev; in macb_mii_probe()
881 bp->phylink_config.type = PHYLINK_NETDEV; in macb_mii_probe()
883 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in macb_mii_probe()
884 bp->phylink_config.poll_fixed_state = true; in macb_mii_probe()
885 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; in macb_mii_probe()
888 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, in macb_mii_probe()
889 bp->phy_interface, &macb_phylink_ops); in macb_mii_probe()
890 if (IS_ERR(bp->phylink)) { in macb_mii_probe()
892 PTR_ERR(bp->phylink)); in macb_mii_probe()
893 return PTR_ERR(bp->phylink); in macb_mii_probe()
901 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mdiobus_register()
904 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
918 return of_mdiobus_register(bp->mii_bus, np); in macb_mdiobus_register()
921 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
926 int err = -ENXIO; in macb_mii_init()
931 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
932 if (!bp->mii_bus) { in macb_mii_init()
933 err = -ENOMEM; in macb_mii_init()
937 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
938 bp->mii_bus->read = &macb_mdio_read; in macb_mii_init()
939 bp->mii_bus->write = &macb_mdio_write; in macb_mii_init()
940 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
941 bp->pdev->name, bp->pdev->id); in macb_mii_init()
942 bp->mii_bus->priv = bp; in macb_mii_init()
943 bp->mii_bus->parent = &bp->pdev->dev; in macb_mii_init()
945 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
951 err = macb_mii_probe(bp->dev); in macb_mii_init()
958 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
960 mdiobus_free(bp->mii_bus); in macb_mii_init()
967 u32 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
968 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
971 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); in macb_update_stats()
974 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
994 return -ETIMEDOUT; in macb_halt_tx()
999 if (tx_skb->mapping) { in macb_tx_unmap()
1000 if (tx_skb->mapped_as_page) in macb_tx_unmap()
1001 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1002 tx_skb->size, DMA_TO_DEVICE); in macb_tx_unmap()
1004 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1005 tx_skb->size, DMA_TO_DEVICE); in macb_tx_unmap()
1006 tx_skb->mapping = 0; in macb_tx_unmap()
1009 if (tx_skb->skb) { in macb_tx_unmap()
1010 dev_kfree_skb_any(tx_skb->skb); in macb_tx_unmap()
1011 tx_skb->skb = NULL; in macb_tx_unmap()
1020 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_set_addr()
1022 desc_64->addrh = upper_32_bits(addr); in macb_set_addr()
1030 desc->addr = lower_32_bits(addr); in macb_set_addr()
1039 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_get_addr()
1041 addr = ((u64)(desc_64->addrh) << 32); in macb_get_addr()
1044 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); in macb_get_addr()
1052 struct macb *bp = queue->bp; in macb_tx_error_task()
1059 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
1060 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
1061 queue->tx_tail, queue->tx_head); in macb_tx_error_task()
1069 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
1072 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
1080 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
1085 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { in macb_tx_error_task()
1089 ctrl = desc->ctrl; in macb_tx_error_task()
1091 skb = tx_skb->skb; in macb_tx_error_task()
1099 skb = tx_skb->skb; in macb_tx_error_task()
1106 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
1108 skb->data); in macb_tx_error_task()
1109 bp->dev->stats.tx_packets++; in macb_tx_error_task()
1110 queue->stats.tx_packets++; in macb_tx_error_task()
1111 bp->dev->stats.tx_bytes += skb->len; in macb_tx_error_task()
1112 queue->stats.tx_bytes += skb->len; in macb_tx_error_task()
1115 /* "Buffers exhausted mid-frame" errors may only happen in macb_tx_error_task()
1120 netdev_err(bp->dev, in macb_tx_error_task()
1121 "BUG: TX buffers exhausted mid-frame\n"); in macb_tx_error_task()
1123 desc->ctrl = ctrl | MACB_BIT(TX_USED); in macb_tx_error_task()
1132 desc->ctrl = MACB_BIT(TX_USED); in macb_tx_error_task()
1138 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); in macb_tx_error_task()
1140 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_tx_error_task()
1141 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); in macb_tx_error_task()
1144 queue->tx_head = 0; in macb_tx_error_task()
1145 queue->tx_tail = 0; in macb_tx_error_task()
1152 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
1155 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
1163 struct macb *bp = queue->bp; in macb_tx_interrupt()
1164 u16 queue_index = queue - bp->queues; in macb_tx_interrupt()
1169 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_interrupt()
1172 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", in macb_tx_interrupt()
1175 head = queue->tx_head; in macb_tx_interrupt()
1176 for (tail = queue->tx_tail; tail != head; tail++) { in macb_tx_interrupt()
1187 ctrl = desc->ctrl; in macb_tx_interrupt()
1198 skb = tx_skb->skb; in macb_tx_interrupt()
1202 if (unlikely(skb_shinfo(skb)->tx_flags & in macb_tx_interrupt()
1208 tx_skb->skb = NULL; in macb_tx_interrupt()
1210 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_interrupt()
1212 skb->data); in macb_tx_interrupt()
1213 bp->dev->stats.tx_packets++; in macb_tx_interrupt()
1214 queue->stats.tx_packets++; in macb_tx_interrupt()
1215 bp->dev->stats.tx_bytes += skb->len; in macb_tx_interrupt()
1216 queue->stats.tx_bytes += skb->len; in macb_tx_interrupt()
1231 queue->tx_tail = tail; in macb_tx_interrupt()
1232 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_interrupt()
1233 CIRC_CNT(queue->tx_head, queue->tx_tail, in macb_tx_interrupt()
1234 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) in macb_tx_interrupt()
1235 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_interrupt()
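The wake-up test above pairs CIRC_CNT() with MACB_TX_WAKEUP_THRESH (three quarters of the ring, defined near the top of this listing), so a stopped subqueue is only restarted once a sizeable chunk of the ring has drained, avoiding stop/wake thrashing. The circ_buf macros are plain masked subtraction for power-of-two rings; a standalone sketch:

#include <stdio.h>

/* Same definitions as <linux/circ_buf.h>; valid for power-of-two sizes. */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size)	CIRC_CNT((tail), ((head) + 1), (size))

#define RING_SIZE	512u
#define WAKEUP_THRESH	(3 * RING_SIZE / 4)	/* mirrors MACB_TX_WAKEUP_THRESH */

int main(void)
{
	unsigned int head = 600, tail = 200;	/* free-running indices */

	printf("in flight: %u\n", CIRC_CNT(head, tail, RING_SIZE));	/* 400 */
	printf("free:      %u\n", CIRC_SPACE(head, tail, RING_SIZE));	/* 111 */
	printf("wake: %s\n", CIRC_CNT(head, tail, RING_SIZE) <=
	       WAKEUP_THRESH ? "yes" : "no");	/* no: 400 > 384 */
	return 0;
}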
1243 struct macb *bp = queue->bp; in gem_rx_refill()
1246 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, in gem_rx_refill()
1247 bp->rx_ring_size) > 0) { in gem_rx_refill()
1248 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); in gem_rx_refill()
1253 queue->rx_prepared_head++; in gem_rx_refill()
1256 if (!queue->rx_skbuff[entry]) { in gem_rx_refill()
1258 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
1260 netdev_err(bp->dev, in gem_rx_refill()
1266 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
1267 bp->rx_buffer_size, in gem_rx_refill()
1269 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
1274 queue->rx_skbuff[entry] = skb; in gem_rx_refill()
1276 if (entry == bp->rx_ring_size - 1) in gem_rx_refill()
1278 desc->ctrl = 0; in gem_rx_refill()
1288 desc->ctrl = 0; in gem_rx_refill()
1290 desc->addr &= ~MACB_BIT(RX_USED); in gem_rx_refill()
1297 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", in gem_rx_refill()
1298 queue, queue->rx_prepared_head, queue->rx_tail); in gem_rx_refill()
1310 desc->addr &= ~MACB_BIT(RX_USED); in discard_partial_frame()
1325 struct macb *bp = queue->bp; in gem_rx()
1337 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in gem_rx()
1343 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; in gem_rx()
1349 /* Ensure ctrl is at least as up-to-date as rxused */ in gem_rx()
1352 ctrl = desc->ctrl; in gem_rx()
1354 queue->rx_tail++; in gem_rx()
1358 netdev_err(bp->dev, in gem_rx()
1360 bp->dev->stats.rx_dropped++; in gem_rx()
1361 queue->stats.rx_dropped++; in gem_rx()
1364 skb = queue->rx_skbuff[entry]; in gem_rx()
1366 netdev_err(bp->dev, in gem_rx()
1368 bp->dev->stats.rx_dropped++; in gem_rx()
1369 queue->stats.rx_dropped++; in gem_rx()
1373 queue->rx_skbuff[entry] = NULL; in gem_rx()
1374 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
1376 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
1379 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
1380 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
1382 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
1384 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
1385 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
1387 skb->ip_summed = CHECKSUM_UNNECESSARY; in gem_rx()
1389 bp->dev->stats.rx_packets++; in gem_rx()
1390 queue->stats.rx_packets++; in gem_rx()
1391 bp->dev->stats.rx_bytes += skb->len; in gem_rx()
1392 queue->stats.rx_bytes += skb->len; in gem_rx()
1397 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
1398 skb->len, skb->csum); in gem_rx()
1399 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, in gem_rx()
1401 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, in gem_rx()
1402 skb->data, 32, true); in gem_rx()
1421 struct macb *bp = queue->bp; in macb_rx_frame()
1424 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
1426 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
1432 * payload word-aligned. in macb_rx_frame()
1438 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
1440 bp->dev->stats.rx_dropped++; in macb_rx_frame()
1443 desc->addr &= ~MACB_BIT(RX_USED); in macb_rx_frame()
1451 return 1; in macb_rx_frame()
1460 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
1465 return -1; in macb_rx_frame()
1467 frag_len = len - offset; in macb_rx_frame()
1472 offset += bp->rx_buffer_size; in macb_rx_frame()
1474 desc->addr &= ~MACB_BIT(RX_USED); in macb_rx_frame()
1484 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
1486 bp->dev->stats.rx_packets++; in macb_rx_frame()
1487 bp->dev->stats.rx_bytes += skb->len; in macb_rx_frame()
1488 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
1489 skb->len, skb->csum); in macb_rx_frame()
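On this non-GEM receive path one frame can span several fixed-size ring buffers, so macb_rx_frame allocates a single skb for the whole frame and copies each fragment at its running offset, trimming the last one to the remaining length. A standalone model of that gather loop; the buffer size and names are assumptions for illustration:

#include <stdint.h>
#include <string.h>

#define RX_BUF_SIZE 128u	/* fixed per-descriptor buffer on this path */

/* Copy 'len' bytes of one frame out of consecutive ring buffers into dst. */
static void gather_frame(uint8_t *dst, unsigned int len, const uint8_t *ring,
			 unsigned int first_frag, unsigned int ring_entries)
{
	unsigned int frag = first_frag, offset = 0;

	while (offset < len) {
		unsigned int frag_len = RX_BUF_SIZE;

		if (offset + frag_len > len)
			frag_len = len - offset;	/* final, partial buffer */

		memcpy(dst + offset, ring + frag * RX_BUF_SIZE, frag_len);
		offset += RX_BUF_SIZE;			/* advance by full buffers */
		frag = (frag + 1) & (ring_entries - 1);	/* wrap the ring */
	}
}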
1497 struct macb *bp = queue->bp; in macb_init_rx_ring()
1502 addr = queue->rx_buffers_dma; in macb_init_rx_ring()
1503 for (i = 0; i < bp->rx_ring_size; i++) { in macb_init_rx_ring()
1506 desc->ctrl = 0; in macb_init_rx_ring()
1507 addr += bp->rx_buffer_size; in macb_init_rx_ring()
1509 desc->addr |= MACB_BIT(RX_WRAP); in macb_init_rx_ring()
1510 queue->rx_tail = 0; in macb_init_rx_ring()
1516 struct macb *bp = queue->bp; in macb_rx()
1520 int first_frag = -1; in macb_rx()
1522 for (tail = queue->rx_tail; budget > 0; tail++) { in macb_rx()
1529 if (!(desc->addr & MACB_BIT(RX_USED))) in macb_rx()
1532 /* Ensure ctrl is at least as up-to-date as addr */ in macb_rx()
1535 ctrl = desc->ctrl; in macb_rx()
1538 if (first_frag != -1) in macb_rx()
1546 if (unlikely(first_frag == -1)) { in macb_rx()
1552 first_frag = -1; in macb_rx()
1559 budget--; in macb_rx()
1568 netdev_err(bp->dev, "RX queue corruption: reset it\n"); in macb_rx()
1570 spin_lock_irqsave(&bp->lock, flags); in macb_rx()
1576 queue_writel(queue, RBQP, queue->rx_ring_dma); in macb_rx()
1580 spin_unlock_irqrestore(&bp->lock, flags); in macb_rx()
1584 if (first_frag != -1) in macb_rx()
1585 queue->rx_tail = first_frag; in macb_rx()
1587 queue->rx_tail = tail; in macb_rx()
1595 struct macb *bp = queue->bp; in macb_poll()
1602 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", in macb_poll()
1605 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); in macb_poll()
1612 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1616 queue_writel(queue, IER, bp->rx_intr_mask); in macb_poll()
1628 struct net_device *dev = bp->dev; in macb_hresp_error_task()
1633 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1634 queue_writel(queue, IDR, bp->rx_intr_mask | in macb_hresp_error_task()
1645 bp->macbgem_ops.mog_init_rings(bp); in macb_hresp_error_task()
1651 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1653 bp->rx_intr_mask | in macb_hresp_error_task()
1666 unsigned int head = queue->tx_head; in macb_tx_restart()
1667 unsigned int tail = queue->tx_tail; in macb_tx_restart()
1668 struct macb *bp = queue->bp; in macb_tx_restart()
1670 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_restart()
1682 struct macb *bp = queue->bp; in macb_wol_interrupt()
1690 spin_lock(&bp->lock); in macb_wol_interrupt()
1695 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", in macb_wol_interrupt()
1696 (unsigned int)(queue - bp->queues), in macb_wol_interrupt()
1698 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_wol_interrupt()
1700 pm_wakeup_event(&bp->pdev->dev, 0); in macb_wol_interrupt()
1703 spin_unlock(&bp->lock); in macb_wol_interrupt()
1711 struct macb *bp = queue->bp; in gem_wol_interrupt()
1719 spin_lock(&bp->lock); in gem_wol_interrupt()
1724 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", in gem_wol_interrupt()
1725 (unsigned int)(queue - bp->queues), in gem_wol_interrupt()
1727 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in gem_wol_interrupt()
1729 pm_wakeup_event(&bp->pdev->dev, 0); in gem_wol_interrupt()
1732 spin_unlock(&bp->lock); in gem_wol_interrupt()
1740 struct macb *bp = queue->bp; in macb_interrupt()
1741 struct net_device *dev = bp->dev; in macb_interrupt()
1749 spin_lock(&bp->lock); in macb_interrupt()
1754 queue_writel(queue, IDR, -1); in macb_interrupt()
1755 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1756 queue_writel(queue, ISR, -1); in macb_interrupt()
1760 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1761 (unsigned int)(queue - bp->queues), in macb_interrupt()
1764 if (status & bp->rx_intr_mask) { in macb_interrupt()
1771 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_interrupt()
1772 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1775 if (napi_schedule_prep(&queue->napi)) { in macb_interrupt()
1776 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1777 __napi_schedule(&queue->napi); in macb_interrupt()
1783 schedule_work(&queue->tx_error_task); in macb_interrupt()
1785 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1798 * add that if/when we get our hands on a full-blown MII PHY. in macb_interrupt()
1803 * interrupts but it can be cleared by re-enabling RX. See in macb_interrupt()
1804 * the at91rm9200 manual, section 41.3.1 or the Zynq manual in macb_interrupt()
1814 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1821 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1823 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
1825 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1830 tasklet_schedule(&bp->hresp_err_tasklet); in macb_interrupt()
1833 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1839 spin_unlock(&bp->lock); in macb_interrupt()
1845 /* Polling receive - used by netconsole and other diagnostic tools
1856 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
1857 macb_interrupt(dev->irq, queue); in macb_poll_controller()
1868 unsigned int len, entry, i, tx_head = queue->tx_head; in macb_tx_map()
1872 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; in macb_tx_map()
1873 unsigned int eof = 1, mss_mfs = 0; in macb_tx_map()
1877 if (skb_shinfo(skb)->gso_size != 0) { in macb_tx_map()
1878 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in macb_tx_map()
1879 /* UDP - UFO */ in macb_tx_map()
1882 /* TCP - TSO */ in macb_tx_map()
1886 /* First, map non-paged data */ in macb_tx_map()
1895 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
1897 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
1898 skb->data + offset, in macb_tx_map()
1900 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1904 tx_skb->skb = NULL; in macb_tx_map()
1905 tx_skb->mapping = mapping; in macb_tx_map()
1906 tx_skb->size = size; in macb_tx_map()
1907 tx_skb->mapped_as_page = false; in macb_tx_map()
1909 len -= size; in macb_tx_map()
1914 size = min(len, bp->max_tx_length); in macb_tx_map()
1919 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in macb_tx_map()
1924 size = min(len, bp->max_tx_length); in macb_tx_map()
1926 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
1928 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
1930 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1934 tx_skb->skb = NULL; in macb_tx_map()
1935 tx_skb->mapping = mapping; in macb_tx_map()
1936 tx_skb->size = size; in macb_tx_map()
1937 tx_skb->mapped_as_page = true; in macb_tx_map()
1939 len -= size; in macb_tx_map()
1948 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
1953 tx_skb->skb = skb; in macb_tx_map()
1966 desc->ctrl = ctrl; in macb_tx_map()
1971 mss_mfs = skb_shinfo(skb)->gso_size + in macb_tx_map()
1975 mss_mfs = skb_shinfo(skb)->gso_size; in macb_tx_map()
1984 i--; in macb_tx_map()
1986 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
1989 ctrl = (u32)tx_skb->size; in macb_tx_map()
1994 if (unlikely(entry == (bp->tx_ring_size - 1))) in macb_tx_map()
1998 if (i == queue->tx_head) { in macb_tx_map()
2001 if ((bp->dev->features & NETIF_F_HW_CSUM) && in macb_tx_map()
2002 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) in macb_tx_map()
2011 macb_set_addr(bp, desc, tx_skb->mapping); in macb_tx_map()
2012 /* desc->addr must be visible to hardware before clearing in macb_tx_map()
2013 * 'TX_USED' bit in desc->ctrl. in macb_tx_map()
2016 desc->ctrl = ctrl; in macb_tx_map()
2017 } while (i != queue->tx_head); in macb_tx_map()
2019 queue->tx_head = tx_head; in macb_tx_map()
2024 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
2026 for (i = queue->tx_head; i != tx_head; i++) { in macb_tx_map()
2045 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) in macb_features_check()
2055 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) in macb_features_check()
2058 nr_frags = skb_shinfo(skb)->nr_frags; in macb_features_check()
2060 nr_frags--; in macb_features_check()
2062 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in macb_features_check()
2073 if (skb->ip_summed != CHECKSUM_PARTIAL) in macb_clear_csum()
2078 return -1; in macb_clear_csum()
2081 * This is required - at least for Zynq, which otherwise calculates in macb_clear_csum()
2084 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; in macb_clear_csum()
2092 int padlen = ETH_ZLEN - (*skb)->len; in macb_pad_and_fcs()
2098 if (!(ndev->features & NETIF_F_HW_CSUM) || in macb_pad_and_fcs()
2099 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || in macb_pad_and_fcs()
2100 skb_shinfo(*skb)->gso_size) /* Not available for GSO */ in macb_pad_and_fcs()
2119 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); in macb_pad_and_fcs()
2120 skb_set_tail_pointer(*skb, (*skb)->len); in macb_pad_and_fcs()
2124 return -ENOMEM; in macb_pad_and_fcs()
2131 skb_put_zero(*skb, padlen - ETH_FCS_LEN); in macb_pad_and_fcs()
2135 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); in macb_pad_and_fcs()
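When the hardware checksum engine is enabled but this particular skb is not CHECKSUM_PARTIAL, the driver pads to the minimum frame size and appends the FCS itself: crc32_le(~0, data, len), inverted, stored least significant byte first. A standalone sketch computing the same Ethernet FCS bit by bit (a table-driven CRC would be the fast version):

#include <stddef.h>
#include <stdint.h>

/* Ethernet FCS: reflected CRC-32; equivalent to ~crc32_le(~0, data, len). */
static uint32_t ether_fcs(const uint8_t *data, size_t len)
{
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *data++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return ~crc;
}

/* Append least significant byte first, as the driver does. */
static void append_fcs(uint8_t *tail, uint32_t fcs)
{
	for (int i = 0; i < 4; i++)
		tail[i] = (fcs >> (8 * i)) & 0xff;
}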
2150 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
2167 is_lso = (skb_shinfo(skb)->gso_size != 0); in macb_start_xmit()
2171 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in macb_start_xmit()
2177 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); in macb_start_xmit()
2182 hdrlen = min(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2185 netdev_vdbg(bp->dev, in macb_start_xmit()
2187 queue_index, skb->len, skb->head, skb->data, in macb_start_xmit()
2189 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, in macb_start_xmit()
2190 skb->data, 16, true); in macb_start_xmit()
2199 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; in macb_start_xmit()
2201 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2202 nr_frags = skb_shinfo(skb)->nr_frags; in macb_start_xmit()
2204 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); in macb_start_xmit()
2205 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
2208 spin_lock_irqsave(&bp->lock, flags); in macb_start_xmit()
2211 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, in macb_start_xmit()
2212 bp->tx_ring_size) < desc_cnt) { in macb_start_xmit()
2214 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
2215 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
2216 queue->tx_head, queue->tx_tail); in macb_start_xmit()
2232 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) in macb_start_xmit()
2236 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
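Before taking the lock, macb_start_xmit counts the descriptors the packet will need: the linear head is carved into bp->max_tx_length chunks (with one extra descriptor when an LSO header is split off, as above), and every page fragment is carved the same way; only then is CIRC_SPACE() checked against that count. A standalone sketch of the non-LSO accounting (sizes are illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int count_descs(unsigned int headlen,
				const unsigned int *frag_sizes,
				unsigned int nr_frags,
				unsigned int max_tx_len)
{
	unsigned int f, cnt = DIV_ROUND_UP(headlen, max_tx_len);

	for (f = 0; f < nr_frags; f++)
		cnt += DIV_ROUND_UP(frag_sizes[f], max_tx_len);
	return cnt;
}

int main(void)
{
	unsigned int frags[2] = { 4096, 1500 };

	/* 6000-byte head plus two fragments, 2048 bytes per descriptor */
	printf("%u\n", count_descs(6000, frags, 2, 2048));	/* 3 + 2 + 1 = 6 */
	return 0;
}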
2244 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
2246 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
2248 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
2249 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
2252 bp->rx_buffer_size = in macb_init_rx_buffer_size()
2253 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
2257 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", in macb_init_rx_buffer_size()
2258 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
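The receive buffer size is derived from the MTU plus Ethernet overhead (see bufsz in macb_open below) and then rounded up to RX_BUFFER_MULTIPLE, since the hardware deals in fixed-size chunks. The roundup is the usual add-and-truncate idiom; a standalone sketch:

#include <stdio.h>

#define RX_BUFFER_MULTIPLE 64u	/* the driver's RX alignment unit, in bytes */

/* Round x up to the next multiple, like the kernel's roundup(). */
static unsigned int roundup_to(unsigned int x, unsigned int multiple)
{
	return ((x + multiple - 1) / multiple) * multiple;
}

int main(void)
{
	/* 1500 MTU + 14 header + 4 FCS + 2 IP alignment = 1520 bytes */
	printf("%u\n", roundup_to(1520, RX_BUFFER_MULTIPLE));	/* 1536 */
	return 0;
}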
2270 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2271 if (!queue->rx_skbuff) in gem_free_rx_buffers()
2274 for (i = 0; i < bp->rx_ring_size; i++) { in gem_free_rx_buffers()
2275 skb = queue->rx_skbuff[i]; in gem_free_rx_buffers()
2283 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
2289 kfree(queue->rx_skbuff); in gem_free_rx_buffers()
2290 queue->rx_skbuff = NULL; in gem_free_rx_buffers()
2296 struct macb_queue *queue = &bp->queues[0]; in macb_free_rx_buffers()
2298 if (queue->rx_buffers) { in macb_free_rx_buffers()
2299 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
2300 bp->rx_ring_size * bp->rx_buffer_size, in macb_free_rx_buffers()
2301 queue->rx_buffers, queue->rx_buffers_dma); in macb_free_rx_buffers()
2302 queue->rx_buffers = NULL; in macb_free_rx_buffers()
2312 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
2314 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2315 kfree(queue->tx_skb); in macb_free_consistent()
2316 queue->tx_skb = NULL; in macb_free_consistent()
2317 if (queue->tx_ring) { in macb_free_consistent()
2318 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_free_consistent()
2319 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2320 queue->tx_ring, queue->tx_ring_dma); in macb_free_consistent()
2321 queue->tx_ring = NULL; in macb_free_consistent()
2323 if (queue->rx_ring) { in macb_free_consistent()
2324 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_free_consistent()
2325 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2326 queue->rx_ring, queue->rx_ring_dma); in macb_free_consistent()
2327 queue->rx_ring = NULL; in macb_free_consistent()
2338 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2339 size = bp->rx_ring_size * sizeof(struct sk_buff *); in gem_alloc_rx_buffers()
2340 queue->rx_skbuff = kzalloc(size, GFP_KERNEL); in gem_alloc_rx_buffers()
2341 if (!queue->rx_skbuff) in gem_alloc_rx_buffers()
2342 return -ENOMEM; in gem_alloc_rx_buffers()
2344 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
2346 bp->rx_ring_size, queue->rx_skbuff); in gem_alloc_rx_buffers()
2353 struct macb_queue *queue = &bp->queues[0]; in macb_alloc_rx_buffers()
2356 size = bp->rx_ring_size * bp->rx_buffer_size; in macb_alloc_rx_buffers()
2357 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
2358 &queue->rx_buffers_dma, GFP_KERNEL); in macb_alloc_rx_buffers()
2359 if (!queue->rx_buffers) in macb_alloc_rx_buffers()
2360 return -ENOMEM; in macb_alloc_rx_buffers()
2362 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
2364 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); in macb_alloc_rx_buffers()
2374 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
2375 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_alloc_consistent()
2376 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2377 &queue->tx_ring_dma, in macb_alloc_consistent()
2379 if (!queue->tx_ring) in macb_alloc_consistent()
2381 netdev_dbg(bp->dev, in macb_alloc_consistent()
2383 q, size, (unsigned long)queue->tx_ring_dma, in macb_alloc_consistent()
2384 queue->tx_ring); in macb_alloc_consistent()
2386 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); in macb_alloc_consistent()
2387 queue->tx_skb = kmalloc(size, GFP_KERNEL); in macb_alloc_consistent()
2388 if (!queue->tx_skb) in macb_alloc_consistent()
2391 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_alloc_consistent()
2392 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2393 &queue->rx_ring_dma, GFP_KERNEL); in macb_alloc_consistent()
2394 if (!queue->rx_ring) in macb_alloc_consistent()
2396 netdev_dbg(bp->dev, in macb_alloc_consistent()
2398 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); in macb_alloc_consistent()
2400 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
2407 return -ENOMEM; in macb_alloc_consistent()
2417 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
2418 for (i = 0; i < bp->tx_ring_size; i++) { in gem_init_rings()
2421 desc->ctrl = MACB_BIT(TX_USED); in gem_init_rings()
2423 desc->ctrl |= MACB_BIT(TX_WRAP); in gem_init_rings()
2424 queue->tx_head = 0; in gem_init_rings()
2425 queue->tx_tail = 0; in gem_init_rings()
2427 queue->rx_tail = 0; in gem_init_rings()
2428 queue->rx_prepared_head = 0; in gem_init_rings()
2440 macb_init_rx_ring(&bp->queues[0]); in macb_init_rings()
2442 for (i = 0; i < bp->tx_ring_size; i++) { in macb_init_rings()
2443 desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
2445 desc->ctrl = MACB_BIT(TX_USED); in macb_init_rings()
2447 bp->queues[0].tx_head = 0; in macb_init_rings()
2448 bp->queues[0].tx_tail = 0; in macb_init_rings()
2449 desc->ctrl |= MACB_BIT(TX_WRAP); in macb_init_rings()
2469 macb_writel(bp, TSR, -1); in macb_reset_hw()
2470 macb_writel(bp, RSR, -1); in macb_reset_hw()
2473 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
2474 queue_writel(queue, IDR, -1); in macb_reset_hw()
2476 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_reset_hw()
2477 queue_writel(queue, ISR, -1); in macb_reset_hw()
2484 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
2510 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
2537 case 1: in macb_dbw()
2544 * - use the correct receive buffer size
2545 * - set best burst length for DMA operations
2547 * - set both rx/tx packet buffers to full memory size
2557 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; in macb_configure_dma()
2559 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
2560 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_configure_dma()
2566 if (bp->dma_burst_length) in macb_configure_dma()
2567 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
2568 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); in macb_configure_dma()
2571 if (bp->native_io) in macb_configure_dma()
2576 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
2583 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_configure_dma()
2587 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_configure_dma()
2590 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
2606 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2610 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
2612 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
2614 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
2618 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
2619 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
2620 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
2621 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2622 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
2642 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
2651 * if the multicast hash enable bit is set, da[0] is 1 and the hash
2662 if (addr[bitnr / 8] & (1 << (bitnr % 8))) in hash_bit_value()
2663 return 1; in hash_bit_value()
2683 /* Add multicast addresses to the internal multicast-hash table. */
2692 mc_filter[1] = 0; in macb_sethashtable()
2695 bitnr = hash_get_index(ha->addr); in macb_sethashtable()
2696 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); in macb_sethashtable()
2700 macb_or_gem_writel(bp, HRT, mc_filter[1]); in macb_sethashtable()
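The hash comment block above defines each of the six index bits as the XOR of every sixth address bit; the resulting value picks one bit out of the 64-bit HRB/HRT pair, written as mc_filter[0]/mc_filter[1] in macb_sethashtable. A standalone sketch of the index computation, mirroring hash_bit_value() shown above:

#include <stdint.h>
#include <stdio.h>

static int hash_bit_value(int bitnr, const uint8_t *addr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

/* Bit j of the index is the XOR of address bits j, j+6, ..., j+42. */
static int hash_get_index(const uint8_t *addr)
{
	int index = 0;

	for (int j = 0; j < 6; j++) {
		int bit = 0;

		for (int i = 0; i < 8; i++)
			bit ^= hash_bit_value(i * 6 + j, addr);
		index |= bit << j;
	}
	return index;
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	int idx = hash_get_index(mc);

	printf("index %d -> %s bit %d\n", idx,
	       idx < 32 ? "HRB" : "HRT", idx & 31);
	return 0;
}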
2711 if (dev->flags & IFF_PROMISC) { in macb_set_rx_mode()
2723 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
2727 if (dev->flags & IFF_ALLMULTI) { in macb_set_rx_mode()
2729 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
2730 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
2736 } else if (dev->flags & (~IFF_ALLMULTI)) { in macb_set_rx_mode()
2748 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; in macb_open()
2754 netdev_dbg(bp->dev, "open\n"); in macb_open()
2756 err = pm_runtime_get_sync(&bp->pdev->dev); in macb_open()
2770 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2771 napi_enable(&queue->napi); in macb_open()
2781 if (bp->ptp_info) in macb_open()
2782 bp->ptp_info->ptp_init(dev); in macb_open()
2788 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2789 napi_disable(&queue->napi); in macb_open()
2792 pm_runtime_put_sync(&bp->pdev->dev); in macb_open()
2805 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_close()
2806 napi_disable(&queue->napi); in macb_close()
2808 phylink_stop(bp->phylink); in macb_close()
2809 phylink_disconnect_phy(bp->phylink); in macb_close()
2811 spin_lock_irqsave(&bp->lock, flags); in macb_close()
2814 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
2818 if (bp->ptp_info) in macb_close()
2819 bp->ptp_info->ptp_remove(dev); in macb_close()
2821 pm_runtime_put(&bp->pdev->dev); in macb_close()
2829 return -EBUSY; in macb_change_mtu()
2831 dev->mtu = new_mtu; in macb_change_mtu()
2842 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; in gem_update_stats()
2846 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
2848 bp->ethtool_stats[i] += val; in gem_update_stats()
2853 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
2854 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
2860 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in gem_update_stats()
2861 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) in gem_update_stats()
2862 bp->ethtool_stats[idx++] = *stat; in gem_update_stats()
2867 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
2868 struct net_device_stats *nstat = &bp->dev->stats; in gem_get_stats()
2870 if (!netif_running(bp->dev)) in gem_get_stats()
2875 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + in gem_get_stats()
2876 hwstat->rx_alignment_errors + in gem_get_stats()
2877 hwstat->rx_resource_errors + in gem_get_stats()
2878 hwstat->rx_overruns + in gem_get_stats()
2879 hwstat->rx_oversize_frames + in gem_get_stats()
2880 hwstat->rx_jabbers + in gem_get_stats()
2881 hwstat->rx_undersized_frames + in gem_get_stats()
2882 hwstat->rx_length_field_frame_errors); in gem_get_stats()
2883 nstat->tx_errors = (hwstat->tx_late_collisions + in gem_get_stats()
2884 hwstat->tx_excessive_collisions + in gem_get_stats()
2885 hwstat->tx_underrun + in gem_get_stats()
2886 hwstat->tx_carrier_sense_errors); in gem_get_stats()
2887 nstat->multicast = hwstat->rx_multicast_frames; in gem_get_stats()
2888 nstat->collisions = (hwstat->tx_single_collision_frames + in gem_get_stats()
2889 hwstat->tx_multiple_collision_frames + in gem_get_stats()
2890 hwstat->tx_excessive_collisions); in gem_get_stats()
2891 nstat->rx_length_errors = (hwstat->rx_oversize_frames + in gem_get_stats()
2892 hwstat->rx_jabbers + in gem_get_stats()
2893 hwstat->rx_undersized_frames + in gem_get_stats()
2894 hwstat->rx_length_field_frame_errors); in gem_get_stats()
2895 nstat->rx_over_errors = hwstat->rx_resource_errors; in gem_get_stats()
2896 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; in gem_get_stats()
2897 nstat->rx_frame_errors = hwstat->rx_alignment_errors; in gem_get_stats()
2898 nstat->rx_fifo_errors = hwstat->rx_overruns; in gem_get_stats()
2899 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; in gem_get_stats()
2900 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; in gem_get_stats()
2901 nstat->tx_fifo_errors = hwstat->tx_underrun; in gem_get_stats()
2913 memcpy(data, &bp->ethtool_stats, sizeof(u64) in gem_get_ethtool_stats()
2923 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; in gem_get_sset_count()
2925 return -EOPNOTSUPP; in gem_get_sset_count()
2943 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_get_ethtool_strings()
2957 struct net_device_stats *nstat = &bp->dev->stats; in macb_get_stats()
2958 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
2967 nstat->rx_errors = (hwstat->rx_fcs_errors + in macb_get_stats()
2968 hwstat->rx_align_errors + in macb_get_stats()
2969 hwstat->rx_resource_errors + in macb_get_stats()
2970 hwstat->rx_overruns + in macb_get_stats()
2971 hwstat->rx_oversize_pkts + in macb_get_stats()
2972 hwstat->rx_jabbers + in macb_get_stats()
2973 hwstat->rx_undersize_pkts + in macb_get_stats()
2974 hwstat->rx_length_mismatch); in macb_get_stats()
2975 nstat->tx_errors = (hwstat->tx_late_cols + in macb_get_stats()
2976 hwstat->tx_excessive_cols + in macb_get_stats()
2977 hwstat->tx_underruns + in macb_get_stats()
2978 hwstat->tx_carrier_errors + in macb_get_stats()
2979 hwstat->sqe_test_errors); in macb_get_stats()
2980 nstat->collisions = (hwstat->tx_single_cols + in macb_get_stats()
2981 hwstat->tx_multiple_cols + in macb_get_stats()
2982 hwstat->tx_excessive_cols); in macb_get_stats()
2983 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + in macb_get_stats()
2984 hwstat->rx_jabbers + in macb_get_stats()
2985 hwstat->rx_undersize_pkts + in macb_get_stats()
2986 hwstat->rx_length_mismatch); in macb_get_stats()
2987 nstat->rx_over_errors = hwstat->rx_resource_errors + in macb_get_stats()
2988 hwstat->rx_overruns; in macb_get_stats()
2989 nstat->rx_crc_errors = hwstat->rx_fcs_errors; in macb_get_stats()
2990 nstat->rx_frame_errors = hwstat->rx_align_errors; in macb_get_stats()
2991 nstat->rx_fifo_errors = hwstat->rx_overruns; in macb_get_stats()
2993 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; in macb_get_stats()
2994 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; in macb_get_stats()
2995 nstat->tx_fifo_errors = hwstat->tx_underruns; in macb_get_stats()
3013 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
3016 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); in macb_get_regs()
3017 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); in macb_get_regs()
3020 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); in macb_get_regs()
3030 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
3031 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
3033 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_get_regs()
3043 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { in macb_get_wol()
3044 phylink_ethtool_get_wol(bp->phylink, wol); in macb_get_wol()
3045 wol->supported |= WAKE_MAGIC; in macb_get_wol()
3047 if (bp->wol & MACB_WOL_ENABLED) in macb_get_wol()
3048 wol->wolopts |= WAKE_MAGIC; in macb_get_wol()
3058 ret = phylink_ethtool_set_wol(bp->phylink, wol); in macb_set_wol()
3062 if (!ret || ret != -EOPNOTSUPP) in macb_set_wol()
3065 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || in macb_set_wol()
3066 (wol->wolopts & ~WAKE_MAGIC)) in macb_set_wol()
3067 return -EOPNOTSUPP; in macb_set_wol()
3069 if (wol->wolopts & WAKE_MAGIC) in macb_set_wol()
3070 bp->wol |= MACB_WOL_ENABLED; in macb_set_wol()
3072 bp->wol &= ~MACB_WOL_ENABLED; in macb_set_wol()
3074 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); in macb_set_wol()
3084 return phylink_ethtool_ksettings_get(bp->phylink, kset); in macb_get_link_ksettings()
3092 return phylink_ethtool_ksettings_set(bp->phylink, kset); in macb_set_link_ksettings()
3100 ring->rx_max_pending = MAX_RX_RING_SIZE; in macb_get_ringparam()
3101 ring->tx_max_pending = MAX_TX_RING_SIZE; in macb_get_ringparam()
3103 ring->rx_pending = bp->rx_ring_size; in macb_get_ringparam()
3104 ring->tx_pending = bp->tx_ring_size; in macb_get_ringparam()
3114 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in macb_set_ringparam()
3115 return -EINVAL; in macb_set_ringparam()
3117 new_rx_size = clamp_t(u32, ring->rx_pending, in macb_set_ringparam()
3121 new_tx_size = clamp_t(u32, ring->tx_pending, in macb_set_ringparam()
3125 if ((new_tx_size == bp->tx_ring_size) && in macb_set_ringparam()
3126 (new_rx_size == bp->rx_ring_size)) { in macb_set_ringparam()
3131 if (netif_running(bp->dev)) { in macb_set_ringparam()
3132 reset = 1; in macb_set_ringparam()
3133 macb_close(bp->dev); in macb_set_ringparam()
3136 bp->rx_ring_size = new_rx_size; in macb_set_ringparam()
3137 bp->tx_ring_size = new_tx_size; in macb_set_ringparam()
3140 macb_open(bp->dev); in macb_set_ringparam()
3151 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); in gem_get_tsu_rate()
3155 else if (!IS_ERR(bp->pclk)) { in gem_get_tsu_rate()
3156 tsu_clk = bp->pclk; in gem_get_tsu_rate()
3159 return -ENOTSUPP; in gem_get_tsu_rate()
3173 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { in gem_get_ts_info()
3178 info->so_timestamping = in gem_get_ts_info()
3185 info->tx_types = in gem_get_ts_info()
3186 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | in gem_get_ts_info()
3187 (1 << HWTSTAMP_TX_OFF) | in gem_get_ts_info()
3188 (1 << HWTSTAMP_TX_ON); in gem_get_ts_info()
3189 info->rx_filters = in gem_get_ts_info()
3190 (1 << HWTSTAMP_FILTER_NONE) | in gem_get_ts_info()
3191 (1 << HWTSTAMP_FILTER_ALL); in gem_get_ts_info()
3193 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; in gem_get_ts_info()
3214 if (bp->ptp_info) in macb_get_ts_info()
3215 return bp->ptp_info->get_ts_info(netdev, info); in macb_get_ts_info()
3222 struct net_device *netdev = bp->dev; in gem_enable_flow_filters()
3227 if (!(netdev->features & NETIF_F_NTUPLE)) in gem_enable_flow_filters()
3232 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_enable_flow_filters()
3233 struct ethtool_rx_flow_spec *fs = &item->fs; in gem_enable_flow_filters()
3236 if (fs->location >= num_t2_scr) in gem_enable_flow_filters()
3239 t2_scr = gem_readl_n(bp, SCRT2, fs->location); in gem_enable_flow_filters()
3245 tp4sp_m = &(fs->m_u.tcp_ip4_spec); in gem_enable_flow_filters()
3247 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) in gem_enable_flow_filters()
3248 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); in gem_enable_flow_filters()
3252 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) in gem_enable_flow_filters()
3253 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); in gem_enable_flow_filters()
3257 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) in gem_enable_flow_filters()
3258 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); in gem_enable_flow_filters()
3262 gem_writel_n(bp, SCRT2, fs->location, t2_scr); in gem_enable_flow_filters()
3269 uint16_t index = fs->location; in gem_prog_cmp_regs()
3278 tp4sp_v = &(fs->h_u.tcp_ip4_spec); in gem_prog_cmp_regs()
3279 tp4sp_m = &(fs->m_u.tcp_ip4_spec); in gem_prog_cmp_regs()
3282 if (tp4sp_m->ip4src == 0xFFFFFFFF) { in gem_prog_cmp_regs()
3283 /* 1st compare reg - IP source address */ in gem_prog_cmp_regs()
3286 w0 = tp4sp_v->ip4src; in gem_prog_cmp_regs()
3287 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3296 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { in gem_prog_cmp_regs()
3297 /* 2nd compare reg - IP destination address */ in gem_prog_cmp_regs()
3300 w0 = tp4sp_v->ip4dst; in gem_prog_cmp_regs()
3301 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3310 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { in gem_prog_cmp_regs()
3311 /* 3rd compare reg - source port, destination port */ in gem_prog_cmp_regs()
3315 if (tp4sp_m->psrc == tp4sp_m->pdst) { in gem_prog_cmp_regs()
3316 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); in gem_prog_cmp_regs()
3317 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); in gem_prog_cmp_regs()
3318 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3322 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ in gem_prog_cmp_regs()
3324 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ in gem_prog_cmp_regs()
3325 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); in gem_prog_cmp_regs()
3328 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); in gem_prog_cmp_regs()
3338 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); in gem_prog_cmp_regs()
3353 struct ethtool_rx_flow_spec *fs = &cmd->fs; in gem_add_flow_filter()
3356 int ret = -EINVAL; in gem_add_flow_filter()
3361 return -ENOMEM; in gem_add_flow_filter()
3362 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); in gem_add_flow_filter()
3366 fs->flow_type, (int)fs->ring_cookie, fs->location, in gem_add_flow_filter()
3367 htonl(fs->h_u.tcp_ip4_spec.ip4src), in gem_add_flow_filter()
3368 htonl(fs->h_u.tcp_ip4_spec.ip4dst), in gem_add_flow_filter()
3369 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); in gem_add_flow_filter()
3371 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3374 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_add_flow_filter()
3375 if (item->fs.location > newfs->fs.location) { in gem_add_flow_filter()
3376 list_add_tail(&newfs->list, &item->list); in gem_add_flow_filter()
3379 } else if (item->fs.location == fs->location) { in gem_add_flow_filter()
3381 fs->location); in gem_add_flow_filter()
3382 ret = -EBUSY; in gem_add_flow_filter()
3387 list_add_tail(&newfs->list, &bp->rx_fs_list.list); in gem_add_flow_filter()
3390 bp->rx_fs_list.count++; in gem_add_flow_filter()
3392 gem_enable_flow_filters(bp, 1); in gem_add_flow_filter()
3394 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3398 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3411 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3413 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_del_flow_filter()
3414 if (item->fs.location == cmd->fs.location) { in gem_del_flow_filter()
3416 fs = &(item->fs); in gem_del_flow_filter()
3419 fs->flow_type, (int)fs->ring_cookie, fs->location, in gem_del_flow_filter()
3420 htonl(fs->h_u.tcp_ip4_spec.ip4src), in gem_del_flow_filter()
3421 htonl(fs->h_u.tcp_ip4_spec.ip4dst), in gem_del_flow_filter()
3422 htons(fs->h_u.tcp_ip4_spec.psrc), in gem_del_flow_filter()
3423 htons(fs->h_u.tcp_ip4_spec.pdst)); in gem_del_flow_filter()
3425 gem_writel_n(bp, SCRT2, fs->location, 0); in gem_del_flow_filter()
3427 list_del(&item->list); in gem_del_flow_filter()
3428 bp->rx_fs_list.count--; in gem_del_flow_filter()
3429 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3435 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3436 return -EINVAL; in gem_del_flow_filter()
3445 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_flow_entry()
3446 if (item->fs.location == cmd->fs.location) { in gem_get_flow_entry()
3447 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); in gem_get_flow_entry()
3451 return -EINVAL; in gem_get_flow_entry()
3461 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_all_flow_entries()
3462 if (cnt == cmd->rule_cnt) in gem_get_all_flow_entries()
3463 return -EMSGSIZE; in gem_get_all_flow_entries()
3464 rule_locs[cnt] = item->fs.location; in gem_get_all_flow_entries()
3467 cmd->data = bp->max_tuples; in gem_get_all_flow_entries()
3468 cmd->rule_cnt = cnt; in gem_get_all_flow_entries()
3479 switch (cmd->cmd) { in gem_get_rxnfc()
3481 cmd->data = bp->num_queues; in gem_get_rxnfc()
3484 cmd->rule_cnt = bp->rx_fs_list.count; in gem_get_rxnfc()
3494 "Command parameter %d is not supported\n", cmd->cmd); in gem_get_rxnfc()
3495 ret = -EOPNOTSUPP; in gem_get_rxnfc()
3506 switch (cmd->cmd) { in gem_set_rxnfc()
3508 if ((cmd->fs.location >= bp->max_tuples) in gem_set_rxnfc()
3509 || (cmd->fs.ring_cookie >= bp->num_queues)) { in gem_set_rxnfc()
3510 ret = -EINVAL; in gem_set_rxnfc()
3520 "Command parameter %d is not supported\n", cmd->cmd); in gem_set_rxnfc()
3521 ret = -EOPNOTSUPP; in gem_set_rxnfc()
3563 return -EINVAL; in macb_ioctl()
3565 if (bp->ptp_info) { in macb_ioctl()
3568 return bp->ptp_info->set_hwtst(dev, rq, cmd); in macb_ioctl()
3570 return bp->ptp_info->get_hwtst(dev, rq); in macb_ioctl()
3574 return phylink_mii_ioctl(bp->phylink, rq, cmd); in macb_ioctl()
3597 struct net_device *netdev = bp->dev; in macb_set_rxcsum_feature()
3604 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) in macb_set_rxcsum_feature()
3625 netdev_features_t changed = features ^ netdev->features; in macb_set_features()
3644 struct net_device *netdev = bp->dev; in macb_restore_features()
3645 netdev_features_t features = netdev->features; in macb_restore_features()
3655 list_for_each_entry(item, &bp->rx_fs_list.list, list) in macb_restore_features()
3656 gem_prog_cmp_regs(bp, &item->fs); in macb_restore_features()
3687 bp->caps = dt_conf->caps; in macb_configure_caps()
3689 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
3690 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
3694 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
3696 bp->caps |= MACB_CAPS_PCS; in macb_configure_caps()
3698 if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1) in macb_configure_caps()
3699 bp->caps |= MACB_CAPS_HIGH_SPEED; in macb_configure_caps()
3702 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
3706 dev_err(&bp->pdev->dev, in macb_configure_caps()
3709 bp->hw_dma_cap |= HW_DMA_CAP_PTP; in macb_configure_caps()
3710 bp->ptp_info = &gem_ptp_info; in macb_configure_caps()
3716 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
3725 *num_queues = 1; in macb_probe_queues()
3762 pdata = dev_get_platdata(&pdev->dev); in macb_clk_init()
3764 *pclk = pdata->pclk; in macb_clk_init()
3765 *hclk = pdata->hclk; in macb_clk_init()
3767 *pclk = devm_clk_get(&pdev->dev, "pclk"); in macb_clk_init()
3768 *hclk = devm_clk_get(&pdev->dev, "hclk"); in macb_clk_init()
3772 return dev_err_probe(&pdev->dev, in macb_clk_init()
3773 IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV, in macb_clk_init()
3777 return dev_err_probe(&pdev->dev, in macb_clk_init()
3778 IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV, in macb_clk_init()
3781 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); in macb_clk_init()
3785 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); in macb_clk_init()
3789 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); in macb_clk_init()
3795 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); in macb_clk_init()
3801 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); in macb_clk_init()
3807 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); in macb_clk_init()
3813 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); in macb_clk_init()
3819 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); in macb_clk_init()
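/*
 * Two clock-acquisition patterns meet here: pclk/hclk are mandatory and
 * fail the probe via dev_err_probe(), while tx/rx/tsu use
 * devm_clk_get_optional(), which returns NULL when the clock is not
 * described. clk_prepare_enable(NULL) is a no-op that returns 0, so the
 * enable sequence above needs no per-clock presence checks.
 */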
3849 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; in macb_init()
3850 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; in macb_init()
3857 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
3860 queue = &bp->queues[q]; in macb_init()
3861 queue->bp = bp; in macb_init()
3862 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT); in macb_init()
3864 queue->ISR = GEM_ISR(hw_q - 1); in macb_init()
3865 queue->IER = GEM_IER(hw_q - 1); in macb_init()
3866 queue->IDR = GEM_IDR(hw_q - 1); in macb_init()
3867 queue->IMR = GEM_IMR(hw_q - 1); in macb_init()
3868 queue->TBQP = GEM_TBQP(hw_q - 1); in macb_init()
3869 queue->RBQP = GEM_RBQP(hw_q - 1); in macb_init()
3870 queue->RBQS = GEM_RBQS(hw_q - 1); in macb_init()
3872 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3873 queue->TBQPH = GEM_TBQPH(hw_q - 1); in macb_init()
3874 queue->RBQPH = GEM_RBQPH(hw_q - 1); in macb_init()
3879 queue->ISR = MACB_ISR; in macb_init()
3880 queue->IER = MACB_IER; in macb_init()
3881 queue->IDR = MACB_IDR; in macb_init()
3882 queue->IMR = MACB_IMR; in macb_init()
3883 queue->TBQP = MACB_TBQP; in macb_init()
3884 queue->RBQP = MACB_RBQP; in macb_init()
3886 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3887 queue->TBQPH = MACB_TBQPH; in macb_init()
3888 queue->RBQPH = MACB_RBQPH; in macb_init()
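/*
 * Queue 0 keeps the legacy MACB_* register offsets for compatibility;
 * hardware queues 1..N use the GEM per-queue banks indexed by
 * (hw_q - 1). Since the loop walks bp->queue_mask and skips unset bits,
 * the software index q and the hardware index hw_q may diverge on a
 * sparse mask.
 */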
3898 queue->irq = platform_get_irq(pdev, q); in macb_init()
3899 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, in macb_init()
3900 IRQF_SHARED, dev->name, queue); in macb_init()
3902 dev_err(&pdev->dev, in macb_init()
3904 queue->irq, err); in macb_init()
3908 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); in macb_init()
3912 dev->netdev_ops = &macb_netdev_ops; in macb_init()
3916 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_init()
3917 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
3918 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
3919 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
3920 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
3921 dev->ethtool_ops = &gem_ethtool_ops; in macb_init()
3923 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_init()
3924 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
3925 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
3926 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
3927 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
3928 dev->ethtool_ops = &macb_ethtool_ops; in macb_init()
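/*
 * The mog_* vtable lets one driver serve both IP variants: GEM cores
 * get the gem_* buffer/ring/receive handlers and gem_ethtool_ops, the
 * older MACB core gets the macb_* equivalents, and the hot paths call
 * through bp->macbgem_ops without further variant checks.
 */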
3932 dev->hw_features = NETIF_F_SG; in macb_init()
3936 dev->hw_features |= MACB_NETIF_LSO; in macb_init()
3939 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
3940 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; in macb_init()
3941 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
3942 dev->hw_features &= ~NETIF_F_SG; in macb_init()
3943 dev->features = dev->hw_features; in macb_init()
3947 * each 4-tuple filter requires 1 T2 screener reg + 3 compare regs in macb_init()
3950 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), in macb_init()
3952 INIT_LIST_HEAD(&bp->rx_fs_list.list); in macb_init()
3953 if (bp->max_tuples > 0) { in macb_init()
3961 dev->hw_features |= NETIF_F_NTUPLE; in macb_init()
3963 bp->rx_fs_list.count = 0; in macb_init()
3964 spin_lock_init(&bp->rx_fs_lock); in macb_init()
3966 bp->max_tuples = 0; in macb_init()
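/*
 * Worked example of the bound above: each 4-tuple filter needs 1 type-2
 * screener plus 3 compare registers, hence the SCR2CMP / 3 term, so 12
 * compare registers allow at most 12 / 3 = 4 filters (the second min()
 * operand falls outside this excerpt). NETIF_F_NTUPLE is advertised
 * only when the result is non-zero.
 */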
3969 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { in macb_init()
3971 if (phy_interface_mode_is_rgmii(bp->phy_interface)) in macb_init()
3972 val = bp->usrio->rgmii; in macb_init()
3973 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
3974 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
3975 val = bp->usrio->rmii; in macb_init()
3976 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
3977 val = bp->usrio->mii; in macb_init()
3979 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
3980 val |= bp->usrio->refclk; in macb_init()
3988 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
4012 struct macb_queue *q = &lp->queues[0]; in at91ether_alloc_coherent()
4014 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4017 &q->rx_ring_dma, GFP_KERNEL); in at91ether_alloc_coherent()
4018 if (!q->rx_ring) in at91ether_alloc_coherent()
4019 return -ENOMEM; in at91ether_alloc_coherent()
4021 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4024 &q->rx_buffers_dma, GFP_KERNEL); in at91ether_alloc_coherent()
4025 if (!q->rx_buffers) { in at91ether_alloc_coherent()
4026 dma_free_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4029 q->rx_ring, q->rx_ring_dma); in at91ether_alloc_coherent()
4030 q->rx_ring = NULL; in at91ether_alloc_coherent()
4031 return -ENOMEM; in at91ether_alloc_coherent()
4039 struct macb_queue *q = &lp->queues[0]; in at91ether_free_coherent()
4041 if (q->rx_ring) { in at91ether_free_coherent()
4042 dma_free_coherent(&lp->pdev->dev, in at91ether_free_coherent()
4045 q->rx_ring, q->rx_ring_dma); in at91ether_free_coherent()
4046 q->rx_ring = NULL; in at91ether_free_coherent()
4049 if (q->rx_buffers) { in at91ether_free_coherent()
4050 dma_free_coherent(&lp->pdev->dev, in at91ether_free_coherent()
4053 q->rx_buffers, q->rx_buffers_dma); in at91ether_free_coherent()
4054 q->rx_buffers = NULL; in at91ether_free_coherent()
4061 struct macb_queue *q = &lp->queues[0]; in at91ether_start()
4071 addr = q->rx_buffers_dma; in at91ether_start()
4075 desc->ctrl = 0; in at91ether_start()
4080 desc->addr |= MACB_BIT(RX_WRAP); in at91ether_start()
4083 q->rx_tail = 0; in at91ether_start()
4086 macb_writel(lp, RBQP, q->rx_ring_dma); in at91ether_start()
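/*
 * The receive-ring handshake above: descriptors are handed to hardware
 * with RX_USED clear, the last descriptor carries RX_WRAP so DMA cycles
 * back to the ring base, and RBQP is pointed at the ring before
 * reception is enabled.
 */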
4132 ret = pm_runtime_get_sync(&lp->pdev->dev); in at91ether_open()
4134 pm_runtime_put_noidle(&lp->pdev->dev); in at91ether_open()
4159 pm_runtime_put_sync(&lp->pdev->dev); in at91ether_open()
4170 phylink_stop(lp->phylink); in at91ether_close()
4171 phylink_disconnect_phy(lp->phylink); in at91ether_close()
4175 return pm_runtime_put(&lp->pdev->dev); in at91ether_close()
4190 lp->rm9200_txq[desc].skb = skb; in at91ether_start_xmit()
4191 lp->rm9200_txq[desc].size = skb->len; in at91ether_start_xmit()
4192 lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data, in at91ether_start_xmit()
4193 skb->len, DMA_TO_DEVICE); in at91ether_start_xmit()
4194 if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) { in at91ether_start_xmit()
4196 dev->stats.tx_dropped++; in at91ether_start_xmit()
4202 macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); in at91ether_start_xmit()
4204 macb_writel(lp, TCR, skb->len); in at91ether_start_xmit()
4220 struct macb_queue *q = &lp->queues[0]; in at91ether_rx()
4226 desc = macb_rx_desc(q, q->rx_tail); in at91ether_rx()
4227 while (desc->addr & MACB_BIT(RX_USED)) { in at91ether_rx()
4228 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; in at91ether_rx()
4229 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); in at91ether_rx()
4235 skb->protocol = eth_type_trans(skb, dev); in at91ether_rx()
4236 dev->stats.rx_packets++; in at91ether_rx()
4237 dev->stats.rx_bytes += pktlen; in at91ether_rx()
4240 dev->stats.rx_dropped++; in at91ether_rx()
4243 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) in at91ether_rx()
4244 dev->stats.multicast++; in at91ether_rx()
4247 desc->addr &= ~MACB_BIT(RX_USED); in at91ether_rx()
4250 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) in at91ether_rx()
4251 q->rx_tail = 0; in at91ether_rx()
4253 q->rx_tail++; in at91ether_rx()
4255 desc = macb_rx_desc(q, q->rx_tail); in at91ether_rx()
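/*
 * This is copy-based RX: buffers sit at fixed AT91ETHER_MAX_RBUFF_SZ
 * offsets inside a single coherent allocation, each frame is copied
 * into a freshly allocated skb, and the descriptor is recycled by
 * clearing RX_USED and advancing rx_tail with an explicit wrap at
 * AT91ETHER_MAX_RX_DESCR - 1.
 */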
4280 dev->stats.tx_errors++; in at91ether_interrupt()
4283 if (lp->rm9200_txq[desc].skb) { in at91ether_interrupt()
4284 dev_consume_skb_irq(lp->rm9200_txq[desc].skb); in at91ether_interrupt()
4285 lp->rm9200_txq[desc].skb = NULL; in at91ether_interrupt()
4286 dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, in at91ether_interrupt()
4287 lp->rm9200_txq[desc].size, DMA_TO_DEVICE); in at91ether_interrupt()
4288 dev->stats.tx_packets++; in at91ether_interrupt()
4289 dev->stats.tx_bytes += lp->rm9200_txq[desc].size; in at91ether_interrupt()
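/*
 * Transmit completion on this core is ring-less: the in-flight skb
 * recorded in rm9200_txq[] at xmit time (the TAR/TCR writes above) is
 * consumed, unmapped and counted here once the interrupt reports the
 * transmit done.
 */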
4294 /* Work-around for EMAC Errata section 41.3.1 */ in at91ether_interrupt()
4314 at91ether_interrupt(dev->irq, dev); in at91ether_poll_controller()
4344 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); in at91ether_clk_init()
4350 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); in at91ether_clk_init()
4363 bp->queues[0].bp = bp; in at91ether_init()
4365 dev->netdev_ops = &at91ether_netdev_ops; in at91ether_init()
4366 dev->ethtool_ops = &macb_ethtool_ops; in at91ether_init()
4368 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, in at91ether_init()
4369 0, dev->name, dev); in at91ether_init()
4383 return mgmt->rate; in fu540_macb_tx_recalc_rate()
4416 iowrite32(1, mgmt->reg); in fu540_macb_tx_set_rate()
4418 iowrite32(0, mgmt->reg); in fu540_macb_tx_set_rate()
4419 mgmt->rate = rate; in fu540_macb_tx_set_rate()
4441 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); in fu540_c000_clk_init()
4443 err = -ENOMEM; in fu540_c000_clk_init()
4447 init.name = "sifive-gemgxl-mgmt"; in fu540_c000_clk_init()
4452 mgmt->rate = 0; in fu540_c000_clk_init()
4453 mgmt->hw.init = &init; in fu540_c000_clk_init()
4455 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); in fu540_c000_clk_init()
4463 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); in fu540_c000_clk_init()
4467 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); in fu540_c000_clk_init()
4480 mgmt->reg = devm_platform_ioremap_resource(pdev, 1); in fu540_c000_init()
4481 if (IS_ERR(mgmt->reg)) in fu540_c000_init()
4482 return PTR_ERR(mgmt->reg); in fu540_c000_init()
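/*
 * On the FU540 the GEM TX clock is a one-register switch rather than a
 * real clock: it is wrapped in a clk_hw (devm_clk_register() above)
 * whose set_rate writes 0 or 1 to mgmt->reg and caches the result in
 * mgmt->rate, so the driver's generic tx_clk handling works unchanged.
 */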
4489 .rmii = 1,
4613 { .compatible = "cdns,at32ap7000-macb" },
4614 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4616 { .compatible = "cdns,np4-macb", .data = &np4_config },
4617 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4619 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
4620 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
4621 { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
4622 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
4623 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
4624 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4625 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4627 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
4628 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4629 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4630 { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
4631 { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
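/*
 * Each of_device_id entry binds a compatible string to a macb_config
 * whose clk_init/init hooks and capability mask specialize the probe;
 * macb_probe() below starts from default hooks and overrides them only
 * when the match carries data.
 */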
4653 struct clk **) = macb_config->clk_init; in macb_probe()
4654 int (*init)(struct platform_device *) = macb_config->init; in macb_probe()
4655 struct device_node *np = pdev->dev.of_node; in macb_probe()
4675 if (match && match->data) { in macb_probe()
4676 macb_config = match->data; in macb_probe()
4677 clk_init = macb_config->clk_init; in macb_probe()
4678 init = macb_config->init; in macb_probe()
4686 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); in macb_probe()
4687 pm_runtime_use_autosuspend(&pdev->dev); in macb_probe()
4688 pm_runtime_get_noresume(&pdev->dev); in macb_probe()
4689 pm_runtime_set_active(&pdev->dev); in macb_probe()
4690 pm_runtime_enable(&pdev->dev); in macb_probe()
4696 err = -ENOMEM; in macb_probe()
4700 dev->base_addr = regs->start; in macb_probe()
4702 SET_NETDEV_DEV(dev, &pdev->dev); in macb_probe()
4705 bp->pdev = pdev; in macb_probe()
4706 bp->dev = dev; in macb_probe()
4707 bp->regs = mem; in macb_probe()
4708 bp->native_io = native_io; in macb_probe()
4710 bp->macb_reg_readl = hw_readl_native; in macb_probe()
4711 bp->macb_reg_writel = hw_writel_native; in macb_probe()
4713 bp->macb_reg_readl = hw_readl; in macb_probe()
4714 bp->macb_reg_writel = hw_writel; in macb_probe()
4716 bp->num_queues = num_queues; in macb_probe()
4717 bp->queue_mask = queue_mask; in macb_probe()
4719 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
4720 bp->pclk = pclk; in macb_probe()
4721 bp->hclk = hclk; in macb_probe()
4722 bp->tx_clk = tx_clk; in macb_probe()
4723 bp->rx_clk = rx_clk; in macb_probe()
4724 bp->tsu_clk = tsu_clk; in macb_probe()
4726 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
4728 bp->wol = 0; in macb_probe()
4729 if (of_get_property(np, "magic-packet", NULL)) in macb_probe()
4730 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; in macb_probe()
4731 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); in macb_probe()
4733 bp->usrio = macb_config->usrio; in macb_probe()
4735 spin_lock_init(&bp->lock); in macb_probe()
4742 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); in macb_probe()
4743 bp->hw_dma_cap |= HW_DMA_CAP_64B; in macb_probe()
4748 dev->irq = platform_get_irq(pdev, 0); in macb_probe()
4749 if (dev->irq < 0) { in macb_probe()
4750 err = dev->irq; in macb_probe()
4754 /* MTU range: 68 - 1500 or 10240 */ in macb_probe()
4755 dev->min_mtu = GEM_MTU_MIN_SIZE; in macb_probe()
4756 if (bp->caps & MACB_CAPS_JUMBO) in macb_probe()
4757 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
4759 dev->max_mtu = ETH_DATA_LEN; in macb_probe()
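/*
 * The jumbo branch derives max_mtu from the hardware JML register
 * rather than a constant: with the 10240-byte limit named in the
 * comment above, max_mtu = 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) =
 * 10222. Non-jumbo parts stay at ETH_DATA_LEN (1500).
 */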
4761 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { in macb_probe()
4764 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4769 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4773 bp->rx_intr_mask = MACB_RX_INT_FLAGS; in macb_probe()
4774 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) in macb_probe()
4775 bp->rx_intr_mask |= MACB_BIT(RXUBR); in macb_probe()
4777 err = of_get_mac_address(np, bp->dev->dev_addr); in macb_probe()
4778 if (err == -EPROBE_DEFER) in macb_probe()
4786 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
4788 bp->phy_interface = interface; in macb_probe()
4803 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); in macb_probe()
4807 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); in macb_probe()
4811 dev->base_addr, dev->irq, dev->dev_addr); in macb_probe()
4813 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_probe()
4814 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_probe()
4819 mdiobus_unregister(bp->mii_bus); in macb_probe()
4820 mdiobus_free(bp->mii_bus); in macb_probe()
4827 pm_runtime_disable(&pdev->dev); in macb_probe()
4828 pm_runtime_set_suspended(&pdev->dev); in macb_probe()
4829 pm_runtime_dont_use_autosuspend(&pdev->dev); in macb_probe()
4843 mdiobus_unregister(bp->mii_bus); in macb_remove()
4844 mdiobus_free(bp->mii_bus); in macb_remove()
4847 tasklet_kill(&bp->hresp_err_tasklet); in macb_remove()
4848 pm_runtime_disable(&pdev->dev); in macb_remove()
4849 pm_runtime_dont_use_autosuspend(&pdev->dev); in macb_remove()
4850 if (!pm_runtime_suspended(&pdev->dev)) { in macb_remove()
4851 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, in macb_remove()
4852 bp->rx_clk, bp->tsu_clk); in macb_remove()
4853 pm_runtime_set_suspended(&pdev->dev); in macb_remove()
4855 phylink_destroy(bp->phylink); in macb_remove()
4874 if (bp->wol & MACB_WOL_ENABLED) { in macb_suspend()
4875 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4877 macb_writel(bp, TSR, -1); in macb_suspend()
4878 macb_writel(bp, RSR, -1); in macb_suspend()
4879 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4882 queue_writel(queue, IDR, -1); in macb_suspend()
4884 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_suspend()
4885 queue_writel(queue, ISR, -1); in macb_suspend()
4890 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_suspend()
4892 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, in macb_suspend()
4893 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4897 bp->queues[0].irq, err); in macb_suspend()
4898 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4901 queue_writel(bp->queues, IER, GEM_BIT(WOL)); in macb_suspend()
4904 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, in macb_suspend()
4905 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4909 bp->queues[0].irq, err); in macb_suspend()
4910 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4913 queue_writel(bp->queues, IER, MACB_BIT(WOL)); in macb_suspend()
4916 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4918 enable_irq_wake(bp->queues[0].irq); in macb_suspend()
4922 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4924 napi_disable(&queue->napi); in macb_suspend()
4926 if (!(bp->wol & MACB_WOL_ENABLED)) { in macb_suspend()
4928 phylink_stop(bp->phylink); in macb_suspend()
4930 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4932 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4935 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_suspend()
4936 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); in macb_suspend()
4938 if (netdev->hw_features & NETIF_F_NTUPLE) in macb_suspend()
4939 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); in macb_suspend()
4941 if (bp->ptp_info) in macb_suspend()
4942 bp->ptp_info->ptp_remove(netdev); in macb_suspend()
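/*
 * macb_suspend() splits on bp->wol: the WoL path quiesces and acks all
 * interrupt sources, swaps the queue-0 handler for a dedicated WoL
 * handler (GEM and MACB arm different WOL bits), and leaves the wake
 * IRQ enabled; the non-WoL path instead stops phylink and saves
 * USRIO/screener state for restoration at resume.
 */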
4964 if (bp->wol & MACB_WOL_ENABLED) { in macb_resume()
4965 spin_lock_irqsave(&bp->lock, flags); in macb_resume()
4968 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); in macb_resume()
4971 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); in macb_resume()
4975 queue_readl(bp->queues, ISR); in macb_resume()
4976 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_resume()
4977 queue_writel(bp->queues, ISR, -1); in macb_resume()
4979 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_resume()
4980 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, in macb_resume()
4981 IRQF_SHARED, netdev->name, bp->queues); in macb_resume()
4985 bp->queues[0].irq, err); in macb_resume()
4986 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
4989 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
4991 disable_irq_wake(bp->queues[0].irq); in macb_resume()
4997 phylink_stop(bp->phylink); in macb_resume()
5001 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_resume()
5003 napi_enable(&queue->napi); in macb_resume()
5005 if (netdev->hw_features & NETIF_F_NTUPLE) in macb_resume()
5006 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); in macb_resume()
5008 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_resume()
5009 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); in macb_resume()
5016 phylink_start(bp->phylink); in macb_resume()
5020 if (bp->ptp_info) in macb_resume()
5021 bp->ptp_info->ptp_init(netdev); in macb_resume()
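/*
 * Resume mirrors suspend: disarm WOL and reinstall macb_interrupt() on
 * queue 0, re-enable NAPI, then restore the NTUPLE screener registers
 * and USRIO value saved in bp->pm_data before restarting phylink and
 * re-initializing PTP.
 */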
5032 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); in macb_runtime_suspend()
5034 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); in macb_runtime_suspend()
5045 clk_prepare_enable(bp->pclk); in macb_runtime_resume()
5046 clk_prepare_enable(bp->hclk); in macb_runtime_resume()
5047 clk_prepare_enable(bp->tx_clk); in macb_runtime_resume()
5048 clk_prepare_enable(bp->rx_clk); in macb_runtime_resume()
5050 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()
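/*
 * The runtime-PM pair gates clocks only. The two macb_clks_disable()
 * calls above suggest a wakeup-dependent split: a full gate of
 * pclk/hclk/tx/rx/tsu in one branch, tsu_clk alone in the other so the
 * MAC can still wake the system. macb_runtime_resume() re-enables the
 * same set via clk_prepare_enable().
 */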