Lines Matching +full:rx +full:- +full:delay +full:- +full:ps (a full-text search whose tokens correspond to the string "rx-delay-ps")

7  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
151 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
153 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
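The two mask macros above pack one bit per queue: TX-queue bits land in positions 0-7 and RX-queue bits in positions 8-15, so the two masks never overlap. A minimal standalone sketch (not driver code) of what they expand to for all eight queues:

#include <stdio.h>

#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << (nr_txqs)) - 1) << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << (nr_rxqs)) - 1) << 8)

int main(void)
{
	/* Eight TX queues -> bits 0..7, eight RX queues -> bits 8..15. */
	printf("TX mask: 0x%08x\n", MVNETA_TX_INTR_MASK(8));	/* 0x000000ff */
	printf("RX mask: 0x%08x\n", MVNETA_RX_INTR_MASK(8));	/* 0x0000ff00 */
	return 0;
}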
264 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
276 * the RX side. Those two bytes being at the front of the Ethernet header, they keep the IP header aligned on a 4-byte boundary.
304 /* Max number of Rx descriptors */
333 #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
336 ((addr >= txq->tso_hdrs_phys) && \
337 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
340 (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
432 struct mvneta_stats ps; member
449 /* Pointer to the CPU-local NAPI struct */
560 u32 reserved2; /* hw_cmd - (for future use, PMT) */
561 u32 reserved3[4]; /* Reserved - (for future use) */
566 u16 reserved1; /* pnc_info - (for future use, PnC) */
572 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
574 u16 reserved4; /* csum_l4 - (for future use, PnC) */
584 u32 reserved2; /* hw_cmd - (for future use, PMT) */
586 u32 reserved3[4]; /* Reserved - (for future use) */
591 u16 reserved1; /* pnc_info - (for future use, PnC) */
597 u16 reserved4; /* csum_l4 - (for future use, PnC) */
599 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
621 /* Number of this TX queue, in the range 0-7 */
669 /* rx queue number, in the range 0-7 */
672 /* num of rx descriptors in the rx descriptor ring */
682 /* Virtual address of the RX buffer */
685 /* Virtual address of the RX DMA descriptors array */
688 /* DMA address of the RX DMA descriptors array */
691 /* Index of the last RX DMA descriptor */
694 /* Index of the next RX DMA descriptor to process */
697 /* Index of first RX DMA descriptor to refill */
703 /* The hardware supports eight (8) rx queues, but we are only allowing
724 writel(data, pp->base + offset); in mvreg_write()
730 return readl(pp->base + offset); in mvreg_read()
736 txq->txq_get_index++; in mvneta_txq_inc_get()
737 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get()
738 txq->txq_get_index = 0; in mvneta_txq_inc_get()
744 txq->txq_put_index++; in mvneta_txq_inc_put()
745 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put()
746 txq->txq_put_index = 0; in mvneta_txq_inc_put()
780 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
782 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); in mvneta_get_stats64()
783 rx_packets = cpu_stats->es.ps.rx_packets; in mvneta_get_stats64()
784 rx_bytes = cpu_stats->es.ps.rx_bytes; in mvneta_get_stats64()
785 rx_dropped = cpu_stats->rx_dropped; in mvneta_get_stats64()
786 rx_errors = cpu_stats->rx_errors; in mvneta_get_stats64()
787 tx_packets = cpu_stats->es.ps.tx_packets; in mvneta_get_stats64()
788 tx_bytes = cpu_stats->es.ps.tx_bytes; in mvneta_get_stats64()
789 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); in mvneta_get_stats64()
791 stats->rx_packets += rx_packets; in mvneta_get_stats64()
792 stats->rx_bytes += rx_bytes; in mvneta_get_stats64()
793 stats->rx_dropped += rx_dropped; in mvneta_get_stats64()
794 stats->rx_errors += rx_errors; in mvneta_get_stats64()
795 stats->tx_packets += tx_packets; in mvneta_get_stats64()
796 stats->tx_bytes += tx_bytes; in mvneta_get_stats64()
799 stats->tx_dropped = dev->stats.tx_dropped; in mvneta_get_stats64()
802 /* Rx descriptors helper methods */
804 /* Checks whether the RX descriptor having this status is both the first
805 * and the last descriptor for the RX packet. Each RX packet is currently
806 * received through a single RX descriptor, so a descriptor that does not have both bits set is treated as an error.
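The body of the helper that performs this first/last check is not among the matched lines; conceptually it only verifies that both status flags are present. A hedged sketch, with assumed bit positions rather than values taken from the listing:

/* Sketch only: a frame that fits in one descriptor carries both flags. */
#define RXD_FIRST_DESC	(1u << 26)	/* assumed bit position */
#define RXD_LAST_DESC	(1u << 27)	/* assumed bit position */
#define RXD_FIRST_LAST	(RXD_FIRST_DESC | RXD_LAST_DESC)

static int rx_desc_is_first_last(unsigned int status)
{
	return (status & RXD_FIRST_LAST) == RXD_FIRST_LAST;
}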
824 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
827 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; in mvneta_rxq_non_occup_desc_add()
830 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
834 /* Get number of RX descriptors occupied by received packets */
840 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
844 /* Update num of rx desc called upon return from rx path or
856 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
867 rx_done -= 0xff; in mvneta_rxq_desc_num_update()
874 rx_filled -= 0xff; in mvneta_rxq_desc_num_update()
876 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
880 /* Get pointer to next RX descriptor to be processed by SW */
884 int rx_desc = rxq->next_desc_to_proc; in mvneta_rxq_next_desc_get()
886 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); in mvneta_rxq_next_desc_get()
887 prefetch(rxq->descs + rxq->next_desc_to_proc); in mvneta_rxq_next_desc_get()
888 return rxq->descs + rx_desc; in mvneta_rxq_next_desc_get()
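mvneta_rxq_next_desc_get() walks the ring with the MVNETA_QUEUE_NEXT_DESC() macro quoted near the top of the listing (file line 264), which increments the index and wraps back to 0 once last_desc is reached. A small standalone illustration of that wrap:

#include <stdio.h>

struct ring {
	int last_desc;	/* stands in for rxq->last_desc, i.e. size - 1 */
};

#define QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

int main(void)
{
	struct ring q = { .last_desc = 3 };	/* a 4-entry ring */
	int i, idx = 0;

	for (i = 0; i < 6; i++) {
		printf("%d ", idx);	/* prints: 0 1 2 3 0 1 */
		idx = QUEUE_NEXT_DESC(&q, idx);
	}
	printf("\n");
	return 0;
}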
898 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) << in mvneta_max_rx_size_set()
904 /* Set rx queue offset */
911 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_offset_set()
916 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_offset_set()
929 pend_desc += txq->pending; in mvneta_txq_pend_desc_add()
934 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
935 pend_desc -= val; in mvneta_txq_pend_desc_add()
937 txq->pending = 0; in mvneta_txq_pend_desc_add()
944 int tx_desc = txq->next_desc_to_proc; in mvneta_txq_next_desc_get()
946 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); in mvneta_txq_next_desc_get()
947 return txq->descs + tx_desc; in mvneta_txq_next_desc_get()
955 if (txq->next_desc_to_proc == 0) in mvneta_txq_desc_put()
956 txq->next_desc_to_proc = txq->last_desc - 1; in mvneta_txq_desc_put()
958 txq->next_desc_to_proc--; in mvneta_txq_desc_put()
968 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); in mvneta_rxq_buf_size_set()
973 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); in mvneta_rxq_buf_size_set()
982 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_disable()
984 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_disable()
993 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_enable()
995 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_enable()
1004 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_long_pool_set()
1006 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); in mvneta_rxq_long_pool_set()
1008 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_long_pool_set()
1017 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_short_pool_set()
1019 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); in mvneta_rxq_short_pool_set()
1021 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_short_pool_set()
1032 dev_warn(pp->dev->dev.parent, in mvneta_bm_pool_bufsize_set()
1052 if (pp->bm_win_id < 0) { in mvneta_mbus_io_win_set()
1056 pp->bm_win_id = i; in mvneta_mbus_io_win_set()
1061 return -ENOMEM; in mvneta_mbus_io_win_set()
1063 i = pp->bm_win_id; in mvneta_mbus_io_win_set()
1075 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); in mvneta_mbus_io_win_set()
1094 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, in mvneta_bm_port_mbus_init()
1099 pp->bm_win_id = -1; in mvneta_bm_port_mbus_init()
1101 /* Open NETA -> BM window */ in mvneta_bm_port_mbus_init()
1102 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, in mvneta_bm_port_mbus_init()
1105 netdev_info(pp->dev, "fail to configure mbus window to BM\n"); in mvneta_bm_port_mbus_init()
1117 struct device_node *dn = pdev->dev.of_node; in mvneta_bm_port_init()
1120 if (!pp->neta_armada3700) { in mvneta_bm_port_init()
1128 if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { in mvneta_bm_port_init()
1129 netdev_info(pp->dev, "missing long pool id\n"); in mvneta_bm_port_init()
1130 return -EINVAL; in mvneta_bm_port_init()
1134 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, in mvneta_bm_port_init()
1135 MVNETA_BM_LONG, pp->id, in mvneta_bm_port_init()
1136 MVNETA_RX_PKT_SIZE(pp->dev->mtu)); in mvneta_bm_port_init()
1137 if (!pp->pool_long) { in mvneta_bm_port_init()
1138 netdev_info(pp->dev, "fail to obtain long pool for port\n"); in mvneta_bm_port_init()
1139 return -ENOMEM; in mvneta_bm_port_init()
1142 pp->pool_long->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1144 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, in mvneta_bm_port_init()
1145 pp->pool_long->id); in mvneta_bm_port_init()
1148 if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) in mvneta_bm_port_init()
1152 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, in mvneta_bm_port_init()
1153 MVNETA_BM_SHORT, pp->id, in mvneta_bm_port_init()
1155 if (!pp->pool_short) { in mvneta_bm_port_init()
1156 netdev_info(pp->dev, "fail to obtain short pool for port\n"); in mvneta_bm_port_init()
1157 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_port_init()
1158 return -ENOMEM; in mvneta_bm_port_init()
1162 pp->pool_short->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1163 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, in mvneta_bm_port_init()
1164 pp->pool_short->id); in mvneta_bm_port_init()
1173 struct mvneta_bm_pool *bm_pool = pp->pool_long; in mvneta_bm_update_mtu()
1174 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; in mvneta_bm_update_mtu()
1178 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); in mvneta_bm_update_mtu()
1179 if (hwbm_pool->buf_num) { in mvneta_bm_update_mtu()
1181 bm_pool->id); in mvneta_bm_update_mtu()
1185 bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); in mvneta_bm_update_mtu()
1186 bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); in mvneta_bm_update_mtu()
1187 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + in mvneta_bm_update_mtu()
1188 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); in mvneta_bm_update_mtu()
1191 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); in mvneta_bm_update_mtu()
1192 if (num != hwbm_pool->size) { in mvneta_bm_update_mtu()
1194 bm_pool->id, num, hwbm_pool->size); in mvneta_bm_update_mtu()
1197 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); in mvneta_bm_update_mtu()
1202 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_update_mtu()
1203 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); in mvneta_bm_update_mtu()
1205 pp->bm_priv = NULL; in mvneta_bm_update_mtu()
1206 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_bm_update_mtu()
1208 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); in mvneta_bm_update_mtu()
1211 /* Start the Ethernet port RX and TX activity */
1220 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up()
1221 if (txq->descs) in mvneta_port_up()
1229 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up()
1231 if (rxq->descs) in mvneta_port_up()
1243 /* Stop Rx port activity. Check port Rx activity. */ in mvneta_port_down()
1251 /* Wait for all Rx activity to terminate. */ in mvneta_port_down()
1255 netdev_warn(pp->dev, in mvneta_port_down()
1256 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", in mvneta_port_down()
1278 netdev_warn(pp->dev, in mvneta_port_down()
1294 netdev_warn(pp->dev, in mvneta_port_down()
1334 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1340 if (queue == -1) { in mvneta_set_ucast_table()
1351 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1357 if (queue == -1) { in mvneta_set_special_mcast_table()
1369 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1375 if (queue == -1) { in mvneta_set_other_mcast_table()
1376 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1379 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1429 * Resets RX and TX descriptor rings.
1451 /* Set CPU queue access map. CPUs are assigned to the RX and in mvneta_defaults_set()
1454 * default RX queue. in mvneta_defaults_set()
1459 if (!pp->neta_armada3700) { in mvneta_defaults_set()
1473 txq_map = (cpu == pp->rxq_def) ? in mvneta_defaults_set()
1484 /* Reset RX and TX DMAs */ in mvneta_defaults_set()
1499 if (pp->bm_priv) in mvneta_defaults_set()
1507 if (pp->bm_priv) in mvneta_defaults_set()
1508 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); in mvneta_defaults_set()
1511 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_defaults_set()
1540 mvneta_set_ucast_table(pp, -1); in mvneta_defaults_set()
1541 mvneta_set_special_mcast_table(pp, -1); in mvneta_defaults_set()
1542 mvneta_set_other_mcast_table(pp, -1); in mvneta_defaults_set()
1544 /* Set port interrupt enable register - default enable all */ in mvneta_defaults_set()
1611 if (queue == -1) { in mvneta_set_ucast_addr()
1629 if (queue != -1) { in mvneta_mac_addr_set()
1642 /* Set the number of packets that will be received before RX interrupt
1648 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), in mvneta_rx_pkts_coal_set()
1652 /* Set the time delay in usec before RX interrupt will be generated by
1661 clk_rate = clk_get_rate(pp->clk); in mvneta_rx_time_coal_set()
1664 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); in mvneta_rx_time_coal_set()
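The conversion from the caller's microsecond value to what mvneta_rx_time_coal_set() writes into MVNETA_RXQ_TIME_COAL_REG is not among the matched lines. A plausible sketch, assuming the register threshold is expressed in cycles of the clock obtained from clk_get_rate() above:

/* Assumption: the hardware counts port-clock cycles, so scale usec by
 * the clock rate. For a 250 MHz clock, 100 usec becomes 25000 cycles. */
static unsigned long usec_to_coal_cycles(unsigned long clk_rate_hz,
					 unsigned int usec)
{
	return (clk_rate_hz / 1000000UL) * usec;
}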
1673 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1678 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
1681 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1688 rx_desc->buf_phys_addr = phys_addr; in mvneta_rx_desc_fill()
1689 i = rx_desc - rxq->descs; in mvneta_rx_desc_fill()
1690 rxq->buf_virt_addr[i] = virt_addr; in mvneta_rx_desc_fill()
1703 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1704 sent_desc = sent_desc - 0xff; in mvneta_txq_sent_desc_dec()
1708 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1718 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1776 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_error()
1777 u32 status = rx_desc->status; in mvneta_rx_error()
1779 /* update per-cpu counter */ in mvneta_rx_error()
1780 u64_stats_update_begin(&stats->syncp); in mvneta_rx_error()
1781 stats->rx_errors++; in mvneta_rx_error()
1782 u64_stats_update_end(&stats->syncp); in mvneta_rx_error()
1786 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", in mvneta_rx_error()
1787 status, rx_desc->data_size); in mvneta_rx_error()
1790 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", in mvneta_rx_error()
1791 status, rx_desc->data_size); in mvneta_rx_error()
1794 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", in mvneta_rx_error()
1795 status, rx_desc->data_size); in mvneta_rx_error()
1798 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", in mvneta_rx_error()
1799 status, rx_desc->data_size); in mvneta_rx_error()
1804 /* Handle RX checksum offload based on the descriptor's status */
1808 if ((pp->dev->features & NETIF_F_RXCSUM) && in mvneta_rx_csum()
1811 skb->csum = 0; in mvneta_rx_csum()
1812 skb->ip_summed = CHECKSUM_UNNECESSARY; in mvneta_rx_csum()
1816 skb->ip_summed = CHECKSUM_NONE; in mvneta_rx_csum()
1826 int queue = fls(cause) - 1; in mvneta_tx_done_policy()
1828 return &pp->txqs[queue]; in mvneta_tx_done_policy()
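fls() returns the 1-based position of the most significant set bit (and 0 when no bit is set), so fls(cause) - 1 above picks the highest-numbered TX queue whose done bit is raised. A standalone illustration with a simple fls() stand-in:

#include <stdio.h>

static int fls_emul(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int cause = (1u << 1) | (1u << 3);	/* TXQ1 and TXQ3 done */

	printf("service TXQ %d first\n", fls_emul(cause) - 1);	/* TXQ 3 */
	return 0;
}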
1840 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; in mvneta_txq_bufs_free()
1841 struct mvneta_tx_desc *tx_desc = txq->descs + in mvneta_txq_bufs_free()
1842 txq->txq_get_index; in mvneta_txq_bufs_free()
1846 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) && in mvneta_txq_bufs_free()
1847 buf->type != MVNETA_TYPE_XDP_TX) in mvneta_txq_bufs_free()
1848 dma_unmap_single(pp->dev->dev.parent, in mvneta_txq_bufs_free()
1849 tx_desc->buf_phys_addr, in mvneta_txq_bufs_free()
1850 tx_desc->data_size, DMA_TO_DEVICE); in mvneta_txq_bufs_free()
1851 if (buf->type == MVNETA_TYPE_SKB && buf->skb) { in mvneta_txq_bufs_free()
1852 bytes_compl += buf->skb->len; in mvneta_txq_bufs_free()
1854 dev_kfree_skb_any(buf->skb); in mvneta_txq_bufs_free()
1855 } else if (buf->type == MVNETA_TYPE_XDP_TX || in mvneta_txq_bufs_free()
1856 buf->type == MVNETA_TYPE_XDP_NDO) { in mvneta_txq_bufs_free()
1857 if (napi && buf->type == MVNETA_TYPE_XDP_TX) in mvneta_txq_bufs_free()
1858 xdp_return_frame_rx_napi(buf->xdpf); in mvneta_txq_bufs_free()
1860 xdp_return_frame(buf->xdpf); in mvneta_txq_bufs_free()
1871 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1880 txq->count -= tx_done; in mvneta_txq_done()
1883 if (txq->count <= txq->tx_wake_threshold) in mvneta_txq_done()
1898 page = page_pool_alloc_pages(rxq->page_pool, in mvneta_rx_refill()
1901 return -ENOMEM; in mvneta_rx_refill()
1903 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; in mvneta_rx_refill()
1912 if (skb->ip_summed == CHECKSUM_PARTIAL) { in mvneta_skb_tx_csum()
1921 ip_hdr_len = ip4h->ihl; in mvneta_skb_tx_csum()
1922 l4_proto = ip4h->protocol; in mvneta_skb_tx_csum()
1929 l4_proto = ip6h->nexthdr; in mvneta_skb_tx_csum()
1950 if (pp->bm_priv) { in mvneta_rxq_drop_pkts()
1957 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rxq_drop_pkts()
1959 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rxq_drop_pkts()
1960 rx_desc->buf_phys_addr); in mvneta_rxq_drop_pkts()
1965 for (i = 0; i < rxq->size; i++) { in mvneta_rxq_drop_pkts()
1966 struct mvneta_rx_desc *rx_desc = rxq->descs + i; in mvneta_rxq_drop_pkts()
1967 void *data = rxq->buf_virt_addr[i]; in mvneta_rxq_drop_pkts()
1968 if (!data || !(rx_desc->buf_phys_addr)) in mvneta_rxq_drop_pkts()
1971 page_pool_put_full_page(rxq->page_pool, data, false); in mvneta_rxq_drop_pkts()
1973 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) in mvneta_rxq_drop_pkts()
1974 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mvneta_rxq_drop_pkts()
1975 page_pool_destroy(rxq->page_pool); in mvneta_rxq_drop_pkts()
1976 rxq->page_pool = NULL; in mvneta_rxq_drop_pkts()
1981 struct mvneta_stats *ps) in mvneta_update_stats() argument
1983 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_update_stats()
1985 u64_stats_update_begin(&stats->syncp); in mvneta_update_stats()
1986 stats->es.ps.rx_packets += ps->rx_packets; in mvneta_update_stats()
1987 stats->es.ps.rx_bytes += ps->rx_bytes; in mvneta_update_stats()
1989 stats->es.ps.xdp_redirect += ps->xdp_redirect; in mvneta_update_stats()
1990 stats->es.ps.xdp_pass += ps->xdp_pass; in mvneta_update_stats()
1991 stats->es.ps.xdp_drop += ps->xdp_drop; in mvneta_update_stats()
1992 u64_stats_update_end(&stats->syncp); in mvneta_update_stats()
1999 int curr_desc = rxq->first_to_refill; in mvneta_rx_refill_queue()
2002 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { in mvneta_rx_refill_queue()
2003 rx_desc = rxq->descs + curr_desc; in mvneta_rx_refill_queue()
2004 if (!(rx_desc->buf_phys_addr)) { in mvneta_rx_refill_queue()
2009 rxq->id, i, rxq->refill_num); in mvneta_rx_refill_queue()
2011 stats = this_cpu_ptr(pp->stats); in mvneta_rx_refill_queue()
2012 u64_stats_update_begin(&stats->syncp); in mvneta_rx_refill_queue()
2013 stats->es.refill_error++; in mvneta_rx_refill_queue()
2014 u64_stats_update_end(&stats->syncp); in mvneta_rx_refill_queue()
2020 rxq->refill_num -= i; in mvneta_rx_refill_queue()
2021 rxq->first_to_refill = curr_desc; in mvneta_rx_refill_queue()
2033 for (i = 0; i < sinfo->nr_frags; i++) in mvneta_xdp_put_buff()
2034 page_pool_put_full_page(rxq->page_pool, in mvneta_xdp_put_buff()
2035 skb_frag_page(&sinfo->frags[i]), napi); in mvneta_xdp_put_buff()
2036 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), in mvneta_xdp_put_buff()
2048 if (txq->count >= txq->tx_stop_threshold) in mvneta_xdp_submit_frame()
2053 buf = &txq->buf[txq->txq_put_index]; in mvneta_xdp_submit_frame()
2056 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, in mvneta_xdp_submit_frame()
2057 xdpf->len, DMA_TO_DEVICE); in mvneta_xdp_submit_frame()
2058 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { in mvneta_xdp_submit_frame()
2062 buf->type = MVNETA_TYPE_XDP_NDO; in mvneta_xdp_submit_frame()
2064 struct page *page = virt_to_page(xdpf->data); in mvneta_xdp_submit_frame()
2067 sizeof(*xdpf) + xdpf->headroom; in mvneta_xdp_submit_frame()
2068 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, in mvneta_xdp_submit_frame()
2069 xdpf->len, DMA_BIDIRECTIONAL); in mvneta_xdp_submit_frame()
2070 buf->type = MVNETA_TYPE_XDP_TX; in mvneta_xdp_submit_frame()
2072 buf->xdpf = xdpf; in mvneta_xdp_submit_frame()
2074 tx_desc->command = MVNETA_TXD_FLZ_DESC; in mvneta_xdp_submit_frame()
2075 tx_desc->buf_phys_addr = dma_addr; in mvneta_xdp_submit_frame()
2076 tx_desc->data_size = xdpf->len; in mvneta_xdp_submit_frame()
2079 txq->pending++; in mvneta_xdp_submit_frame()
2080 txq->count++; in mvneta_xdp_submit_frame()
2088 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit_back()
2100 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit_back()
2101 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit_back()
2106 u64_stats_update_begin(&stats->syncp); in mvneta_xdp_xmit_back()
2107 stats->es.ps.tx_bytes += xdpf->len; in mvneta_xdp_xmit_back()
2108 stats->es.ps.tx_packets++; in mvneta_xdp_xmit_back()
2109 stats->es.ps.xdp_tx++; in mvneta_xdp_xmit_back()
2110 u64_stats_update_end(&stats->syncp); in mvneta_xdp_xmit_back()
2114 u64_stats_update_begin(&stats->syncp); in mvneta_xdp_xmit_back()
2115 stats->es.ps.xdp_tx_err++; in mvneta_xdp_xmit_back()
2116 u64_stats_update_end(&stats->syncp); in mvneta_xdp_xmit_back()
2128 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit()
2135 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) in mvneta_xdp_xmit()
2136 return -ENETDOWN; in mvneta_xdp_xmit()
2139 return -EINVAL; in mvneta_xdp_xmit()
2141 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit()
2142 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit()
2148 nxmit_byte += frames[i]->len; in mvneta_xdp_xmit()
2151 nxmit--; in mvneta_xdp_xmit()
2159 u64_stats_update_begin(&stats->syncp); in mvneta_xdp_xmit()
2160 stats->es.ps.tx_bytes += nxmit_byte; in mvneta_xdp_xmit()
2161 stats->es.ps.tx_packets += nxmit; in mvneta_xdp_xmit()
2162 stats->es.ps.xdp_xmit += nxmit; in mvneta_xdp_xmit()
2163 stats->es.ps.xdp_xmit_err += num_frame - nxmit; in mvneta_xdp_xmit()
2164 u64_stats_update_end(&stats->syncp); in mvneta_xdp_xmit()
2177 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2178 data_len = xdp->data_end - xdp->data; in mvneta_run_xdp()
2182 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2187 stats->xdp_pass++; in mvneta_run_xdp()
2192 err = xdp_do_redirect(pp->dev, xdp, prog); in mvneta_run_xdp()
2198 stats->xdp_redirect++; in mvneta_run_xdp()
2211 trace_xdp_exception(pp->dev, prog, act); in mvneta_run_xdp()
2216 stats->xdp_drop++; in mvneta_run_xdp()
2220 stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; in mvneta_run_xdp()
2221 stats->rx_packets++; in mvneta_run_xdp()
2234 int data_len = -MVNETA_MH_SIZE, len; in mvneta_swbm_rx_frame()
2235 struct net_device *dev = pp->dev; in mvneta_swbm_rx_frame()
2244 data_len += len - ETH_FCS_LEN; in mvneta_swbm_rx_frame()
2246 *size = *size - len; in mvneta_swbm_rx_frame()
2248 dma_dir = page_pool_get_dma_dir(rxq->page_pool); in mvneta_swbm_rx_frame()
2249 dma_sync_single_for_cpu(dev->dev.parent, in mvneta_swbm_rx_frame()
2250 rx_desc->buf_phys_addr, in mvneta_swbm_rx_frame()
2253 rx_desc->buf_phys_addr = 0; in mvneta_swbm_rx_frame()
2258 xdp->data_hard_start = data; in mvneta_swbm_rx_frame()
2259 xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE; in mvneta_swbm_rx_frame()
2260 xdp->data_end = xdp->data + data_len; in mvneta_swbm_rx_frame()
2264 sinfo->nr_frags = 0; in mvneta_swbm_rx_frame()
2275 struct net_device *dev = pp->dev; in mvneta_swbm_add_rx_fragment()
2284 data_len = len - ETH_FCS_LEN; in mvneta_swbm_add_rx_fragment()
2286 dma_dir = page_pool_get_dma_dir(rxq->page_pool); in mvneta_swbm_add_rx_fragment()
2287 dma_sync_single_for_cpu(dev->dev.parent, in mvneta_swbm_add_rx_fragment()
2288 rx_desc->buf_phys_addr, in mvneta_swbm_add_rx_fragment()
2290 rx_desc->buf_phys_addr = 0; in mvneta_swbm_add_rx_fragment()
2292 if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { in mvneta_swbm_add_rx_fragment()
2293 skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags]; in mvneta_swbm_add_rx_fragment()
2295 skb_frag_off_set(frag, pp->rx_offset_correction); in mvneta_swbm_add_rx_fragment()
2298 sinfo->nr_frags++; in mvneta_swbm_add_rx_fragment()
2300 page_pool_put_full_page(rxq->page_pool, page, true); in mvneta_swbm_add_rx_fragment()
2302 *size -= len; in mvneta_swbm_add_rx_fragment()
2310 int i, num_frags = sinfo->nr_frags; in mvneta_swbm_build_skb()
2313 skb = build_skb(xdp->data_hard_start, PAGE_SIZE); in mvneta_swbm_build_skb()
2315 return ERR_PTR(-ENOMEM); in mvneta_swbm_build_skb()
2317 page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data)); in mvneta_swbm_build_skb()
2319 skb_reserve(skb, xdp->data - xdp->data_hard_start); in mvneta_swbm_build_skb()
2320 skb_put(skb, xdp->data_end - xdp->data); in mvneta_swbm_build_skb()
2324 skb_frag_t *frag = &sinfo->frags[i]; in mvneta_swbm_build_skb()
2326 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in mvneta_swbm_build_skb()
2329 page_pool_release_page(rxq->page_pool, skb_frag_page(frag)); in mvneta_swbm_build_skb()
2335 /* Main rx processing when using software buffer management */
2341 struct net_device *dev = pp->dev; in mvneta_rx_swbm()
2344 .rxq = &rxq->xdp_rxq, in mvneta_rx_swbm()
2346 struct mvneta_stats ps = {}; in mvneta_rx_swbm() local
2354 xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_rx_swbm()
2363 index = rx_desc - rxq->descs; in mvneta_rx_swbm()
2364 page = (struct page *)rxq->buf_virt_addr[index]; in mvneta_rx_swbm()
2366 rx_status = rx_desc->status; in mvneta_rx_swbm()
2368 rxq->refill_num++; in mvneta_rx_swbm()
2377 size = rx_desc->data_size; in mvneta_rx_swbm()
2378 frame_sz = size - ETH_FCS_LEN; in mvneta_rx_swbm()
2385 rx_desc->buf_phys_addr = 0; in mvneta_rx_swbm()
2386 page_pool_put_full_page(rxq->page_pool, page, in mvneta_rx_swbm()
2400 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); in mvneta_rx_swbm()
2405 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) in mvneta_rx_swbm()
2410 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_swbm()
2412 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); in mvneta_rx_swbm()
2414 u64_stats_update_begin(&stats->syncp); in mvneta_rx_swbm()
2415 stats->es.skb_alloc_error++; in mvneta_rx_swbm()
2416 stats->rx_dropped++; in mvneta_rx_swbm()
2417 u64_stats_update_end(&stats->syncp); in mvneta_rx_swbm()
2422 ps.rx_bytes += skb->len; in mvneta_rx_swbm()
2423 ps.rx_packets++; in mvneta_rx_swbm()
2425 skb->protocol = eth_type_trans(skb, dev); in mvneta_rx_swbm()
2433 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); in mvneta_rx_swbm()
2435 if (ps.xdp_redirect) in mvneta_rx_swbm()
2438 if (ps.rx_packets) in mvneta_rx_swbm()
2439 mvneta_update_stats(pp, &ps); in mvneta_rx_swbm()
2447 return ps.rx_packets; in mvneta_rx_swbm()
2450 /* Main rx processing when using hardware buffer management */
2455 struct net_device *dev = pp->dev; in mvneta_rx_hwbm()
2480 rx_status = rx_desc->status; in mvneta_rx_hwbm()
2481 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); in mvneta_rx_hwbm()
2482 data = (u8 *)(uintptr_t)rx_desc->buf_cookie; in mvneta_rx_hwbm()
2483 phys_addr = rx_desc->buf_phys_addr; in mvneta_rx_hwbm()
2485 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rx_hwbm()
2491 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2492 rx_desc->buf_phys_addr); in mvneta_rx_hwbm()
2505 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, in mvneta_rx_hwbm()
2506 rx_desc->buf_phys_addr, in mvneta_rx_hwbm()
2513 skb->protocol = eth_type_trans(skb, dev); in mvneta_rx_hwbm()
2521 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2522 rx_desc->buf_phys_addr); in mvneta_rx_hwbm()
2529 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); in mvneta_rx_hwbm()
2533 netdev_err(dev, "Linux processing - Can't refill\n"); in mvneta_rx_hwbm()
2535 stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2536 u64_stats_update_begin(&stats->syncp); in mvneta_rx_hwbm()
2537 stats->es.refill_error++; in mvneta_rx_hwbm()
2538 u64_stats_update_end(&stats->syncp); in mvneta_rx_hwbm()
2543 frag_size = bm_pool->hwbm_pool.frag_size; in mvneta_rx_hwbm()
2550 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, in mvneta_rx_hwbm()
2551 bm_pool->buf_size, DMA_FROM_DEVICE); in mvneta_rx_hwbm()
2562 skb->protocol = eth_type_trans(skb, dev); in mvneta_rx_hwbm()
2570 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2572 u64_stats_update_begin(&stats->syncp); in mvneta_rx_hwbm()
2573 stats->es.ps.rx_packets += rcvd_pkts; in mvneta_rx_hwbm()
2574 stats->es.ps.rx_bytes += rcvd_bytes; in mvneta_rx_hwbm()
2575 u64_stats_update_end(&stats->syncp); in mvneta_rx_hwbm()
2589 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_tso_put_hdr()
2593 tx_desc->data_size = hdr_len; in mvneta_tso_put_hdr()
2594 tx_desc->command = mvneta_skb_tx_csum(pp, skb); in mvneta_tso_put_hdr()
2595 tx_desc->command |= MVNETA_TXD_F_DESC; in mvneta_tso_put_hdr()
2596 tx_desc->buf_phys_addr = txq->tso_hdrs_phys + in mvneta_tso_put_hdr()
2597 txq->txq_put_index * TSO_HEADER_SIZE; in mvneta_tso_put_hdr()
2598 buf->type = MVNETA_TYPE_SKB; in mvneta_tso_put_hdr()
2599 buf->skb = NULL; in mvneta_tso_put_hdr()
2609 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_tso_put_data()
2613 tx_desc->data_size = size; in mvneta_tso_put_data()
2614 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, in mvneta_tso_put_data()
2616 if (unlikely(dma_mapping_error(dev->dev.parent, in mvneta_tso_put_data()
2617 tx_desc->buf_phys_addr))) { in mvneta_tso_put_data()
2619 return -ENOMEM; in mvneta_tso_put_data()
2622 tx_desc->command = 0; in mvneta_tso_put_data()
2623 buf->type = MVNETA_TYPE_SKB; in mvneta_tso_put_data()
2624 buf->skb = NULL; in mvneta_tso_put_data()
2628 tx_desc->command = MVNETA_TXD_L_DESC; in mvneta_tso_put_data()
2632 buf->skb = skb; in mvneta_tso_put_data()
2648 if ((txq->count + tso_count_descs(skb)) >= txq->size) in mvneta_tx_tso()
2659 total_len = skb->len - hdr_len; in mvneta_tx_tso()
2663 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); in mvneta_tx_tso()
2664 total_len -= data_left; in mvneta_tx_tso()
2668 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; in mvneta_tx_tso()
2684 data_left -= size; in mvneta_tx_tso()
2694 * be DMA-unmapped. in mvneta_tx_tso()
2696 for (i = desc_count - 1; i >= 0; i--) { in mvneta_tx_tso()
2697 struct mvneta_tx_desc *tx_desc = txq->descs + i; in mvneta_tx_tso()
2698 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) in mvneta_tx_tso()
2699 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_tso()
2700 tx_desc->buf_phys_addr, in mvneta_tx_tso()
2701 tx_desc->data_size, in mvneta_tx_tso()
2713 int i, nr_frags = skb_shinfo(skb)->nr_frags; in mvneta_tx_frag_process()
2716 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_tx_frag_process()
2717 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in mvneta_tx_frag_process()
2721 tx_desc->data_size = skb_frag_size(frag); in mvneta_tx_frag_process()
2723 tx_desc->buf_phys_addr = in mvneta_tx_frag_process()
2724 dma_map_single(pp->dev->dev.parent, addr, in mvneta_tx_frag_process()
2725 tx_desc->data_size, DMA_TO_DEVICE); in mvneta_tx_frag_process()
2727 if (dma_mapping_error(pp->dev->dev.parent, in mvneta_tx_frag_process()
2728 tx_desc->buf_phys_addr)) { in mvneta_tx_frag_process()
2733 if (i == nr_frags - 1) { in mvneta_tx_frag_process()
2735 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; in mvneta_tx_frag_process()
2736 buf->skb = skb; in mvneta_tx_frag_process()
2739 tx_desc->command = 0; in mvneta_tx_frag_process()
2740 buf->skb = NULL; in mvneta_tx_frag_process()
2742 buf->type = MVNETA_TYPE_SKB; in mvneta_tx_frag_process()
2752 for (i = i - 1; i >= 0; i--) { in mvneta_tx_frag_process()
2753 tx_desc = txq->descs + i; in mvneta_tx_frag_process()
2754 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_frag_process()
2755 tx_desc->buf_phys_addr, in mvneta_tx_frag_process()
2756 tx_desc->data_size, in mvneta_tx_frag_process()
2761 return -ENOMEM; in mvneta_tx_frag_process()
2769 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx()
2770 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_tx()
2772 int len = skb->len; in mvneta_tx()
2784 frags = skb_shinfo(skb)->nr_frags + 1; in mvneta_tx()
2791 tx_desc->data_size = skb_headlen(skb); in mvneta_tx()
2793 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, in mvneta_tx()
2794 tx_desc->data_size, in mvneta_tx()
2796 if (unlikely(dma_mapping_error(dev->dev.parent, in mvneta_tx()
2797 tx_desc->buf_phys_addr))) { in mvneta_tx()
2803 buf->type = MVNETA_TYPE_SKB; in mvneta_tx()
2807 tx_desc->command = tx_cmd; in mvneta_tx()
2808 buf->skb = skb; in mvneta_tx()
2813 buf->skb = NULL; in mvneta_tx()
2815 tx_desc->command = tx_cmd; in mvneta_tx()
2818 dma_unmap_single(dev->dev.parent, in mvneta_tx()
2819 tx_desc->buf_phys_addr, in mvneta_tx()
2820 tx_desc->data_size, in mvneta_tx()
2831 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_tx()
2835 txq->count += frags; in mvneta_tx()
2836 if (txq->count >= txq->tx_stop_threshold) in mvneta_tx()
2840 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) in mvneta_tx()
2843 txq->pending += frags; in mvneta_tx()
2845 u64_stats_update_begin(&stats->syncp); in mvneta_tx()
2846 stats->es.ps.tx_bytes += len; in mvneta_tx()
2847 stats->es.ps.tx_packets++; in mvneta_tx()
2848 u64_stats_update_end(&stats->syncp); in mvneta_tx()
2850 dev->stats.tx_dropped++; in mvneta_tx()
2863 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done_force()
2864 int tx_done = txq->count; in mvneta_txq_done_force()
2869 txq->count = 0; in mvneta_txq_done_force()
2870 txq->txq_put_index = 0; in mvneta_txq_done_force()
2871 txq->txq_get_index = 0; in mvneta_txq_done_force()
2874 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
2886 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
2889 if (txq->count) in mvneta_tx_done_gbe()
2893 cause_tx_done &= ~((1 << txq->id)); in mvneta_tx_done_gbe()
2909 for (j = 7; j >= 0; j--) { in mvneta_addr_crc()
2920 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2922 * Table entries in the DA-Filter table. This method set the Special
2941 if (queue == -1) in mvneta_set_special_mcast_addr()
2954 * A CRC-8 is used as an index to the Other Multicast Table entries
2955 * in the DA-Filter table.
2956 * The method gets the CRC-8 value from the calling routine and
2958 * specified CRC-8 .
2973 if (queue == -1) { in mvneta_set_other_mcast_addr()
2986 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2988 * Table entries in the DA-Filter table.
2989 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2991 * DA-Filter table.
3004 if (queue == -1) { in mvneta_mcast_addr_set()
3005 if (pp->mcast_count[crc_result] == 0) { in mvneta_mcast_addr_set()
3006 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", in mvneta_mcast_addr_set()
3008 return -EINVAL; in mvneta_mcast_addr_set()
3011 pp->mcast_count[crc_result]--; in mvneta_mcast_addr_set()
3012 if (pp->mcast_count[crc_result] != 0) { in mvneta_mcast_addr_set()
3013 netdev_info(pp->dev, in mvneta_mcast_addr_set()
3015 pp->mcast_count[crc_result], crc_result); in mvneta_mcast_addr_set()
3016 return -EINVAL; in mvneta_mcast_addr_set()
3019 pp->mcast_count[crc_result]++; in mvneta_mcast_addr_set()
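Putting the comments above together: multicast addresses in the range 0x01-00-5E-00-00-XX are handled by the Special Multicast table, everything else by the Other Multicast table indexed with the CRC-8 computed by mvneta_addr_crc(). A hedged sketch of the classification step (the CRC-8 itself is omitted, since only its loop header appears in the listing; indexing the special table by the final XX byte is an assumption consistent with the comment):

#include <stdbool.h>

/* Does the address fall in the special range 01-00-5E-00-00-XX? */
static bool mcast_addr_is_special(const unsigned char *addr)
{
	return addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e &&
	       addr[3] == 0x00 && addr[4] == 0x00;
}

/* If special, use addr[5] (the XX byte) as the table index; otherwise use
 * the CRC-8 of the full 6-byte address as the Other Multicast table index. */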
3059 if (dev->flags & IFF_PROMISC) { in mvneta_set_rx_mode()
3062 mvneta_set_ucast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3063 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3064 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3068 mvneta_set_ucast_table(pp, -1); in mvneta_set_rx_mode()
3069 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); in mvneta_set_rx_mode()
3071 if (dev->flags & IFF_ALLMULTI) { in mvneta_set_rx_mode()
3073 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3074 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3077 mvneta_set_special_mcast_table(pp, -1); in mvneta_set_rx_mode()
3078 mvneta_set_other_mcast_table(pp, -1); in mvneta_set_rx_mode()
3082 mvneta_mcast_addr_set(pp, ha->addr, in mvneta_set_rx_mode()
3083 pp->rxq_def); in mvneta_set_rx_mode()
3090 /* Interrupt handling - the callback for request_irq() */
3096 napi_schedule(&pp->napi); in mvneta_isr()
3101 /* Interrupt handling - the callback for request_percpu_irq() */
3106 disable_percpu_irq(port->pp->dev->irq); in mvneta_percpu_isr()
3107 napi_schedule(&port->napi); in mvneta_percpu_isr()
3116 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); in mvneta_link_change()
3120 * Bits 0-7 of the causeRxTx register indicate on which TXQs packets were transmitted (bit 0 is for TX queue 0).
3122 * Bits 8-15 indicate on which RXQs packets were received
3123 * (bit 8 is for RX queue 0).
3131 struct mvneta_port *pp = netdev_priv(napi->dev); in mvneta_poll()
3132 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); in mvneta_poll()
3134 if (!netif_running(pp->dev)) { in mvneta_poll()
3158 * RX packets in mvneta_poll()
3160 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : in mvneta_poll()
3161 port->cause_rx_tx; in mvneta_poll()
3165 rx_queue = rx_queue - 1; in mvneta_poll()
3166 if (pp->bm_priv) in mvneta_poll()
3168 &pp->rxqs[rx_queue]); in mvneta_poll()
3171 &pp->rxqs[rx_queue]); in mvneta_poll()
3178 if (pp->neta_armada3700) { in mvneta_poll()
3188 enable_percpu_irq(pp->dev->irq, 0); in mvneta_poll()
3192 if (pp->neta_armada3700) in mvneta_poll()
3193 pp->cause_rx_tx = cause_rx_tx; in mvneta_poll()
3195 port->cause_rx_tx = cause_rx_tx; in mvneta_poll()
3203 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_create_page_pool()
3209 .dev = pp->dev->dev.parent, in mvneta_create_page_pool()
3211 .offset = pp->rx_offset_correction, in mvneta_create_page_pool()
3216 rxq->page_pool = page_pool_create(&pp_params); in mvneta_create_page_pool()
3217 if (IS_ERR(rxq->page_pool)) { in mvneta_create_page_pool()
3218 err = PTR_ERR(rxq->page_pool); in mvneta_create_page_pool()
3219 rxq->page_pool = NULL; in mvneta_create_page_pool()
3223 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id); in mvneta_create_page_pool()
3227 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in mvneta_create_page_pool()
3228 rxq->page_pool); in mvneta_create_page_pool()
3235 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mvneta_create_page_pool()
3237 page_pool_destroy(rxq->page_pool); in mvneta_create_page_pool()
3238 rxq->page_pool = NULL; in mvneta_create_page_pool()
3253 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); in mvneta_rxq_fill()
3254 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, in mvneta_rxq_fill()
3256 netdev_err(pp->dev, in mvneta_rxq_fill()
3258 __func__, rxq->id, i, num); in mvneta_rxq_fill()
3263 /* Add this number of RX descriptors as non occupied (ready to in mvneta_rxq_fill()
3278 mvneta_txq_done_force(pp, &pp->txqs[queue]); in mvneta_tx_reset()
3290 /* Rx/Tx queue initialization/cleanup methods */
3295 rxq->size = pp->rx_ring_size; in mvneta_rxq_sw_init()
3297 /* Allocate memory for RX descriptors */ in mvneta_rxq_sw_init()
3298 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_rxq_sw_init()
3299 rxq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_rxq_sw_init()
3300 &rxq->descs_phys, GFP_KERNEL); in mvneta_rxq_sw_init()
3301 if (!rxq->descs) in mvneta_rxq_sw_init()
3302 return -ENOMEM; in mvneta_rxq_sw_init()
3304 rxq->last_desc = rxq->size - 1; in mvneta_rxq_sw_init()
3312 /* Set Rx descriptors queue starting address */ in mvneta_rxq_hw_init()
3313 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); in mvneta_rxq_hw_init()
3314 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); in mvneta_rxq_hw_init()
3317 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_rxq_hw_init()
3318 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_rxq_hw_init()
3320 if (!pp->bm_priv) { in mvneta_rxq_hw_init()
3325 MVNETA_RX_BUF_SIZE(pp->pkt_size)); in mvneta_rxq_hw_init()
3327 mvneta_rxq_fill(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3331 NET_SKB_PAD - pp->rx_offset_correction); in mvneta_rxq_hw_init()
3334 /* Fill RXQ with buffers from RX pool */ in mvneta_rxq_hw_init()
3337 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3341 /* Create a specified RX queue */
3357 /* Cleanup Rx queue */
3363 if (rxq->descs) in mvneta_rxq_deinit()
3364 dma_free_coherent(pp->dev->dev.parent, in mvneta_rxq_deinit()
3365 rxq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_rxq_deinit()
3366 rxq->descs, in mvneta_rxq_deinit()
3367 rxq->descs_phys); in mvneta_rxq_deinit()
3369 rxq->descs = NULL; in mvneta_rxq_deinit()
3370 rxq->last_desc = 0; in mvneta_rxq_deinit()
3371 rxq->next_desc_to_proc = 0; in mvneta_rxq_deinit()
3372 rxq->descs_phys = 0; in mvneta_rxq_deinit()
3373 rxq->first_to_refill = 0; in mvneta_rxq_deinit()
3374 rxq->refill_num = 0; in mvneta_rxq_deinit()
3382 txq->size = pp->tx_ring_size; in mvneta_txq_sw_init()
3388 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; in mvneta_txq_sw_init()
3389 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; in mvneta_txq_sw_init()
3392 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_sw_init()
3393 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_sw_init()
3394 &txq->descs_phys, GFP_KERNEL); in mvneta_txq_sw_init()
3395 if (!txq->descs) in mvneta_txq_sw_init()
3396 return -ENOMEM; in mvneta_txq_sw_init()
3398 txq->last_desc = txq->size - 1; in mvneta_txq_sw_init()
3400 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); in mvneta_txq_sw_init()
3401 if (!txq->buf) in mvneta_txq_sw_init()
3402 return -ENOMEM; in mvneta_txq_sw_init()
3405 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_sw_init()
3406 txq->size * TSO_HEADER_SIZE, in mvneta_txq_sw_init()
3407 &txq->tso_hdrs_phys, GFP_KERNEL); in mvneta_txq_sw_init()
3408 if (!txq->tso_hdrs) in mvneta_txq_sw_init()
3409 return -ENOMEM; in mvneta_txq_sw_init()
3413 cpu = txq->id % num_present_cpus(); in mvneta_txq_sw_init()
3415 cpu = pp->rxq_def % num_present_cpus(); in mvneta_txq_sw_init()
3416 cpumask_set_cpu(cpu, &txq->affinity_mask); in mvneta_txq_sw_init()
3417 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); in mvneta_txq_sw_init()
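Referring back to the two thresholds set above (file lines 3388-3389) and to their use in mvneta_tx() and mvneta_txq_done() earlier in the listing: the queue is stopped once only MVNETA_MAX_SKB_DESCS descriptors remain free, and woken again after it has drained to half of that stop level. A worked example with assumed numbers, not values taken from the driver:

#include <stdio.h>

int main(void)
{
	int size = 532;			/* assumed TX ring size */
	int max_skb_descs = 40;		/* assumed MVNETA_MAX_SKB_DESCS */
	int stop = size - max_skb_descs;
	int wake = stop / 2;

	/* stop when count >= 492, wake again when count <= 246 */
	printf("stop at %d used, wake at %d used\n", stop, wake);
	return 0;
}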
3426 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_hw_init()
3427 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_hw_init()
3430 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_hw_init()
3431 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_hw_init()
3433 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_hw_init()
3455 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_sw_deinit()
3457 kfree(txq->buf); in mvneta_txq_sw_deinit()
3459 if (txq->tso_hdrs) in mvneta_txq_sw_deinit()
3460 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_sw_deinit()
3461 txq->size * TSO_HEADER_SIZE, in mvneta_txq_sw_deinit()
3462 txq->tso_hdrs, txq->tso_hdrs_phys); in mvneta_txq_sw_deinit()
3463 if (txq->descs) in mvneta_txq_sw_deinit()
3464 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_sw_deinit()
3465 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_sw_deinit()
3466 txq->descs, txq->descs_phys); in mvneta_txq_sw_deinit()
3470 txq->descs = NULL; in mvneta_txq_sw_deinit()
3471 txq->last_desc = 0; in mvneta_txq_sw_deinit()
3472 txq->next_desc_to_proc = 0; in mvneta_txq_sw_deinit()
3473 txq->descs_phys = 0; in mvneta_txq_sw_deinit()
3480 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3481 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3484 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3485 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3501 mvneta_txq_deinit(pp, &pp->txqs[queue]); in mvneta_cleanup_txqs()
3504 /* Cleanup all Rx queues */
3510 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); in mvneta_cleanup_rxqs()
3514 /* Init all Rx queues */
3520 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); in mvneta_setup_rxqs()
3523 netdev_err(pp->dev, "%s: can't create rxq=%d\n", in mvneta_setup_rxqs()
3539 int err = mvneta_txq_init(pp, &pp->txqs[queue]); in mvneta_setup_txqs()
3541 netdev_err(pp->dev, "%s: can't create txq=%d\n", in mvneta_setup_txqs()
3555 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); in mvneta_comphy_init()
3559 return phy_power_on(pp->comphy); in mvneta_comphy_init()
3567 if (pp->comphy) { in mvneta_config_interface()
3595 pp->phy_interface = interface; in mvneta_config_interface()
3604 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); in mvneta_start_dev()
3606 mvneta_max_rx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3607 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3609 /* start the Rx/Tx activity */ in mvneta_start_dev()
3612 if (!pp->neta_armada3700) { in mvneta_start_dev()
3616 per_cpu_ptr(pp->ports, cpu); in mvneta_start_dev()
3618 napi_enable(&port->napi); in mvneta_start_dev()
3621 napi_enable(&pp->napi); in mvneta_start_dev()
3631 phylink_start(pp->phylink); in mvneta_start_dev()
3634 phylink_speed_up(pp->phylink); in mvneta_start_dev()
3636 netif_tx_start_all_queues(pp->dev); in mvneta_start_dev()
3638 clear_bit(__MVNETA_DOWN, &pp->state); in mvneta_start_dev()
3645 set_bit(__MVNETA_DOWN, &pp->state); in mvneta_stop_dev()
3647 if (device_may_wakeup(&pp->dev->dev)) in mvneta_stop_dev()
3648 phylink_speed_down(pp->phylink, false); in mvneta_stop_dev()
3650 phylink_stop(pp->phylink); in mvneta_stop_dev()
3652 if (!pp->neta_armada3700) { in mvneta_stop_dev()
3655 per_cpu_ptr(pp->ports, cpu); in mvneta_stop_dev()
3657 napi_disable(&port->napi); in mvneta_stop_dev()
3660 napi_disable(&pp->napi); in mvneta_stop_dev()
3663 netif_carrier_off(pp->dev); in mvneta_stop_dev()
3666 netif_tx_stop_all_queues(pp->dev); in mvneta_stop_dev()
3680 WARN_ON(phy_power_off(pp->comphy)); in mvneta_stop_dev()
3687 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); in mvneta_percpu_enable()
3694 disable_percpu_irq(pp->dev->irq); in mvneta_percpu_disable()
3709 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { in mvneta_change_mtu()
3711 return -EINVAL; in mvneta_change_mtu()
3714 dev->mtu = mtu; in mvneta_change_mtu()
3717 if (pp->bm_priv) in mvneta_change_mtu()
3733 if (pp->bm_priv) in mvneta_change_mtu()
3736 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); in mvneta_change_mtu()
3763 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { in mvneta_fix_features()
3767 pp->tx_csum_limit); in mvneta_fix_features()
3799 mvneta_mac_addr_set(pp, dev->dev_addr, -1); in mvneta_set_mac_addr()
3802 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); in mvneta_set_mac_addr()
3812 struct net_device *ndev = to_net_dev(config->dev); in mvneta_validate()
3817 if (state->interface != PHY_INTERFACE_MODE_NA && in mvneta_validate()
3818 state->interface != PHY_INTERFACE_MODE_QSGMII && in mvneta_validate()
3819 state->interface != PHY_INTERFACE_MODE_SGMII && in mvneta_validate()
3820 !phy_interface_mode_is_8023z(state->interface) && in mvneta_validate()
3821 !phy_interface_mode_is_rgmii(state->interface)) { in mvneta_validate()
3833 /* Half-duplex at speeds higher than 100Mbit is unsupported */ in mvneta_validate()
3834 if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { in mvneta_validate()
3838 if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { in mvneta_validate()
3843 if (!phy_interface_mode_is_8023z(state->interface)) { in mvneta_validate()
3844 /* 10M and 100M are only supported in non-802.3z mode */ in mvneta_validate()
3853 bitmap_and(state->advertising, state->advertising, mask, in mvneta_validate()
3865 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_pcs_get_state()
3872 state->speed = in mvneta_mac_pcs_get_state()
3873 state->interface == PHY_INTERFACE_MODE_2500BASEX ? in mvneta_mac_pcs_get_state()
3876 state->speed = SPEED_100; in mvneta_mac_pcs_get_state()
3878 state->speed = SPEED_10; in mvneta_mac_pcs_get_state()
3880 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); in mvneta_mac_pcs_get_state()
3881 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); in mvneta_mac_pcs_get_state()
3882 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); in mvneta_mac_pcs_get_state()
3884 state->pause = 0; in mvneta_mac_pcs_get_state()
3886 state->pause |= MLO_PAUSE_RX; in mvneta_mac_pcs_get_state()
3888 state->pause |= MLO_PAUSE_TX; in mvneta_mac_pcs_get_state()
3893 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_an_restart()
3906 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_config()
3931 if (state->interface == PHY_INTERFACE_MODE_QSGMII || in mvneta_mac_config()
3932 state->interface == PHY_INTERFACE_MODE_SGMII || in mvneta_mac_config()
3933 phy_interface_mode_is_8023z(state->interface)) in mvneta_mac_config()
3936 if (phylink_test(state->advertising, Pause)) in mvneta_mac_config()
3940 /* Phy or fixed speed - nothing to do, leave the in mvneta_mac_config()
3941 * configured speed, duplex and flow control as-is. in mvneta_mac_config()
3943 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { in mvneta_mac_config()
3956 /* 802.3z negotiation - only 1000base-X */ in mvneta_mac_config()
3967 if (state->pause & MLO_PAUSE_AN && state->an_enabled) in mvneta_mac_config()
3972 * and in-band enable when the link is down, so force it down in mvneta_mac_config()
3986 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) in mvneta_mac_config()
3989 if (pp->phy_interface != state->interface) { in mvneta_mac_config()
3990 if (pp->comphy) in mvneta_mac_config()
3991 WARN_ON(phy_power_off(pp->comphy)); in mvneta_mac_config()
3992 WARN_ON(mvneta_config_interface(pp, state->interface)); in mvneta_mac_config()
4028 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_link_down()
4041 pp->eee_active = false; in mvneta_mac_link_down()
4051 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_link_up()
4092 if (phy && pp->eee_enabled) { in mvneta_mac_link_up()
4093 pp->eee_active = phy_init_eee(phy, 0) >= 0; in mvneta_mac_link_up()
4094 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); in mvneta_mac_link_up()
4110 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); in mvneta_mdio_probe()
4113 netdev_err(pp->dev, "could not attach PHY: %d\n", err); in mvneta_mdio_probe()
4115 phylink_ethtool_get_wol(pp->phylink, &wol); in mvneta_mdio_probe()
4116 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); in mvneta_mdio_probe()
4120 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); in mvneta_mdio_probe()
4127 phylink_disconnect_phy(pp->phylink); in mvneta_mdio_remove()
4141 if (cpu_online(pp->rxq_def)) in mvneta_percpu_elect()
4142 elected_cpu = pp->rxq_def; in mvneta_percpu_elect()
4158 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); in mvneta_percpu_elect()
4162 * the CPU bound to the default RX queue in mvneta_percpu_elect()
4188 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_online()
4191 spin_lock(&pp->lock); in mvneta_cpu_online()
4196 if (pp->is_stopped) { in mvneta_cpu_online()
4197 spin_unlock(&pp->lock); in mvneta_cpu_online()
4200 netif_tx_stop_all_queues(pp->dev); in mvneta_cpu_online()
4209 per_cpu_ptr(pp->ports, other_cpu); in mvneta_cpu_online()
4211 napi_synchronize(&other_port->napi); in mvneta_cpu_online()
4217 napi_enable(&port->napi); in mvneta_cpu_online()
4220 * Enable per-CPU interrupts on the CPU that is in mvneta_cpu_online()
4226 * Enable per-CPU interrupt on the one CPU we care in mvneta_cpu_online()
4236 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_online()
4237 spin_unlock(&pp->lock); in mvneta_cpu_online()
4245 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_down_prepare()
4251 spin_lock(&pp->lock); in mvneta_cpu_down_prepare()
4254 spin_unlock(&pp->lock); in mvneta_cpu_down_prepare()
4256 napi_synchronize(&port->napi); in mvneta_cpu_down_prepare()
4257 napi_disable(&port->napi); in mvneta_cpu_down_prepare()
4258 /* Disable per-CPU interrupts on the CPU that is brought down. */ in mvneta_cpu_down_prepare()
4269 spin_lock(&pp->lock); in mvneta_cpu_dead()
4271 spin_unlock(&pp->lock); in mvneta_cpu_dead()
4277 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_dead()
4286 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); in mvneta_open()
4297 if (pp->neta_armada3700) in mvneta_open()
4298 ret = request_irq(pp->dev->irq, mvneta_isr, 0, in mvneta_open()
4299 dev->name, pp); in mvneta_open()
4301 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, in mvneta_open()
4302 dev->name, pp->ports); in mvneta_open()
4304 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); in mvneta_open()
4308 if (!pp->neta_armada3700) { in mvneta_open()
4309 /* Enable per-CPU interrupt on all the CPU to handle our RX in mvneta_open()
4314 pp->is_stopped = false; in mvneta_open()
4319 &pp->node_online); in mvneta_open()
4324 &pp->node_dead); in mvneta_open()
4340 if (!pp->neta_armada3700) in mvneta_open()
4342 &pp->node_dead); in mvneta_open()
4344 if (!pp->neta_armada3700) in mvneta_open()
4346 &pp->node_online); in mvneta_open()
4348 if (pp->neta_armada3700) { in mvneta_open()
4349 free_irq(pp->dev->irq, pp); in mvneta_open()
4352 free_percpu_irq(pp->dev->irq, pp->ports); in mvneta_open()
4366 if (!pp->neta_armada3700) { in mvneta_stop()
4372 spin_lock(&pp->lock); in mvneta_stop()
4373 pp->is_stopped = true; in mvneta_stop()
4374 spin_unlock(&pp->lock); in mvneta_stop()
4380 &pp->node_online); in mvneta_stop()
4382 &pp->node_dead); in mvneta_stop()
4384 free_percpu_irq(dev->irq, pp->ports); in mvneta_stop()
4388 free_irq(dev->irq, pp); in mvneta_stop()
4401 return phylink_mii_ioctl(pp->phylink, ifr, cmd); in mvneta_ioctl()
4411 if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { in mvneta_xdp_setup()
4413 return -EOPNOTSUPP; in mvneta_xdp_setup()
4416 if (pp->bm_priv) { in mvneta_xdp_setup()
4419 return -EOPNOTSUPP; in mvneta_xdp_setup()
4422 need_update = !!pp->xdp_prog != !!prog; in mvneta_xdp_setup()
4426 old_prog = xchg(&pp->xdp_prog, prog); in mvneta_xdp_setup()
4438 switch (xdp->command) { in mvneta_xdp()
4440 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); in mvneta_xdp()
4442 return -EINVAL; in mvneta_xdp()
4455 return phylink_ethtool_ksettings_set(pp->phylink, cmd); in mvneta_ethtool_set_link_ksettings()
4465 return phylink_ethtool_ksettings_get(pp->phylink, cmd); in mvneta_ethtool_get_link_ksettings()
4472 return phylink_ethtool_nway_reset(pp->phylink); in mvneta_ethtool_nway_reset()
4483 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce()
4484 rxq->time_coal = c->rx_coalesce_usecs; in mvneta_ethtool_set_coalesce()
4485 rxq->pkts_coal = c->rx_max_coalesced_frames; in mvneta_ethtool_set_coalesce()
4486 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_ethtool_set_coalesce()
4487 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_ethtool_set_coalesce()
4491 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce()
4492 txq->done_pkts_coal = c->tx_max_coalesced_frames; in mvneta_ethtool_set_coalesce()
4493 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
4505 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; in mvneta_ethtool_get_coalesce()
4506 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; in mvneta_ethtool_get_coalesce()
4508 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; in mvneta_ethtool_get_coalesce()
4516 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, in mvneta_ethtool_get_drvinfo()
4517 sizeof(drvinfo->driver)); in mvneta_ethtool_get_drvinfo()
4518 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, in mvneta_ethtool_get_drvinfo()
4519 sizeof(drvinfo->version)); in mvneta_ethtool_get_drvinfo()
4520 strlcpy(drvinfo->bus_info, dev_name(&dev->dev), in mvneta_ethtool_get_drvinfo()
4521 sizeof(drvinfo->bus_info)); in mvneta_ethtool_get_drvinfo()
4530 ring->rx_max_pending = MVNETA_MAX_RXD; in mvneta_ethtool_get_ringparam()
4531 ring->tx_max_pending = MVNETA_MAX_TXD; in mvneta_ethtool_get_ringparam()
4532 ring->rx_pending = pp->rx_ring_size; in mvneta_ethtool_get_ringparam()
4533 ring->tx_pending = pp->tx_ring_size; in mvneta_ethtool_get_ringparam()
4541 if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) in mvneta_ethtool_set_ringparam()
4542 return -EINVAL; in mvneta_ethtool_set_ringparam()
4543 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? in mvneta_ethtool_set_ringparam()
4544 ring->rx_pending : MVNETA_MAX_RXD; in mvneta_ethtool_set_ringparam()
4546 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, in mvneta_ethtool_set_ringparam()
4548 if (pp->tx_ring_size != ring->tx_pending) in mvneta_ethtool_set_ringparam()
4550 pp->tx_ring_size, ring->tx_pending); in mvneta_ethtool_set_ringparam()
4557 return -ENOMEM; in mvneta_ethtool_set_ringparam()
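
The ring-size validation above rejects a zero request, caps the RX ring at MVNETA_MAX_RXD, and clamps the TX ring with clamp_t(), warning when the requested value had to be adjusted. A standalone sketch of the same arithmetic; the numeric limits below are placeholders, not the driver's constants:

	/* Standalone sketch of the ring-size clamping; limits are placeholders. */
	#include <stdio.h>

	#define MAX_RXD 512
	#define MIN_TXD 64
	#define MAX_TXD 532

	static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	int main(void)
	{
		unsigned int rx_req = 4096, tx_req = 16;

		if (rx_req == 0 || tx_req == 0)
			return 1;			/* driver returns -EINVAL */

		unsigned int rx_ring = rx_req < MAX_RXD ? rx_req : MAX_RXD;
		unsigned int tx_ring = clamp_uint(tx_req, MIN_TXD, MAX_TXD);

		if (tx_ring != tx_req)
			printf("TX queue size adjusted from %u to %u\n",
			       tx_req, tx_ring);
		printf("rx_ring=%u tx_ring=%u\n", rx_ring, tx_ring);
		return 0;
	}
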
4569 phylink_ethtool_get_pauseparam(pp->phylink, pause); in mvneta_ethtool_get_pauseparam()
4577 return phylink_ethtool_set_pauseparam(pp->phylink, pause); in mvneta_ethtool_set_pauseparam()
4611 stats = per_cpu_ptr(pp->stats, cpu); in mvneta_ethtool_update_pcpu_stats()
4613 start = u64_stats_fetch_begin_irq(&stats->syncp); in mvneta_ethtool_update_pcpu_stats()
4614 skb_alloc_error = stats->es.skb_alloc_error; in mvneta_ethtool_update_pcpu_stats()
4615 refill_error = stats->es.refill_error; in mvneta_ethtool_update_pcpu_stats()
4616 xdp_redirect = stats->es.ps.xdp_redirect; in mvneta_ethtool_update_pcpu_stats()
4617 xdp_pass = stats->es.ps.xdp_pass; in mvneta_ethtool_update_pcpu_stats()
4618 xdp_drop = stats->es.ps.xdp_drop; in mvneta_ethtool_update_pcpu_stats()
4619 xdp_xmit = stats->es.ps.xdp_xmit; in mvneta_ethtool_update_pcpu_stats()
4620 xdp_xmit_err = stats->es.ps.xdp_xmit_err; in mvneta_ethtool_update_pcpu_stats()
4621 xdp_tx = stats->es.ps.xdp_tx; in mvneta_ethtool_update_pcpu_stats()
4622 xdp_tx_err = stats->es.ps.xdp_tx_err; in mvneta_ethtool_update_pcpu_stats()
4623 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); in mvneta_ethtool_update_pcpu_stats()
4625 es->skb_alloc_error += skb_alloc_error; in mvneta_ethtool_update_pcpu_stats()
4626 es->refill_error += refill_error; in mvneta_ethtool_update_pcpu_stats()
4627 es->ps.xdp_redirect += xdp_redirect; in mvneta_ethtool_update_pcpu_stats()
4628 es->ps.xdp_pass += xdp_pass; in mvneta_ethtool_update_pcpu_stats()
4629 es->ps.xdp_drop += xdp_drop; in mvneta_ethtool_update_pcpu_stats()
4630 es->ps.xdp_xmit += xdp_xmit; in mvneta_ethtool_update_pcpu_stats()
4631 es->ps.xdp_xmit_err += xdp_xmit_err; in mvneta_ethtool_update_pcpu_stats()
4632 es->ps.xdp_tx += xdp_tx; in mvneta_ethtool_update_pcpu_stats()
4633 es->ps.xdp_tx_err += xdp_tx_err; in mvneta_ethtool_update_pcpu_stats()
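
The loop above takes a consistent snapshot of each CPU's counters with the u64_stats_fetch_begin_irq()/retry pattern: read a sequence count, copy the fields, and redo the copy if a writer changed the count in the meantime, then accumulate into the totals. A simplified standalone model of the retry shape (the real helpers also deal with memory ordering and 32-bit interrupt protection, which this model omits):

	/* Standalone model of a seqcount-style snapshot: readers retry if the
	 * writer's sequence number changed while they were copying. */
	#include <stdio.h>

	struct pcpu_stats {
		unsigned int seq;	/* even = stable, odd = write in progress */
		unsigned long long rx_packets;
		unsigned long long rx_bytes;
	};

	static unsigned int fetch_begin(const struct pcpu_stats *s)
	{
		return s->seq;
	}

	static int fetch_retry(const struct pcpu_stats *s, unsigned int start)
	{
		/* retry if a write was in flight (odd) or happened since (changed) */
		return (start & 1) || s->seq != start;
	}

	int main(void)
	{
		struct pcpu_stats cpu0 = { .seq = 2, .rx_packets = 10, .rx_bytes = 1500 };
		unsigned long long pkts, bytes;
		unsigned int start;

		do {
			start = fetch_begin(&cpu0);
			pkts = cpu0.rx_packets;
			bytes = cpu0.rx_bytes;
		} while (fetch_retry(&cpu0, start));

		printf("snapshot: %llu packets, %llu bytes\n", pkts, bytes);
		return 0;
	}
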
4641 void __iomem *base = pp->base; in mvneta_ethtool_update_stats()
4650 switch (s->type) { in mvneta_ethtool_update_stats()
4652 val = readl_relaxed(base + s->offset); in mvneta_ethtool_update_stats()
4653 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4656 /* Docs say to read low 32-bit then high */ in mvneta_ethtool_update_stats()
4657 low = readl_relaxed(base + s->offset); in mvneta_ethtool_update_stats()
4658 high = readl_relaxed(base + s->offset + 4); in mvneta_ethtool_update_stats()
4660 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4663 switch (s->offset) { in mvneta_ethtool_update_stats()
4665 val = phylink_get_eee_err(pp->phylink); in mvneta_ethtool_update_stats()
4666 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4669 pp->ethtool_stats[i] = stats.skb_alloc_error; in mvneta_ethtool_update_stats()
4672 pp->ethtool_stats[i] = stats.refill_error; in mvneta_ethtool_update_stats()
4675 pp->ethtool_stats[i] = stats.ps.xdp_redirect; in mvneta_ethtool_update_stats()
4678 pp->ethtool_stats[i] = stats.ps.xdp_pass; in mvneta_ethtool_update_stats()
4681 pp->ethtool_stats[i] = stats.ps.xdp_drop; in mvneta_ethtool_update_stats()
4684 pp->ethtool_stats[i] = stats.ps.xdp_tx; in mvneta_ethtool_update_stats()
4687 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; in mvneta_ethtool_update_stats()
4690 pp->ethtool_stats[i] = stats.ps.xdp_xmit; in mvneta_ethtool_update_stats()
4693 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; in mvneta_ethtool_update_stats()
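
For the wide MIB counters the fragments above read the low 32-bit word first and then the high word, as the in-source comment requires, before accumulating the value. A standalone sketch of how such a pair would be recombined into a 64-bit count (the register pair here is a fake array, not the device's MMIO space):

	/* Standalone sketch: recombining a 64-bit counter read as two 32-bit
	 * halves, low word first. */
	#include <stdio.h>
	#include <stdint.h>

	static uint64_t read_counter64(const volatile uint32_t *reg)
	{
		uint32_t low = reg[0];		/* low 32 bits first */
		uint32_t high = reg[1];		/* then the high 32 bits */

		return ((uint64_t)high << 32) | low;
	}

	int main(void)
	{
		uint32_t mib[2] = { 0xdeadbeef, 0x1 };	/* fake register pair */

		printf("counter = 0x%llx\n",
		       (unsigned long long)read_counter64(mib));
		return 0;
	}
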
4710 *data++ = pp->ethtool_stats[i]; in mvneta_ethtool_get_stats()
4717 return -EOPNOTSUPP; in mvneta_ethtool_get_sset_count()
4729 switch (info->cmd) { in mvneta_ethtool_get_rxnfc()
4731 info->data = rxq_number; in mvneta_ethtool_get_rxnfc()
4734 return -EOPNOTSUPP; in mvneta_ethtool_get_rxnfc()
4736 return -EOPNOTSUPP; in mvneta_ethtool_get_rxnfc()
4745 netif_tx_stop_all_queues(pp->dev); in mvneta_config_rss()
4749 if (!pp->neta_armada3700) { in mvneta_config_rss()
4753 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
4755 napi_synchronize(&pcpu_port->napi); in mvneta_config_rss()
4756 napi_disable(&pcpu_port->napi); in mvneta_config_rss()
4759 napi_synchronize(&pp->napi); in mvneta_config_rss()
4760 napi_disable(&pp->napi); in mvneta_config_rss()
4763 pp->rxq_def = pp->indir[0]; in mvneta_config_rss()
4766 mvneta_set_rx_mode(pp->dev); in mvneta_config_rss()
4769 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_config_rss()
4773 spin_lock(&pp->lock); in mvneta_config_rss()
4775 spin_unlock(&pp->lock); in mvneta_config_rss()
4777 if (!pp->neta_armada3700) { in mvneta_config_rss()
4781 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
4783 napi_enable(&pcpu_port->napi); in mvneta_config_rss()
4786 napi_enable(&pp->napi); in mvneta_config_rss()
4789 netif_tx_start_all_queues(pp->dev); in mvneta_config_rss()
4800 if (pp->neta_armada3700) in mvneta_ethtool_set_rxfh()
4801 return -EOPNOTSUPP; in mvneta_ethtool_set_rxfh()
4808 return -EOPNOTSUPP; in mvneta_ethtool_set_rxfh()
4813 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_set_rxfh()
4824 if (pp->neta_armada3700) in mvneta_ethtool_get_rxfh()
4825 return -EOPNOTSUPP; in mvneta_ethtool_get_rxfh()
4833 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_get_rxfh()
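
The rxfh handlers above just copy MVNETA_RSS_LU_TABLE_SIZE indirection entries in and out (both bail out with -EOPNOTSUPP on the Armada 3700 variant), and the mvneta_config_rss fragments show indir[0] becoming the new default RX queue. As a generic standalone illustration of what an RSS indirection table does with a flow hash; the table size, queue count and hash value are made up and are not this driver's scheme:

	/* Generic standalone illustration of RSS indirection; all numbers are
	 * placeholders for illustration only. */
	#include <stdio.h>

	#define INDIR_SIZE 128
	#define NUM_RXQS 8

	int main(void)
	{
		unsigned int indir[INDIR_SIZE];
		unsigned int flow_hash = 0x9e3779b9;	/* pretend hardware hash */

		/* default table: spread flows round-robin over the RX queues */
		for (unsigned int i = 0; i < INDIR_SIZE; i++)
			indir[i] = i % NUM_RXQS;

		unsigned int rxq = indir[flow_hash % INDIR_SIZE];
		printf("flow 0x%x -> rxq %u\n", flow_hash, rxq);
		return 0;
	}
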
4843 phylink_ethtool_get_wol(pp->phylink, wol); in mvneta_ethtool_get_wol()
4852 ret = phylink_ethtool_set_wol(pp->phylink, wol); in mvneta_ethtool_set_wol()
4854 device_set_wakeup_enable(&dev->dev, !!wol->wolopts); in mvneta_ethtool_set_wol()
4867 eee->eee_enabled = pp->eee_enabled; in mvneta_ethtool_get_eee()
4868 eee->eee_active = pp->eee_active; in mvneta_ethtool_get_eee()
4869 eee->tx_lpi_enabled = pp->tx_lpi_enabled; in mvneta_ethtool_get_eee()
4870 eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; in mvneta_ethtool_get_eee()
4872 return phylink_ethtool_get_eee(pp->phylink, eee); in mvneta_ethtool_get_eee()
4882 * it being an 8-bit register. */ in mvneta_ethtool_set_eee()
4883 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) in mvneta_ethtool_set_eee()
4884 return -EINVAL; in mvneta_ethtool_set_eee()
4888 lpi_ctl0 |= eee->tx_lpi_timer << 8; in mvneta_ethtool_set_eee()
4891 pp->eee_enabled = eee->eee_enabled; in mvneta_ethtool_set_eee()
4892 pp->tx_lpi_enabled = eee->tx_lpi_enabled; in mvneta_ethtool_set_eee()
4894 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); in mvneta_ethtool_set_eee()
4896 return phylink_ethtool_set_eee(pp->phylink, eee); in mvneta_ethtool_set_eee()
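
The set path above rejects an LPI timer that would not fit the 8-bit hardware field and then shifts it into bits 15:8 of LPI_CTL0. A standalone sketch of that packing; clearing the old field before the OR is assumed here, since only the OR is visible in the fragments:

	/* Standalone sketch of packing an 8-bit LPI timer into bits 15:8 of a
	 * control word; the rest of the register layout is not shown. */
	#include <stdio.h>
	#include <stdint.h>

	static int set_lpi_timer(uint32_t *ctl0, unsigned int timer)
	{
		if (timer > 255)
			return -1;			/* driver returns -EINVAL */

		*ctl0 &= ~(0xffu << 8);			/* clear the old timer field */
		*ctl0 |= (uint32_t)timer << 8;		/* install the new one */
		return 0;
	}

	int main(void)
	{
		uint32_t ctl0 = 0x0000ff00;	/* pretend current register value */

		if (!set_lpi_timer(&ctl0, 0x20))
			printf("LPI_CTL0 = 0x%08x\n", ctl0);
		return 0;
	}
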
4951 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); in mvneta_init()
4952 if (!pp->txqs) in mvneta_init()
4953 return -ENOMEM; in mvneta_init()
4957 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init()
4958 txq->id = queue; in mvneta_init()
4959 txq->size = pp->tx_ring_size; in mvneta_init()
4960 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; in mvneta_init()
4963 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); in mvneta_init()
4964 if (!pp->rxqs) in mvneta_init()
4965 return -ENOMEM; in mvneta_init()
4967 /* Create Rx descriptor rings */ in mvneta_init()
4969 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_init()
4970 rxq->id = queue; in mvneta_init()
4971 rxq->size = pp->rx_ring_size; in mvneta_init()
4972 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; in mvneta_init()
4973 rxq->time_coal = MVNETA_RX_COAL_USEC; in mvneta_init()
4974 rxq->buf_virt_addr in mvneta_init()
4975 = devm_kmalloc_array(pp->dev->dev.parent, in mvneta_init()
4976 rxq->size, in mvneta_init()
4977 sizeof(*rxq->buf_virt_addr), in mvneta_init()
4979 if (!rxq->buf_virt_addr) in mvneta_init()
4980 return -ENOMEM; in mvneta_init()
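
mvneta_init above allocates the TX and RX queue arrays with devm_kcalloc(), gives every queue its default coalescing settings, and reserves one virtual-address slot per RX descriptor for the buffer pointers. A standalone model of that per-queue setup; the queue counts, ring sizes and default values are placeholders, not the driver's constants:

	/* Standalone model of per-queue default initialization; all numbers are
	 * placeholders. */
	#include <stdio.h>
	#include <stdlib.h>

	#define TXQ_NUMBER 8
	#define RXQ_NUMBER 8

	struct txq { int id; unsigned int size, done_pkts_coal; };
	struct rxq { int id; unsigned int size, pkts_coal, time_coal; void **buf_virt_addr; };

	int main(void)
	{
		struct txq *txqs = calloc(TXQ_NUMBER, sizeof(*txqs));
		struct rxq *rxqs = calloc(RXQ_NUMBER, sizeof(*rxqs));

		if (!txqs || !rxqs)
			return 1;			/* -ENOMEM in the driver */

		for (int q = 0; q < TXQ_NUMBER; q++) {
			txqs[q].id = q;
			txqs[q].size = 532;		/* placeholder ring size */
			txqs[q].done_pkts_coal = 16;	/* placeholder default */
		}

		for (int q = 0; q < RXQ_NUMBER; q++) {
			rxqs[q].id = q;
			rxqs[q].size = 128;		/* placeholder ring size */
			rxqs[q].pkts_coal = 32;		/* placeholder default */
			rxqs[q].time_coal = 100;	/* placeholder default, usecs */
			rxqs[q].buf_virt_addr = calloc(rxqs[q].size,
						       sizeof(*rxqs[q].buf_virt_addr));
			if (!rxqs[q].buf_virt_addr)
				return 1;
		}

		printf("initialized %d txqs and %d rxqs\n", TXQ_NUMBER, RXQ_NUMBER);
		return 0;
	}
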
5006 for (i = 0; i < dram->num_cs; i++) { in mvneta_conf_mbus_windows()
5007 const struct mbus_dram_window *cs = dram->cs + i; in mvneta_conf_mbus_windows()
5010 (cs->base & 0xffff0000) | in mvneta_conf_mbus_windows()
5011 (cs->mbus_attr << 8) | in mvneta_conf_mbus_windows()
5012 dram->mbus_dram_target_id); in mvneta_conf_mbus_windows()
5015 (cs->size - 1) & 0xffff0000); in mvneta_conf_mbus_windows()
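
Each MBUS window in the fragments above packs the DRAM target id, the chip-select attribute (bits 15:8) and the upper 16 bits of the chip-select base into one base register, and writes (size - 1) masked to 64 KiB granularity into the size register. A standalone sketch of that packing with made-up values:

	/* Standalone sketch of the MBUS window register packing; the base, size,
	 * attribute and target numbers are made up for illustration. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t cs_base = 0x00000000;	/* DRAM chip-select base */
		uint32_t cs_size = 0x40000000;	/* 1 GiB */
		uint8_t mbus_attr = 0x0e;	/* made-up attribute */
		uint8_t target_id = 0x0;	/* made-up DRAM target id */

		uint32_t win_base = (cs_base & 0xffff0000) |
				    ((uint32_t)mbus_attr << 8) |
				    target_id;
		uint32_t win_size = (cs_size - 1) & 0xffff0000;

		printf("WIN_BASE = 0x%08x  WIN_SIZE = 0x%08x\n", win_base, win_size);
		return 0;
	}
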
5044 return -EINVAL; in mvneta_port_power_up()
5052 struct device_node *dn = pdev->dev.of_node; in mvneta_probe()
5066 dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), in mvneta_probe()
5069 return -ENOMEM; in mvneta_probe()
5071 dev->irq = irq_of_parse_and_map(dn, 0); in mvneta_probe()
5072 if (dev->irq == 0) in mvneta_probe()
5073 return -EINVAL; in mvneta_probe()
5077 dev_err(&pdev->dev, "incorrect phy-mode\n"); in mvneta_probe()
5081 comphy = devm_of_phy_get(&pdev->dev, dn, NULL); in mvneta_probe()
5082 if (comphy == ERR_PTR(-EPROBE_DEFER)) { in mvneta_probe()
5083 err = -EPROBE_DEFER; in mvneta_probe()
5090 spin_lock_init(&pp->lock); in mvneta_probe()
5092 pp->phylink_config.dev = &dev->dev; in mvneta_probe()
5093 pp->phylink_config.type = PHYLINK_NETDEV; in mvneta_probe()
5095 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, in mvneta_probe()
5102 dev->tx_queue_len = MVNETA_MAX_TXD; in mvneta_probe()
5103 dev->watchdog_timeo = 5 * HZ; in mvneta_probe()
5104 dev->netdev_ops = &mvneta_netdev_ops; in mvneta_probe()
5106 dev->ethtool_ops = &mvneta_eth_tool_ops; in mvneta_probe()
5108 pp->phylink = phylink; in mvneta_probe()
5109 pp->comphy = comphy; in mvneta_probe()
5110 pp->phy_interface = phy_mode; in mvneta_probe()
5111 pp->dn = dn; in mvneta_probe()
5113 pp->rxq_def = rxq_def; in mvneta_probe()
5114 pp->indir[0] = rxq_def; in mvneta_probe()
5117 if (of_device_is_compatible(dn, "marvell,armada-3700-neta")) in mvneta_probe()
5118 pp->neta_armada3700 = true; in mvneta_probe()
5120 pp->clk = devm_clk_get(&pdev->dev, "core"); in mvneta_probe()
5121 if (IS_ERR(pp->clk)) in mvneta_probe()
5122 pp->clk = devm_clk_get(&pdev->dev, NULL); in mvneta_probe()
5123 if (IS_ERR(pp->clk)) { in mvneta_probe()
5124 err = PTR_ERR(pp->clk); in mvneta_probe()
5128 clk_prepare_enable(pp->clk); in mvneta_probe()
5130 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); in mvneta_probe()
5131 if (!IS_ERR(pp->clk_bus)) in mvneta_probe()
5132 clk_prepare_enable(pp->clk_bus); in mvneta_probe()
5134 pp->base = devm_platform_ioremap_resource(pdev, 0); in mvneta_probe()
5135 if (IS_ERR(pp->base)) { in mvneta_probe()
5136 err = PTR_ERR(pp->base); in mvneta_probe()
5140 /* Alloc per-cpu port structure */ in mvneta_probe()
5141 pp->ports = alloc_percpu(struct mvneta_pcpu_port); in mvneta_probe()
5142 if (!pp->ports) { in mvneta_probe()
5143 err = -ENOMEM; in mvneta_probe()
5147 /* Alloc per-cpu stats */ in mvneta_probe()
5148 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); in mvneta_probe()
5149 if (!pp->stats) { in mvneta_probe()
5150 err = -ENOMEM; in mvneta_probe()
5157 ether_addr_copy(dev->dev_addr, dt_mac_addr); in mvneta_probe()
5162 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); in mvneta_probe()
5169 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { in mvneta_probe()
5173 dev_info(&pdev->dev, in mvneta_probe()
5177 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { in mvneta_probe()
5183 pp->tx_csum_limit = tx_csum_limit; in mvneta_probe()
5185 pp->dram_target_info = mv_mbus_dram_info(); in mvneta_probe()
5190 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_probe()
5191 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_probe()
5193 pp->tx_ring_size = MVNETA_MAX_TXD; in mvneta_probe()
5194 pp->rx_ring_size = MVNETA_MAX_RXD; in mvneta_probe()
5196 pp->dev = dev; in mvneta_probe()
5197 SET_NETDEV_DEV(dev, &pdev->dev); in mvneta_probe()
5199 pp->id = global_port_id++; in mvneta_probe()
5202 bm_node = of_parse_phandle(dn, "buffer-manager", 0); in mvneta_probe()
5204 pp->bm_priv = mvneta_bm_get(bm_node); in mvneta_probe()
5205 if (pp->bm_priv) { in mvneta_probe()
5208 dev_info(&pdev->dev, in mvneta_probe()
5210 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5211 pp->bm_priv = NULL; in mvneta_probe()
5214 /* Set RX packet offset correction for platforms whose in mvneta_probe()
5215 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit in mvneta_probe()
5216 * platforms and 0B for 32-bit ones. in mvneta_probe()
5218 pp->rx_offset_correction = max(0, in mvneta_probe()
5219 NET_SKB_PAD - in mvneta_probe()
5225 if (!pp->bm_priv) in mvneta_probe()
5226 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_probe()
5228 err = mvneta_init(&pdev->dev, pp); in mvneta_probe()
5232 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_probe()
5234 dev_err(&pdev->dev, "can't power up port\n"); in mvneta_probe()
5238 /* Armada3700 network controller does not support per-cpu in mvneta_probe()
5241 if (pp->neta_armada3700) { in mvneta_probe()
5242 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); in mvneta_probe()
5246 per_cpu_ptr(pp->ports, cpu); in mvneta_probe()
5248 netif_napi_add(dev, &port->napi, mvneta_poll, in mvneta_probe()
5250 port->pp = pp; in mvneta_probe()
5254 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in mvneta_probe()
5256 dev->hw_features |= dev->features; in mvneta_probe()
5257 dev->vlan_features |= dev->features; in mvneta_probe()
5258 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in mvneta_probe()
5259 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; in mvneta_probe()
5261 /* MTU range: 68 - 9676 */ in mvneta_probe()
5262 dev->min_mtu = ETH_MIN_MTU; in mvneta_probe()
5263 /* 9676 == 9700 - 20 and rounding to 8 */ in mvneta_probe()
5264 dev->max_mtu = 9676; in mvneta_probe()
5268 dev_err(&pdev->dev, "failed to register\n"); in mvneta_probe()
5273 dev->dev_addr); in mvneta_probe()
5275 platform_set_drvdata(pdev, pp->dev); in mvneta_probe()
5280 if (pp->bm_priv) { in mvneta_probe()
5281 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_probe()
5282 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_probe()
5283 1 << pp->id); in mvneta_probe()
5284 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5286 free_percpu(pp->stats); in mvneta_probe()
5288 free_percpu(pp->ports); in mvneta_probe()
5290 clk_disable_unprepare(pp->clk_bus); in mvneta_probe()
5291 clk_disable_unprepare(pp->clk); in mvneta_probe()
5293 if (pp->phylink) in mvneta_probe()
5294 phylink_destroy(pp->phylink); in mvneta_probe()
5296 irq_dispose_mapping(dev->irq); in mvneta_probe()
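
The probe fragments above acquire resources in a fixed order (clocks, MMIO base, per-CPU port structures, per-CPU stats, BM pool, netdev registration), and the error tail releases them in strictly reverse order through chained labels; mvneta_remove() below repeats the same teardown for the success case. A standalone sketch of that goto-unwind idiom, with placeholder acquire/release steps and an artificial failure on the last step so the unwind is exercised:

	/* Standalone sketch of the goto-based unwind idiom used by the probe
	 * fragments above; the steps are placeholders. */
	#include <stdio.h>
	#include <string.h>

	static int acquire(const char *what)
	{
		if (!strcmp(what, "netdev"))	/* pretend the last step fails */
			return -1;
		printf("acquire %s\n", what);
		return 0;
	}

	static void release(const char *what) { printf("release %s\n", what); }

	static int probe(void)
	{
		int err;

		err = acquire("clk");
		if (err)
			return err;
		err = acquire("percpu ports");
		if (err)
			goto err_clk;
		err = acquire("percpu stats");
		if (err)
			goto err_ports;
		err = acquire("netdev");
		if (err)
			goto err_stats;
		return 0;

	err_stats:
		release("percpu stats");
	err_ports:
		release("percpu ports");
	err_clk:
		release("clk");
		return err;
	}

	int main(void)
	{
		return probe() ? 1 : 0;
	}
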
5307 clk_disable_unprepare(pp->clk_bus); in mvneta_remove()
5308 clk_disable_unprepare(pp->clk); in mvneta_remove()
5309 free_percpu(pp->ports); in mvneta_remove()
5310 free_percpu(pp->stats); in mvneta_remove()
5311 irq_dispose_mapping(dev->irq); in mvneta_remove()
5312 phylink_destroy(pp->phylink); in mvneta_remove()
5314 if (pp->bm_priv) { in mvneta_remove()
5315 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_remove()
5316 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_remove()
5317 1 << pp->id); in mvneta_remove()
5318 mvneta_bm_put(pp->bm_priv); in mvneta_remove()
5334 if (!pp->neta_armada3700) { in mvneta_suspend()
5335 spin_lock(&pp->lock); in mvneta_suspend()
5336 pp->is_stopped = true; in mvneta_suspend()
5337 spin_unlock(&pp->lock); in mvneta_suspend()
5340 &pp->node_online); in mvneta_suspend()
5342 &pp->node_dead); in mvneta_suspend()
5350 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_suspend()
5356 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_suspend()
5363 clk_disable_unprepare(pp->clk_bus); in mvneta_suspend()
5364 clk_disable_unprepare(pp->clk); in mvneta_suspend()
5376 clk_prepare_enable(pp->clk); in mvneta_resume()
5377 if (!IS_ERR(pp->clk_bus)) in mvneta_resume()
5378 clk_prepare_enable(pp->clk_bus); in mvneta_resume()
5379 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_resume()
5380 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_resume()
5381 if (pp->bm_priv) { in mvneta_resume()
5384 dev_info(&pdev->dev, "use SW buffer management\n"); in mvneta_resume()
5385 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_resume()
5386 pp->bm_priv = NULL; in mvneta_resume()
5390 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_resume()
5402 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_resume()
5404 rxq->next_desc_to_proc = 0; in mvneta_resume()
5409 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_resume()
5411 txq->next_desc_to_proc = 0; in mvneta_resume()
5415 if (!pp->neta_armada3700) { in mvneta_resume()
5416 spin_lock(&pp->lock); in mvneta_resume()
5417 pp->is_stopped = false; in mvneta_resume()
5418 spin_unlock(&pp->lock); in mvneta_resume()
5420 &pp->node_online); in mvneta_resume()
5422 &pp->node_dead); in mvneta_resume()
5437 { .compatible = "marvell,armada-370-neta" },
5438 { .compatible = "marvell,armada-xp-neta" },
5439 { .compatible = "marvell,armada-3700-neta" },
5491 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
5492 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.c…