Lines matching refs:pp — identifier cross-reference hits for pp, the struct mvneta_port driver-private pointer, in the Marvell mvneta Ethernet driver (drivers/net/ethernet/marvell/mvneta.c). Each entry gives the source line number, the matching source line, and a context annotation (the enclosing function and the role of pp there: argument, local, or member).

447 	struct mvneta_port	*pp;  member
722 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) in mvreg_write() argument
724 writel(data, pp->base + offset); in mvreg_write()
728 static u32 mvreg_read(struct mvneta_port *pp, u32 offset) in mvreg_read() argument
730 return readl(pp->base + offset); in mvreg_read()
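Almost every hit below is an instance of one read-modify-write idiom built on these two accessors. A minimal sketch, using the per-queue config register and the MVNETA_RXQ_HW_BUF_ALLOC bit that the bm_enable/bm_disable hits further down operate on:

	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));	/* read */
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;			/* modify */
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);	/* write back */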
751 static void mvneta_mib_counters_clear(struct mvneta_port *pp) in mvneta_mib_counters_clear() argument
757 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); in mvneta_mib_counters_clear()
758 mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); in mvneta_mib_counters_clear()
759 mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); in mvneta_mib_counters_clear()
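The NETA MIB counters are clear-on-read, so clearing them amounts to a pass of dummy reads. A sketch of the whole function, reconstructed from the mainline driver around the three hits above (MVNETA_MIB_LATE_COLLISION bounds the MIB block):

	static void mvneta_mib_counters_clear(struct mvneta_port *pp)
	{
		int i;

		/* Perform dummy reads from MIB counters */
		for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
			mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
		mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
		mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
	}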
767 struct mvneta_port *pp = netdev_priv(dev); in mvneta_get_stats64() local
780 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
816 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, in mvneta_rxq_non_occup_desc_add() argument
824 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
830 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
835 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, in mvneta_rxq_busy_desc_num_get() argument
840 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
847 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, in mvneta_rxq_desc_num_update() argument
856 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
876 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
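mvneta_rxq_non_occup_desc_add and mvneta_rxq_desc_num_update each show two writes to MVNETA_RXQ_STATUS_UPDATE_REG because the register's count fields are only 8 bits wide: totals above 255 must be fed to the hardware in chunks. A sketch of the simpler of the two, reconstructed from mainline (desc_num_update applies the same chunking to the processed and refilled counts at once):

	static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
						  struct mvneta_rx_queue *rxq,
						  int ndescs)
	{
		/* Only 255 descriptors can be added at once */
		while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
			mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
				    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
				     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
			ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
	}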
892 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) in mvneta_max_rx_size_set() argument
896 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_max_rx_size_set()
900 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_max_rx_size_set()
905 static void mvneta_rxq_offset_set(struct mvneta_port *pp, in mvneta_rxq_offset_set() argument
911 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_offset_set()
916 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_offset_set()
923 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, in mvneta_txq_pend_desc_add() argument
934 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
962 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, in mvneta_rxq_buf_size_set() argument
968 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); in mvneta_rxq_buf_size_set()
973 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); in mvneta_rxq_buf_size_set()
977 static void mvneta_rxq_bm_disable(struct mvneta_port *pp, in mvneta_rxq_bm_disable() argument
982 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_disable()
984 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_disable()
988 static void mvneta_rxq_bm_enable(struct mvneta_port *pp, in mvneta_rxq_bm_enable() argument
993 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_enable()
995 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_enable()
999 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, in mvneta_rxq_long_pool_set() argument
1004 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_long_pool_set()
1006 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); in mvneta_rxq_long_pool_set()
1008 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_long_pool_set()
1012 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, in mvneta_rxq_short_pool_set() argument
1017 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_short_pool_set()
1019 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); in mvneta_rxq_short_pool_set()
1021 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_short_pool_set()
1025 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, in mvneta_bm_pool_bufsize_set() argument
1032 dev_warn(pp->dev->dev.parent, in mvneta_bm_pool_bufsize_set()
1038 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); in mvneta_bm_pool_bufsize_set()
1040 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); in mvneta_bm_pool_bufsize_set()
1044 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, in mvneta_mbus_io_win_set() argument
1050 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); in mvneta_mbus_io_win_set()
1052 if (pp->bm_win_id < 0) { in mvneta_mbus_io_win_set()
1056 pp->bm_win_id = i; in mvneta_mbus_io_win_set()
1063 i = pp->bm_win_id; in mvneta_mbus_io_win_set()
1066 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); in mvneta_mbus_io_win_set()
1067 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); in mvneta_mbus_io_win_set()
1070 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); in mvneta_mbus_io_win_set()
1072 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | in mvneta_mbus_io_win_set()
1075 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); in mvneta_mbus_io_win_set()
1077 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); in mvneta_mbus_io_win_set()
1079 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); in mvneta_mbus_io_win_set()
1082 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); in mvneta_mbus_io_win_set()
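Between the window base/size writes and the final MVNETA_BASE_ADDR_ENABLE write at line 1082, mvneta_mbus_io_win_set also updates the per-window protection and enable bitmaps. A sketch of that bookkeeping under the mainline bit layout (i is the window index chosen above; a cleared bit in win_enable means the window is active):

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);	/* full read/write access */
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);	/* 0 = window enabled */
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);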
1087 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) in mvneta_bm_port_mbus_init() argument
1094 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, in mvneta_bm_port_mbus_init()
1099 pp->bm_win_id = -1; in mvneta_bm_port_mbus_init()
1102 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, in mvneta_bm_port_mbus_init()
1105 netdev_info(pp->dev, "fail to configure mbus window to BM\n"); in mvneta_bm_port_mbus_init()
1115 struct mvneta_port *pp) in mvneta_bm_port_init() argument
1120 if (!pp->neta_armada3700) { in mvneta_bm_port_init()
1123 ret = mvneta_bm_port_mbus_init(pp); in mvneta_bm_port_init()
1129 netdev_info(pp->dev, "missing long pool id\n"); in mvneta_bm_port_init()
1134 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, in mvneta_bm_port_init()
1135 MVNETA_BM_LONG, pp->id, in mvneta_bm_port_init()
1136 MVNETA_RX_PKT_SIZE(pp->dev->mtu)); in mvneta_bm_port_init()
1137 if (!pp->pool_long) { in mvneta_bm_port_init()
1138 netdev_info(pp->dev, "fail to obtain long pool for port\n"); in mvneta_bm_port_init()
1142 pp->pool_long->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1144 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, in mvneta_bm_port_init()
1145 pp->pool_long->id); in mvneta_bm_port_init()
1152 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, in mvneta_bm_port_init()
1153 MVNETA_BM_SHORT, pp->id, in mvneta_bm_port_init()
1155 if (!pp->pool_short) { in mvneta_bm_port_init()
1156 netdev_info(pp->dev, "fail to obtain short pool for port\n"); in mvneta_bm_port_init()
1157 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_port_init()
1162 pp->pool_short->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1163 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, in mvneta_bm_port_init()
1164 pp->pool_short->id); in mvneta_bm_port_init()
1171 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) in mvneta_bm_update_mtu() argument
1173 struct mvneta_bm_pool *bm_pool = pp->pool_long; in mvneta_bm_update_mtu()
1178 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); in mvneta_bm_update_mtu()
1197 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); in mvneta_bm_update_mtu()
1202 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_update_mtu()
1203 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); in mvneta_bm_update_mtu()
1205 pp->bm_priv = NULL; in mvneta_bm_update_mtu()
1206 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_bm_update_mtu()
1207 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); in mvneta_bm_update_mtu()
1208 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); in mvneta_bm_update_mtu()
1212 static void mvneta_port_up(struct mvneta_port *pp) in mvneta_port_up() argument
1220 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up()
1224 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); in mvneta_port_up()
1229 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up()
1234 mvreg_write(pp, MVNETA_RXQ_CMD, q_map); in mvneta_port_up()
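mvneta_port_up builds a bitmap with one bit per initialized queue and enables each direction with a single register write. The TX half, reconstructed around the hits above (a queue counts as initialized when its descriptor ring was allocated):

	u32 q_map = 0;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);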
1238 static void mvneta_port_down(struct mvneta_port *pp) in mvneta_port_down() argument
1244 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; in mvneta_port_down()
1248 mvreg_write(pp, MVNETA_RXQ_CMD, in mvneta_port_down()
1255 netdev_warn(pp->dev, in mvneta_port_down()
1262 val = mvreg_read(pp, MVNETA_RXQ_CMD); in mvneta_port_down()
1268 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; in mvneta_port_down()
1271 mvreg_write(pp, MVNETA_TXQ_CMD, in mvneta_port_down()
1278 netdev_warn(pp->dev, in mvneta_port_down()
1286 val = mvreg_read(pp, MVNETA_TXQ_CMD); in mvneta_port_down()
1294 netdev_warn(pp->dev, in mvneta_port_down()
1301 val = mvreg_read(pp, MVNETA_PORT_STATUS); in mvneta_port_down()
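mvneta_port_down repeats one stop-and-wait idiom three times: RX queues, TX queues, then the TX FIFO and port-idle bits in MVNETA_PORT_STATUS. Each pass issues the disable command, then polls with a 1 ms delay until the hardware reports idle or a timeout warning fires. The RX instance, sketched from mainline (warning text shortened):

	u32 val;
	int count = 0;

	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev, "TIMEOUT for RX stopped\n");
			break;
		}
		mdelay(1);
		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);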
1309 static void mvneta_port_enable(struct mvneta_port *pp) in mvneta_port_enable() argument
1314 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_enable()
1316 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_enable()
1320 static void mvneta_port_disable(struct mvneta_port *pp) in mvneta_port_disable() argument
1325 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_disable()
1327 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_disable()
1335 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) in mvneta_set_ucast_table() argument
1348 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); in mvneta_set_ucast_table()
1352 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_special_mcast_table() argument
1365 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); in mvneta_set_special_mcast_table()
1370 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_other_mcast_table() argument
1376 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1379 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1385 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); in mvneta_set_other_mcast_table()
1390 struct mvneta_port *pp = arg; in mvneta_percpu_unmask_interrupt() local
1395 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_percpu_unmask_interrupt()
1403 struct mvneta_port *pp = arg; in mvneta_percpu_mask_interrupt() local
1408 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_percpu_mask_interrupt()
1409 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_percpu_mask_interrupt()
1410 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_percpu_mask_interrupt()
1415 struct mvneta_port *pp = arg; in mvneta_percpu_clear_intr_cause() local
1420 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
1421 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
1422 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
1434 static void mvneta_defaults_set(struct mvneta_port *pp) in mvneta_defaults_set() argument
1442 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); in mvneta_defaults_set()
1445 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_defaults_set()
1446 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); in mvneta_defaults_set()
1449 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); in mvneta_defaults_set()
1459 if (!pp->neta_armada3700) { in mvneta_defaults_set()
1473 txq_map = (cpu == pp->rxq_def) ? in mvneta_defaults_set()
1481 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); in mvneta_defaults_set()
1485 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_defaults_set()
1486 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_defaults_set()
1489 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); in mvneta_defaults_set()
1491 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); in mvneta_defaults_set()
1492 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); in mvneta_defaults_set()
1495 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_defaults_set()
1496 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_defaults_set()
1499 if (pp->bm_priv) in mvneta_defaults_set()
1505 mvreg_write(pp, MVNETA_ACC_MODE, val); in mvneta_defaults_set()
1507 if (pp->bm_priv) in mvneta_defaults_set()
1508 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); in mvneta_defaults_set()
1511 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_defaults_set()
1512 mvreg_write(pp, MVNETA_PORT_CONFIG, val); in mvneta_defaults_set()
1515 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); in mvneta_defaults_set()
1516 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); in mvneta_defaults_set()
1531 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); in mvneta_defaults_set()
1536 val = mvreg_read(pp, MVNETA_UNIT_CONTROL); in mvneta_defaults_set()
1538 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); in mvneta_defaults_set()
1540 mvneta_set_ucast_table(pp, -1); in mvneta_defaults_set()
1541 mvneta_set_special_mcast_table(pp, -1); in mvneta_defaults_set()
1542 mvneta_set_other_mcast_table(pp, -1); in mvneta_defaults_set()
1545 mvreg_write(pp, MVNETA_INTR_ENABLE, in mvneta_defaults_set()
1549 mvneta_mib_counters_clear(pp); in mvneta_defaults_set()
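Inside mvneta_defaults_set, the acceleration mode written at line 1505 depends on whether a hardware buffer manager (BM) is attached; the branch elided between the hits at 1499 and 1505 is simply (constants as in mainline):

	u32 val;

	if (pp->bm_priv)
		val = MVNETA_ACC_MODE_EXT2;	/* hardware buffer management */
	else
		val = MVNETA_ACC_MODE_EXT1;	/* software buffer management */
	mvreg_write(pp, MVNETA_ACC_MODE, val);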
1553 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) in mvneta_txq_max_tx_size_set() argument
1564 val = mvreg_read(pp, MVNETA_TX_MTU); in mvneta_txq_max_tx_size_set()
1567 mvreg_write(pp, MVNETA_TX_MTU, val); in mvneta_txq_max_tx_size_set()
1570 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); in mvneta_txq_max_tx_size_set()
1577 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); in mvneta_txq_max_tx_size_set()
1580 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); in mvneta_txq_max_tx_size_set()
1587 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); in mvneta_txq_max_tx_size_set()
1593 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, in mvneta_set_ucast_addr() argument
1609 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); in mvneta_set_ucast_addr()
1619 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); in mvneta_set_ucast_addr()
1623 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, in mvneta_mac_addr_set() argument
1634 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); in mvneta_mac_addr_set()
1635 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); in mvneta_mac_addr_set()
1639 mvneta_set_ucast_addr(pp, addr[5], queue); in mvneta_mac_addr_set()
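mvneta_mac_addr_set packs the six MAC bytes into the two registers written at 1634/1635; mvneta_get_mac_addr (line 3778 below) performs the mirror-image unpacking. The packing, as in the mainline driver:

	u32 mac_h = (addr[0] << 24) | (addr[1] << 16) |
		    (addr[2] << 8)  |  addr[3];
	u32 mac_l = (addr[4] << 8)  |  addr[5];

	mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
	mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);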
1645 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, in mvneta_rx_pkts_coal_set() argument
1648 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), in mvneta_rx_pkts_coal_set()
1655 static void mvneta_rx_time_coal_set(struct mvneta_port *pp, in mvneta_rx_time_coal_set() argument
1661 clk_rate = clk_get_rate(pp->clk); in mvneta_rx_time_coal_set()
1664 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); in mvneta_rx_time_coal_set()
1668 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, in mvneta_tx_done_pkts_coal_set() argument
1673 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1678 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
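Packet-count coalescing is written to the hardware as-is, but time coalescing must first be converted from microseconds into port-clock cycles, which is why mvneta_rx_time_coal_set is the one caller of clk_get_rate here. A sketch of the function, reconstructed from mainline:

	static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
					    struct mvneta_rx_queue *rxq,
					    u32 value)
	{
		unsigned long clk_rate = clk_get_rate(pp->clk);
		u32 val = (clk_rate / 1000000) * value;	/* usecs -> cycles */

		mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
		rxq->time_coal = value;
	}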
1694 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, in mvneta_txq_sent_desc_dec() argument
1703 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1708 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1712 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, in mvneta_txq_sent_desc_num_get() argument
1718 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1728 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, in mvneta_txq_sent_desc_proc() argument
1734 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); in mvneta_txq_sent_desc_proc()
1738 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); in mvneta_txq_sent_desc_proc()
1773 static void mvneta_rx_error(struct mvneta_port *pp, in mvneta_rx_error() argument
1776 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_error()
1786 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", in mvneta_rx_error()
1790 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", in mvneta_rx_error()
1794 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", in mvneta_rx_error()
1798 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", in mvneta_rx_error()
1805 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, in mvneta_rx_csum() argument
1808 if ((pp->dev->features & NETIF_F_RXCSUM) && in mvneta_rx_csum()
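mvneta_rx_csum trusts the hardware checksum only when RX checksum offload is enabled and the descriptor flags the frame as IPv4 with a valid L4 checksum; everything else falls back to CHECKSUM_NONE. Reconstructed from the mainline function around the hit above:

	static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
				   struct sk_buff *skb)
	{
		if ((pp->dev->features & NETIF_F_RXCSUM) &&
		    (status & MVNETA_RXD_L3_IP4) &&
		    (status & MVNETA_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

		skb->ip_summed = CHECKSUM_NONE;
	}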
1823 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, in mvneta_tx_done_policy() argument
1828 return &pp->txqs[queue]; in mvneta_tx_done_policy()
1832 static void mvneta_txq_bufs_free(struct mvneta_port *pp, in mvneta_txq_bufs_free() argument
1848 dma_unmap_single(pp->dev->dev.parent, in mvneta_txq_bufs_free()
1868 static void mvneta_txq_done(struct mvneta_port *pp, in mvneta_txq_done() argument
1871 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1874 tx_done = mvneta_txq_sent_desc_proc(pp, txq); in mvneta_txq_done()
1878 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); in mvneta_txq_done()
1890 static int mvneta_rx_refill(struct mvneta_port *pp, in mvneta_rx_refill() argument
1903 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; in mvneta_rx_refill()
1910 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) in mvneta_skb_tx_csum() argument
1941 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, in mvneta_rxq_drop_pkts() argument
1946 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rxq_drop_pkts()
1948 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rxq_drop_pkts()
1950 if (pp->bm_priv) { in mvneta_rxq_drop_pkts()
1957 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rxq_drop_pkts()
1959 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rxq_drop_pkts()
1980 mvneta_update_stats(struct mvneta_port *pp, in mvneta_update_stats() argument
1983 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_update_stats()
1996 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) in mvneta_rx_refill_queue() argument
2005 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { in mvneta_rx_refill_queue()
2011 stats = this_cpu_ptr(pp->stats); in mvneta_rx_refill_queue()
2027 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_xdp_put_buff() argument
2041 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, in mvneta_xdp_submit_frame() argument
2056 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, in mvneta_xdp_submit_frame()
2058 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { in mvneta_xdp_submit_frame()
2068 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, in mvneta_xdp_submit_frame()
2086 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) in mvneta_xdp_xmit_back() argument
2088 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit_back()
2100 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit_back()
2101 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit_back()
2104 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); in mvneta_xdp_xmit_back()
2112 mvneta_txq_pend_desc_add(pp, txq, 0); in mvneta_xdp_xmit_back()
2127 struct mvneta_port *pp = netdev_priv(dev); in mvneta_xdp_xmit() local
2128 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit()
2135 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) in mvneta_xdp_xmit()
2141 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit()
2142 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit()
2146 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); in mvneta_xdp_xmit()
2156 mvneta_txq_pend_desc_add(pp, txq, 0); in mvneta_xdp_xmit()
2170 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_run_xdp() argument
2177 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2182 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2192 err = xdp_do_redirect(pp->dev, xdp, prog); in mvneta_run_xdp()
2194 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); in mvneta_run_xdp()
2203 ret = mvneta_xdp_xmit_back(pp, xdp); in mvneta_run_xdp()
2205 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); in mvneta_run_xdp()
2211 trace_xdp_exception(pp->dev, prog, act); in mvneta_run_xdp()
2214 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); in mvneta_run_xdp()
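mvneta_run_xdp dispatches on the BPF program's verdict; any outcome other than passing the frame to the stack either transmits, redirects, or recycles the buffer back to the page pool through mvneta_xdp_put_buff. A condensed sketch of the dispatch (error paths and statistics trimmed; not the verbatim source):

	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;		/* frame continues to the network stack */
	case XDP_REDIRECT:
		if (xdp_do_redirect(pp->dev, xdp, prog))
			mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
		break;
	case XDP_TX:
		if (mvneta_xdp_xmit_back(pp, xdp) != MVNETA_XDP_TX)
			mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
		break;
	default:
		trace_xdp_exception(pp->dev, prog, act);
		mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);	/* drop */
		break;
	}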
2227 mvneta_swbm_rx_frame(struct mvneta_port *pp, in mvneta_swbm_rx_frame() argument
2235 struct net_device *dev = pp->dev; in mvneta_swbm_rx_frame()
2259 xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE; in mvneta_swbm_rx_frame()
2268 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, in mvneta_swbm_add_rx_fragment() argument
2275 struct net_device *dev = pp->dev; in mvneta_swbm_add_rx_fragment()
2295 skb_frag_off_set(frag, pp->rx_offset_correction); in mvneta_swbm_add_rx_fragment()
2306 mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_swbm_build_skb() argument
2321 mvneta_rx_csum(pp, desc_status, skb); in mvneta_swbm_build_skb()
2337 struct mvneta_port *pp, int budget, in mvneta_rx_swbm() argument
2341 struct net_device *dev = pp->dev; in mvneta_rx_swbm()
2351 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx_swbm()
2354 xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_rx_swbm()
2373 mvneta_rx_error(pp, rx_desc); in mvneta_rx_swbm()
2381 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, in mvneta_rx_swbm()
2391 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, in mvneta_rx_swbm()
2400 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); in mvneta_rx_swbm()
2405 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) in mvneta_rx_swbm()
2408 skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status); in mvneta_rx_swbm()
2410 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_swbm()
2412 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); in mvneta_rx_swbm()
2433 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); in mvneta_rx_swbm()
2439 mvneta_update_stats(pp, &ps); in mvneta_rx_swbm()
2442 refill = mvneta_rx_refill_queue(pp, rxq); in mvneta_rx_swbm()
2445 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); in mvneta_rx_swbm()
2452 struct mvneta_port *pp, int rx_todo, in mvneta_rx_hwbm() argument
2455 struct net_device *dev = pp->dev; in mvneta_rx_hwbm()
2461 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx_hwbm()
2485 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rx_hwbm()
2491 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2494 mvneta_rx_error(pp, rx_desc); in mvneta_rx_hwbm()
2505 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, in mvneta_rx_hwbm()
2514 mvneta_rx_csum(pp, rx_status, skb); in mvneta_rx_hwbm()
2521 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2535 stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2550 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, in mvneta_rx_hwbm()
2564 mvneta_rx_csum(pp, rx_status, skb); in mvneta_rx_hwbm()
2570 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2579 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rx_hwbm()
2586 struct mvneta_port *pp, struct mvneta_tx_queue *txq) in mvneta_tso_put_hdr() argument
2594 tx_desc->command = mvneta_skb_tx_csum(pp, skb); in mvneta_tso_put_hdr()
2643 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx_tso() local
2671 mvneta_tso_put_hdr(skb, pp, txq); in mvneta_tx_tso()
2699 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_tso()
2709 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, in mvneta_tx_frag_process() argument
2724 dma_map_single(pp->dev->dev.parent, addr, in mvneta_tx_frag_process()
2727 if (dma_mapping_error(pp->dev->dev.parent, in mvneta_tx_frag_process()
2754 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_frag_process()
2767 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx() local
2769 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx()
2789 tx_cmd = mvneta_skb_tx_csum(pp, skb); in mvneta_tx()
2817 if (mvneta_tx_frag_process(pp, skb, txq)) { in mvneta_tx()
2831 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_tx()
2841 mvneta_txq_pend_desc_add(pp, txq, frags); in mvneta_tx()
2859 static void mvneta_txq_done_force(struct mvneta_port *pp, in mvneta_txq_done_force() argument
2863 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done_force()
2866 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); in mvneta_txq_done_force()
2877 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) in mvneta_tx_done_gbe() argument
2884 txq = mvneta_tx_done_policy(pp, cause_tx_done); in mvneta_tx_done_gbe()
2886 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
2890 mvneta_txq_done(pp, txq); in mvneta_tx_done_gbe()
2925 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, in mvneta_set_special_mcast_addr() argument
2938 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST in mvneta_set_special_mcast_addr()
2948 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, in mvneta_set_special_mcast_addr()
2960 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, in mvneta_set_other_mcast_addr() argument
2971 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); in mvneta_set_other_mcast_addr()
2981 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); in mvneta_set_other_mcast_addr()
2993 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, in mvneta_mcast_addr_set() argument
2999 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); in mvneta_mcast_addr_set()
3005 if (pp->mcast_count[crc_result] == 0) { in mvneta_mcast_addr_set()
3006 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", in mvneta_mcast_addr_set()
3011 pp->mcast_count[crc_result]--; in mvneta_mcast_addr_set()
3012 if (pp->mcast_count[crc_result] != 0) { in mvneta_mcast_addr_set()
3013 netdev_info(pp->dev, in mvneta_mcast_addr_set()
3015 pp->mcast_count[crc_result], crc_result); in mvneta_mcast_addr_set()
3019 pp->mcast_count[crc_result]++; in mvneta_mcast_addr_set()
3021 mvneta_set_other_mcast_addr(pp, crc_result, queue); in mvneta_mcast_addr_set()
3027 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, in mvneta_rx_unicast_promisc_set() argument
3032 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); in mvneta_rx_unicast_promisc_set()
3034 val = mvreg_read(pp, MVNETA_TYPE_PRIO); in mvneta_rx_unicast_promisc_set()
3041 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); in mvneta_rx_unicast_promisc_set()
3042 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); in mvneta_rx_unicast_promisc_set()
3049 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); in mvneta_rx_unicast_promisc_set()
3050 mvreg_write(pp, MVNETA_TYPE_PRIO, val); in mvneta_rx_unicast_promisc_set()
3056 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_rx_mode() local
3061 mvneta_rx_unicast_promisc_set(pp, 1); in mvneta_set_rx_mode()
3062 mvneta_set_ucast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3063 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3064 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3067 mvneta_rx_unicast_promisc_set(pp, 0); in mvneta_set_rx_mode()
3068 mvneta_set_ucast_table(pp, -1); in mvneta_set_rx_mode()
3069 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); in mvneta_set_rx_mode()
3073 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3074 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3077 mvneta_set_special_mcast_table(pp, -1); in mvneta_set_rx_mode()
3078 mvneta_set_other_mcast_table(pp, -1); in mvneta_set_rx_mode()
3082 mvneta_mcast_addr_set(pp, ha->addr, in mvneta_set_rx_mode()
3083 pp->rxq_def); in mvneta_set_rx_mode()
3093 struct mvneta_port *pp = (struct mvneta_port *)dev_id; in mvneta_isr() local
3095 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_isr()
3096 napi_schedule(&pp->napi); in mvneta_isr()
3106 disable_percpu_irq(port->pp->dev->irq); in mvneta_percpu_isr()
3112 static void mvneta_link_change(struct mvneta_port *pp) in mvneta_link_change() argument
3114 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); in mvneta_link_change()
3116 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); in mvneta_link_change()
3131 struct mvneta_port *pp = netdev_priv(napi->dev); in mvneta_poll() local
3132 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); in mvneta_poll()
3134 if (!netif_running(pp->dev)) { in mvneta_poll()
3140 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); in mvneta_poll()
3142 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); in mvneta_poll()
3144 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_poll()
3148 mvneta_link_change(pp); in mvneta_poll()
3153 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); in mvneta_poll()
3160 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : in mvneta_poll()
3166 if (pp->bm_priv) in mvneta_poll()
3167 rx_done = mvneta_rx_hwbm(napi, pp, budget, in mvneta_poll()
3168 &pp->rxqs[rx_queue]); in mvneta_poll()
3170 rx_done = mvneta_rx_swbm(napi, pp, budget, in mvneta_poll()
3171 &pp->rxqs[rx_queue]); in mvneta_poll()
3178 if (pp->neta_armada3700) { in mvneta_poll()
3182 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_poll()
3188 enable_percpu_irq(pp->dev->irq, 0); in mvneta_poll()
3192 if (pp->neta_armada3700) in mvneta_poll()
3193 pp->cause_rx_tx = cause_rx_tx; in mvneta_poll()
3200 static int mvneta_create_page_pool(struct mvneta_port *pp, in mvneta_create_page_pool() argument
3203 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_create_page_pool()
3209 .dev = pp->dev->dev.parent, in mvneta_create_page_pool()
3211 .offset = pp->rx_offset_correction, in mvneta_create_page_pool()
3223 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id); in mvneta_create_page_pool()
3243 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_rxq_fill() argument
3248 err = mvneta_create_page_pool(pp, rxq, num); in mvneta_rxq_fill()
3254 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, in mvneta_rxq_fill()
3256 netdev_err(pp->dev, in mvneta_rxq_fill()
3266 mvneta_rxq_non_occup_desc_add(pp, rxq, i); in mvneta_rxq_fill()
3272 static void mvneta_tx_reset(struct mvneta_port *pp) in mvneta_tx_reset() argument
3278 mvneta_txq_done_force(pp, &pp->txqs[queue]); in mvneta_tx_reset()
3280 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_tx_reset()
3281 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_tx_reset()
3284 static void mvneta_rx_reset(struct mvneta_port *pp) in mvneta_rx_reset() argument
3286 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_rx_reset()
3287 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_rx_reset()
3292 static int mvneta_rxq_sw_init(struct mvneta_port *pp, in mvneta_rxq_sw_init() argument
3295 rxq->size = pp->rx_ring_size; in mvneta_rxq_sw_init()
3298 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_rxq_sw_init()
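The descriptor rings are ordinary coherent-DMA arrays; the allocation started at line 3298 continues as below in mainline (MVNETA_DESC_ALIGNED_SIZE is the padded per-descriptor size):

	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;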
3309 static void mvneta_rxq_hw_init(struct mvneta_port *pp, in mvneta_rxq_hw_init() argument
3313 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); in mvneta_rxq_hw_init()
3314 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); in mvneta_rxq_hw_init()
3317 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_rxq_hw_init()
3318 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_rxq_hw_init()
3320 if (!pp->bm_priv) { in mvneta_rxq_hw_init()
3322 mvneta_rxq_offset_set(pp, rxq, 0); in mvneta_rxq_hw_init()
3323 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? in mvneta_rxq_hw_init()
3325 MVNETA_RX_BUF_SIZE(pp->pkt_size)); in mvneta_rxq_hw_init()
3326 mvneta_rxq_bm_disable(pp, rxq); in mvneta_rxq_hw_init()
3327 mvneta_rxq_fill(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3330 mvneta_rxq_offset_set(pp, rxq, in mvneta_rxq_hw_init()
3331 NET_SKB_PAD - pp->rx_offset_correction); in mvneta_rxq_hw_init()
3333 mvneta_rxq_bm_enable(pp, rxq); in mvneta_rxq_hw_init()
3335 mvneta_rxq_long_pool_set(pp, rxq); in mvneta_rxq_hw_init()
3336 mvneta_rxq_short_pool_set(pp, rxq); in mvneta_rxq_hw_init()
3337 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3342 static int mvneta_rxq_init(struct mvneta_port *pp, in mvneta_rxq_init() argument
3348 ret = mvneta_rxq_sw_init(pp, rxq); in mvneta_rxq_init()
3352 mvneta_rxq_hw_init(pp, rxq); in mvneta_rxq_init()
3358 static void mvneta_rxq_deinit(struct mvneta_port *pp, in mvneta_rxq_deinit() argument
3361 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_rxq_deinit()
3364 dma_free_coherent(pp->dev->dev.parent, in mvneta_rxq_deinit()
3377 static int mvneta_txq_sw_init(struct mvneta_port *pp, in mvneta_txq_sw_init() argument
3382 txq->size = pp->tx_ring_size; in mvneta_txq_sw_init()
3392 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_sw_init()
3405 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_sw_init()
3415 cpu = pp->rxq_def % num_present_cpus(); in mvneta_txq_sw_init()
3417 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); in mvneta_txq_sw_init()
3422 static void mvneta_txq_hw_init(struct mvneta_port *pp, in mvneta_txq_hw_init() argument
3426 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_hw_init()
3427 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_hw_init()
3430 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_hw_init()
3431 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_hw_init()
3433 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_hw_init()
3437 static int mvneta_txq_init(struct mvneta_port *pp, in mvneta_txq_init() argument
3442 ret = mvneta_txq_sw_init(pp, txq); in mvneta_txq_init()
3446 mvneta_txq_hw_init(pp, txq); in mvneta_txq_init()
3452 static void mvneta_txq_sw_deinit(struct mvneta_port *pp, in mvneta_txq_sw_deinit() argument
3455 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_sw_deinit()
3460 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_sw_deinit()
3464 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_sw_deinit()
3476 static void mvneta_txq_hw_deinit(struct mvneta_port *pp, in mvneta_txq_hw_deinit() argument
3480 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3481 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3484 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3485 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3488 static void mvneta_txq_deinit(struct mvneta_port *pp, in mvneta_txq_deinit() argument
3491 mvneta_txq_sw_deinit(pp, txq); in mvneta_txq_deinit()
3492 mvneta_txq_hw_deinit(pp, txq); in mvneta_txq_deinit()
3496 static void mvneta_cleanup_txqs(struct mvneta_port *pp) in mvneta_cleanup_txqs() argument
3501 mvneta_txq_deinit(pp, &pp->txqs[queue]); in mvneta_cleanup_txqs()
3505 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) in mvneta_cleanup_rxqs() argument
3510 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); in mvneta_cleanup_rxqs()
3515 static int mvneta_setup_rxqs(struct mvneta_port *pp) in mvneta_setup_rxqs() argument
3520 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); in mvneta_setup_rxqs()
3523 netdev_err(pp->dev, "%s: can't create rxq=%d\n", in mvneta_setup_rxqs()
3525 mvneta_cleanup_rxqs(pp); in mvneta_setup_rxqs()
3534 static int mvneta_setup_txqs(struct mvneta_port *pp) in mvneta_setup_txqs() argument
3539 int err = mvneta_txq_init(pp, &pp->txqs[queue]); in mvneta_setup_txqs()
3541 netdev_err(pp->dev, "%s: can't create txq=%d\n", in mvneta_setup_txqs()
3543 mvneta_cleanup_txqs(pp); in mvneta_setup_txqs()
3551 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) in mvneta_comphy_init() argument
3555 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); in mvneta_comphy_init()
3559 return phy_power_on(pp->comphy); in mvneta_comphy_init()
3562 static int mvneta_config_interface(struct mvneta_port *pp, in mvneta_config_interface() argument
3567 if (pp->comphy) { in mvneta_config_interface()
3571 ret = mvneta_comphy_init(pp, interface); in mvneta_config_interface()
3576 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3582 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3587 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3595 pp->phy_interface = interface; in mvneta_config_interface()
3600 static void mvneta_start_dev(struct mvneta_port *pp) in mvneta_start_dev() argument
3604 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); in mvneta_start_dev()
3606 mvneta_max_rx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3607 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3610 mvneta_port_enable(pp); in mvneta_start_dev()
3612 if (!pp->neta_armada3700) { in mvneta_start_dev()
3616 per_cpu_ptr(pp->ports, cpu); in mvneta_start_dev()
3621 napi_enable(&pp->napi); in mvneta_start_dev()
3625 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_start_dev()
3627 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_start_dev()
3631 phylink_start(pp->phylink); in mvneta_start_dev()
3634 phylink_speed_up(pp->phylink); in mvneta_start_dev()
3636 netif_tx_start_all_queues(pp->dev); in mvneta_start_dev()
3638 clear_bit(__MVNETA_DOWN, &pp->state); in mvneta_start_dev()
3641 static void mvneta_stop_dev(struct mvneta_port *pp) in mvneta_stop_dev() argument
3645 set_bit(__MVNETA_DOWN, &pp->state); in mvneta_stop_dev()
3647 if (device_may_wakeup(&pp->dev->dev)) in mvneta_stop_dev()
3648 phylink_speed_down(pp->phylink, false); in mvneta_stop_dev()
3650 phylink_stop(pp->phylink); in mvneta_stop_dev()
3652 if (!pp->neta_armada3700) { in mvneta_stop_dev()
3655 per_cpu_ptr(pp->ports, cpu); in mvneta_stop_dev()
3660 napi_disable(&pp->napi); in mvneta_stop_dev()
3663 netif_carrier_off(pp->dev); in mvneta_stop_dev()
3665 mvneta_port_down(pp); in mvneta_stop_dev()
3666 netif_tx_stop_all_queues(pp->dev); in mvneta_stop_dev()
3669 mvneta_port_disable(pp); in mvneta_stop_dev()
3672 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); in mvneta_stop_dev()
3675 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_stop_dev()
3677 mvneta_tx_reset(pp); in mvneta_stop_dev()
3678 mvneta_rx_reset(pp); in mvneta_stop_dev()
3680 WARN_ON(phy_power_off(pp->comphy)); in mvneta_stop_dev()
3685 struct mvneta_port *pp = arg; in mvneta_percpu_enable() local
3687 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); in mvneta_percpu_enable()
3692 struct mvneta_port *pp = arg; in mvneta_percpu_disable() local
3694 disable_percpu_irq(pp->dev->irq); in mvneta_percpu_disable()
3700 struct mvneta_port *pp = netdev_priv(dev); in mvneta_change_mtu() local
3709 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { in mvneta_change_mtu()
3717 if (pp->bm_priv) in mvneta_change_mtu()
3718 mvneta_bm_update_mtu(pp, mtu); in mvneta_change_mtu()
3727 mvneta_stop_dev(pp); in mvneta_change_mtu()
3728 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_change_mtu()
3730 mvneta_cleanup_txqs(pp); in mvneta_change_mtu()
3731 mvneta_cleanup_rxqs(pp); in mvneta_change_mtu()
3733 if (pp->bm_priv) in mvneta_change_mtu()
3734 mvneta_bm_update_mtu(pp, mtu); in mvneta_change_mtu()
3736 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); in mvneta_change_mtu()
3738 ret = mvneta_setup_rxqs(pp); in mvneta_change_mtu()
3744 ret = mvneta_setup_txqs(pp); in mvneta_change_mtu()
3750 on_each_cpu(mvneta_percpu_enable, pp, true); in mvneta_change_mtu()
3751 mvneta_start_dev(pp); in mvneta_change_mtu()
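Read together, the mvneta_change_mtu hits at 3727-3751 form a quiesce/rebuild sequence: the port and per-CPU interrupts are stopped, both ring sets are torn down, pkt_size is recomputed from the new MTU, and everything is brought back up. Condensed sketch (error handling omitted):

	int ret;

	mvneta_stop_dev(pp);
	on_each_cpu(mvneta_percpu_disable, pp, true);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);

	ret = mvneta_setup_rxqs(pp);		/* then mvneta_setup_txqs() */

	on_each_cpu(mvneta_percpu_enable, pp, true);
	mvneta_start_dev(pp);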
3761 struct mvneta_port *pp = netdev_priv(dev); in mvneta_fix_features() local
3763 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { in mvneta_fix_features()
3767 pp->tx_csum_limit); in mvneta_fix_features()
3774 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) in mvneta_get_mac_addr() argument
3778 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); in mvneta_get_mac_addr()
3779 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); in mvneta_get_mac_addr()
3791 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_mac_addr() local
3799 mvneta_mac_addr_set(pp, dev->dev_addr, -1); in mvneta_set_mac_addr()
3802 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); in mvneta_set_mac_addr()
3813 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_validate() local
3834 if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { in mvneta_validate()
3838 if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { in mvneta_validate()
3866 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_pcs_get_state() local
3869 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); in mvneta_mac_pcs_get_state()
3894 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_an_restart() local
3895 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_an_restart()
3897 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_mac_an_restart()
3899 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_mac_an_restart()
3907 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_config() local
3908 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_mac_config()
3909 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); in mvneta_mac_config()
3910 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); in mvneta_mac_config()
3911 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); in mvneta_mac_config()
3912 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_config()
3977 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_mac_config()
3989 if (pp->phy_interface != state->interface) { in mvneta_mac_config()
3990 if (pp->comphy) in mvneta_mac_config()
3991 WARN_ON(phy_power_off(pp->comphy)); in mvneta_mac_config()
3992 WARN_ON(mvneta_config_interface(pp, state->interface)); in mvneta_mac_config()
3996 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); in mvneta_mac_config()
3998 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); in mvneta_mac_config()
4000 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); in mvneta_mac_config()
4002 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); in mvneta_mac_config()
4004 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); in mvneta_mac_config()
4007 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & in mvneta_mac_config()
4013 static void mvneta_set_eee(struct mvneta_port *pp, bool enable) in mvneta_set_eee() argument
4017 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); in mvneta_set_eee()
4022 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); in mvneta_set_eee()
4029 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_link_down() local
4032 mvneta_port_down(pp); in mvneta_mac_link_down()
4035 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_down()
4038 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_down()
4041 pp->eee_active = false; in mvneta_mac_link_down()
4042 mvneta_set_eee(pp, false); in mvneta_mac_link_down()
4052 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_link_up() local
4056 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_up()
4075 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_up()
4081 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_up()
4087 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_up()
4090 mvneta_port_up(pp); in mvneta_mac_link_up()
4092 if (phy && pp->eee_enabled) { in mvneta_mac_link_up()
4093 pp->eee_active = phy_init_eee(phy, 0) >= 0; in mvneta_mac_link_up()
4094 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); in mvneta_mac_link_up()
4107 static int mvneta_mdio_probe(struct mvneta_port *pp) in mvneta_mdio_probe() argument
4110 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); in mvneta_mdio_probe()
4113 netdev_err(pp->dev, "could not attach PHY: %d\n", err); in mvneta_mdio_probe()
4115 phylink_ethtool_get_wol(pp->phylink, &wol); in mvneta_mdio_probe()
4116 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); in mvneta_mdio_probe()
4120 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); in mvneta_mdio_probe()
4125 static void mvneta_mdio_remove(struct mvneta_port *pp) in mvneta_mdio_remove() argument
4127 phylink_disconnect_phy(pp->phylink); in mvneta_mdio_remove()
4134 static void mvneta_percpu_elect(struct mvneta_port *pp) in mvneta_percpu_elect() argument
4141 if (cpu_online(pp->rxq_def)) in mvneta_percpu_elect()
4142 elected_cpu = pp->rxq_def; in mvneta_percpu_elect()
4158 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); in mvneta_percpu_elect()
4168 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & in mvneta_percpu_elect()
4171 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); in mvneta_percpu_elect()
4177 pp, true); in mvneta_percpu_elect()
4186 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_online() local
4188 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_online()
4191 spin_lock(&pp->lock); in mvneta_cpu_online()
4196 if (pp->is_stopped) { in mvneta_cpu_online()
4197 spin_unlock(&pp->lock); in mvneta_cpu_online()
4200 netif_tx_stop_all_queues(pp->dev); in mvneta_cpu_online()
4209 per_cpu_ptr(pp->ports, other_cpu); in mvneta_cpu_online()
4216 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_cpu_online()
4223 mvneta_percpu_enable(pp); in mvneta_cpu_online()
4229 mvneta_percpu_elect(pp); in mvneta_cpu_online()
4232 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_cpu_online()
4233 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_cpu_online()
4236 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_online()
4237 spin_unlock(&pp->lock); in mvneta_cpu_online()
4243 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_down_prepare() local
4245 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_down_prepare()
4251 spin_lock(&pp->lock); in mvneta_cpu_down_prepare()
4253 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_cpu_down_prepare()
4254 spin_unlock(&pp->lock); in mvneta_cpu_down_prepare()
4259 mvneta_percpu_disable(pp); in mvneta_cpu_down_prepare()
4265 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_dead() local
4269 spin_lock(&pp->lock); in mvneta_cpu_dead()
4270 mvneta_percpu_elect(pp); in mvneta_cpu_dead()
4271 spin_unlock(&pp->lock); in mvneta_cpu_dead()
4273 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_cpu_dead()
4274 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_cpu_dead()
4277 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_dead()
4283 struct mvneta_port *pp = netdev_priv(dev); in mvneta_open() local
4286 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); in mvneta_open()
4288 ret = mvneta_setup_rxqs(pp); in mvneta_open()
4292 ret = mvneta_setup_txqs(pp); in mvneta_open()
4297 if (pp->neta_armada3700) in mvneta_open()
4298 ret = request_irq(pp->dev->irq, mvneta_isr, 0, in mvneta_open()
4299 dev->name, pp); in mvneta_open()
4301 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, in mvneta_open()
4302 dev->name, pp->ports); in mvneta_open()
4304 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); in mvneta_open()
4308 if (!pp->neta_armada3700) { in mvneta_open()
4312 on_each_cpu(mvneta_percpu_enable, pp, true); in mvneta_open()
4314 pp->is_stopped = false; in mvneta_open()
4319 &pp->node_online); in mvneta_open()
4324 &pp->node_dead); in mvneta_open()
4329 ret = mvneta_mdio_probe(pp); in mvneta_open()
4335 mvneta_start_dev(pp); in mvneta_open()
4340 if (!pp->neta_armada3700) in mvneta_open()
4342 &pp->node_dead); in mvneta_open()
4344 if (!pp->neta_armada3700) in mvneta_open()
4346 &pp->node_online); in mvneta_open()
4348 if (pp->neta_armada3700) { in mvneta_open()
4349 free_irq(pp->dev->irq, pp); in mvneta_open()
4351 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_open()
4352 free_percpu_irq(pp->dev->irq, pp->ports); in mvneta_open()
4355 mvneta_cleanup_txqs(pp); in mvneta_open()
4357 mvneta_cleanup_rxqs(pp); in mvneta_open()
4364 struct mvneta_port *pp = netdev_priv(dev); in mvneta_stop() local
4366 if (!pp->neta_armada3700) { in mvneta_stop()
4372 spin_lock(&pp->lock); in mvneta_stop()
4373 pp->is_stopped = true; in mvneta_stop()
4374 spin_unlock(&pp->lock); in mvneta_stop()
4376 mvneta_stop_dev(pp); in mvneta_stop()
4377 mvneta_mdio_remove(pp); in mvneta_stop()
4380 &pp->node_online); in mvneta_stop()
4382 &pp->node_dead); in mvneta_stop()
4383 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_stop()
4384 free_percpu_irq(dev->irq, pp->ports); in mvneta_stop()
4386 mvneta_stop_dev(pp); in mvneta_stop()
4387 mvneta_mdio_remove(pp); in mvneta_stop()
4388 free_irq(dev->irq, pp); in mvneta_stop()
4391 mvneta_cleanup_rxqs(pp); in mvneta_stop()
4392 mvneta_cleanup_txqs(pp); in mvneta_stop()
4399 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ioctl() local
4401 return phylink_mii_ioctl(pp->phylink, ifr, cmd); in mvneta_ioctl()
4408 struct mvneta_port *pp = netdev_priv(dev); in mvneta_xdp_setup() local
4416 if (pp->bm_priv) { in mvneta_xdp_setup()
4422 need_update = !!pp->xdp_prog != !!prog; in mvneta_xdp_setup()
4426 old_prog = xchg(&pp->xdp_prog, prog); in mvneta_xdp_setup()
4453 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_ethtool_set_link_ksettings() local
4455 return phylink_ethtool_ksettings_set(pp->phylink, cmd); in mvneta_ethtool_set_link_ksettings()
4463 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_ethtool_get_link_ksettings() local
4465 return phylink_ethtool_ksettings_get(pp->phylink, cmd); in mvneta_ethtool_get_link_ksettings()
4470 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_nway_reset() local
4472 return phylink_ethtool_nway_reset(pp->phylink); in mvneta_ethtool_nway_reset()
4479 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_coalesce() local
4483 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce()
4486 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_ethtool_set_coalesce()
4487 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_ethtool_set_coalesce()
4491 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce()
4493 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
4503 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_coalesce() local
4505 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; in mvneta_ethtool_get_coalesce()
4506 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; in mvneta_ethtool_get_coalesce()
4508 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; in mvneta_ethtool_get_coalesce()
4528 struct mvneta_port *pp = netdev_priv(netdev); in mvneta_ethtool_get_ringparam() local
4532 ring->rx_pending = pp->rx_ring_size; in mvneta_ethtool_get_ringparam()
4533 ring->tx_pending = pp->tx_ring_size; in mvneta_ethtool_get_ringparam()
4539 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_ringparam() local
4543 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? in mvneta_ethtool_set_ringparam()
4546 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, in mvneta_ethtool_set_ringparam()
4548 if (pp->tx_ring_size != ring->tx_pending) in mvneta_ethtool_set_ringparam()
4550 pp->tx_ring_size, ring->tx_pending); in mvneta_ethtool_set_ringparam()
4567 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_pauseparam() local
4569 phylink_ethtool_get_pauseparam(pp->phylink, pause); in mvneta_ethtool_get_pauseparam()
4575 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_pauseparam() local
4577 return phylink_ethtool_set_pauseparam(pp->phylink, pause); in mvneta_ethtool_set_pauseparam()
4593 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, in mvneta_ethtool_update_pcpu_stats() argument
4611 stats = per_cpu_ptr(pp->stats, cpu); in mvneta_ethtool_update_pcpu_stats()
4637 static void mvneta_ethtool_update_stats(struct mvneta_port *pp) in mvneta_ethtool_update_stats() argument
4641 void __iomem *base = pp->base; in mvneta_ethtool_update_stats()
4646 mvneta_ethtool_update_pcpu_stats(pp, &stats); in mvneta_ethtool_update_stats()
4653 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4660 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4665 val = phylink_get_eee_err(pp->phylink); in mvneta_ethtool_update_stats()
4666 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4669 pp->ethtool_stats[i] = stats.skb_alloc_error; in mvneta_ethtool_update_stats()
4672 pp->ethtool_stats[i] = stats.refill_error; in mvneta_ethtool_update_stats()
4675 pp->ethtool_stats[i] = stats.ps.xdp_redirect; in mvneta_ethtool_update_stats()
4678 pp->ethtool_stats[i] = stats.ps.xdp_pass; in mvneta_ethtool_update_stats()
4681 pp->ethtool_stats[i] = stats.ps.xdp_drop; in mvneta_ethtool_update_stats()
4684 pp->ethtool_stats[i] = stats.ps.xdp_tx; in mvneta_ethtool_update_stats()
4687 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; in mvneta_ethtool_update_stats()
4690 pp->ethtool_stats[i] = stats.ps.xdp_xmit; in mvneta_ethtool_update_stats()
4693 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; in mvneta_ethtool_update_stats()
4704 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_stats() local
4707 mvneta_ethtool_update_stats(pp); in mvneta_ethtool_get_stats()
4710 *data++ = pp->ethtool_stats[i]; in mvneta_ethtool_get_stats()
4740 static int mvneta_config_rss(struct mvneta_port *pp) in mvneta_config_rss() argument
4745 netif_tx_stop_all_queues(pp->dev); in mvneta_config_rss()
4747 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_config_rss()
4749 if (!pp->neta_armada3700) { in mvneta_config_rss()
4753 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
4759 napi_synchronize(&pp->napi); in mvneta_config_rss()
4760 napi_disable(&pp->napi); in mvneta_config_rss()
4763 pp->rxq_def = pp->indir[0]; in mvneta_config_rss()
4766 mvneta_set_rx_mode(pp->dev); in mvneta_config_rss()
4769 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_config_rss()
4770 mvreg_write(pp, MVNETA_PORT_CONFIG, val); in mvneta_config_rss()
4773 spin_lock(&pp->lock); in mvneta_config_rss()
4774 mvneta_percpu_elect(pp); in mvneta_config_rss()
4775 spin_unlock(&pp->lock); in mvneta_config_rss()
4777 if (!pp->neta_armada3700) { in mvneta_config_rss()
4781 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
4786 napi_enable(&pp->napi); in mvneta_config_rss()
4789 netif_tx_start_all_queues(pp->dev); in mvneta_config_rss()
4797 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_rxfh() local
4800 if (pp->neta_armada3700) in mvneta_ethtool_set_rxfh()
4813 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_set_rxfh()
4815 return mvneta_config_rss(pp); in mvneta_ethtool_set_rxfh()
4821 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_rxfh() local
4824 if (pp->neta_armada3700) in mvneta_ethtool_get_rxfh()
4833 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_get_rxfh()
4841 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_wol() local
4843 phylink_ethtool_get_wol(pp->phylink, wol); in mvneta_ethtool_get_wol()
4849 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_wol() local
4852 ret = phylink_ethtool_set_wol(pp->phylink, wol); in mvneta_ethtool_set_wol()
4862 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_eee() local
4865 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); in mvneta_ethtool_get_eee()
4867 eee->eee_enabled = pp->eee_enabled; in mvneta_ethtool_get_eee()
4868 eee->eee_active = pp->eee_active; in mvneta_ethtool_get_eee()
4869 eee->tx_lpi_enabled = pp->tx_lpi_enabled; in mvneta_ethtool_get_eee()
4872 return phylink_ethtool_get_eee(pp->phylink, eee); in mvneta_ethtool_get_eee()
4878 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_eee() local
4886 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); in mvneta_ethtool_set_eee()
4889 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); in mvneta_ethtool_set_eee()
4891 pp->eee_enabled = eee->eee_enabled; in mvneta_ethtool_set_eee()
4892 pp->tx_lpi_enabled = eee->tx_lpi_enabled; in mvneta_ethtool_set_eee()
4894 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); in mvneta_ethtool_set_eee()
4896 return phylink_ethtool_set_eee(pp->phylink, eee); in mvneta_ethtool_set_eee()
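
The EEE handlers mix MAC state (the LPI timer in MVNETA_LPI_CTRL_0 plus the cached enable flags on pp) with PHY state obtained through phylink; mvneta_set_eee() only arms LPI signalling when both eee_enabled and tx_lpi_enabled are set. A sketch under the assumption that the LPI timer is an 8-bit field at bits 15:8 of MVNETA_LPI_CTRL_0 (the field position is an assumption, not verbatim source):

static int mvneta_ethtool_get_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);

	eee->eee_enabled = pp->eee_enabled;
	eee->eee_active = pp->eee_active;
	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
	eee->tx_lpi_timer = (lpi_ctl0 >> 8) & 0xff;	/* assumed field */

	return phylink_ethtool_get_eee(pp->phylink, eee);
}

static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	/* Assumed 8-bit timer field: reject out-of-range values. */
	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
		return -EINVAL;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	lpi_ctl0 &= ~(0xff << 8);
	lpi_ctl0 |= eee->tx_lpi_timer << 8;
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;

	/* LPI is only armed when both knobs are on. */
	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

	return phylink_ethtool_set_eee(pp->phylink, eee);
}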
4941 static int mvneta_init(struct device *dev, struct mvneta_port *pp) in mvneta_init() argument
4946 mvneta_port_disable(pp); in mvneta_init()
4949 mvneta_defaults_set(pp); in mvneta_init()
4951 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); in mvneta_init()
4952 if (!pp->txqs) in mvneta_init()
4957 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init()
4959 txq->size = pp->tx_ring_size; in mvneta_init()
4963 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); in mvneta_init()
4964 if (!pp->rxqs) in mvneta_init()
4969 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_init()
4971 rxq->size = pp->rx_ring_size; in mvneta_init()
4975 = devm_kmalloc_array(pp->dev->dev.parent, in mvneta_init()
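
mvneta_init() quiesces the port, then builds the software-side queue bookkeeping; because the arrays are devm-allocated, its error paths need no explicit frees. A condensed sketch (per-queue extras such as the RX buf_virt_addr array behind the devm_kmalloc_array() reference are elided):

static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable the port and load sane defaults before allocating. */
	mvneta_port_disable(pp);
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs),
				GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->id = queue;
		txq->size = pp->tx_ring_size;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs),
				GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
	}

	return 0;
}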
4987 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, in mvneta_conf_mbus_windows() argument
4995 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); in mvneta_conf_mbus_windows()
4996 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); in mvneta_conf_mbus_windows()
4999 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); in mvneta_conf_mbus_windows()
5009 mvreg_write(pp, MVNETA_WIN_BASE(i), in mvneta_conf_mbus_windows()
5014 mvreg_write(pp, MVNETA_WIN_SIZE(i), in mvneta_conf_mbus_windows()
5025 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); in mvneta_conf_mbus_windows()
5030 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); in mvneta_conf_mbus_windows()
5031 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); in mvneta_conf_mbus_windows()
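
mvneta_conf_mbus_windows() first neutralises all six address-decode windows, then either maps one window per DRAM chip-select or, when no DRAM info is available (the Armada 3700 case), opens a single full-range default window. A sketch of that shape (the exact enable/protect bit patterns are assumptions):

static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable = 0x3f;	/* assumed: all six windows disabled */
	u32 win_protect = 0;
	int i;

	/* Clear every window before programming any of them. */
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	if (dram) {
		/* One window per DRAM chip-select. */
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);
			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* No DRAM info (Armada 3700): one default 4GB window. */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable = 0x3e;
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}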
5035 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) in mvneta_port_power_up() argument
5038 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); in mvneta_port_power_up()
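
mvneta_port_power_up() is mostly a sanity gate: it clears the unit interrupt cause register, then verifies the requested PHY mode is one the port can drive. A minimal sketch (the accepted-mode list is an assumption):

static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* The MAC cause register should be cleared before use. */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
	    phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(phy_mode) &&
	    !phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}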
5054 struct mvneta_port *pp; in mvneta_probe() local
5089 pp = netdev_priv(dev); in mvneta_probe()
5090 spin_lock_init(&pp->lock); in mvneta_probe()
5092 pp->phylink_config.dev = &dev->dev; in mvneta_probe()
5093 pp->phylink_config.type = PHYLINK_NETDEV; in mvneta_probe()
5095 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, in mvneta_probe()
5108 pp->phylink = phylink; in mvneta_probe()
5109 pp->comphy = comphy; in mvneta_probe()
5110 pp->phy_interface = phy_mode; in mvneta_probe()
5111 pp->dn = dn; in mvneta_probe()
5113 pp->rxq_def = rxq_def; in mvneta_probe()
5114 pp->indir[0] = rxq_def; in mvneta_probe()
5118 pp->neta_armada3700 = true; in mvneta_probe()
5120 pp->clk = devm_clk_get(&pdev->dev, "core"); in mvneta_probe()
5121 if (IS_ERR(pp->clk)) in mvneta_probe()
5122 pp->clk = devm_clk_get(&pdev->dev, NULL); in mvneta_probe()
5123 if (IS_ERR(pp->clk)) { in mvneta_probe()
5124 err = PTR_ERR(pp->clk); in mvneta_probe()
5128 clk_prepare_enable(pp->clk); in mvneta_probe()
5130 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); in mvneta_probe()
5131 if (!IS_ERR(pp->clk_bus)) in mvneta_probe()
5132 clk_prepare_enable(pp->clk_bus); in mvneta_probe()
5134 pp->base = devm_platform_ioremap_resource(pdev, 0); in mvneta_probe()
5135 if (IS_ERR(pp->base)) { in mvneta_probe()
5136 err = PTR_ERR(pp->base); in mvneta_probe()
5141 pp->ports = alloc_percpu(struct mvneta_pcpu_port); in mvneta_probe()
5142 if (!pp->ports) { in mvneta_probe()
5148 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); in mvneta_probe()
5149 if (!pp->stats) { in mvneta_probe()
5159 mvneta_get_mac_addr(pp, hw_mac_addr); in mvneta_probe()
5183 pp->tx_csum_limit = tx_csum_limit; in mvneta_probe()
5185 pp->dram_target_info = mv_mbus_dram_info(); in mvneta_probe()
5190 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_probe()
5191 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_probe()
5193 pp->tx_ring_size = MVNETA_MAX_TXD; in mvneta_probe()
5194 pp->rx_ring_size = MVNETA_MAX_RXD; in mvneta_probe()
5196 pp->dev = dev; in mvneta_probe()
5199 pp->id = global_port_id++; in mvneta_probe()
5204 pp->bm_priv = mvneta_bm_get(bm_node); in mvneta_probe()
5205 if (pp->bm_priv) { in mvneta_probe()
5206 err = mvneta_bm_port_init(pdev, pp); in mvneta_probe()
5210 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5211 pp->bm_priv = NULL; in mvneta_probe()
5218 pp->rx_offset_correction = max(0, in mvneta_probe()
5225 if (!pp->bm_priv) in mvneta_probe()
5226 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_probe()
5228 err = mvneta_init(&pdev->dev, pp); in mvneta_probe()
5232 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_probe()
5241 if (pp->neta_armada3700) { in mvneta_probe()
5242 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); in mvneta_probe()
5246 per_cpu_ptr(pp->ports, cpu); in mvneta_probe()
5250 port->pp = pp; in mvneta_probe()
5275 platform_set_drvdata(pdev, pp->dev); in mvneta_probe()
5280 if (pp->bm_priv) { in mvneta_probe()
5281 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_probe()
5282 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_probe()
5283 1 << pp->id); in mvneta_probe()
5284 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5286 free_percpu(pp->stats); in mvneta_probe()
5288 free_percpu(pp->ports); in mvneta_probe()
5290 clk_disable_unprepare(pp->clk_bus); in mvneta_probe()
5291 clk_disable_unprepare(pp->clk); in mvneta_probe()
5293 if (pp->phylink) in mvneta_probe()
5294 phylink_destroy(pp->phylink); in mvneta_probe()
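
The tail of mvneta_probe() above is a classic goto unwind: each error label releases what was acquired after the previous one, so teardown runs in reverse acquisition order (BM pools and stats, per-CPU ports, clocks, phylink). A sketch of that unwind chain as the end of the function (label names are illustrative, not verbatim):

err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long,
				       1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_netdev:
	free_netdev(dev);
	return err;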
5304 struct mvneta_port *pp = netdev_priv(dev); in mvneta_remove() local
5307 clk_disable_unprepare(pp->clk_bus); in mvneta_remove()
5308 clk_disable_unprepare(pp->clk); in mvneta_remove()
5309 free_percpu(pp->ports); in mvneta_remove()
5310 free_percpu(pp->stats); in mvneta_remove()
5312 phylink_destroy(pp->phylink); in mvneta_remove()
5314 if (pp->bm_priv) { in mvneta_remove()
5315 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_remove()
5316 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_remove()
5317 1 << pp->id); in mvneta_remove()
5318 mvneta_bm_put(pp->bm_priv); in mvneta_remove()
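
mvneta_remove() releases the same resources in roughly the reverse of probe. A sketch; note that pp is the netdev's private area, so this version tears down the BM pools before free_netdev() (an ordering chosen here for safety in the sketch, not claimed as the verbatim source):

static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	phylink_destroy(pp->phylink);

	/* pp lives inside dev's private area, so release the BM pools
	 * before the netdev itself. */
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long,
				       1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	free_netdev(dev);

	return 0;
}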
5329 struct mvneta_port *pp = netdev_priv(dev); in mvneta_suspend() local
5334 if (!pp->neta_armada3700) { in mvneta_suspend()
5335 spin_lock(&pp->lock); in mvneta_suspend()
5336 pp->is_stopped = true; in mvneta_suspend()
5337 spin_unlock(&pp->lock); in mvneta_suspend()
5340 &pp->node_online); in mvneta_suspend()
5342 &pp->node_dead); in mvneta_suspend()
5346 mvneta_stop_dev(pp); in mvneta_suspend()
5350 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_suspend()
5352 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_suspend()
5356 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_suspend()
5358 mvneta_txq_hw_deinit(pp, txq); in mvneta_suspend()
5363 clk_disable_unprepare(pp->clk_bus); in mvneta_suspend()
5364 clk_disable_unprepare(pp->clk); in mvneta_suspend()
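
mvneta_suspend() marks the port stopped under pp->lock, detaches it from the CPU-hotplug machinery, stops the datapath, drops queued RX buffers and deinitialises the TX queues in hardware, and only then gates the clocks. A condensed sketch (the !netif_running() early exit and the hotplug-state constants follow the upstream shape but are assumptions here):

static int mvneta_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	rtnl_lock();
	mvneta_stop_dev(pp);
	rtnl_unlock();

	/* Drop RX buffers and deinit TX queues; the hardware loses this
	 * state anyway once the clocks stop. */
	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_drop_pkts(pp, &pp->rxqs[queue]);
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_hw_deinit(pp, &pp->txqs[queue]);

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}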
5373 struct mvneta_port *pp = netdev_priv(dev); in mvneta_resume() local
5376 clk_prepare_enable(pp->clk); in mvneta_resume()
5377 if (!IS_ERR(pp->clk_bus)) in mvneta_resume()
5378 clk_prepare_enable(pp->clk_bus); in mvneta_resume()
5379 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_resume()
5380 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_resume()
5381 if (pp->bm_priv) { in mvneta_resume()
5382 err = mvneta_bm_port_init(pdev, pp); in mvneta_resume()
5385 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_resume()
5386 pp->bm_priv = NULL; in mvneta_resume()
5389 mvneta_defaults_set(pp); in mvneta_resume()
5390 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_resume()
5402 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_resume()
5405 mvneta_rxq_hw_init(pp, rxq); in mvneta_resume()
5409 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_resume()
5412 mvneta_txq_hw_init(pp, txq); in mvneta_resume()
5415 if (!pp->neta_armada3700) { in mvneta_resume()
5416 spin_lock(&pp->lock); in mvneta_resume()
5417 pp->is_stopped = false; in mvneta_resume()
5418 spin_unlock(&pp->lock); in mvneta_resume()
5420 &pp->node_online); in mvneta_resume()
5422 &pp->node_dead); in mvneta_resume()
5426 mvneta_start_dev(pp); in mvneta_resume()
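
mvneta_resume() is the mirror image: clocks back on, MBus windows and port defaults reprogrammed (register state is lost across suspend), an optional fall-back from HW to SW buffer management if BM re-init fails, queue hardware rebuilt from the retained software state, and finally the CPU-hotplug callbacks and datapath restored. A condensed sketch under the same assumptions as the suspend sketch:

static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		/* Fall back to SW buffer management if BM init fails. */
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0)
		return err;

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	/* Rebuild queue hardware state from the retained sw state. */
	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_hw_init(pp, &pp->rxqs[queue]);
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_hw_init(pp, &pp->txqs[queue]);

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	rtnl_lock();
	mvneta_start_dev(pp);
	rtnl_unlock();
	mvneta_set_rx_mode(dev);

	return 0;
}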