Lines matching full:pp — uses of struct mvneta_port *pp in the Marvell mvneta Ethernet driver (drivers/net/ethernet/marvell/mvneta.c)

449 struct mvneta_port *pp; member
725 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) in mvreg_write() argument
727 writel(data, pp->base + offset); in mvreg_write()
731 static u32 mvreg_read(struct mvneta_port *pp, u32 offset) in mvreg_read() argument
733 return readl(pp->base + offset); in mvreg_read()
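The two matches above are the driver's entire MMIO accessor pair; reconstructed below from the matched fragments (pp->base is the ioremap'd port register window held in the driver's struct mvneta_port):

static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
        /* 32-bit write into the port's register window */
        writel(data, pp->base + offset);
}

static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
        /* 32-bit read back from the same window */
        return readl(pp->base + offset);
}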
754 static void mvneta_mib_counters_clear(struct mvneta_port *pp) in mvneta_mib_counters_clear() argument
760 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); in mvneta_mib_counters_clear()
761 mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); in mvneta_mib_counters_clear()
762 mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); in mvneta_mib_counters_clear()
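A sketch of the full helper: the MIB counters are clear-on-read, so "clearing" is just a dummy read of each one. Only the three mvreg_read() calls are in the listing; the loop bound (MVNETA_MIB_LATE_COLLISION, the last MIB counter offset) is recalled from the driver, not shown above.

static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
        int i;

        /* Dummy-read every MIB counter; reading clears it in hardware.
         * Loop bound assumed; step one 32-bit counter (4 bytes) at a time. */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
                mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
        mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
        mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}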
770 struct mvneta_port *pp = netdev_priv(dev); in mvneta_get_stats64() local
783 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
819 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, in mvneta_rxq_non_occup_desc_add() argument
827 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
833 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
838 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, in mvneta_rxq_busy_desc_num_get() argument
843 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
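Sketch of the busy-descriptor query around the single mvreg_read() match above; the occupied-count mask name (MVNETA_RXQ_OCCUPIED_ALL_MASK) is an assumption, not visible in the listing.

static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_rx_queue *rxq)
{
        u32 val;

        /* The low bits of the RXQ status register count descriptors the
         * hardware has filled but the CPU has not yet processed. */
        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}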
850 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, in mvneta_rxq_desc_num_update() argument
859 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
879 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
895 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) in mvneta_max_rx_size_set() argument
899 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_max_rx_size_set()
903 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_max_rx_size_set()
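The GMAC_CTRL_0 read/write pair above is the read-modify-write pattern used throughout this file. A hedged sketch of mvneta_max_rx_size_set(); the field mask/shift names and the (size - MH) / 2 encoding are recalled from the driver rather than shown in the matches:

static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
        u32 val;

        /* Read-modify-write: only the max-RX-size field changes.
         * Field names and the halved-size encoding are assumptions. */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}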
908 static void mvneta_rxq_offset_set(struct mvneta_port *pp, in mvneta_rxq_offset_set() argument
914 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_offset_set()
919 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_offset_set()
926 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, in mvneta_txq_pend_desc_add() argument
937 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
965 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, in mvneta_rxq_buf_size_set() argument
971 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); in mvneta_rxq_buf_size_set()
976 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); in mvneta_rxq_buf_size_set()
980 static void mvneta_rxq_bm_disable(struct mvneta_port *pp, in mvneta_rxq_bm_disable() argument
985 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_disable()
987 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_disable()
991 static void mvneta_rxq_bm_enable(struct mvneta_port *pp, in mvneta_rxq_bm_enable() argument
996 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_enable()
998 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_enable()
1002 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, in mvneta_rxq_long_pool_set() argument
1007 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_long_pool_set()
1009 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); in mvneta_rxq_long_pool_set()
1011 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_long_pool_set()
1015 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, in mvneta_rxq_short_pool_set() argument
1020 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_short_pool_set()
1022 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); in mvneta_rxq_short_pool_set()
1024 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_short_pool_set()
1028 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, in mvneta_bm_pool_bufsize_set() argument
1035 dev_warn(pp->dev->dev.parent, in mvneta_bm_pool_bufsize_set()
1041 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); in mvneta_bm_pool_bufsize_set()
1043 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); in mvneta_bm_pool_bufsize_set()
1047 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, in mvneta_mbus_io_win_set() argument
1053 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); in mvneta_mbus_io_win_set()
1055 if (pp->bm_win_id < 0) { in mvneta_mbus_io_win_set()
1059 pp->bm_win_id = i; in mvneta_mbus_io_win_set()
1066 i = pp->bm_win_id; in mvneta_mbus_io_win_set()
1069 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); in mvneta_mbus_io_win_set()
1070 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); in mvneta_mbus_io_win_set()
1073 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); in mvneta_mbus_io_win_set()
1075 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | in mvneta_mbus_io_win_set()
1078 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); in mvneta_mbus_io_win_set()
1080 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); in mvneta_mbus_io_win_set()
1082 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); in mvneta_mbus_io_win_set()
1085 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); in mvneta_mbus_io_win_set()
1090 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) in mvneta_bm_port_mbus_init() argument
1097 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, in mvneta_bm_port_mbus_init()
1102 pp->bm_win_id = -1; in mvneta_bm_port_mbus_init()
1105 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, in mvneta_bm_port_mbus_init()
1108 netdev_info(pp->dev, "fail to configure mbus window to BM\n"); in mvneta_bm_port_mbus_init()
1118 struct mvneta_port *pp) in mvneta_bm_port_init() argument
1123 if (!pp->neta_armada3700) { in mvneta_bm_port_init()
1126 ret = mvneta_bm_port_mbus_init(pp); in mvneta_bm_port_init()
1132 netdev_info(pp->dev, "missing long pool id\n"); in mvneta_bm_port_init()
1137 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, in mvneta_bm_port_init()
1138 MVNETA_BM_LONG, pp->id, in mvneta_bm_port_init()
1139 MVNETA_RX_PKT_SIZE(pp->dev->mtu)); in mvneta_bm_port_init()
1140 if (!pp->pool_long) { in mvneta_bm_port_init()
1141 netdev_info(pp->dev, "fail to obtain long pool for port\n"); in mvneta_bm_port_init()
1145 pp->pool_long->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1147 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, in mvneta_bm_port_init()
1148 pp->pool_long->id); in mvneta_bm_port_init()
1155 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, in mvneta_bm_port_init()
1156 MVNETA_BM_SHORT, pp->id, in mvneta_bm_port_init()
1158 if (!pp->pool_short) { in mvneta_bm_port_init()
1159 netdev_info(pp->dev, "fail to obtain short pool for port\n"); in mvneta_bm_port_init()
1160 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_port_init()
1165 pp->pool_short->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1166 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, in mvneta_bm_port_init()
1167 pp->pool_short->id); in mvneta_bm_port_init()
1174 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) in mvneta_bm_update_mtu() argument
1176 struct mvneta_bm_pool *bm_pool = pp->pool_long; in mvneta_bm_update_mtu()
1181 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); in mvneta_bm_update_mtu()
1200 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); in mvneta_bm_update_mtu()
1205 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_update_mtu()
1206 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); in mvneta_bm_update_mtu()
1208 pp->bm_priv = NULL; in mvneta_bm_update_mtu()
1209 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_bm_update_mtu()
1210 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); in mvneta_bm_update_mtu()
1211 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); in mvneta_bm_update_mtu()
1215 static void mvneta_port_up(struct mvneta_port *pp) in mvneta_port_up() argument
1223 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up()
1227 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); in mvneta_port_up()
1232 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up()
1237 mvreg_write(pp, MVNETA_RXQ_CMD, q_map); in mvneta_port_up()
1241 static void mvneta_port_down(struct mvneta_port *pp) in mvneta_port_down() argument
1247 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; in mvneta_port_down()
1251 mvreg_write(pp, MVNETA_RXQ_CMD, in mvneta_port_down()
1258 netdev_warn(pp->dev, in mvneta_port_down()
1265 val = mvreg_read(pp, MVNETA_RXQ_CMD); in mvneta_port_down()
1271 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; in mvneta_port_down()
1274 mvreg_write(pp, MVNETA_TXQ_CMD, in mvneta_port_down()
1281 netdev_warn(pp->dev, in mvneta_port_down()
1289 val = mvreg_read(pp, MVNETA_TXQ_CMD); in mvneta_port_down()
1297 netdev_warn(pp->dev, in mvneta_port_down()
1304 val = mvreg_read(pp, MVNETA_PORT_STATUS); in mvneta_port_down()
1312 static void mvneta_port_enable(struct mvneta_port *pp) in mvneta_port_enable() argument
1317 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_enable()
1319 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_enable()
1323 static void mvneta_port_disable(struct mvneta_port *pp) in mvneta_port_disable() argument
1328 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_disable()
1330 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_disable()
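The enable/disable pair is another read-modify-write on GMAC_CTRL_0; a sketch built around the four matches just above, with the port-enable bit name and the settle delay assumed (neither appears in the listing):

static void mvneta_port_enable(struct mvneta_port *pp)
{
        u32 val;

        /* Set the port-enable bit; bit name is an assumption */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

static void mvneta_port_disable(struct mvneta_port *pp)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

        udelay(200);    /* brief settle delay; value recalled, not shown */
}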
1338 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) in mvneta_set_ucast_table() argument
1351 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); in mvneta_set_ucast_table()
1355 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_special_mcast_table() argument
1368 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); in mvneta_set_special_mcast_table()
1373 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_other_mcast_table() argument
1379 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1382 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1388 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); in mvneta_set_other_mcast_table()
1393 struct mvneta_port *pp = arg; in mvneta_percpu_unmask_interrupt() local
1398 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_percpu_unmask_interrupt()
1406 struct mvneta_port *pp = arg; in mvneta_percpu_mask_interrupt() local
1411 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_percpu_mask_interrupt()
1412 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_percpu_mask_interrupt()
1413 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_percpu_mask_interrupt()
1418 struct mvneta_port *pp = arg; in mvneta_percpu_clear_intr_cause() local
1423 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
1424 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
1425 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
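The three *_MASK writes and three *_CAUSE writes above are the complete bodies of two per-CPU helpers, reconstructed below. They are written as on_each_cpu() callbacks, hence the void *arg signature visible in the local-variable matches:

static void mvneta_percpu_mask_interrupt(void *arg)
{
        struct mvneta_port *pp = arg;

        /* Mask every interrupt group in one write each */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
        struct mvneta_port *pp = arg;

        /* Clear any latched cause bits the same way */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}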
1437 static void mvneta_defaults_set(struct mvneta_port *pp) in mvneta_defaults_set() argument
1445 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); in mvneta_defaults_set()
1448 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_defaults_set()
1449 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); in mvneta_defaults_set()
1452 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); in mvneta_defaults_set()
1462 if (!pp->neta_armada3700) { in mvneta_defaults_set()
1476 txq_map = (cpu == pp->rxq_def) ? in mvneta_defaults_set()
1484 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); in mvneta_defaults_set()
1488 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_defaults_set()
1489 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_defaults_set()
1492 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); in mvneta_defaults_set()
1494 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); in mvneta_defaults_set()
1495 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); in mvneta_defaults_set()
1498 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_defaults_set()
1499 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_defaults_set()
1502 if (pp->bm_priv) in mvneta_defaults_set()
1508 mvreg_write(pp, MVNETA_ACC_MODE, val); in mvneta_defaults_set()
1510 if (pp->bm_priv) in mvneta_defaults_set()
1511 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); in mvneta_defaults_set()
1514 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_defaults_set()
1515 mvreg_write(pp, MVNETA_PORT_CONFIG, val); in mvneta_defaults_set()
1518 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); in mvneta_defaults_set()
1519 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); in mvneta_defaults_set()
1534 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); in mvneta_defaults_set()
1539 val = mvreg_read(pp, MVNETA_UNIT_CONTROL); in mvneta_defaults_set()
1541 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); in mvneta_defaults_set()
1543 mvneta_set_ucast_table(pp, -1); in mvneta_defaults_set()
1544 mvneta_set_special_mcast_table(pp, -1); in mvneta_defaults_set()
1545 mvneta_set_other_mcast_table(pp, -1); in mvneta_defaults_set()
1548 mvreg_write(pp, MVNETA_INTR_ENABLE, in mvneta_defaults_set()
1552 mvneta_mib_counters_clear(pp); in mvneta_defaults_set()
1556 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) in mvneta_txq_max_tx_size_set() argument
1567 val = mvreg_read(pp, MVNETA_TX_MTU); in mvneta_txq_max_tx_size_set()
1570 mvreg_write(pp, MVNETA_TX_MTU, val); in mvneta_txq_max_tx_size_set()
1573 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); in mvneta_txq_max_tx_size_set()
1580 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); in mvneta_txq_max_tx_size_set()
1583 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); in mvneta_txq_max_tx_size_set()
1590 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); in mvneta_txq_max_tx_size_set()
1596 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, in mvneta_set_ucast_addr() argument
1612 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); in mvneta_set_ucast_addr()
1622 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); in mvneta_set_ucast_addr()
1626 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, in mvneta_mac_addr_set() argument
1637 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); in mvneta_mac_addr_set()
1638 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); in mvneta_mac_addr_set()
1642 mvneta_set_ucast_addr(pp, addr[5], queue); in mvneta_mac_addr_set()
1648 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, in mvneta_rx_pkts_coal_set() argument
1651 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), in mvneta_rx_pkts_coal_set()
1658 static void mvneta_rx_time_coal_set(struct mvneta_port *pp, in mvneta_rx_time_coal_set() argument
1664 clk_rate = clk_get_rate(pp->clk); in mvneta_rx_time_coal_set()
1667 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); in mvneta_rx_time_coal_set()
1671 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, in mvneta_tx_done_pkts_coal_set() argument
1676 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1681 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
1697 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, in mvneta_txq_sent_desc_dec() argument
1706 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1711 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1715 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, in mvneta_txq_sent_desc_num_get() argument
1721 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1731 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, in mvneta_txq_sent_desc_proc() argument
1737 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); in mvneta_txq_sent_desc_proc()
1741 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); in mvneta_txq_sent_desc_proc()
1776 static void mvneta_rx_error(struct mvneta_port *pp, in mvneta_rx_error() argument
1779 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_error()
1789 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", in mvneta_rx_error()
1793 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", in mvneta_rx_error()
1797 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", in mvneta_rx_error()
1801 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", in mvneta_rx_error()
1808 static int mvneta_rx_csum(struct mvneta_port *pp, u32 status) in mvneta_rx_csum() argument
1810 if ((pp->dev->features & NETIF_F_RXCSUM) && in mvneta_rx_csum()
1822 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, in mvneta_tx_done_policy() argument
1827 return &pp->txqs[queue]; in mvneta_tx_done_policy()
1831 static void mvneta_txq_bufs_free(struct mvneta_port *pp, in mvneta_txq_bufs_free() argument
1852 dma_unmap_single(pp->dev->dev.parent, in mvneta_txq_bufs_free()
1875 static void mvneta_txq_done(struct mvneta_port *pp, in mvneta_txq_done() argument
1878 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1881 tx_done = mvneta_txq_sent_desc_proc(pp, txq); in mvneta_txq_done()
1885 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); in mvneta_txq_done()
1897 static int mvneta_rx_refill(struct mvneta_port *pp, in mvneta_rx_refill() argument
1910 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; in mvneta_rx_refill()
1917 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) in mvneta_skb_tx_csum() argument
1948 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, in mvneta_rxq_drop_pkts() argument
1953 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rxq_drop_pkts()
1955 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rxq_drop_pkts()
1957 if (pp->bm_priv) { in mvneta_rxq_drop_pkts()
1964 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rxq_drop_pkts()
1966 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rxq_drop_pkts()
1987 mvneta_update_stats(struct mvneta_port *pp, in mvneta_update_stats() argument
1990 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_update_stats()
2003 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) in mvneta_rx_refill_queue() argument
2012 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { in mvneta_rx_refill_queue()
2018 stats = this_cpu_ptr(pp->stats); in mvneta_rx_refill_queue()
2034 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_xdp_put_buff() argument
2048 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, in mvneta_xdp_submit_frame() argument
2063 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, in mvneta_xdp_submit_frame()
2065 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { in mvneta_xdp_submit_frame()
2075 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, in mvneta_xdp_submit_frame()
2093 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) in mvneta_xdp_xmit_back() argument
2095 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit_back()
2107 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit_back()
2108 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit_back()
2111 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); in mvneta_xdp_xmit_back()
2119 mvneta_txq_pend_desc_add(pp, txq, 0); in mvneta_xdp_xmit_back()
2134 struct mvneta_port *pp = netdev_priv(dev); in mvneta_xdp_xmit() local
2135 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit()
2142 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) in mvneta_xdp_xmit()
2148 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit()
2149 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit()
2153 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); in mvneta_xdp_xmit()
2162 mvneta_txq_pend_desc_add(pp, txq, 0); in mvneta_xdp_xmit()
2176 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_run_xdp() argument
2184 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2189 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2199 err = xdp_do_redirect(pp->dev, xdp, prog); in mvneta_run_xdp()
2201 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); in mvneta_run_xdp()
2210 ret = mvneta_xdp_xmit_back(pp, xdp); in mvneta_run_xdp()
2212 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); in mvneta_run_xdp()
2218 trace_xdp_exception(pp->dev, prog, act); in mvneta_run_xdp()
2221 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); in mvneta_run_xdp()
2234 mvneta_swbm_rx_frame(struct mvneta_port *pp, in mvneta_swbm_rx_frame() argument
2242 struct net_device *dev = pp->dev; in mvneta_swbm_rx_frame()
2264 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, in mvneta_swbm_rx_frame()
2272 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, in mvneta_swbm_add_rx_fragment() argument
2279 struct net_device *dev = pp->dev; in mvneta_swbm_add_rx_fragment()
2299 skb_frag_off_set(frag, pp->rx_offset_correction); in mvneta_swbm_add_rx_fragment()
2319 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, in mvneta_swbm_build_skb() argument
2334 skb->ip_summed = mvneta_rx_csum(pp, desc_status); in mvneta_swbm_build_skb()
2349 struct mvneta_port *pp, int budget, in mvneta_rx_swbm() argument
2353 struct net_device *dev = pp->dev; in mvneta_rx_swbm()
2366 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx_swbm()
2368 xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_rx_swbm()
2387 mvneta_rx_error(pp, rx_desc); in mvneta_rx_swbm()
2395 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, in mvneta_rx_swbm()
2405 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, in mvneta_rx_swbm()
2414 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); in mvneta_rx_swbm()
2419 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) in mvneta_rx_swbm()
2422 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); in mvneta_rx_swbm()
2424 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_swbm()
2426 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); in mvneta_rx_swbm()
2447 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); in mvneta_rx_swbm()
2453 mvneta_update_stats(pp, &ps); in mvneta_rx_swbm()
2456 refill = mvneta_rx_refill_queue(pp, rxq); in mvneta_rx_swbm()
2459 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); in mvneta_rx_swbm()
2466 struct mvneta_port *pp, int rx_todo, in mvneta_rx_hwbm() argument
2469 struct net_device *dev = pp->dev; in mvneta_rx_hwbm()
2475 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx_hwbm()
2499 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rx_hwbm()
2505 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2508 mvneta_rx_error(pp, rx_desc); in mvneta_rx_hwbm()
2519 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, in mvneta_rx_hwbm()
2528 skb->ip_summed = mvneta_rx_csum(pp, rx_status); in mvneta_rx_hwbm()
2535 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2549 stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2564 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, in mvneta_rx_hwbm()
2577 skb->ip_summed = mvneta_rx_csum(pp, rx_status); in mvneta_rx_hwbm()
2583 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2592 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rx_hwbm()
2599 struct mvneta_port *pp, struct mvneta_tx_queue *txq) in mvneta_tso_put_hdr() argument
2607 tx_desc->command = mvneta_skb_tx_csum(pp, skb); in mvneta_tso_put_hdr()
2656 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx_tso() local
2684 mvneta_tso_put_hdr(skb, pp, txq); in mvneta_tx_tso()
2712 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_tso()
2722 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, in mvneta_tx_frag_process() argument
2737 dma_map_single(pp->dev->dev.parent, addr, in mvneta_tx_frag_process()
2740 if (dma_mapping_error(pp->dev->dev.parent, in mvneta_tx_frag_process()
2767 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_frag_process()
2780 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx() local
2782 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx()
2802 tx_cmd = mvneta_skb_tx_csum(pp, skb); in mvneta_tx()
2830 if (mvneta_tx_frag_process(pp, skb, txq)) { in mvneta_tx()
2844 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_tx()
2854 mvneta_txq_pend_desc_add(pp, txq, frags); in mvneta_tx()
2872 static void mvneta_txq_done_force(struct mvneta_port *pp, in mvneta_txq_done_force() argument
2876 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done_force()
2879 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); in mvneta_txq_done_force()
2890 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) in mvneta_tx_done_gbe() argument
2897 txq = mvneta_tx_done_policy(pp, cause_tx_done); in mvneta_tx_done_gbe()
2899 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
2903 mvneta_txq_done(pp, txq); in mvneta_tx_done_gbe()
2938 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, in mvneta_set_special_mcast_addr() argument
2951 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST in mvneta_set_special_mcast_addr()
2961 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, in mvneta_set_special_mcast_addr()
2973 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, in mvneta_set_other_mcast_addr() argument
2984 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); in mvneta_set_other_mcast_addr()
2994 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); in mvneta_set_other_mcast_addr()
3006 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, in mvneta_mcast_addr_set() argument
3012 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); in mvneta_mcast_addr_set()
3018 if (pp->mcast_count[crc_result] == 0) { in mvneta_mcast_addr_set()
3019 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", in mvneta_mcast_addr_set()
3024 pp->mcast_count[crc_result]--; in mvneta_mcast_addr_set()
3025 if (pp->mcast_count[crc_result] != 0) { in mvneta_mcast_addr_set()
3026 netdev_info(pp->dev, in mvneta_mcast_addr_set()
3028 pp->mcast_count[crc_result], crc_result); in mvneta_mcast_addr_set()
3032 pp->mcast_count[crc_result]++; in mvneta_mcast_addr_set()
3034 mvneta_set_other_mcast_addr(pp, crc_result, queue); in mvneta_mcast_addr_set()
3040 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, in mvneta_rx_unicast_promisc_set() argument
3045 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); in mvneta_rx_unicast_promisc_set()
3047 val = mvreg_read(pp, MVNETA_TYPE_PRIO); in mvneta_rx_unicast_promisc_set()
3054 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); in mvneta_rx_unicast_promisc_set()
3055 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); in mvneta_rx_unicast_promisc_set()
3062 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); in mvneta_rx_unicast_promisc_set()
3063 mvreg_write(pp, MVNETA_TYPE_PRIO, val); in mvneta_rx_unicast_promisc_set()
3069 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_rx_mode() local
3074 mvneta_rx_unicast_promisc_set(pp, 1); in mvneta_set_rx_mode()
3075 mvneta_set_ucast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3076 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3077 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3080 mvneta_rx_unicast_promisc_set(pp, 0); in mvneta_set_rx_mode()
3081 mvneta_set_ucast_table(pp, -1); in mvneta_set_rx_mode()
3082 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); in mvneta_set_rx_mode()
3086 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3087 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3090 mvneta_set_special_mcast_table(pp, -1); in mvneta_set_rx_mode()
3091 mvneta_set_other_mcast_table(pp, -1); in mvneta_set_rx_mode()
3095 mvneta_mcast_addr_set(pp, ha->addr, in mvneta_set_rx_mode()
3096 pp->rxq_def); in mvneta_set_rx_mode()
3106 struct mvneta_port *pp = (struct mvneta_port *)dev_id; in mvneta_isr() local
3108 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_isr()
3109 napi_schedule(&pp->napi); in mvneta_isr()
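The ISR body is fully visible in the two matches above: mask further RX/TX interrupts, then hand processing off to NAPI. Reconstructed; the IRQ_HANDLED return is the only part not shown:

static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
        struct mvneta_port *pp = (struct mvneta_port *)dev_id;

        /* Mask all interrupts until the NAPI poll re-enables them */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        napi_schedule(&pp->napi);

        return IRQ_HANDLED;
}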
3119 disable_percpu_irq(port->pp->dev->irq); in mvneta_percpu_isr()
3125 static void mvneta_link_change(struct mvneta_port *pp) in mvneta_link_change() argument
3127 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); in mvneta_link_change()
3129 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); in mvneta_link_change()
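Likewise, mvneta_link_change() is completely covered by its two matches: read the GMAC status and forward the link-up bit to phylink.

static void mvneta_link_change(struct mvneta_port *pp)
{
        u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

        /* Let phylink resolve and propagate the new link state */
        phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}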
3144 struct mvneta_port *pp = netdev_priv(napi->dev); in mvneta_poll() local
3145 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); in mvneta_poll()
3147 if (!netif_running(pp->dev)) { in mvneta_poll()
3153 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); in mvneta_poll()
3155 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); in mvneta_poll()
3157 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_poll()
3161 mvneta_link_change(pp); in mvneta_poll()
3166 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); in mvneta_poll()
3173 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : in mvneta_poll()
3179 if (pp->bm_priv) in mvneta_poll()
3180 rx_done = mvneta_rx_hwbm(napi, pp, budget, in mvneta_poll()
3181 &pp->rxqs[rx_queue]); in mvneta_poll()
3183 rx_done = mvneta_rx_swbm(napi, pp, budget, in mvneta_poll()
3184 &pp->rxqs[rx_queue]); in mvneta_poll()
3191 if (pp->neta_armada3700) { in mvneta_poll()
3195 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_poll()
3201 enable_percpu_irq(pp->dev->irq, 0); in mvneta_poll()
3205 if (pp->neta_armada3700) in mvneta_poll()
3206 pp->cause_rx_tx = cause_rx_tx; in mvneta_poll()
3213 static int mvneta_create_page_pool(struct mvneta_port *pp, in mvneta_create_page_pool() argument
3216 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_create_page_pool()
3222 .dev = pp->dev->dev.parent, in mvneta_create_page_pool()
3224 .offset = pp->rx_offset_correction, in mvneta_create_page_pool()
3236 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0); in mvneta_create_page_pool()
3256 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_rxq_fill() argument
3261 err = mvneta_create_page_pool(pp, rxq, num); in mvneta_rxq_fill()
3267 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, in mvneta_rxq_fill()
3269 netdev_err(pp->dev, in mvneta_rxq_fill()
3279 mvneta_rxq_non_occup_desc_add(pp, rxq, i); in mvneta_rxq_fill()
3285 static void mvneta_tx_reset(struct mvneta_port *pp) in mvneta_tx_reset() argument
3291 mvneta_txq_done_force(pp, &pp->txqs[queue]); in mvneta_tx_reset()
3293 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_tx_reset()
3294 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_tx_reset()
3297 static void mvneta_rx_reset(struct mvneta_port *pp) in mvneta_rx_reset() argument
3299 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_rx_reset()
3300 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_rx_reset()
3305 static int mvneta_rxq_sw_init(struct mvneta_port *pp, in mvneta_rxq_sw_init() argument
3308 rxq->size = pp->rx_ring_size; in mvneta_rxq_sw_init()
3311 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_rxq_sw_init()
3322 static void mvneta_rxq_hw_init(struct mvneta_port *pp, in mvneta_rxq_hw_init() argument
3326 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); in mvneta_rxq_hw_init()
3327 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); in mvneta_rxq_hw_init()
3330 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_rxq_hw_init()
3331 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_rxq_hw_init()
3333 if (!pp->bm_priv) { in mvneta_rxq_hw_init()
3335 mvneta_rxq_offset_set(pp, rxq, 0); in mvneta_rxq_hw_init()
3336 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? in mvneta_rxq_hw_init()
3338 MVNETA_RX_BUF_SIZE(pp->pkt_size)); in mvneta_rxq_hw_init()
3339 mvneta_rxq_bm_disable(pp, rxq); in mvneta_rxq_hw_init()
3340 mvneta_rxq_fill(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3343 mvneta_rxq_offset_set(pp, rxq, in mvneta_rxq_hw_init()
3344 NET_SKB_PAD - pp->rx_offset_correction); in mvneta_rxq_hw_init()
3346 mvneta_rxq_bm_enable(pp, rxq); in mvneta_rxq_hw_init()
3348 mvneta_rxq_long_pool_set(pp, rxq); in mvneta_rxq_hw_init()
3349 mvneta_rxq_short_pool_set(pp, rxq); in mvneta_rxq_hw_init()
3350 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3355 static int mvneta_rxq_init(struct mvneta_port *pp, in mvneta_rxq_init() argument
3361 ret = mvneta_rxq_sw_init(pp, rxq); in mvneta_rxq_init()
3365 mvneta_rxq_hw_init(pp, rxq); in mvneta_rxq_init()
3371 static void mvneta_rxq_deinit(struct mvneta_port *pp, in mvneta_rxq_deinit() argument
3374 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_rxq_deinit()
3377 dma_free_coherent(pp->dev->dev.parent, in mvneta_rxq_deinit()
3390 static int mvneta_txq_sw_init(struct mvneta_port *pp, in mvneta_txq_sw_init() argument
3395 txq->size = pp->tx_ring_size; in mvneta_txq_sw_init()
3405 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_sw_init()
3418 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_sw_init()
3425 if (pp->neta_armada3700) in mvneta_txq_sw_init()
3430 cpu = pp->rxq_def % num_present_cpus(); in mvneta_txq_sw_init()
3432 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); in mvneta_txq_sw_init()
3437 static void mvneta_txq_hw_init(struct mvneta_port *pp, in mvneta_txq_hw_init() argument
3441 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_hw_init()
3442 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_hw_init()
3445 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_hw_init()
3446 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_hw_init()
3448 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_hw_init()
3452 static int mvneta_txq_init(struct mvneta_port *pp, in mvneta_txq_init() argument
3457 ret = mvneta_txq_sw_init(pp, txq); in mvneta_txq_init()
3461 mvneta_txq_hw_init(pp, txq); in mvneta_txq_init()
3467 static void mvneta_txq_sw_deinit(struct mvneta_port *pp, in mvneta_txq_sw_deinit() argument
3470 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_sw_deinit()
3475 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_sw_deinit()
3479 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_sw_deinit()
3491 static void mvneta_txq_hw_deinit(struct mvneta_port *pp, in mvneta_txq_hw_deinit() argument
3495 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3496 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3499 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3500 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3503 static void mvneta_txq_deinit(struct mvneta_port *pp, in mvneta_txq_deinit() argument
3506 mvneta_txq_sw_deinit(pp, txq); in mvneta_txq_deinit()
3507 mvneta_txq_hw_deinit(pp, txq); in mvneta_txq_deinit()
3511 static void mvneta_cleanup_txqs(struct mvneta_port *pp) in mvneta_cleanup_txqs() argument
3516 mvneta_txq_deinit(pp, &pp->txqs[queue]); in mvneta_cleanup_txqs()
3520 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) in mvneta_cleanup_rxqs() argument
3525 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); in mvneta_cleanup_rxqs()
3530 static int mvneta_setup_rxqs(struct mvneta_port *pp) in mvneta_setup_rxqs() argument
3535 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); in mvneta_setup_rxqs()
3538 netdev_err(pp->dev, "%s: can't create rxq=%d\n", in mvneta_setup_rxqs()
3540 mvneta_cleanup_rxqs(pp); in mvneta_setup_rxqs()
3549 static int mvneta_setup_txqs(struct mvneta_port *pp) in mvneta_setup_txqs() argument
3554 int err = mvneta_txq_init(pp, &pp->txqs[queue]); in mvneta_setup_txqs()
3556 netdev_err(pp->dev, "%s: can't create txq=%d\n", in mvneta_setup_txqs()
3558 mvneta_cleanup_txqs(pp); in mvneta_setup_txqs()
3566 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) in mvneta_comphy_init() argument
3570 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); in mvneta_comphy_init()
3574 return phy_power_on(pp->comphy); in mvneta_comphy_init()
3577 static int mvneta_config_interface(struct mvneta_port *pp, in mvneta_config_interface() argument
3582 if (pp->comphy) { in mvneta_config_interface()
3586 ret = mvneta_comphy_init(pp, interface); in mvneta_config_interface()
3591 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3597 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3602 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3610 pp->phy_interface = interface; in mvneta_config_interface()
3615 static void mvneta_start_dev(struct mvneta_port *pp) in mvneta_start_dev() argument
3619 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); in mvneta_start_dev()
3621 mvneta_max_rx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3622 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3625 mvneta_port_enable(pp); in mvneta_start_dev()
3627 if (!pp->neta_armada3700) { in mvneta_start_dev()
3631 per_cpu_ptr(pp->ports, cpu); in mvneta_start_dev()
3636 napi_enable(&pp->napi); in mvneta_start_dev()
3640 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_start_dev()
3642 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_start_dev()
3646 phylink_start(pp->phylink); in mvneta_start_dev()
3649 phylink_speed_up(pp->phylink); in mvneta_start_dev()
3651 netif_tx_start_all_queues(pp->dev); in mvneta_start_dev()
3653 clear_bit(__MVNETA_DOWN, &pp->state); in mvneta_start_dev()
3656 static void mvneta_stop_dev(struct mvneta_port *pp) in mvneta_stop_dev() argument
3660 set_bit(__MVNETA_DOWN, &pp->state); in mvneta_stop_dev()
3662 if (device_may_wakeup(&pp->dev->dev)) in mvneta_stop_dev()
3663 phylink_speed_down(pp->phylink, false); in mvneta_stop_dev()
3665 phylink_stop(pp->phylink); in mvneta_stop_dev()
3667 if (!pp->neta_armada3700) { in mvneta_stop_dev()
3670 per_cpu_ptr(pp->ports, cpu); in mvneta_stop_dev()
3675 napi_disable(&pp->napi); in mvneta_stop_dev()
3678 netif_carrier_off(pp->dev); in mvneta_stop_dev()
3680 mvneta_port_down(pp); in mvneta_stop_dev()
3681 netif_tx_stop_all_queues(pp->dev); in mvneta_stop_dev()
3684 mvneta_port_disable(pp); in mvneta_stop_dev()
3687 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); in mvneta_stop_dev()
3690 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_stop_dev()
3692 mvneta_tx_reset(pp); in mvneta_stop_dev()
3693 mvneta_rx_reset(pp); in mvneta_stop_dev()
3695 WARN_ON(phy_power_off(pp->comphy)); in mvneta_stop_dev()
3700 struct mvneta_port *pp = arg; in mvneta_percpu_enable() local
3702 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); in mvneta_percpu_enable()
3707 struct mvneta_port *pp = arg; in mvneta_percpu_disable() local
3709 disable_percpu_irq(pp->dev->irq); in mvneta_percpu_disable()
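Both per-CPU IRQ helpers are fully visible above; they toggle the per-CPU interrupt used on the non-Armada-3700 variants:

static void mvneta_percpu_enable(void *arg)
{
        struct mvneta_port *pp = arg;

        enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
        struct mvneta_port *pp = arg;

        disable_percpu_irq(pp->dev->irq);
}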
3715 struct mvneta_port *pp = netdev_priv(dev); in mvneta_change_mtu() local
3724 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { in mvneta_change_mtu()
3732 if (pp->bm_priv) in mvneta_change_mtu()
3733 mvneta_bm_update_mtu(pp, mtu); in mvneta_change_mtu()
3742 mvneta_stop_dev(pp); in mvneta_change_mtu()
3743 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_change_mtu()
3745 mvneta_cleanup_txqs(pp); in mvneta_change_mtu()
3746 mvneta_cleanup_rxqs(pp); in mvneta_change_mtu()
3748 if (pp->bm_priv) in mvneta_change_mtu()
3749 mvneta_bm_update_mtu(pp, mtu); in mvneta_change_mtu()
3751 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); in mvneta_change_mtu()
3753 ret = mvneta_setup_rxqs(pp); in mvneta_change_mtu()
3759 ret = mvneta_setup_txqs(pp); in mvneta_change_mtu()
3765 on_each_cpu(mvneta_percpu_enable, pp, true); in mvneta_change_mtu()
3766 mvneta_start_dev(pp); in mvneta_change_mtu()
3776 struct mvneta_port *pp = netdev_priv(dev); in mvneta_fix_features() local
3778 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { in mvneta_fix_features()
3782 pp->tx_csum_limit); in mvneta_fix_features()
3789 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) in mvneta_get_mac_addr() argument
3793 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); in mvneta_get_mac_addr()
3794 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); in mvneta_get_mac_addr()
3806 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_mac_addr() local
3814 mvneta_mac_addr_set(pp, dev->dev_addr, -1); in mvneta_set_mac_addr()
3817 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); in mvneta_set_mac_addr()
3828 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_validate() local
3857 if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { in mvneta_validate()
3861 if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { in mvneta_validate()
3889 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_pcs_get_state() local
3892 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); in mvneta_mac_pcs_get_state()
3917 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_an_restart() local
3918 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_an_restart()
3920 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_mac_an_restart()
3922 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_mac_an_restart()
3930 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_config() local
3931 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_mac_config()
3932 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); in mvneta_mac_config()
3933 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); in mvneta_mac_config()
3934 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); in mvneta_mac_config()
3935 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_config()
4001 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_mac_config()
4013 if (pp->phy_interface != state->interface) { in mvneta_mac_config()
4014 if (pp->comphy) in mvneta_mac_config()
4015 WARN_ON(phy_power_off(pp->comphy)); in mvneta_mac_config()
4016 WARN_ON(mvneta_config_interface(pp, state->interface)); in mvneta_mac_config()
4020 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); in mvneta_mac_config()
4022 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); in mvneta_mac_config()
4024 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); in mvneta_mac_config()
4026 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); in mvneta_mac_config()
4028 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); in mvneta_mac_config()
4031 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & in mvneta_mac_config()
4037 static void mvneta_set_eee(struct mvneta_port *pp, bool enable) in mvneta_set_eee() argument
4041 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); in mvneta_set_eee()
4046 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); in mvneta_set_eee()
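Sketch of the EEE toggle around the LPI_CTRL_1 read/write pair above; the request-enable bit name is an assumption:

static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
        u32 lpi_ctl1;

        /* Set or clear the LPI request bit; leave everything else alone */
        lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
        if (enable)
                lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
        else
                lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
        mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
}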
4053 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_link_down() local
4056 mvneta_port_down(pp); in mvneta_mac_link_down()
4059 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_down()
4062 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_down()
4065 pp->eee_active = false; in mvneta_mac_link_down()
4066 mvneta_set_eee(pp, false); in mvneta_mac_link_down()
4076 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_link_up() local
4080 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_up()
4099 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_up()
4105 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_up()
4111 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_up()
4114 mvneta_port_up(pp); in mvneta_mac_link_up()
4116 if (phy && pp->eee_enabled) { in mvneta_mac_link_up()
4117 pp->eee_active = phy_init_eee(phy, 0) >= 0; in mvneta_mac_link_up()
4118 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); in mvneta_mac_link_up()
4131 static int mvneta_mdio_probe(struct mvneta_port *pp) in mvneta_mdio_probe() argument
4134 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); in mvneta_mdio_probe()
4137 netdev_err(pp->dev, "could not attach PHY: %d\n", err); in mvneta_mdio_probe()
4139 phylink_ethtool_get_wol(pp->phylink, &wol); in mvneta_mdio_probe()
4140 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); in mvneta_mdio_probe()
4144 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); in mvneta_mdio_probe()
4149 static void mvneta_mdio_remove(struct mvneta_port *pp) in mvneta_mdio_remove() argument
4151 phylink_disconnect_phy(pp->phylink); in mvneta_mdio_remove()
4158 static void mvneta_percpu_elect(struct mvneta_port *pp) in mvneta_percpu_elect() argument
4165 if (cpu_online(pp->rxq_def)) in mvneta_percpu_elect()
4166 elected_cpu = pp->rxq_def; in mvneta_percpu_elect()
4180 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); in mvneta_percpu_elect()
4190 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & in mvneta_percpu_elect()
4193 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); in mvneta_percpu_elect()
4199 pp, true); in mvneta_percpu_elect()
4208 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_online() local
4210 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_online()
4215 if (pp->neta_armada3700) in mvneta_cpu_online()
4218 spin_lock(&pp->lock); in mvneta_cpu_online()
4223 if (pp->is_stopped) { in mvneta_cpu_online()
4224 spin_unlock(&pp->lock); in mvneta_cpu_online()
4227 netif_tx_stop_all_queues(pp->dev); in mvneta_cpu_online()
4236 per_cpu_ptr(pp->ports, other_cpu); in mvneta_cpu_online()
4243 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_cpu_online()
4250 mvneta_percpu_enable(pp); in mvneta_cpu_online()
4256 mvneta_percpu_elect(pp); in mvneta_cpu_online()
4259 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_cpu_online()
4260 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_cpu_online()
4263 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_online()
4264 spin_unlock(&pp->lock); in mvneta_cpu_online()
4270 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_down_prepare() local
4272 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_down_prepare()
4278 spin_lock(&pp->lock); in mvneta_cpu_down_prepare()
4280 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_cpu_down_prepare()
4281 spin_unlock(&pp->lock); in mvneta_cpu_down_prepare()
4286 mvneta_percpu_disable(pp); in mvneta_cpu_down_prepare()
4292 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_dead() local
4296 spin_lock(&pp->lock); in mvneta_cpu_dead()
4297 mvneta_percpu_elect(pp); in mvneta_cpu_dead()
4298 spin_unlock(&pp->lock); in mvneta_cpu_dead()
4300 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_cpu_dead()
4301 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_cpu_dead()
4304 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_dead()
4310 struct mvneta_port *pp = netdev_priv(dev); in mvneta_open() local
4313 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); in mvneta_open()
4315 ret = mvneta_setup_rxqs(pp); in mvneta_open()
4319 ret = mvneta_setup_txqs(pp); in mvneta_open()
4324 if (pp->neta_armada3700) in mvneta_open()
4325 ret = request_irq(pp->dev->irq, mvneta_isr, 0, in mvneta_open()
4326 dev->name, pp); in mvneta_open()
4328 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, in mvneta_open()
4329 dev->name, pp->ports); in mvneta_open()
4331 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); in mvneta_open()
4335 if (!pp->neta_armada3700) { in mvneta_open()
4339 on_each_cpu(mvneta_percpu_enable, pp, true); in mvneta_open()
4341 pp->is_stopped = false; in mvneta_open()
4346 &pp->node_online); in mvneta_open()
4351 &pp->node_dead); in mvneta_open()
4356 ret = mvneta_mdio_probe(pp); in mvneta_open()
4362 mvneta_start_dev(pp); in mvneta_open()
4367 if (!pp->neta_armada3700) in mvneta_open()
4369 &pp->node_dead); in mvneta_open()
4371 if (!pp->neta_armada3700) in mvneta_open()
4373 &pp->node_online); in mvneta_open()
4375 if (pp->neta_armada3700) { in mvneta_open()
4376 free_irq(pp->dev->irq, pp); in mvneta_open()
4378 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_open()
4379 free_percpu_irq(pp->dev->irq, pp->ports); in mvneta_open()
4382 mvneta_cleanup_txqs(pp); in mvneta_open()
4384 mvneta_cleanup_rxqs(pp); in mvneta_open()
4391 struct mvneta_port *pp = netdev_priv(dev); in mvneta_stop() local
4393 if (!pp->neta_armada3700) { in mvneta_stop()
4399 spin_lock(&pp->lock); in mvneta_stop()
4400 pp->is_stopped = true; in mvneta_stop()
4401 spin_unlock(&pp->lock); in mvneta_stop()
4403 mvneta_stop_dev(pp); in mvneta_stop()
4404 mvneta_mdio_remove(pp); in mvneta_stop()
4407 &pp->node_online); in mvneta_stop()
4409 &pp->node_dead); in mvneta_stop()
4410 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_stop()
4411 free_percpu_irq(dev->irq, pp->ports); in mvneta_stop()
4413 mvneta_stop_dev(pp); in mvneta_stop()
4414 mvneta_mdio_remove(pp); in mvneta_stop()
4415 free_irq(dev->irq, pp); in mvneta_stop()
4418 mvneta_cleanup_rxqs(pp); in mvneta_stop()
4419 mvneta_cleanup_txqs(pp); in mvneta_stop()
4426 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ioctl() local
4428 return phylink_mii_ioctl(pp->phylink, ifr, cmd); in mvneta_ioctl()
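mvneta_ioctl() is a pure pass-through to phylink, fully visible in the two matches above:

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mvneta_port *pp = netdev_priv(dev);

        /* PHY MII ioctls (SIOCGMIIREG and friends) go to phylink */
        return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}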
4435 struct mvneta_port *pp = netdev_priv(dev); in mvneta_xdp_setup() local
4443 if (pp->bm_priv) { in mvneta_xdp_setup()
4449 need_update = !!pp->xdp_prog != !!prog; in mvneta_xdp_setup()
4453 old_prog = xchg(&pp->xdp_prog, prog); in mvneta_xdp_setup()
4480 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_ethtool_set_link_ksettings() local
4482 return phylink_ethtool_ksettings_set(pp->phylink, cmd); in mvneta_ethtool_set_link_ksettings()
4490 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_ethtool_get_link_ksettings() local
4492 return phylink_ethtool_ksettings_get(pp->phylink, cmd); in mvneta_ethtool_get_link_ksettings()
4497 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_nway_reset() local
4499 return phylink_ethtool_nway_reset(pp->phylink); in mvneta_ethtool_nway_reset()
4509 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_coalesce() local
4513 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce()
4516 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_ethtool_set_coalesce()
4517 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_ethtool_set_coalesce()
4521 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce()
4523 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
4536 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_coalesce() local
4538 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; in mvneta_ethtool_get_coalesce()
4539 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; in mvneta_ethtool_get_coalesce()
4541 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; in mvneta_ethtool_get_coalesce()
4561 struct mvneta_port *pp = netdev_priv(netdev); in mvneta_ethtool_get_ringparam() local
4565 ring->rx_pending = pp->rx_ring_size; in mvneta_ethtool_get_ringparam()
4566 ring->tx_pending = pp->tx_ring_size; in mvneta_ethtool_get_ringparam()
4572 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_ringparam() local
4576 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? in mvneta_ethtool_set_ringparam()
4579 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, in mvneta_ethtool_set_ringparam()
4581 if (pp->tx_ring_size != ring->tx_pending) in mvneta_ethtool_set_ringparam()
4583 pp->tx_ring_size, ring->tx_pending); in mvneta_ethtool_set_ringparam()
4600 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_pauseparam() local
4602 phylink_ethtool_get_pauseparam(pp->phylink, pause); in mvneta_ethtool_get_pauseparam()
4608 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_pauseparam() local
4610 return phylink_ethtool_set_pauseparam(pp->phylink, pause); in mvneta_ethtool_set_pauseparam()
4626 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, in mvneta_ethtool_update_pcpu_stats() argument
4644 stats = per_cpu_ptr(pp->stats, cpu); in mvneta_ethtool_update_pcpu_stats()
4670 static void mvneta_ethtool_update_stats(struct mvneta_port *pp) in mvneta_ethtool_update_stats() argument
4674 void __iomem *base = pp->base; in mvneta_ethtool_update_stats()
4679 mvneta_ethtool_update_pcpu_stats(pp, &stats); in mvneta_ethtool_update_stats()
4686 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4693 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4698 val = phylink_get_eee_err(pp->phylink); in mvneta_ethtool_update_stats()
4699 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4702 pp->ethtool_stats[i] = stats.skb_alloc_error; in mvneta_ethtool_update_stats()
4705 pp->ethtool_stats[i] = stats.refill_error; in mvneta_ethtool_update_stats()
4708 pp->ethtool_stats[i] = stats.ps.xdp_redirect; in mvneta_ethtool_update_stats()
4711 pp->ethtool_stats[i] = stats.ps.xdp_pass; in mvneta_ethtool_update_stats()
4714 pp->ethtool_stats[i] = stats.ps.xdp_drop; in mvneta_ethtool_update_stats()
4717 pp->ethtool_stats[i] = stats.ps.xdp_tx; in mvneta_ethtool_update_stats()
4720 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; in mvneta_ethtool_update_stats()
4723 pp->ethtool_stats[i] = stats.ps.xdp_xmit; in mvneta_ethtool_update_stats()
4726 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; in mvneta_ethtool_update_stats()
4737 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_stats() local
4740 mvneta_ethtool_update_stats(pp); in mvneta_ethtool_get_stats()
4743 *data++ = pp->ethtool_stats[i]; in mvneta_ethtool_get_stats()
4773 static int mvneta_config_rss(struct mvneta_port *pp) in mvneta_config_rss() argument
4778 netif_tx_stop_all_queues(pp->dev); in mvneta_config_rss()
4780 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_config_rss()
4782 if (!pp->neta_armada3700) { in mvneta_config_rss()
4786 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
4792 napi_synchronize(&pp->napi); in mvneta_config_rss()
4793 napi_disable(&pp->napi); in mvneta_config_rss()
4796 pp->rxq_def = pp->indir[0]; in mvneta_config_rss()
4799 mvneta_set_rx_mode(pp->dev); in mvneta_config_rss()
4802 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_config_rss()
4803 mvreg_write(pp, MVNETA_PORT_CONFIG, val); in mvneta_config_rss()
4806 spin_lock(&pp->lock); in mvneta_config_rss()
4807 mvneta_percpu_elect(pp); in mvneta_config_rss()
4808 spin_unlock(&pp->lock); in mvneta_config_rss()
4810 if (!pp->neta_armada3700) { in mvneta_config_rss()
4814 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
4819 napi_enable(&pp->napi); in mvneta_config_rss()
4822 netif_tx_start_all_queues(pp->dev); in mvneta_config_rss()
4830 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_rxfh() local
4833 if (pp->neta_armada3700) in mvneta_ethtool_set_rxfh()
4846 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_set_rxfh()
4848 return mvneta_config_rss(pp); in mvneta_ethtool_set_rxfh()
4854 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_rxfh() local
4857 if (pp->neta_armada3700) in mvneta_ethtool_get_rxfh()
4866 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_get_rxfh()
4874 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_wol() local
4876 phylink_ethtool_get_wol(pp->phylink, wol); in mvneta_ethtool_get_wol()
4882 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_wol() local
4885 ret = phylink_ethtool_set_wol(pp->phylink, wol); in mvneta_ethtool_set_wol()
4895 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_eee() local
4898 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); in mvneta_ethtool_get_eee()
4900 eee->eee_enabled = pp->eee_enabled; in mvneta_ethtool_get_eee()
4901 eee->eee_active = pp->eee_active; in mvneta_ethtool_get_eee()
4902 eee->tx_lpi_enabled = pp->tx_lpi_enabled; in mvneta_ethtool_get_eee()
4905 return phylink_ethtool_get_eee(pp->phylink, eee); in mvneta_ethtool_get_eee()
4911 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_eee() local
4920 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); in mvneta_ethtool_set_eee()
4923 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); in mvneta_ethtool_set_eee()
4925 pp->eee_enabled = eee->eee_enabled; in mvneta_ethtool_set_eee()
4926 pp->tx_lpi_enabled = eee->tx_lpi_enabled; in mvneta_ethtool_set_eee()
4928 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); in mvneta_ethtool_set_eee()
4930 return phylink_ethtool_set_eee(pp->phylink, eee); in mvneta_ethtool_set_eee()
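The EEE handlers combine MAC-level LPI state (the MVNETA_LPI_CTRL_0 timer register) with PHY-level negotiation through phylink: the get path reports the cached pp->eee_* flags alongside the register contents, while the set path reprograms the LPI timer, caches the new flags, applies them with mvneta_set_eee(), and only then hands off to phylink. A sketch of the set ordering; the timer-field update in lpi_ctl0 is elided:

	/* Sketch of the mvneta_ethtool_set_eee() ordering. */
	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	/* ... fold the requested LPI timeout into lpi_ctl0 here ... */
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;
	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
	return phylink_ethtool_set_eee(pp->phylink, eee);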
4933 static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) in mvneta_clear_rx_prio_map() argument
4935 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); in mvneta_clear_rx_prio_map()
4938 static void mvneta_setup_rx_prio_map(struct mvneta_port *pp) in mvneta_setup_rx_prio_map() argument
4944 val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]); in mvneta_setup_rx_prio_map()
4946 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); in mvneta_setup_rx_prio_map()
4952 struct mvneta_port *pp = netdev_priv(dev); in mvneta_setup_mqprio() local
4963 mvneta_clear_rx_prio_map(pp); in mvneta_setup_mqprio()
4968 memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map)); in mvneta_setup_mqprio()
4970 mvneta_setup_rx_prio_map(pp); in mvneta_setup_mqprio()
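MVNETA_VLAN_PRIO_TO_RXQ is a single register carrying one RXQ number per VLAN priority, so the whole map is built by OR-ing per-priority fields and committed with one write, and clearing it is just a write of zero. A sketch of the builder, assuming eight priority levels and that MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) shifts 'rxq' into the slot for 'prio':

	/* Sketch of mvneta_setup_rx_prio_map(). */
	u32 val = 0;
	int i;

	for (i = 0; i < 8; i++)
		val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);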
5033 static int mvneta_init(struct device *dev, struct mvneta_port *pp) in mvneta_init() argument
5038 mvneta_port_disable(pp); in mvneta_init()
5041 mvneta_defaults_set(pp); in mvneta_init()
5043 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); in mvneta_init()
5044 if (!pp->txqs) in mvneta_init()
5049 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init()
5051 txq->size = pp->tx_ring_size; in mvneta_init()
5055 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); in mvneta_init()
5056 if (!pp->rxqs) in mvneta_init()
5061 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_init()
5063 rxq->size = pp->rx_ring_size; in mvneta_init()
5067 = devm_kmalloc_array(pp->dev->dev.parent, in mvneta_init()
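mvneta_init() shows the managed-allocation pattern used throughout the driver: devm_kcalloc() ties the TXQ/RXQ arrays to the device lifetime, so every error path can simply return -ENOMEM with no matching free. A condensed sketch, with per-queue id and coalescing setup elided:

	/* Sketch of the queue-array setup in mvneta_init(). */
	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs),
				GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;
	for (queue = 0; queue < txq_number; queue++)
		pp->txqs[queue].size = pp->tx_ring_size;

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs),
				GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;
	for (queue = 0; queue < rxq_number; queue++)
		pp->rxqs[queue].size = pp->rx_ring_size;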
5079 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, in mvneta_conf_mbus_windows() argument
5087 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); in mvneta_conf_mbus_windows()
5088 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); in mvneta_conf_mbus_windows()
5091 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); in mvneta_conf_mbus_windows()
5101 mvreg_write(pp, MVNETA_WIN_BASE(i), in mvneta_conf_mbus_windows()
5106 mvreg_write(pp, MVNETA_WIN_SIZE(i), in mvneta_conf_mbus_windows()
5117 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); in mvneta_conf_mbus_windows()
5122 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); in mvneta_conf_mbus_windows()
5123 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); in mvneta_conf_mbus_windows()
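The window setup follows the usual Marvell mbus idiom: clear every window, then program one BASE/SIZE pair per DRAM chip select from the mv_mbus_dram_info() table while accumulating enable and protection bits, and commit those bits last. On Armada 3700, where no dram_target_info exists, a single catch-all window is opened instead (the 0xffff0000 size write to window 0). A sketch of the per-chip-select loop; the exact bit packing here is an assumption based on the common mbus conventions:

	/* Sketch: one mbus window per DRAM chip select. */
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvreg_write(pp, MVNETA_WIN_BASE(i),
			    (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);
		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);	/* mark window i active */
		win_protect |= 3 << (2 * i);	/* full read/write access */
	}
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);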
5127 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) in mvneta_port_power_up() argument
5130 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); in mvneta_port_power_up()
5146 struct mvneta_port *pp; in mvneta_probe() local
5180 pp = netdev_priv(dev); in mvneta_probe()
5181 spin_lock_init(&pp->lock); in mvneta_probe()
5183 pp->phylink_config.dev = &dev->dev; in mvneta_probe()
5184 pp->phylink_config.type = PHYLINK_NETDEV; in mvneta_probe()
5186 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, in mvneta_probe()
5199 pp->phylink = phylink; in mvneta_probe()
5200 pp->comphy = comphy; in mvneta_probe()
5201 pp->phy_interface = phy_mode; in mvneta_probe()
5202 pp->dn = dn; in mvneta_probe()
5204 pp->rxq_def = rxq_def; in mvneta_probe()
5205 pp->indir[0] = rxq_def; in mvneta_probe()
5209 pp->neta_armada3700 = true; in mvneta_probe()
5211 pp->clk = devm_clk_get(&pdev->dev, "core"); in mvneta_probe()
5212 if (IS_ERR(pp->clk)) in mvneta_probe()
5213 pp->clk = devm_clk_get(&pdev->dev, NULL); in mvneta_probe()
5214 if (IS_ERR(pp->clk)) { in mvneta_probe()
5215 err = PTR_ERR(pp->clk); in mvneta_probe()
5219 clk_prepare_enable(pp->clk); in mvneta_probe()
5221 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); in mvneta_probe()
5222 if (!IS_ERR(pp->clk_bus)) in mvneta_probe()
5223 clk_prepare_enable(pp->clk_bus); in mvneta_probe()
5225 pp->base = devm_platform_ioremap_resource(pdev, 0); in mvneta_probe()
5226 if (IS_ERR(pp->base)) { in mvneta_probe()
5227 err = PTR_ERR(pp->base); in mvneta_probe()
5232 pp->ports = alloc_percpu(struct mvneta_pcpu_port); in mvneta_probe()
5233 if (!pp->ports) { in mvneta_probe()
5239 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); in mvneta_probe()
5240 if (!pp->stats) { in mvneta_probe()
5249 mvneta_get_mac_addr(pp, hw_mac_addr); in mvneta_probe()
5273 pp->tx_csum_limit = tx_csum_limit; in mvneta_probe()
5275 pp->dram_target_info = mv_mbus_dram_info(); in mvneta_probe()
5280 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_probe()
5281 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_probe()
5283 pp->tx_ring_size = MVNETA_MAX_TXD; in mvneta_probe()
5284 pp->rx_ring_size = MVNETA_MAX_RXD; in mvneta_probe()
5286 pp->dev = dev; in mvneta_probe()
5289 pp->id = global_port_id++; in mvneta_probe()
5294 pp->bm_priv = mvneta_bm_get(bm_node); in mvneta_probe()
5295 if (pp->bm_priv) { in mvneta_probe()
5296 err = mvneta_bm_port_init(pdev, pp); in mvneta_probe()
5300 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5301 pp->bm_priv = NULL; in mvneta_probe()
5308 pp->rx_offset_correction = max(0, in mvneta_probe()
5315 if (!pp->bm_priv) in mvneta_probe()
5316 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_probe()
5318 err = mvneta_init(&pdev->dev, pp); in mvneta_probe()
5322 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_probe()
5331 if (pp->neta_armada3700) { in mvneta_probe()
5332 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); in mvneta_probe()
5336 per_cpu_ptr(pp->ports, cpu); in mvneta_probe()
5340 port->pp = pp; in mvneta_probe()
5365 platform_set_drvdata(pdev, pp->dev); in mvneta_probe()
5370 if (pp->bm_priv) { in mvneta_probe()
5371 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_probe()
5372 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_probe()
5373 1 << pp->id); in mvneta_probe()
5374 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5376 free_percpu(pp->stats); in mvneta_probe()
5378 free_percpu(pp->ports); in mvneta_probe()
5380 clk_disable_unprepare(pp->clk_bus); in mvneta_probe()
5381 clk_disable_unprepare(pp->clk); in mvneta_probe()
5383 if (pp->phylink) in mvneta_probe()
5384 phylink_destroy(pp->phylink); in mvneta_probe()
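Everything after the platform_set_drvdata() line belongs to the error-unwind ladder of mvneta_probe(): each label releases resources in the reverse order they were acquired (BM pools, per-CPU stats, per-CPU ports, clocks, phylink), which is what makes the mid-probe goto jumps safe. A skeleton of that shape; the label names are illustrative, not verbatim:

	err_bm:
		if (pp->bm_priv) {
			mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long,
					       1 << pp->id);
			mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
					       1 << pp->id);
			mvneta_bm_put(pp->bm_priv);
		}
	err_stats:
		free_percpu(pp->stats);
	err_ports:
		free_percpu(pp->ports);
	err_clk:
		clk_disable_unprepare(pp->clk_bus);
		clk_disable_unprepare(pp->clk);
	err_phylink:
		if (pp->phylink)
			phylink_destroy(pp->phylink);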
5394 struct mvneta_port *pp = netdev_priv(dev); in mvneta_remove() local
5397 clk_disable_unprepare(pp->clk_bus); in mvneta_remove()
5398 clk_disable_unprepare(pp->clk); in mvneta_remove()
5399 free_percpu(pp->ports); in mvneta_remove()
5400 free_percpu(pp->stats); in mvneta_remove()
5402 phylink_destroy(pp->phylink); in mvneta_remove()
5404 if (pp->bm_priv) { in mvneta_remove()
5405 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_remove()
5406 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_remove()
5407 1 << pp->id); in mvneta_remove()
5408 mvneta_bm_put(pp->bm_priv); in mvneta_remove()
5419 struct mvneta_port *pp = netdev_priv(dev); in mvneta_suspend() local
5424 if (!pp->neta_armada3700) { in mvneta_suspend()
5425 spin_lock(&pp->lock); in mvneta_suspend()
5426 pp->is_stopped = true; in mvneta_suspend()
5427 spin_unlock(&pp->lock); in mvneta_suspend()
5430 &pp->node_online); in mvneta_suspend()
5432 &pp->node_dead); in mvneta_suspend()
5436 mvneta_stop_dev(pp); in mvneta_suspend()
5440 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_suspend()
5442 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_suspend()
5446 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_suspend()
5448 mvneta_txq_hw_deinit(pp, txq); in mvneta_suspend()
5453 clk_disable_unprepare(pp->clk_bus); in mvneta_suspend()
5454 clk_disable_unprepare(pp->clk); in mvneta_suspend()
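With the interface running, mvneta_suspend() follows a strict order: mark the port stopped under pp->lock (and, on multi-queue SoCs, unwind the CPU-hotplug state callbacks), stop the datapath, drop whatever the RX queues still hold, deinit the TX queues at the hardware level, and only then gate the clocks. A condensed sketch of that branch; the device-detach and not-running shortcut are elided:

	/* Sketch of the netif_running() branch of mvneta_suspend(). */
	spin_lock(&pp->lock);
	pp->is_stopped = true;
	spin_unlock(&pp->lock);

	mvneta_stop_dev(pp);

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_drop_pkts(pp, &pp->rxqs[queue]);
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_hw_deinit(pp, &pp->txqs[queue]);

	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);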
5463 struct mvneta_port *pp = netdev_priv(dev); in mvneta_resume() local
5466 clk_prepare_enable(pp->clk); in mvneta_resume()
5467 if (!IS_ERR(pp->clk_bus)) in mvneta_resume()
5468 clk_prepare_enable(pp->clk_bus); in mvneta_resume()
5469 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_resume()
5470 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_resume()
5471 if (pp->bm_priv) { in mvneta_resume()
5472 err = mvneta_bm_port_init(pdev, pp); in mvneta_resume()
5475 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_resume()
5476 pp->bm_priv = NULL; in mvneta_resume()
5479 mvneta_defaults_set(pp); in mvneta_resume()
5480 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_resume()
5492 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_resume()
5495 mvneta_rxq_hw_init(pp, rxq); in mvneta_resume()
5499 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_resume()
5502 mvneta_txq_hw_init(pp, txq); in mvneta_resume()
5505 if (!pp->neta_armada3700) { in mvneta_resume()
5506 spin_lock(&pp->lock); in mvneta_resume()
5507 pp->is_stopped = false; in mvneta_resume()
5508 spin_unlock(&pp->lock); in mvneta_resume()
5510 &pp->node_online); in mvneta_resume()
5512 &pp->node_dead); in mvneta_resume()
5516 mvneta_start_dev(pp); in mvneta_resume()
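mvneta_resume() is the mirror image, and the fragment order shows why it matters: clocks and mbus windows must be back before the first mvreg_write(), the BM pools before the RX queues that reference them, and the port defaults and power-up before any queue is re-initialized; only then is the port marked runnable and the datapath restarted. A condensed sketch, with error handling and descriptor-index resets elided:

	/* Sketch of the netif_running() branch of mvneta_resume(). */
	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_hw_init(pp, &pp->rxqs[queue]);
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_hw_init(pp, &pp->txqs[queue]);

	spin_lock(&pp->lock);
	pp->is_stopped = false;
	spin_unlock(&pp->lock);
	mvneta_start_dev(pp);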