Lines Matching refs:priv
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) in stmmac_bus_clks_config() argument
154 ret = clk_prepare_enable(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
157 ret = clk_prepare_enable(priv->plat->pclk); in stmmac_bus_clks_config()
159 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
162 if (priv->plat->clks_config) { in stmmac_bus_clks_config()
163 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
165 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
166 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
171 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
172 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
173 if (priv->plat->clks_config) in stmmac_bus_clks_config()
174 priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
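
The stmmac_bus_clks_config() fragments above show the usual clk API discipline: enable the clocks in a fixed order and, on any failure, unwind exactly the clocks that already came up (the last lines are the disable path, which also forwards the request to the optional platform clks_config hook). A minimal sketch of the enable-or-unwind half, assuming a hypothetical two-clock device; clk_prepare_enable()/clk_disable_unprepare() are the standard common-clock calls:

#include <linux/clk.h>

/* Hypothetical device context; stmmac keeps its clocks in priv->plat. */
struct my_priv {
        struct clk *axi_clk;    /* bus clock, enabled first */
        struct clk *apb_clk;    /* register clock, enabled second */
};

static int my_bus_clks_enable(struct my_priv *priv)
{
        int ret;

        ret = clk_prepare_enable(priv->axi_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(priv->apb_clk);
        if (ret) {
                /* Unwind only what succeeded, in reverse order. */
                clk_disable_unprepare(priv->axi_clk);
                return ret;
        }

        return 0;
}
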
202 static void __stmmac_disable_all_queues(struct stmmac_priv *priv) in __stmmac_disable_all_queues() argument
204 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in __stmmac_disable_all_queues()
205 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in __stmmac_disable_all_queues()
210 struct stmmac_channel *ch = &priv->channel[queue]; in __stmmac_disable_all_queues()
212 if (stmmac_xdp_is_enabled(priv) && in __stmmac_disable_all_queues()
213 test_bit(queue, priv->af_xdp_zc_qps)) { in __stmmac_disable_all_queues()
229 static void stmmac_disable_all_queues(struct stmmac_priv *priv) in stmmac_disable_all_queues() argument
231 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_disable_all_queues()
237 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_disable_all_queues()
244 __stmmac_disable_all_queues(priv); in stmmac_disable_all_queues()
251 static void stmmac_enable_all_queues(struct stmmac_priv *priv) in stmmac_enable_all_queues() argument
253 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_enable_all_queues()
254 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_all_queues()
259 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_all_queues()
261 if (stmmac_xdp_is_enabled(priv) && in stmmac_enable_all_queues()
262 test_bit(queue, priv->af_xdp_zc_qps)) { in stmmac_enable_all_queues()
274 static void stmmac_service_event_schedule(struct stmmac_priv *priv) in stmmac_service_event_schedule() argument
276 if (!test_bit(STMMAC_DOWN, &priv->state) && in stmmac_service_event_schedule()
277 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) in stmmac_service_event_schedule()
278 queue_work(priv->wq, &priv->service_task); in stmmac_service_event_schedule()
281 static void stmmac_global_err(struct stmmac_priv *priv) in stmmac_global_err() argument
283 netif_carrier_off(priv->dev); in stmmac_global_err()
284 set_bit(STMMAC_RESET_REQUESTED, &priv->state); in stmmac_global_err()
285 stmmac_service_event_schedule(priv); in stmmac_global_err()
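
stmmac_service_event_schedule() is an at-most-once scheduling idiom: test_and_set_bit() is atomic, so when several error paths race, only the caller that flips the SERVICE_SCHED bit actually queues the work, and nothing is queued while the DOWN bit is set. stmmac_global_err() builds on it: drop the carrier, record that a reset is wanted, then schedule the service task to do the heavy lifting in process context. A sketch of the same pattern with hypothetical bit names; the bitops, workqueue, and carrier calls are the real kernel primitives:

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

#define MY_DOWN   0     /* interface going down, don't schedule */
#define MY_SCHED  1     /* service work already queued */
#define MY_RESET  2     /* a reset has been requested */

struct my_priv {
        struct net_device *ndev;
        unsigned long state;
        struct workqueue_struct *wq;
        struct work_struct service_task;
};

static void my_service_schedule(struct my_priv *priv)
{
        /* Only the caller that atomically flips MY_SCHED 0->1 queues. */
        if (!test_bit(MY_DOWN, &priv->state) &&
            !test_and_set_bit(MY_SCHED, &priv->state))
                queue_work(priv->wq, &priv->service_task);
}

static void my_global_err(struct my_priv *priv)
{
        netif_carrier_off(priv->ndev);          /* stop the stack first */
        set_bit(MY_RESET, &priv->state);        /* tell the task why */
        my_service_schedule(priv);              /* recover in process ctx */
}
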
300 static void stmmac_clk_csr_set(struct stmmac_priv *priv) in stmmac_clk_csr_set() argument
304 clk_rate = clk_get_rate(priv->plat->stmmac_clk); in stmmac_clk_csr_set()
313 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { in stmmac_clk_csr_set()
315 priv->clk_csr = STMMAC_CSR_20_35M; in stmmac_clk_csr_set()
317 priv->clk_csr = STMMAC_CSR_35_60M; in stmmac_clk_csr_set()
319 priv->clk_csr = STMMAC_CSR_60_100M; in stmmac_clk_csr_set()
321 priv->clk_csr = STMMAC_CSR_100_150M; in stmmac_clk_csr_set()
323 priv->clk_csr = STMMAC_CSR_150_250M; in stmmac_clk_csr_set()
325 priv->clk_csr = STMMAC_CSR_250_300M; in stmmac_clk_csr_set()
328 if (priv->plat->has_sun8i) { in stmmac_clk_csr_set()
330 priv->clk_csr = 0x03; in stmmac_clk_csr_set()
332 priv->clk_csr = 0x02; in stmmac_clk_csr_set()
334 priv->clk_csr = 0x01; in stmmac_clk_csr_set()
336 priv->clk_csr = 0; in stmmac_clk_csr_set()
339 if (priv->plat->has_xgmac) { in stmmac_clk_csr_set()
341 priv->clk_csr = 0x5; in stmmac_clk_csr_set()
343 priv->clk_csr = 0x4; in stmmac_clk_csr_set()
345 priv->clk_csr = 0x3; in stmmac_clk_csr_set()
347 priv->clk_csr = 0x2; in stmmac_clk_csr_set()
349 priv->clk_csr = 0x1; in stmmac_clk_csr_set()
351 priv->clk_csr = 0x0; in stmmac_clk_csr_set()
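
stmmac_clk_csr_set() is a straight range-to-divisor mapping: the CSR clock rate selects the MDC clock-select value, with the band edges implied by the STMMAC_CSR_20_35M through STMMAC_CSR_250_300M names (the sun8i and XGMAC branches below use their own tables). A standalone, runnable rendering of that mapping, with the band edges taken from the constant names rather than the driver's headers:

#include <stdio.h>

enum csr_sel { CSR_20_35M, CSR_35_60M, CSR_60_100M,
               CSR_100_150M, CSR_150_250M, CSR_250_300M };

static enum csr_sel clk_to_csr(unsigned long hz)
{
        if (hz < 35000000UL)
                return CSR_20_35M;
        else if (hz < 60000000UL)
                return CSR_35_60M;
        else if (hz < 100000000UL)
                return CSR_60_100M;
        else if (hz < 150000000UL)
                return CSR_100_150M;
        else if (hz < 250000000UL)
                return CSR_150_250M;
        return CSR_250_300M;
}

int main(void)
{
        printf("%d\n", clk_to_csr(125000000UL)); /* CSR_100_150M (3) */
        return 0;
}
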
361 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) in stmmac_tx_avail() argument
363 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_avail()
369 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; in stmmac_tx_avail()
379 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) in stmmac_rx_dirty() argument
381 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_dirty()
387 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; in stmmac_rx_dirty()
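
stmmac_tx_avail() and stmmac_rx_dirty() are plain circular-buffer arithmetic over cur/dirty indices. Note the listing only shows each function's wrap-around branch, presumably because the non-wrapped branch doesn't mention priv and so didn't match the grep. A standalone worked example of both cases, assuming a 512-entry ring:

#include <stdio.h>

/* One slot stays unused so "cur == dirty" unambiguously means empty. */
static unsigned int tx_avail(unsigned int size, unsigned int cur,
                             unsigned int dirty)
{
        return dirty > cur ? dirty - cur - 1 : size - cur + dirty - 1;
}

static unsigned int rx_dirty(unsigned int size, unsigned int cur,
                             unsigned int dirty)
{
        return dirty <= cur ? cur - dirty : size - dirty + cur;
}

int main(void)
{
        printf("%u\n", tx_avail(512, 10, 4));  /* 505 free TX slots */
        printf("%u\n", rx_dirty(512, 4, 500)); /* 16 RX slots to refill */
        return 0;
}
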
392 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) in stmmac_lpi_entry_timer_config() argument
397 priv->eee_sw_timer_en = en ? 0 : 1; in stmmac_lpi_entry_timer_config()
398 tx_lpi_timer = en ? priv->tx_lpi_timer : 0; in stmmac_lpi_entry_timer_config()
399 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); in stmmac_lpi_entry_timer_config()
408 static int stmmac_enable_eee_mode(struct stmmac_priv *priv) in stmmac_enable_eee_mode() argument
410 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_eee_mode()
415 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_eee_mode()
422 if (!priv->tx_path_in_lpi_mode) in stmmac_enable_eee_mode()
423 stmmac_set_eee_mode(priv, priv->hw, in stmmac_enable_eee_mode()
424 priv->plat->en_tx_lpi_clockgating); in stmmac_enable_eee_mode()
434 void stmmac_disable_eee_mode(struct stmmac_priv *priv) in stmmac_disable_eee_mode() argument
436 if (!priv->eee_sw_timer_en) { in stmmac_disable_eee_mode()
437 stmmac_lpi_entry_timer_config(priv, 0); in stmmac_disable_eee_mode()
441 stmmac_reset_eee_mode(priv, priv->hw); in stmmac_disable_eee_mode()
442 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_disable_eee_mode()
443 priv->tx_path_in_lpi_mode = false; in stmmac_disable_eee_mode()
455 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); in stmmac_eee_ctrl_timer() local
457 if (stmmac_enable_eee_mode(priv)) in stmmac_eee_ctrl_timer()
458 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_eee_ctrl_timer()
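
stmmac_eee_ctrl_timer() is the standard self-re-arming timer_list shape: timer_setup() registers the callback (visible later in the stmmac_eee_init() fragments), from_timer() recovers the enclosing private structure, and the callback re-arms itself with mod_timer() while LPI entry keeps being deferred (stmmac_enable_eee_mode() returns nonzero when TX queues still have work queued). A sketch with hypothetical names around the real timer API:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
        struct timer_list lpi_timer;
        unsigned int lpi_timeout_ms;
};

static int my_try_enter_lpi(struct my_priv *priv); /* hypothetical */

static void my_lpi_timer_cb(struct timer_list *t)
{
        /* from_timer() maps the timer_list back to its container. */
        struct my_priv *priv = from_timer(priv, t, lpi_timer);

        if (my_try_enter_lpi(priv))     /* busy: retry one period later */
                mod_timer(&priv->lpi_timer,
                          jiffies + msecs_to_jiffies(priv->lpi_timeout_ms));
}

static void my_lpi_timer_init(struct my_priv *priv)
{
        timer_setup(&priv->lpi_timer, my_lpi_timer_cb, 0);
}
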
469 bool stmmac_eee_init(struct stmmac_priv *priv) in stmmac_eee_init() argument
471 int eee_tw_timer = priv->eee_tw_timer; in stmmac_eee_init()
476 if (priv->hw->pcs == STMMAC_PCS_TBI || in stmmac_eee_init()
477 priv->hw->pcs == STMMAC_PCS_RTBI) in stmmac_eee_init()
481 if (!priv->dma_cap.eee) in stmmac_eee_init()
484 mutex_lock(&priv->lock); in stmmac_eee_init()
487 if (!priv->eee_active) { in stmmac_eee_init()
488 if (priv->eee_enabled) { in stmmac_eee_init()
489 netdev_dbg(priv->dev, "disable EEE\n"); in stmmac_eee_init()
490 stmmac_lpi_entry_timer_config(priv, 0); in stmmac_eee_init()
491 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
492 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); in stmmac_eee_init()
493 if (priv->hw->xpcs) in stmmac_eee_init()
494 xpcs_config_eee(priv->hw->xpcs, in stmmac_eee_init()
495 priv->plat->mult_fact_100ns, in stmmac_eee_init()
498 mutex_unlock(&priv->lock); in stmmac_eee_init()
502 if (priv->eee_active && !priv->eee_enabled) { in stmmac_eee_init()
503 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); in stmmac_eee_init()
504 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, in stmmac_eee_init()
506 if (priv->hw->xpcs) in stmmac_eee_init()
507 xpcs_config_eee(priv->hw->xpcs, in stmmac_eee_init()
508 priv->plat->mult_fact_100ns, in stmmac_eee_init()
512 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { in stmmac_eee_init()
513 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
514 priv->tx_path_in_lpi_mode = false; in stmmac_eee_init()
515 stmmac_lpi_entry_timer_config(priv, 1); in stmmac_eee_init()
517 stmmac_lpi_entry_timer_config(priv, 0); in stmmac_eee_init()
518 mod_timer(&priv->eee_ctrl_timer, in stmmac_eee_init()
519 STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_eee_init()
522 mutex_unlock(&priv->lock); in stmmac_eee_init()
523 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); in stmmac_eee_init()
535 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, in stmmac_get_tx_hwtstamp() argument
542 if (!priv->hwts_tx_en) in stmmac_get_tx_hwtstamp()
550 if (stmmac_get_tx_timestamp_status(priv, p)) { in stmmac_get_tx_hwtstamp()
551 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); in stmmac_get_tx_hwtstamp()
553 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_get_tx_hwtstamp()
558 ns -= priv->plat->cdc_error_adj; in stmmac_get_tx_hwtstamp()
563 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); in stmmac_get_tx_hwtstamp()
578 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, in stmmac_get_rx_hwtstamp() argument
585 if (!priv->hwts_rx_en) in stmmac_get_rx_hwtstamp()
588 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_get_rx_hwtstamp()
592 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { in stmmac_get_rx_hwtstamp()
593 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_get_rx_hwtstamp()
595 ns -= priv->plat->cdc_error_adj; in stmmac_get_rx_hwtstamp()
597 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); in stmmac_get_rx_hwtstamp()
602 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); in stmmac_get_rx_hwtstamp()
619 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_hwtstamp_set() local
630 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { in stmmac_hwtstamp_set()
631 netdev_alert(priv->dev, "No support for HW time stamping\n"); in stmmac_hwtstamp_set()
632 priv->hwts_tx_en = 0; in stmmac_hwtstamp_set()
633 priv->hwts_rx_en = 0; in stmmac_hwtstamp_set()
642 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", in stmmac_hwtstamp_set()
649 if (priv->adv_ts) { in stmmac_hwtstamp_set()
730 if (priv->synopsys_id < DWMAC_CORE_4_10) in stmmac_hwtstamp_set()
783 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); in stmmac_hwtstamp_set()
784 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; in stmmac_hwtstamp_set()
786 priv->systime_flags = STMMAC_HWTS_ACTIVE; in stmmac_hwtstamp_set()
788 if (priv->hwts_tx_en || priv->hwts_rx_en) { in stmmac_hwtstamp_set()
789 priv->systime_flags |= tstamp_all | ptp_v2 | in stmmac_hwtstamp_set()
795 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); in stmmac_hwtstamp_set()
797 memcpy(&priv->tstamp_config, &config, sizeof(config)); in stmmac_hwtstamp_set()
814 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_hwtstamp_get() local
815 struct hwtstamp_config *config = &priv->tstamp_config; in stmmac_hwtstamp_get()
817 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_hwtstamp_get()
834 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) in stmmac_init_tstamp_counter() argument
836 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_tstamp_counter()
841 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_init_tstamp_counter()
844 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); in stmmac_init_tstamp_counter()
845 priv->systime_flags = systime_flags; in stmmac_init_tstamp_counter()
848 stmmac_config_sub_second_increment(priv, priv->ptpaddr, in stmmac_init_tstamp_counter()
849 priv->plat->clk_ptp_rate, in stmmac_init_tstamp_counter()
854 priv->sub_second_inc = sec_inc; in stmmac_init_tstamp_counter()
862 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); in stmmac_init_tstamp_counter()
863 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); in stmmac_init_tstamp_counter()
869 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); in stmmac_init_tstamp_counter()
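
The arithmetic behind stmmac_init_tstamp_counter() is worth spelling out: the hardware adds default_addend to a 32-bit accumulator on every clk_ptp_rate cycle and advances system time by sub_second_inc nanoseconds on each overflow, so addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate; the PTP servo later nudges this addend by small amounts to trim frequency. A standalone check with assumed example rates (a 20 ns increment against a 100 MHz reference):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t sec_inc = 20;              /* assumed: 20 ns per tick */
        uint64_t clk_ptp_rate = 100000000;  /* assumed: 100 MHz reference */

        uint64_t target_hz = 1000000000ULL / sec_inc;      /* 50 MHz */
        uint64_t addend = (target_hz << 32) / clk_ptp_rate;

        /* addend / 2^32 = 0.5: the accumulator overflows every second
         * cycle, so a 100 MHz reference yields 20 ns timestamp ticks. */
        printf("addend = 0x%08llx\n", (unsigned long long)addend);
        return 0;
}
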
882 static int stmmac_init_ptp(struct stmmac_priv *priv) in stmmac_init_ptp() argument
884 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_ptp()
887 if (priv->plat->ptp_clk_freq_config) in stmmac_init_ptp()
888 priv->plat->ptp_clk_freq_config(priv); in stmmac_init_ptp()
890 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); in stmmac_init_ptp()
894 priv->adv_ts = 0; in stmmac_init_ptp()
896 if (xmac && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
897 priv->adv_ts = 1; in stmmac_init_ptp()
899 else if (priv->extend_desc && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
900 priv->adv_ts = 1; in stmmac_init_ptp()
902 if (priv->dma_cap.time_stamp) in stmmac_init_ptp()
903 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); in stmmac_init_ptp()
905 if (priv->adv_ts) in stmmac_init_ptp()
906 netdev_info(priv->dev, in stmmac_init_ptp()
909 priv->hwts_tx_en = 0; in stmmac_init_ptp()
910 priv->hwts_rx_en = 0; in stmmac_init_ptp()
915 static void stmmac_release_ptp(struct stmmac_priv *priv) in stmmac_release_ptp() argument
917 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_release_ptp()
918 stmmac_ptp_unregister(priv); in stmmac_release_ptp()
927 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) in stmmac_mac_flow_ctrl() argument
929 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_mac_flow_ctrl()
931 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, in stmmac_mac_flow_ctrl()
932 priv->pause, tx_cnt); in stmmac_mac_flow_ctrl()
938 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_select_pcs() local
940 if (!priv->hw->xpcs) in stmmac_mac_select_pcs()
943 return &priv->hw->xpcs->pcs; in stmmac_mac_select_pcs()
952 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) in stmmac_fpe_link_state_handle() argument
954 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; in stmmac_fpe_link_state_handle()
960 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY); in stmmac_fpe_link_state_handle()
970 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_down() local
972 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_mac_link_down()
973 priv->eee_active = false; in stmmac_mac_link_down()
974 priv->tx_lpi_enabled = false; in stmmac_mac_link_down()
975 priv->eee_enabled = stmmac_eee_init(priv); in stmmac_mac_link_down()
976 stmmac_set_eee_pls(priv, priv->hw, false); in stmmac_mac_link_down()
978 if (priv->dma_cap.fpesel) in stmmac_mac_link_down()
979 stmmac_fpe_link_state_handle(priv, false); in stmmac_mac_link_down()
988 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_up() local
991 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
992 ctrl = old_ctrl & ~priv->hw->link.speed_mask; in stmmac_mac_link_up()
997 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1000 ctrl |= priv->hw->link.xgmii.speed5000; in stmmac_mac_link_up()
1003 ctrl |= priv->hw->link.xgmii.speed2500; in stmmac_mac_link_up()
1011 ctrl |= priv->hw->link.xlgmii.speed100000; in stmmac_mac_link_up()
1014 ctrl |= priv->hw->link.xlgmii.speed50000; in stmmac_mac_link_up()
1017 ctrl |= priv->hw->link.xlgmii.speed40000; in stmmac_mac_link_up()
1020 ctrl |= priv->hw->link.xlgmii.speed25000; in stmmac_mac_link_up()
1023 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1026 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1029 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1037 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1040 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1043 ctrl |= priv->hw->link.speed100; in stmmac_mac_link_up()
1046 ctrl |= priv->hw->link.speed10; in stmmac_mac_link_up()
1053 priv->speed = speed; in stmmac_mac_link_up()
1055 if (priv->plat->fix_mac_speed) in stmmac_mac_link_up()
1056 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); in stmmac_mac_link_up()
1059 ctrl &= ~priv->hw->link.duplex; in stmmac_mac_link_up()
1061 ctrl |= priv->hw->link.duplex; in stmmac_mac_link_up()
1065 priv->flow_ctrl = FLOW_AUTO; in stmmac_mac_link_up()
1067 priv->flow_ctrl = FLOW_RX; in stmmac_mac_link_up()
1069 priv->flow_ctrl = FLOW_TX; in stmmac_mac_link_up()
1071 priv->flow_ctrl = FLOW_OFF; in stmmac_mac_link_up()
1073 stmmac_mac_flow_ctrl(priv, duplex); in stmmac_mac_link_up()
1076 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1078 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_mac_link_up()
1079 if (phy && priv->dma_cap.eee) { in stmmac_mac_link_up()
1080 priv->eee_active = phy_init_eee(phy, 1) >= 0; in stmmac_mac_link_up()
1081 priv->eee_enabled = stmmac_eee_init(priv); in stmmac_mac_link_up()
1082 priv->tx_lpi_enabled = priv->eee_enabled; in stmmac_mac_link_up()
1083 stmmac_set_eee_pls(priv, priv->hw, true); in stmmac_mac_link_up()
1086 if (priv->dma_cap.fpesel) in stmmac_mac_link_up()
1087 stmmac_fpe_link_state_handle(priv, true); in stmmac_mac_link_up()
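
stmmac_mac_link_up() is essentially one long read-modify-write of the MAC control register: read it, mask out the old speed field via hw->link.speed_mask, OR in the bit for the negotiated speed (the GMII/XGMII/XLGMII switch ladders above), fix the duplex bit, and write it back before re-enabling the MAC. The shape reduced to a sketch with an illustrative register layout; readl()/writel() are the real MMIO accessors:

#include <linux/io.h>
#include <linux/types.h>

#define MY_MAC_CTRL_REG 0x00    /* illustrative register offset */

static void my_mac_set_speed_duplex(void __iomem *ioaddr, u32 speed_mask,
                                    u32 speed_bits, u32 duplex_bit,
                                    bool full_duplex)
{
        u32 ctrl = readl(ioaddr + MY_MAC_CTRL_REG);

        ctrl &= ~speed_mask;    /* drop whatever speed was set before */
        ctrl |= speed_bits;     /* install the newly negotiated speed */

        if (full_duplex)
                ctrl |= duplex_bit;
        else
                ctrl &= ~duplex_bit;

        writel(ctrl, ioaddr + MY_MAC_CTRL_REG);
}
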
1105 static void stmmac_check_pcs_mode(struct stmmac_priv *priv) in stmmac_check_pcs_mode() argument
1107 int interface = priv->plat->interface; in stmmac_check_pcs_mode()
1109 if (priv->dma_cap.pcs) { in stmmac_check_pcs_mode()
1114 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); in stmmac_check_pcs_mode()
1115 priv->hw->pcs = STMMAC_PCS_RGMII; in stmmac_check_pcs_mode()
1117 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); in stmmac_check_pcs_mode()
1118 priv->hw->pcs = STMMAC_PCS_SGMII; in stmmac_check_pcs_mode()
1133 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_init_phy() local
1137 fwnode = of_fwnode_handle(priv->plat->phylink_node); in stmmac_init_phy()
1139 fwnode = dev_fwnode(priv->device); in stmmac_init_phy()
1142 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); in stmmac_init_phy()
1148 int addr = priv->plat->phy_addr; in stmmac_init_phy()
1151 phydev = mdiobus_get_phy(priv->mii, addr); in stmmac_init_phy()
1153 netdev_err(priv->dev, "no phy at addr %d\n", addr); in stmmac_init_phy()
1157 ret = phylink_connect_phy(priv->phylink, phydev); in stmmac_init_phy()
1160 if (!priv->plat->pmt) { in stmmac_init_phy()
1163 phylink_ethtool_get_wol(priv->phylink, &wol); in stmmac_init_phy()
1164 device_set_wakeup_capable(priv->device, !!wol.supported); in stmmac_init_phy()
1170 static int stmmac_phy_setup(struct stmmac_priv *priv) in stmmac_phy_setup() argument
1172 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; in stmmac_phy_setup()
1173 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); in stmmac_phy_setup()
1174 int max_speed = priv->plat->max_speed; in stmmac_phy_setup()
1175 int mode = priv->plat->phy_interface; in stmmac_phy_setup()
1178 priv->phylink_config.dev = &priv->dev->dev; in stmmac_phy_setup()
1179 priv->phylink_config.type = PHYLINK_NETDEV; in stmmac_phy_setup()
1180 if (priv->plat->mdio_bus_data) in stmmac_phy_setup()
1181 priv->phylink_config.ovr_an_inband = in stmmac_phy_setup()
1185 fwnode = dev_fwnode(priv->device); in stmmac_phy_setup()
1188 __set_bit(mode, priv->phylink_config.supported_interfaces); in stmmac_phy_setup()
1191 if (priv->hw->xpcs) in stmmac_phy_setup()
1192 xpcs_get_interfaces(priv->hw->xpcs, in stmmac_phy_setup()
1193 priv->phylink_config.supported_interfaces); in stmmac_phy_setup()
1195 priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | in stmmac_phy_setup()
1199 priv->phylink_config.mac_capabilities |= MAC_1000; in stmmac_phy_setup()
1201 if (priv->plat->has_gmac4) { in stmmac_phy_setup()
1203 priv->phylink_config.mac_capabilities |= MAC_2500FD; in stmmac_phy_setup()
1204 } else if (priv->plat->has_xgmac) { in stmmac_phy_setup()
1206 priv->phylink_config.mac_capabilities |= MAC_2500FD; in stmmac_phy_setup()
1208 priv->phylink_config.mac_capabilities |= MAC_5000FD; in stmmac_phy_setup()
1210 priv->phylink_config.mac_capabilities |= MAC_10000FD; in stmmac_phy_setup()
1212 priv->phylink_config.mac_capabilities |= MAC_25000FD; in stmmac_phy_setup()
1214 priv->phylink_config.mac_capabilities |= MAC_40000FD; in stmmac_phy_setup()
1216 priv->phylink_config.mac_capabilities |= MAC_50000FD; in stmmac_phy_setup()
1218 priv->phylink_config.mac_capabilities |= MAC_100000FD; in stmmac_phy_setup()
1222 if (priv->plat->tx_queues_to_use > 1) in stmmac_phy_setup()
1223 priv->phylink_config.mac_capabilities &= in stmmac_phy_setup()
1225 priv->phylink_config.mac_managed_pm = true; in stmmac_phy_setup()
1227 phylink = phylink_create(&priv->phylink_config, fwnode, in stmmac_phy_setup()
1232 priv->phylink = phylink; in stmmac_phy_setup()
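
stmmac_phy_setup() builds a phylink_config (device pointer, PHYLINK_NETDEV type, MAC capability flags, the supported-interfaces bitmap) and hands it to phylink_create(). A minimal sketch of that registration, assuming a hypothetical my_mac_ops that supplies the mac_config/mac_link_up/mac_link_down callbacks elsewhere; phylink_create() and the MAC_* capability flags are the real phylink API:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phylink.h>

struct my_priv {
        struct net_device *ndev;
        struct phylink_config phylink_config;
        struct phylink *phylink;
};

/* Defined elsewhere in this hypothetical driver. */
extern const struct phylink_mac_ops my_mac_ops;

static int my_phylink_setup(struct my_priv *priv,
                            struct fwnode_handle *fwnode,
                            phy_interface_t mode)
{
        struct phylink *pl;

        priv->phylink_config.dev = &priv->ndev->dev;
        priv->phylink_config.type = PHYLINK_NETDEV;
        priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
                        MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000;
        __set_bit(mode, priv->phylink_config.supported_interfaces);

        pl = phylink_create(&priv->phylink_config, fwnode, mode,
                            &my_mac_ops);
        if (IS_ERR(pl))
                return PTR_ERR(pl);

        priv->phylink = pl;
        return 0;
}
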
1236 static void stmmac_display_rx_rings(struct stmmac_priv *priv, in stmmac_display_rx_rings() argument
1239 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_display_rx_rings()
1250 if (priv->extend_desc) { in stmmac_display_rx_rings()
1259 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, in stmmac_display_rx_rings()
1264 static void stmmac_display_tx_rings(struct stmmac_priv *priv, in stmmac_display_tx_rings() argument
1267 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_display_tx_rings()
1278 if (priv->extend_desc) { in stmmac_display_tx_rings()
1289 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, in stmmac_display_tx_rings()
1294 static void stmmac_display_rings(struct stmmac_priv *priv, in stmmac_display_rings() argument
1298 stmmac_display_rx_rings(priv, dma_conf); in stmmac_display_rings()
1301 stmmac_display_tx_rings(priv, dma_conf); in stmmac_display_rings()
1330 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, in stmmac_clear_rx_descriptors() argument
1339 if (priv->extend_desc) in stmmac_clear_rx_descriptors()
1340 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, in stmmac_clear_rx_descriptors()
1341 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1345 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], in stmmac_clear_rx_descriptors()
1346 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1359 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, in stmmac_clear_tx_descriptors() argument
1371 if (priv->extend_desc) in stmmac_clear_tx_descriptors()
1378 stmmac_init_tx_desc(priv, p, priv->mode, last); in stmmac_clear_tx_descriptors()
1389 static void stmmac_clear_descriptors(struct stmmac_priv *priv, in stmmac_clear_descriptors() argument
1392 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; in stmmac_clear_descriptors()
1393 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_clear_descriptors()
1398 stmmac_clear_rx_descriptors(priv, dma_conf, queue); in stmmac_clear_descriptors()
1402 stmmac_clear_tx_descriptors(priv, dma_conf, queue); in stmmac_clear_descriptors()
1416 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, in stmmac_init_rx_buffers() argument
1425 if (priv->dma_cap.addr64 <= 32) in stmmac_init_rx_buffers()
1432 buf->page_offset = stmmac_rx_offset(priv); in stmmac_init_rx_buffers()
1435 if (priv->sph && !buf->sec_page) { in stmmac_init_rx_buffers()
1441 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_init_rx_buffers()
1444 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_init_rx_buffers()
1449 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_init_rx_buffers()
1451 stmmac_init_desc3(priv, p); in stmmac_init_rx_buffers()
1462 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, in stmmac_free_rx_buffer() argument
1484 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, in stmmac_free_tx_buffer() argument
1493 dma_unmap_page(priv->device, in stmmac_free_tx_buffer()
1498 dma_unmap_single(priv->device, in stmmac_free_tx_buffer()
1530 static void dma_free_rx_skbufs(struct stmmac_priv *priv, in dma_free_rx_skbufs() argument
1538 stmmac_free_rx_buffer(priv, rx_q, i); in dma_free_rx_skbufs()
1541 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, in stmmac_alloc_rx_buffers() argument
1552 if (priv->extend_desc) in stmmac_alloc_rx_buffers()
1557 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, in stmmac_alloc_rx_buffers()
1574 static void dma_free_rx_xskbufs(struct stmmac_priv *priv, in dma_free_rx_xskbufs() argument
1592 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, in stmmac_alloc_rx_buffers_zc() argument
1604 if (priv->extend_desc) in stmmac_alloc_rx_buffers_zc()
1616 stmmac_set_desc_addr(priv, p, dma_addr); in stmmac_alloc_rx_buffers_zc()
1623 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) in stmmac_get_xsk_pool() argument
1625 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) in stmmac_get_xsk_pool()
1628 return xsk_get_pool_from_qid(priv->dev, queue); in stmmac_get_xsk_pool()
1641 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, in __init_dma_rx_desc_rings() argument
1648 netif_dbg(priv, probe, priv->dev, in __init_dma_rx_desc_rings()
1652 stmmac_clear_rx_descriptors(priv, dma_conf, queue); in __init_dma_rx_desc_rings()
1656 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1662 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1670 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1679 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); in __init_dma_rx_desc_rings()
1681 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); in __init_dma_rx_desc_rings()
1687 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_rx_desc_rings()
1688 if (priv->extend_desc) in __init_dma_rx_desc_rings()
1689 stmmac_mode_init(priv, rx_q->dma_erx, in __init_dma_rx_desc_rings()
1693 stmmac_mode_init(priv, rx_q->dma_rx, in __init_dma_rx_desc_rings()
1705 struct stmmac_priv *priv = netdev_priv(dev); in init_dma_rx_desc_rings() local
1706 u32 rx_count = priv->plat->rx_queues_to_use; in init_dma_rx_desc_rings()
1711 netif_dbg(priv, probe, priv->dev, in init_dma_rx_desc_rings()
1715 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); in init_dma_rx_desc_rings()
1727 dma_free_rx_xskbufs(priv, dma_conf, queue); in init_dma_rx_desc_rings()
1729 dma_free_rx_skbufs(priv, dma_conf, queue); in init_dma_rx_desc_rings()
1749 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, in __init_dma_tx_desc_rings() argument
1756 netif_dbg(priv, probe, priv->dev, in __init_dma_tx_desc_rings()
1761 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_tx_desc_rings()
1762 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1763 stmmac_mode_init(priv, tx_q->dma_etx, in __init_dma_tx_desc_rings()
1767 stmmac_mode_init(priv, tx_q->dma_tx, in __init_dma_tx_desc_rings()
1772 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_tx_desc_rings()
1777 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1784 stmmac_clear_desc(priv, p); in __init_dma_tx_desc_rings()
1799 struct stmmac_priv *priv = netdev_priv(dev); in init_dma_tx_desc_rings() local
1803 tx_queue_cnt = priv->plat->tx_queues_to_use; in init_dma_tx_desc_rings()
1806 __init_dma_tx_desc_rings(priv, dma_conf, queue); in init_dma_tx_desc_rings()
1824 struct stmmac_priv *priv = netdev_priv(dev); in init_dma_desc_rings() local
1833 stmmac_clear_descriptors(priv, dma_conf); in init_dma_desc_rings()
1835 if (netif_msg_hw(priv)) in init_dma_desc_rings()
1836 stmmac_display_rings(priv, dma_conf); in init_dma_desc_rings()
1847 static void dma_free_tx_skbufs(struct stmmac_priv *priv, in dma_free_tx_skbufs() argument
1857 stmmac_free_tx_buffer(priv, dma_conf, queue, i); in dma_free_tx_skbufs()
1870 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) in stmmac_free_tx_skbufs() argument
1872 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_free_tx_skbufs()
1876 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); in stmmac_free_tx_skbufs()
1885 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, in __free_dma_rx_desc_resources() argument
1893 dma_free_rx_xskbufs(priv, dma_conf, queue); in __free_dma_rx_desc_resources()
1895 dma_free_rx_skbufs(priv, dma_conf, queue); in __free_dma_rx_desc_resources()
1901 if (!priv->extend_desc) in __free_dma_rx_desc_resources()
1902 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1906 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1918 static void free_dma_rx_desc_resources(struct stmmac_priv *priv, in free_dma_rx_desc_resources() argument
1921 u32 rx_count = priv->plat->rx_queues_to_use; in free_dma_rx_desc_resources()
1926 __free_dma_rx_desc_resources(priv, dma_conf, queue); in free_dma_rx_desc_resources()
1935 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, in __free_dma_tx_desc_resources() argument
1944 dma_free_tx_skbufs(priv, dma_conf, queue); in __free_dma_tx_desc_resources()
1946 if (priv->extend_desc) { in __free_dma_tx_desc_resources()
1959 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); in __free_dma_tx_desc_resources()
1965 static void free_dma_tx_desc_resources(struct stmmac_priv *priv, in free_dma_tx_desc_resources() argument
1968 u32 tx_count = priv->plat->tx_queues_to_use; in free_dma_tx_desc_resources()
1973 __free_dma_tx_desc_resources(priv, dma_conf, queue); in free_dma_tx_desc_resources()
1986 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, in __alloc_dma_rx_desc_resources() argument
1991 struct stmmac_channel *ch = &priv->channel[queue]; in __alloc_dma_rx_desc_resources()
1992 bool xdp_prog = stmmac_xdp_is_enabled(priv); in __alloc_dma_rx_desc_resources()
1999 rx_q->priv_data = priv; in __alloc_dma_rx_desc_resources()
2005 pp_params.nid = dev_to_node(priv->device); in __alloc_dma_rx_desc_resources()
2006 pp_params.dev = priv->device; in __alloc_dma_rx_desc_resources()
2008 pp_params.offset = stmmac_rx_offset(priv); in __alloc_dma_rx_desc_resources()
2024 if (priv->extend_desc) { in __alloc_dma_rx_desc_resources()
2025 rx_q->dma_erx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2034 rx_q->dma_rx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2043 if (stmmac_xdp_is_enabled(priv) && in __alloc_dma_rx_desc_resources()
2044 test_bit(queue, priv->af_xdp_zc_qps)) in __alloc_dma_rx_desc_resources()
2049 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, in __alloc_dma_rx_desc_resources()
2053 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); in __alloc_dma_rx_desc_resources()
2060 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, in alloc_dma_rx_desc_resources() argument
2063 u32 rx_count = priv->plat->rx_queues_to_use; in alloc_dma_rx_desc_resources()
2069 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); in alloc_dma_rx_desc_resources()
2077 free_dma_rx_desc_resources(priv, dma_conf); in alloc_dma_rx_desc_resources()
2092 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, in __alloc_dma_tx_desc_resources() argument
2101 tx_q->priv_data = priv; in __alloc_dma_tx_desc_resources()
2115 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2124 addr = dma_alloc_coherent(priv->device, size, in __alloc_dma_tx_desc_resources()
2129 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2139 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, in alloc_dma_tx_desc_resources() argument
2142 u32 tx_count = priv->plat->tx_queues_to_use; in alloc_dma_tx_desc_resources()
2148 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); in alloc_dma_tx_desc_resources()
2156 free_dma_tx_desc_resources(priv, dma_conf); in alloc_dma_tx_desc_resources()
2169 static int alloc_dma_desc_resources(struct stmmac_priv *priv, in alloc_dma_desc_resources() argument
2173 int ret = alloc_dma_rx_desc_resources(priv, dma_conf); in alloc_dma_desc_resources()
2178 ret = alloc_dma_tx_desc_resources(priv, dma_conf); in alloc_dma_desc_resources()
2188 static void free_dma_desc_resources(struct stmmac_priv *priv, in free_dma_desc_resources() argument
2192 free_dma_tx_desc_resources(priv, dma_conf); in free_dma_desc_resources()
2197 free_dma_rx_desc_resources(priv, dma_conf); in free_dma_desc_resources()
2205 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) in stmmac_mac_enable_rx_queues() argument
2207 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_enable_rx_queues()
2212 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; in stmmac_mac_enable_rx_queues()
2213 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); in stmmac_mac_enable_rx_queues()
2224 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) in stmmac_start_rx_dma() argument
2226 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); in stmmac_start_rx_dma()
2227 stmmac_start_rx(priv, priv->ioaddr, chan); in stmmac_start_rx_dma()
2237 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) in stmmac_start_tx_dma() argument
2239 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); in stmmac_start_tx_dma()
2240 stmmac_start_tx(priv, priv->ioaddr, chan); in stmmac_start_tx_dma()
2250 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) in stmmac_stop_rx_dma() argument
2252 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); in stmmac_stop_rx_dma()
2253 stmmac_stop_rx(priv, priv->ioaddr, chan); in stmmac_stop_rx_dma()
2263 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) in stmmac_stop_tx_dma() argument
2265 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); in stmmac_stop_tx_dma()
2266 stmmac_stop_tx(priv, priv->ioaddr, chan); in stmmac_stop_tx_dma()
2269 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) in stmmac_enable_all_dma_irq() argument
2271 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_enable_all_dma_irq()
2272 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_enable_all_dma_irq()
2277 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_enable_all_dma_irq()
2281 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_enable_all_dma_irq()
2292 static void stmmac_start_all_dma(struct stmmac_priv *priv) in stmmac_start_all_dma() argument
2294 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_start_all_dma()
2295 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_start_all_dma()
2299 stmmac_start_rx_dma(priv, chan); in stmmac_start_all_dma()
2302 stmmac_start_tx_dma(priv, chan); in stmmac_start_all_dma()
2311 static void stmmac_stop_all_dma(struct stmmac_priv *priv) in stmmac_stop_all_dma() argument
2313 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_stop_all_dma()
2314 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_stop_all_dma()
2318 stmmac_stop_rx_dma(priv, chan); in stmmac_stop_all_dma()
2321 stmmac_stop_tx_dma(priv, chan); in stmmac_stop_all_dma()
2330 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) in stmmac_dma_operation_mode() argument
2332 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_dma_operation_mode()
2333 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_dma_operation_mode()
2334 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_dma_operation_mode()
2335 int txfifosz = priv->plat->tx_fifo_size; in stmmac_dma_operation_mode()
2342 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_dma_operation_mode()
2344 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_dma_operation_mode()
2350 if (priv->plat->force_thresh_dma_mode) { in stmmac_dma_operation_mode()
2353 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { in stmmac_dma_operation_mode()
2363 priv->xstats.threshold = SF_DMA_MODE; in stmmac_dma_operation_mode()
2371 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_dma_operation_mode()
2374 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2376 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, in stmmac_dma_operation_mode()
2381 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2385 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2386 priv->dma_conf.dma_buf_sz, in stmmac_dma_operation_mode()
2392 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2394 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, in stmmac_dma_operation_mode()
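
The top of stmmac_dma_operation_mode() resolves one global policy before the per-channel loops: forced threshold mode wins outright; otherwise store-and-forward is chosen whenever the platform forces it or TX checksum offload is enabled (checksum insertion needs the whole frame buffered in the FIFO); otherwise TX runs at the threshold tc with RX in store-and-forward. A sketch of just that decision, reconstructed from the branches visible above (SF_DMA_MODE stands in for the driver's own constant):

#define SF_DMA_MODE 1   /* placeholder; the driver defines its own */

static void my_pick_dma_modes(int force_thresh, int force_sf, int tx_coe,
                              int tc, int *txmode, int *rxmode)
{
        if (force_thresh) {
                *txmode = tc;           /* cut-through above tc bytes */
                *rxmode = tc;
        } else if (force_sf || tx_coe) {
                /* COE must see the complete frame in the FIFO. */
                *txmode = SF_DMA_MODE;
                *rxmode = SF_DMA_MODE;
        } else {
                *txmode = tc;
                *rxmode = SF_DMA_MODE;
        }
}
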
2399 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) in stmmac_xdp_xmit_zc() argument
2401 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_zc()
2402 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_zc()
2412 budget = min(budget, stmmac_tx_avail(priv, queue)); in stmmac_xdp_xmit_zc()
2421 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || in stmmac_xdp_xmit_zc()
2422 !netif_carrier_ok(priv->dev)) { in stmmac_xdp_xmit_zc()
2430 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_zc()
2454 stmmac_set_desc_addr(priv, tx_desc, dma_addr); in stmmac_xdp_xmit_zc()
2458 if (!priv->tx_coal_frames[queue]) in stmmac_xdp_xmit_zc()
2460 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_zc()
2467 stmmac_set_tx_ic(priv, tx_desc); in stmmac_xdp_xmit_zc()
2468 priv->xstats.tx_set_ic_bit++; in stmmac_xdp_xmit_zc()
2471 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, in stmmac_xdp_xmit_zc()
2472 true, priv->mode, true, true, in stmmac_xdp_xmit_zc()
2475 stmmac_enable_dma_transmission(priv, priv->ioaddr); in stmmac_xdp_xmit_zc()
2477 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_zc()
2482 stmmac_flush_tx_descriptors(priv, queue); in stmmac_xdp_xmit_zc()
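
stmmac_xdp_xmit_zc() is a bounded producer loop over the AF_XDP pool: cap the budget by free hardware descriptors, peek a user-space descriptor, translate its address to DMA and sync it for the device, fill and publish a hardware descriptor, then release completed slots back to user space. The skeleton, with the hardware-descriptor fill stubbed out as hypothetical; the xsk_* helpers are the real AF_XDP driver-side API:

#include <net/xdp_sock_drv.h>

static void my_fill_hw_desc(dma_addr_t dma, u32 len); /* hypothetical */

static int my_xsk_xmit(struct xsk_buff_pool *pool, unsigned int budget)
{
        struct xdp_desc desc;
        int sent = 0;

        while (budget-- > 0 && xsk_tx_peek_desc(pool, &desc)) {
                dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

                /* Make the payload written by user space visible to the
                 * device before the descriptor is armed. */
                xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

                my_fill_hw_desc(dma, desc.len);
                sent++;
        }

        if (sent)
                xsk_tx_release(pool);   /* return completed TX slots */

        return sent;
}
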
2494 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) in stmmac_bump_dma_threshold() argument
2496 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { in stmmac_bump_dma_threshold()
2499 if (priv->plat->force_thresh_dma_mode) in stmmac_bump_dma_threshold()
2500 stmmac_set_dma_operation_mode(priv, tc, tc, chan); in stmmac_bump_dma_threshold()
2502 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, in stmmac_bump_dma_threshold()
2505 priv->xstats.threshold = tc; in stmmac_bump_dma_threshold()
2516 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) in stmmac_tx_clean() argument
2518 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_clean()
2522 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2524 priv->xstats.tx_clean++; in stmmac_tx_clean()
2531 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { in stmmac_tx_clean()
2549 if (priv->extend_desc) in stmmac_tx_clean()
2556 status = stmmac_tx_status(priv, &priv->dev->stats, in stmmac_tx_clean()
2557 &priv->xstats, p, priv->ioaddr); in stmmac_tx_clean()
2573 priv->dev->stats.tx_errors++; in stmmac_tx_clean()
2575 stmmac_bump_dma_threshold(priv, queue); in stmmac_tx_clean()
2577 priv->dev->stats.tx_packets++; in stmmac_tx_clean()
2578 priv->xstats.tx_pkt_n++; in stmmac_tx_clean()
2579 priv->xstats.txq_stats[queue].tx_pkt_n++; in stmmac_tx_clean()
2582 stmmac_get_tx_hwtstamp(priv, p, skb); in stmmac_tx_clean()
2588 dma_unmap_page(priv->device, in stmmac_tx_clean()
2593 dma_unmap_single(priv->device, in stmmac_tx_clean()
2602 stmmac_clean_desc3(priv, tx_q, p); in stmmac_tx_clean()
2631 stmmac_release_tx_desc(priv, p, priv->mode); in stmmac_tx_clean()
2633 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_tx_clean()
2637 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), in stmmac_tx_clean()
2640 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, in stmmac_tx_clean()
2642 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { in stmmac_tx_clean()
2644 netif_dbg(priv, tx_done, priv->dev, in stmmac_tx_clean()
2646 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2663 work_done = stmmac_xdp_xmit_zc(priv, queue, in stmmac_tx_clean()
2671 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && in stmmac_tx_clean()
2672 priv->eee_sw_timer_en) { in stmmac_tx_clean()
2673 if (stmmac_enable_eee_mode(priv)) in stmmac_tx_clean()
2674 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_tx_clean()
2680 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), in stmmac_tx_clean()
2683 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
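
Beyond reclaiming descriptors, stmmac_tx_clean() does the completion-side bookkeeping the stack relies on: report reclaimed packets and bytes to BQL via netdev_tx_completed_queue(), and wake a stopped queue only once a comfortable margin of descriptors is free again. That tail of the function condenses to the following sketch; the threshold and avail helper are placeholders, the netdev_* and netif_* calls are the real API:

#include <linux/netdevice.h>

#define MY_TX_WAKE_THRESH 64    /* illustrative free-slot threshold */

static unsigned int my_tx_avail(struct net_device *dev, u32 queue);

static void my_tx_complete(struct net_device *dev, u32 queue,
                           unsigned int pkts, unsigned int bytes)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

        /* BQL: tell the stack how much in-flight data completed so it
         * can size the byte budget for this queue. */
        netdev_tx_completed_queue(txq, pkts, bytes);

        /* Wake only with headroom, to avoid stop/start thrashing. */
        if (unlikely(netif_tx_queue_stopped(txq)) &&
            my_tx_avail(dev, queue) > MY_TX_WAKE_THRESH)
                netif_tx_wake_queue(txq);
}
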
2696 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) in stmmac_tx_err() argument
2698 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_tx_err()
2700 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2702 stmmac_stop_tx_dma(priv, chan); in stmmac_tx_err()
2703 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2704 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2705 stmmac_reset_tx_queue(priv, chan); in stmmac_tx_err()
2706 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_tx_err()
2708 stmmac_start_tx_dma(priv, chan); in stmmac_tx_err()
2710 priv->dev->stats.tx_errors++; in stmmac_tx_err()
2711 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2724 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, in stmmac_set_dma_operation_mode() argument
2727 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2728 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2729 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_dma_operation_mode()
2730 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_dma_operation_mode()
2731 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_set_dma_operation_mode()
2732 int txfifosz = priv->plat->tx_fifo_size; in stmmac_set_dma_operation_mode()
2735 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_set_dma_operation_mode()
2737 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_set_dma_operation_mode()
2743 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); in stmmac_set_dma_operation_mode()
2744 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); in stmmac_set_dma_operation_mode()
2747 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) in stmmac_safety_feat_interrupt() argument
2751 ret = stmmac_safety_feat_irq_status(priv, priv->dev, in stmmac_safety_feat_interrupt()
2752 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); in stmmac_safety_feat_interrupt()
2754 stmmac_global_err(priv); in stmmac_safety_feat_interrupt()
2761 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) in stmmac_napi_check() argument
2763 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, in stmmac_napi_check()
2764 &priv->xstats, chan, dir); in stmmac_napi_check()
2765 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_napi_check()
2766 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_napi_check()
2767 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_napi_check()
2775 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { in stmmac_napi_check()
2778 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_check()
2784 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { in stmmac_napi_check()
2787 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_check()
2803 static void stmmac_dma_interrupt(struct stmmac_priv *priv) in stmmac_dma_interrupt() argument
2805 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_dma_interrupt()
2806 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_dma_interrupt()
2817 status[chan] = stmmac_napi_check(priv, chan, in stmmac_dma_interrupt()
2823 stmmac_bump_dma_threshold(priv, chan); in stmmac_dma_interrupt()
2825 stmmac_tx_err(priv, chan); in stmmac_dma_interrupt()
2835 static void stmmac_mmc_setup(struct stmmac_priv *priv) in stmmac_mmc_setup() argument
2840 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); in stmmac_mmc_setup()
2842 if (priv->dma_cap.rmon) { in stmmac_mmc_setup()
2843 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); in stmmac_mmc_setup()
2844 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); in stmmac_mmc_setup()
2846 netdev_info(priv->dev, "No MAC Management Counters available\n"); in stmmac_mmc_setup()
2858 static int stmmac_get_hw_features(struct stmmac_priv *priv) in stmmac_get_hw_features() argument
2860 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; in stmmac_get_hw_features()
2870 static void stmmac_check_ether_addr(struct stmmac_priv *priv) in stmmac_check_ether_addr() argument
2874 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in stmmac_check_ether_addr()
2875 stmmac_get_umac_addr(priv, priv->hw, addr, 0); in stmmac_check_ether_addr()
2877 eth_hw_addr_set(priv->dev, addr); in stmmac_check_ether_addr()
2879 eth_hw_addr_random(priv->dev); in stmmac_check_ether_addr()
2880 dev_info(priv->device, "device MAC address %pM\n", in stmmac_check_ether_addr()
2881 priv->dev->dev_addr); in stmmac_check_ether_addr()
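
stmmac_check_ether_addr() is a three-step fallback chain: keep a valid address that platform/firmware already set, otherwise adopt the one already programmed into the MAC, otherwise generate a random locally-administered address. A sketch of the chain with the hardware read stubbed out as hypothetical; the etherdevice helpers are the real API:

#include <linux/etherdevice.h>

static void my_read_hw_addr(struct net_device *dev, u8 *addr); /* hypothetical */

static void my_check_ether_addr(struct net_device *dev)
{
        u8 addr[ETH_ALEN];

        if (is_valid_ether_addr(dev->dev_addr))
                return;                 /* platform/firmware set one */

        my_read_hw_addr(dev, addr);     /* e.g. what the bootloader left */
        if (is_valid_ether_addr(addr))
                eth_hw_addr_set(dev, addr);
        else
                eth_hw_addr_random(dev); /* random locally-administered */
}
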
2893 static int stmmac_init_dma_engine(struct stmmac_priv *priv) in stmmac_init_dma_engine() argument
2895 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_init_dma_engine()
2896 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_init_dma_engine()
2904 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { in stmmac_init_dma_engine()
2905 dev_err(priv->device, "Invalid DMA configuration\n"); in stmmac_init_dma_engine()
2909 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) in stmmac_init_dma_engine()
2912 ret = stmmac_reset(priv, priv->ioaddr); in stmmac_init_dma_engine()
2914 dev_err(priv->device, "Failed to reset the dma\n"); in stmmac_init_dma_engine()
2919 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); in stmmac_init_dma_engine()
2921 if (priv->plat->axi) in stmmac_init_dma_engine()
2922 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); in stmmac_init_dma_engine()
2926 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_init_dma_engine()
2927 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_init_dma_engine()
2932 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_init_dma_engine()
2934 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
2940 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
2946 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_dma_engine()
2948 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
2952 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
2959 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) in stmmac_tx_timer_arm() argument
2961 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_timer_arm()
2964 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), in stmmac_tx_timer_arm()
2977 struct stmmac_priv *priv = tx_q->priv_data; in stmmac_tx_timer() local
2981 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer()
2988 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); in stmmac_tx_timer()
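
stmmac_tx_timer_arm() and stmmac_tx_timer() implement TX interrupt coalescing with a one-shot hrtimer: the transmit path arms a relative timer of tx_coal_timer microseconds (STMMAC_COAL_TIMER converts to ktime), and when it fires the handler disables the TX DMA interrupt and schedules NAPI so cleanup runs even when no coalesced interrupt arrived. A sketch of the timer plumbing with hypothetical names; hrtimer_init()/hrtimer_start() are the real API:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_txq {
        struct hrtimer txtimer;
};

static void my_schedule_tx_clean(struct my_txq *txq); /* hypothetical */

static enum hrtimer_restart my_tx_timer(struct hrtimer *t)
{
        struct my_txq *txq = container_of(t, struct my_txq, txtimer);

        my_schedule_tx_clean(txq);      /* e.g. kick NAPI for cleanup */
        return HRTIMER_NORESTART;       /* one-shot: re-armed on xmit */
}

static void my_tx_timer_init(struct my_txq *txq)
{
        hrtimer_init(&txq->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        txq->txtimer.function = my_tx_timer;
}

static void my_tx_timer_arm(struct my_txq *txq, unsigned int coal_us)
{
        hrtimer_start(&txq->txtimer,
                      ns_to_ktime((u64)coal_us * NSEC_PER_USEC),
                      HRTIMER_MODE_REL);
}
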
3004 static void stmmac_init_coalesce(struct stmmac_priv *priv) in stmmac_init_coalesce() argument
3006 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_init_coalesce()
3007 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_init_coalesce()
3011 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_coalesce()
3013 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; in stmmac_init_coalesce()
3014 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; in stmmac_init_coalesce()
3021 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; in stmmac_init_coalesce()
3024 static void stmmac_set_rings_length(struct stmmac_priv *priv) in stmmac_set_rings_length() argument
3026 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_rings_length()
3027 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_rings_length()
3032 stmmac_set_tx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3033 (priv->dma_conf.dma_tx_size - 1), chan); in stmmac_set_rings_length()
3037 stmmac_set_rx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3038 (priv->dma_conf.dma_rx_size - 1), chan); in stmmac_set_rings_length()
3046 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) in stmmac_set_tx_queue_weight() argument
3048 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_set_tx_queue_weight()
3053 weight = priv->plat->tx_queues_cfg[queue].weight; in stmmac_set_tx_queue_weight()
3054 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); in stmmac_set_tx_queue_weight()
3063 static void stmmac_configure_cbs(struct stmmac_priv *priv) in stmmac_configure_cbs() argument
3065 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_configure_cbs()
3071 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; in stmmac_configure_cbs()
3075 stmmac_config_cbs(priv, priv->hw, in stmmac_configure_cbs()
3076 priv->plat->tx_queues_cfg[queue].send_slope, in stmmac_configure_cbs()
3077 priv->plat->tx_queues_cfg[queue].idle_slope, in stmmac_configure_cbs()
3078 priv->plat->tx_queues_cfg[queue].high_credit, in stmmac_configure_cbs()
3079 priv->plat->tx_queues_cfg[queue].low_credit, in stmmac_configure_cbs()
3089 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) in stmmac_rx_queue_dma_chan_map() argument
3091 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_rx_queue_dma_chan_map()
3096 chan = priv->plat->rx_queues_cfg[queue].chan; in stmmac_rx_queue_dma_chan_map()
3097 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); in stmmac_rx_queue_dma_chan_map()
3106 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) in stmmac_mac_config_rx_queues_prio() argument
3108 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_prio()
3113 if (!priv->plat->rx_queues_cfg[queue].use_prio) in stmmac_mac_config_rx_queues_prio()
3116 prio = priv->plat->rx_queues_cfg[queue].prio; in stmmac_mac_config_rx_queues_prio()
3117 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_rx_queues_prio()
3126 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) in stmmac_mac_config_tx_queues_prio() argument
3128 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mac_config_tx_queues_prio()
3133 if (!priv->plat->tx_queues_cfg[queue].use_prio) in stmmac_mac_config_tx_queues_prio()
3136 prio = priv->plat->tx_queues_cfg[queue].prio; in stmmac_mac_config_tx_queues_prio()
3137 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_tx_queues_prio()
3146 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) in stmmac_mac_config_rx_queues_routing() argument
3148 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_routing()
3154 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) in stmmac_mac_config_rx_queues_routing()
3157 packet = priv->plat->rx_queues_cfg[queue].pkt_route; in stmmac_mac_config_rx_queues_routing()
3158 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); in stmmac_mac_config_rx_queues_routing()
3162 static void stmmac_mac_config_rss(struct stmmac_priv *priv) in stmmac_mac_config_rss() argument
3164 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { in stmmac_mac_config_rss()
3165 priv->rss.enable = false; in stmmac_mac_config_rss()
3169 if (priv->dev->features & NETIF_F_RXHASH) in stmmac_mac_config_rss()
3170 priv->rss.enable = true; in stmmac_mac_config_rss()
3172 priv->rss.enable = false; in stmmac_mac_config_rss()
3174 stmmac_rss_configure(priv, priv->hw, &priv->rss, in stmmac_mac_config_rss()
3175 priv->plat->rx_queues_to_use); in stmmac_mac_config_rss()
3183 static void stmmac_mtl_configuration(struct stmmac_priv *priv) in stmmac_mtl_configuration() argument
3185 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mtl_configuration()
3186 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mtl_configuration()
3189 stmmac_set_tx_queue_weight(priv); in stmmac_mtl_configuration()
3193 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3194 priv->plat->rx_sched_algorithm); in stmmac_mtl_configuration()
3198 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3199 priv->plat->tx_sched_algorithm); in stmmac_mtl_configuration()
3203 stmmac_configure_cbs(priv); in stmmac_mtl_configuration()
3206 stmmac_rx_queue_dma_chan_map(priv); in stmmac_mtl_configuration()
3209 stmmac_mac_enable_rx_queues(priv); in stmmac_mtl_configuration()
3213 stmmac_mac_config_rx_queues_prio(priv); in stmmac_mtl_configuration()
3217 stmmac_mac_config_tx_queues_prio(priv); in stmmac_mtl_configuration()
3221 stmmac_mac_config_rx_queues_routing(priv); in stmmac_mtl_configuration()
3225 stmmac_mac_config_rss(priv); in stmmac_mtl_configuration()
3228 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) in stmmac_safety_feat_configuration() argument
3230 if (priv->dma_cap.asp) { in stmmac_safety_feat_configuration()
3231 netdev_info(priv->dev, "Enabling Safety Features\n"); in stmmac_safety_feat_configuration()
3232 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, in stmmac_safety_feat_configuration()
3233 priv->plat->safety_feat_cfg); in stmmac_safety_feat_configuration()
3235 netdev_info(priv->dev, "No Safety Features support found\n"); in stmmac_safety_feat_configuration()
3239 static int stmmac_fpe_start_wq(struct stmmac_priv *priv) in stmmac_fpe_start_wq() argument
3243 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); in stmmac_fpe_start_wq()
3244 clear_bit(__FPE_REMOVING, &priv->fpe_task_state); in stmmac_fpe_start_wq()
3246 name = priv->wq_name; in stmmac_fpe_start_wq()
3247 sprintf(name, "%s-fpe", priv->dev->name); in stmmac_fpe_start_wq()
3249 priv->fpe_wq = create_singlethread_workqueue(name); in stmmac_fpe_start_wq()
3250 if (!priv->fpe_wq) { in stmmac_fpe_start_wq()
3251 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); in stmmac_fpe_start_wq()
3255 netdev_info(priv->dev, "FPE workqueue start"); in stmmac_fpe_start_wq()
3275 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_hw_setup() local
3276 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_hw_setup()
3277 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_hw_setup()
3283 ret = stmmac_init_dma_engine(priv); in stmmac_hw_setup()
3285 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", in stmmac_hw_setup()
3291 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); in stmmac_hw_setup()
3294 if (priv->hw->pcs) { in stmmac_hw_setup()
3295 int speed = priv->plat->mac_port_sel_speed; in stmmac_hw_setup()
3299 priv->hw->ps = speed; in stmmac_hw_setup()
3301 dev_warn(priv->device, "invalid port speed\n"); in stmmac_hw_setup()
3302 priv->hw->ps = 0; in stmmac_hw_setup()
3307 stmmac_core_init(priv, priv->hw, dev); in stmmac_hw_setup()
3310 stmmac_mtl_configuration(priv); in stmmac_hw_setup()
3313 stmmac_safety_feat_configuration(priv); in stmmac_hw_setup()
3315 ret = stmmac_rx_ipc(priv, priv->hw); in stmmac_hw_setup()
3317 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); in stmmac_hw_setup()
3318 priv->plat->rx_coe = STMMAC_RX_COE_NONE; in stmmac_hw_setup()
3319 priv->hw->rx_csum = 0; in stmmac_hw_setup()
3323 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_hw_setup()
3326 stmmac_dma_operation_mode(priv); in stmmac_hw_setup()
3328 stmmac_mmc_setup(priv); in stmmac_hw_setup()
3331 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); in stmmac_hw_setup()
3333 netdev_warn(priv->dev, in stmmac_hw_setup()
3338 ret = stmmac_init_ptp(priv); in stmmac_hw_setup()
3340 netdev_info(priv->dev, "PTP not supported by HW\n"); in stmmac_hw_setup()
3342 netdev_warn(priv->dev, "PTP init failed\n"); in stmmac_hw_setup()
3344 stmmac_ptp_register(priv); in stmmac_hw_setup()
3346 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; in stmmac_hw_setup()
3349 if (!priv->tx_lpi_timer) in stmmac_hw_setup()
3350 priv->tx_lpi_timer = eee_timer * 1000; in stmmac_hw_setup()
3352 if (priv->use_riwt) { in stmmac_hw_setup()
3356 if (!priv->rx_riwt[queue]) in stmmac_hw_setup()
3357 priv->rx_riwt[queue] = DEF_DMA_RIWT; in stmmac_hw_setup()
3359 stmmac_rx_watchdog(priv, priv->ioaddr, in stmmac_hw_setup()
3360 priv->rx_riwt[queue], queue); in stmmac_hw_setup()
3364 if (priv->hw->pcs) in stmmac_hw_setup()
3365 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); in stmmac_hw_setup()
3368 stmmac_set_rings_length(priv); in stmmac_hw_setup()
3371 if (priv->tso) { in stmmac_hw_setup()
3373 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3379 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); in stmmac_hw_setup()
3384 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_hw_setup()
3386 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_hw_setup()
3390 if (priv->dma_cap.vlins) in stmmac_hw_setup()
3391 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); in stmmac_hw_setup()
3395 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3398 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); in stmmac_hw_setup()
3402 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); in stmmac_hw_setup()
3403 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); in stmmac_hw_setup()
3406 stmmac_start_all_dma(priv); in stmmac_hw_setup()
3408 if (priv->dma_cap.fpesel) { in stmmac_hw_setup()
3409 stmmac_fpe_start_wq(priv); in stmmac_hw_setup()
3411 if (priv->plat->fpe_cfg->enable) in stmmac_hw_setup()
3412 stmmac_fpe_handshake(priv, true); in stmmac_hw_setup()
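
One detail of stmmac_hw_setup() worth flagging: PTP bring-up is deliberately non-fatal. A failing reference clock or a failing stmmac_init_ptp() only warns, so the interface still comes up, just without timestamping. A hedged sketch of that degrade-rather-than-fail shape (init_ptp() is a stand-in, not the driver's function):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    static int init_ptp(void)
    {
            return -EOPNOTSUPP;     /* stand-in for stmmac_init_ptp() */
    }

    static void ptp_bringup(struct device *dev, struct clk *ptp_ref)
    {
            int ret;

            ret = clk_prepare_enable(ptp_ref);
            if (ret < 0)
                    dev_warn(dev, "failed to enable PTP ref clock: %d\n", ret);

            ret = init_ptp();
            if (ret == -EOPNOTSUPP)
                    dev_info(dev, "PTP not supported by HW\n");
            else if (ret)
                    dev_warn(dev, "PTP init failed\n");
            /* nothing is propagated: the NIC is usable without PTP */
    }
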
3420 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_hw_teardown() local
3422 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_hw_teardown()
3428 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_free_irq() local
3433 irq_idx = priv->plat->tx_queues_to_use; in stmmac_free_irq()
3437 if (priv->tx_irq[j] > 0) { in stmmac_free_irq()
3438 irq_set_affinity_hint(priv->tx_irq[j], NULL); in stmmac_free_irq()
3439 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); in stmmac_free_irq()
3442 irq_idx = priv->plat->rx_queues_to_use; in stmmac_free_irq()
3446 if (priv->rx_irq[j] > 0) { in stmmac_free_irq()
3447 irq_set_affinity_hint(priv->rx_irq[j], NULL); in stmmac_free_irq()
3448 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); in stmmac_free_irq()
3452 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) in stmmac_free_irq()
3453 free_irq(priv->sfty_ue_irq, dev); in stmmac_free_irq()
3456 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) in stmmac_free_irq()
3457 free_irq(priv->sfty_ce_irq, dev); in stmmac_free_irq()
3460 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) in stmmac_free_irq()
3461 free_irq(priv->lpi_irq, dev); in stmmac_free_irq()
3464 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) in stmmac_free_irq()
3465 free_irq(priv->wol_irq, dev); in stmmac_free_irq()
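
Two guards repeat through stmmac_free_irq(): auxiliary lines (WoL, LPI, safety) are freed only when they are valid and not multiplexed onto the main dev->irq, and per-queue vectors drop their affinity hint before free_irq(). Reduced to its skeleton:

    #include <linux/interrupt.h>

    static void free_aux_irq(int aux_irq, int main_irq, void *cookie)
    {
            /* skip unset lines and lines shared with the main IRQ */
            if (aux_irq > 0 && aux_irq != main_irq)
                    free_irq(aux_irq, cookie);
    }

    static void free_queue_irq(int irq, void *queue)
    {
            if (irq > 0) {
                    irq_set_affinity_hint(irq, NULL);  /* clear hint first */
                    free_irq(irq, queue);
            }
    }
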
3479 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_request_irq_multi_msi() local
3488 int_name = priv->int_name_mac; in stmmac_request_irq_multi_msi()
3493 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3503 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3504 int_name = priv->int_name_wol; in stmmac_request_irq_multi_msi()
3506 ret = request_irq(priv->wol_irq, in stmmac_request_irq_multi_msi()
3510 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3512 __func__, priv->wol_irq, ret); in stmmac_request_irq_multi_msi()
3521 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3522 int_name = priv->int_name_lpi; in stmmac_request_irq_multi_msi()
3524 ret = request_irq(priv->lpi_irq, in stmmac_request_irq_multi_msi()
3528 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3530 __func__, priv->lpi_irq, ret); in stmmac_request_irq_multi_msi()
3539 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3540 int_name = priv->int_name_sfty_ce; in stmmac_request_irq_multi_msi()
3542 ret = request_irq(priv->sfty_ce_irq, in stmmac_request_irq_multi_msi()
3546 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3548 __func__, priv->sfty_ce_irq, ret); in stmmac_request_irq_multi_msi()
3557 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3558 int_name = priv->int_name_sfty_ue; in stmmac_request_irq_multi_msi()
3560 ret = request_irq(priv->sfty_ue_irq, in stmmac_request_irq_multi_msi()
3564 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3566 __func__, priv->sfty_ue_irq, ret); in stmmac_request_irq_multi_msi()
3573 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3576 if (priv->rx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3579 int_name = priv->int_name_rx_irq[i]; in stmmac_request_irq_multi_msi()
3581 ret = request_irq(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3583 0, int_name, &priv->dma_conf.rx_queue[i]); in stmmac_request_irq_multi_msi()
3585 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3587 __func__, i, priv->rx_irq[i], ret); in stmmac_request_irq_multi_msi()
3594 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); in stmmac_request_irq_multi_msi()
3598 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3601 if (priv->tx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3604 int_name = priv->int_name_tx_irq[i]; in stmmac_request_irq_multi_msi()
3606 ret = request_irq(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3608 0, int_name, &priv->dma_conf.tx_queue[i]); in stmmac_request_irq_multi_msi()
3610 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3612 __func__, i, priv->tx_irq[i], ret); in stmmac_request_irq_multi_msi()
3619 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); in stmmac_request_irq_multi_msi()
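
The per-vector loops at the end of stmmac_request_irq_multi_msi() share one shape: skip unset vectors, format a per-queue name into preallocated storage (request_irq() keeps the name pointer for /proc/interrupts, so the buffer must outlive the IRQ), request the vector with the queue struct as cookie, and pin it to one CPU. An illustrative reduction with a trivial stand-in handler:

    #include <linux/kernel.h>
    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    static irqreturn_t msi_intr_rx(int irq, void *data)
    {
            return IRQ_HANDLED;     /* stand-in for stmmac_msi_intr_rx() */
    }

    static int request_rx_vectors(int *rx_irq, void **rx_queue,
                                  char (*names)[24], int nvec,
                                  const char *devname)
    {
            cpumask_t cpu_mask;
            int i, ret;

            for (i = 0; i < nvec; i++) {
                    if (rx_irq[i] == 0)
                            continue;

                    snprintf(names[i], sizeof(names[i]), "%s:rx-%d",
                             devname, i);
                    ret = request_irq(rx_irq[i], msi_intr_rx, 0,
                                      names[i], rx_queue[i]);
                    if (ret)
                            return ret;  /* caller unwinds what succeeded */

                    cpumask_clear(&cpu_mask);
                    cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
                    irq_set_affinity_hint(rx_irq[i], &cpu_mask);
            }
            return 0;
    }
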
3631 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_request_irq_single() local
3638 netdev_err(priv->dev, in stmmac_request_irq_single()
3648 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_single()
3649 ret = request_irq(priv->wol_irq, stmmac_interrupt, in stmmac_request_irq_single()
3652 netdev_err(priv->dev, in stmmac_request_irq_single()
3654 __func__, priv->wol_irq, ret); in stmmac_request_irq_single()
3661 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_single()
3662 ret = request_irq(priv->lpi_irq, stmmac_interrupt, in stmmac_request_irq_single()
3665 netdev_err(priv->dev, in stmmac_request_irq_single()
3667 __func__, priv->lpi_irq, ret); in stmmac_request_irq_single()
3682 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_request_irq() local
3686 if (priv->plat->multi_msi_en) in stmmac_request_irq()
3704 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) in stmmac_setup_dma_desc() argument
3711 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", in stmmac_setup_dma_desc()
3716 bfsize = stmmac_set_16kib_bfsize(priv, mtu); in stmmac_setup_dma_desc()
3727 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; in stmmac_setup_dma_desc()
3728 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; in stmmac_setup_dma_desc()
3736 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { in stmmac_setup_dma_desc()
3738 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; in stmmac_setup_dma_desc()
3744 ret = alloc_dma_desc_resources(priv, dma_conf); in stmmac_setup_dma_desc()
3746 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", in stmmac_setup_dma_desc()
3751 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); in stmmac_setup_dma_desc()
3753 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", in stmmac_setup_dma_desc()
3761 free_dma_desc_resources(priv, dma_conf); in stmmac_setup_dma_desc()
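
stmmac_setup_dma_desc() builds a complete candidate dma_conf before the live one is touched: allocate the struct, copy sizing from the current configuration, allocate descriptor resources, initialize the rings, and on any failure unwind in reverse and return an error pointer. The pattern in miniature (struct conf and the ring helpers are illustrative stand-ins):

    #include <linux/slab.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    struct conf { unsigned int dma_tx_size; /* ... */ };

    static int alloc_rings(struct conf *c)    { return 0; }  /* stand-ins */
    static int init_rings(struct conf *c)     { return 0; }
    static void release_rings(struct conf *c) { }

    static struct conf *setup_candidate(void)
    {
            struct conf *c;
            int ret;

            c = kzalloc(sizeof(*c), GFP_KERNEL);
            if (!c)
                    return ERR_PTR(-ENOMEM);

            ret = alloc_rings(c);
            if (ret)
                    goto err_free_conf;

            ret = init_rings(c);
            if (ret)
                    goto err_free_rings;

            return c;    /* caller swaps it in, then releases the old one */

    err_free_rings:
            release_rings(c);
    err_free_conf:
            kfree(c);
            return ERR_PTR(ret);
    }

stmmac_change_mtu() further down leans on exactly this: the new MTU is committed only once the candidate configuration allocated cleanly.
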
3780 struct stmmac_priv *priv = netdev_priv(dev); in __stmmac_open() local
3781 int mode = priv->plat->phy_interface; in __stmmac_open()
3785 ret = pm_runtime_resume_and_get(priv->device); in __stmmac_open()
3789 if (priv->hw->pcs != STMMAC_PCS_TBI && in __stmmac_open()
3790 priv->hw->pcs != STMMAC_PCS_RTBI && in __stmmac_open()
3791 (!priv->hw->xpcs || in __stmmac_open()
3792 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { in __stmmac_open()
3795 netdev_err(priv->dev, in __stmmac_open()
3803 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); in __stmmac_open()
3804 priv->xstats.threshold = tc; in __stmmac_open()
3806 priv->rx_copybreak = STMMAC_RX_COPYBREAK; in __stmmac_open()
3809 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); in __stmmac_open()
3811 stmmac_reset_queues_param(priv); in __stmmac_open()
3813 if (priv->plat->serdes_powerup) { in __stmmac_open()
3814 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); in __stmmac_open()
3816 netdev_err(priv->dev, "%s: Serdes powerup failed\n", in __stmmac_open()
3824 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); in __stmmac_open()
3828 stmmac_init_coalesce(priv); in __stmmac_open()
3830 phylink_start(priv->phylink); in __stmmac_open()
3832 phylink_speed_up(priv->phylink); in __stmmac_open()
3838 stmmac_enable_all_queues(priv); in __stmmac_open()
3839 netif_tx_start_all_queues(priv->dev); in __stmmac_open()
3840 stmmac_enable_all_dma_irq(priv); in __stmmac_open()
3845 phylink_stop(priv->phylink); in __stmmac_open()
3847 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in __stmmac_open()
3848 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in __stmmac_open()
3852 free_dma_desc_resources(priv, &priv->dma_conf); in __stmmac_open()
3853 phylink_disconnect_phy(priv->phylink); in __stmmac_open()
3855 pm_runtime_put(priv->device); in __stmmac_open()
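
The tail of __stmmac_open() is a textbook reverse-order unwind: phylink is stopped, the per-queue TX hrtimers are cancelled, descriptor memory is freed, the PHY is disconnected, and the runtime-PM reference taken at the top of the function is dropped last. The bracketing, reduced to stand-ins:

    #include <linux/pm_runtime.h>

    static int start_hw(void)   { return 0; }   /* stand-ins */
    static void stop_hw(void)   { }
    static int start_link(void) { return 0; }

    static int open_sketch(struct device *dev)
    {
            int ret;

            ret = pm_runtime_resume_and_get(dev);  /* first acquire */
            if (ret < 0)
                    return ret;

            ret = start_hw();
            if (ret)
                    goto err_pm;

            ret = start_link();
            if (ret)
                    goto err_hw;

            return 0;

    err_hw:
            stop_hw();
    err_pm:
            pm_runtime_put(dev);                   /* last release */
            return ret;
    }
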
3861 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_open() local
3865 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); in stmmac_open()
3874 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) in stmmac_fpe_stop_wq() argument
3876 set_bit(__FPE_REMOVING, &priv->fpe_task_state); in stmmac_fpe_stop_wq()
3878 if (priv->fpe_wq) in stmmac_fpe_stop_wq()
3879 destroy_workqueue(priv->fpe_wq); in stmmac_fpe_stop_wq()
3881 netdev_info(priv->dev, "FPE workqueue stop"); in stmmac_fpe_stop_wq()
3892 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_release() local
3895 if (device_may_wakeup(priv->device)) in stmmac_release()
3896 phylink_speed_down(priv->phylink, false); in stmmac_release()
3898 phylink_stop(priv->phylink); in stmmac_release()
3899 phylink_disconnect_phy(priv->phylink); in stmmac_release()
3901 stmmac_disable_all_queues(priv); in stmmac_release()
3903 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_release()
3904 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_release()
3911 if (priv->eee_enabled) { in stmmac_release()
3912 priv->tx_path_in_lpi_mode = false; in stmmac_release()
3913 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_release()
3917 stmmac_stop_all_dma(priv); in stmmac_release()
3920 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_release()
3923 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_release()
3926 if (priv->plat->serdes_powerdown) in stmmac_release()
3927 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); in stmmac_release()
3931 stmmac_release_ptp(priv); in stmmac_release()
3933 pm_runtime_put(priv->device); in stmmac_release()
3935 if (priv->dma_cap.fpesel) in stmmac_release()
3936 stmmac_fpe_stop_wq(priv); in stmmac_release()
3941 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, in stmmac_vlan_insert() argument
3948 if (!priv->dma_cap.vlins) in stmmac_vlan_insert()
3964 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) in stmmac_vlan_insert()
3967 stmmac_set_tx_owner(priv, p); in stmmac_vlan_insert()
3968 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_vlan_insert()
3983 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, in stmmac_tso_allocator() argument
3986 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_allocator()
3997 priv->dma_conf.dma_tx_size); in stmmac_tso_allocator()
4006 if (priv->dma_cap.addr64 <= 32) in stmmac_tso_allocator()
4009 stmmac_set_desc_addr(priv, desc, curr_addr); in stmmac_tso_allocator()
4014 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, in stmmac_tso_allocator()
4023 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) in stmmac_flush_tx_descriptors() argument
4025 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_flush_tx_descriptors()
4028 if (likely(priv->extend_desc)) in stmmac_flush_tx_descriptors()
4042 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); in stmmac_flush_tx_descriptors()
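
stmmac_flush_tx_descriptors() is the TX doorbell: compute the tail address from the current producer index and write it so the DMA engine fetches the new descriptors. The excerpt shows only the tail write; below is a hedged sketch that also includes the write barrier such a doorbell conventionally needs (the barrier placement and the register offset are assumptions, not taken from the excerpt):

    #include <linux/kernel.h>
    #include <linux/io.h>
    #include <linux/dma-mapping.h>

    static void tx_doorbell(void __iomem *tail_reg, dma_addr_t ring_base,
                            unsigned int cur_tx, unsigned int desc_size)
    {
            dma_addr_t tail = ring_base + cur_tx * desc_size;

            /* descriptor writes must be visible before the tail update */
            dma_wmb();
            writel(lower_32_bits(tail), tail_reg);
    }
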
4075 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_tso_xmit() local
4087 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_xmit()
4100 if (unlikely(stmmac_tx_avail(priv, queue) < in stmmac_tso_xmit()
4103 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_tso_xmit()
4106 netdev_err(priv->dev, in stmmac_tso_xmit()
4124 stmmac_set_mss(priv, mss_desc, mss); in stmmac_tso_xmit()
4127 priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4131 if (netif_msg_tx_queued(priv)) { in stmmac_tso_xmit()
4139 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); in stmmac_tso_xmit()
4151 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); in stmmac_tso_xmit()
4154 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), in stmmac_tso_xmit()
4156 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4164 if (priv->dma_cap.addr64 <= 32) { in stmmac_tso_xmit()
4174 stmmac_set_desc_addr(priv, first, des); in stmmac_tso_xmit()
4180 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); in stmmac_tso_xmit()
4186 des = skb_frag_dma_map(priv->device, frag, 0, in stmmac_tso_xmit()
4189 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4192 stmmac_tso_allocator(priv, des, skb_frag_size(frag), in stmmac_tso_xmit()
4211 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_tso_xmit()
4213 else if (!priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4215 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4218 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_tso_xmit()
4230 stmmac_set_tx_ic(priv, desc); in stmmac_tso_xmit()
4231 priv->xstats.tx_set_ic_bit++; in stmmac_tso_xmit()
4239 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4241 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { in stmmac_tso_xmit()
4242 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_tso_xmit()
4244 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tso_xmit()
4248 priv->xstats.tx_tso_frames++; in stmmac_tso_xmit()
4249 priv->xstats.tx_tso_nfrags += nfrags; in stmmac_tso_xmit()
4251 if (priv->sarc_type) in stmmac_tso_xmit()
4252 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_tso_xmit()
4257 priv->hwts_tx_en)) { in stmmac_tso_xmit()
4260 stmmac_enable_tx_timestamp(priv, first); in stmmac_tso_xmit()
4264 stmmac_prepare_tso_tx_desc(priv, first, 1, in stmmac_tso_xmit()
4278 stmmac_set_tx_owner(priv, mss_desc); in stmmac_tso_xmit()
4281 if (netif_msg_pktdata(priv)) { in stmmac_tso_xmit()
4291 stmmac_flush_tx_descriptors(priv, queue); in stmmac_tso_xmit()
4292 stmmac_tx_timer_arm(priv, queue); in stmmac_tso_xmit()
4297 dev_err(priv->device, "Tx dma map failed\n"); in stmmac_tso_xmit()
4299 priv->dev->stats.tx_dropped++; in stmmac_tso_xmit()
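
The dma-map error exit of stmmac_tso_xmit() (and of stmmac_xmit() below) encodes the netdev contract for mapping failures: the skb is dropped and counted, and NETDEV_TX_OK is returned because the packet was consumed; returning NETDEV_TX_BUSY here would make the core requeue it indefinitely. Minimal sketch:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    static netdev_tx_t xmit_head(struct net_device *dev,
                                 struct device *dma_dev,
                                 struct sk_buff *skb)
    {
            dma_addr_t des;

            des = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
                                 DMA_TO_DEVICE);
            if (dma_mapping_error(dma_dev, des)) {
                    dev_kfree_skb_any(skb);       /* consumed, not requeued */
                    dev->stats.tx_dropped++;
                    return NETDEV_TX_OK;
            }

            /* ... fill descriptors, set owner, ring the doorbell ... */
            return NETDEV_TX_OK;
    }
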
4314 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_xmit() local
4327 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xmit()
4330 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) in stmmac_xmit()
4331 stmmac_disable_eee_mode(priv); in stmmac_xmit()
4334 if (skb_is_gso(skb) && priv->tso) { in stmmac_xmit()
4337 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) in stmmac_xmit()
4341 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { in stmmac_xmit()
4343 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_xmit()
4346 netdev_err(priv->dev, in stmmac_xmit()
4354 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); in stmmac_xmit()
4362 if (likely(priv->extend_desc)) in stmmac_xmit()
4372 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); in stmmac_xmit()
4374 enh_desc = priv->plat->enh_desc; in stmmac_xmit()
4377 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); in stmmac_xmit()
4380 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); in stmmac_xmit()
4390 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4393 if (likely(priv->extend_desc)) in stmmac_xmit()
4400 des = skb_frag_dma_map(priv->device, frag, 0, len, in stmmac_xmit()
4402 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4407 stmmac_set_desc_addr(priv, desc, des); in stmmac_xmit()
4415 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, in stmmac_xmit()
4416 priv->mode, 1, last_segment, skb->len); in stmmac_xmit()
4431 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_xmit()
4433 else if (!priv->tx_coal_frames[queue]) in stmmac_xmit()
4435 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_xmit()
4438 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_xmit()
4444 if (likely(priv->extend_desc)) in stmmac_xmit()
4452 stmmac_set_tx_ic(priv, desc); in stmmac_xmit()
4453 priv->xstats.tx_set_ic_bit++; in stmmac_xmit()
4461 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4464 if (netif_msg_pktdata(priv)) { in stmmac_xmit()
4465 netdev_dbg(priv->dev, in stmmac_xmit()
4470 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); in stmmac_xmit()
4474 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { in stmmac_xmit()
4475 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_xmit()
4477 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_xmit()
4482 if (priv->sarc_type) in stmmac_xmit()
4483 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_xmit()
4494 des = dma_map_single(priv->device, skb->data, in stmmac_xmit()
4496 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4503 stmmac_set_desc_addr(priv, first, des); in stmmac_xmit()
4509 priv->hwts_tx_en)) { in stmmac_xmit()
4512 stmmac_enable_tx_timestamp(priv, first); in stmmac_xmit()
4516 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, in stmmac_xmit()
4517 csum_insertion, priv->mode, 0, last_segment, in stmmac_xmit()
4525 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); in stmmac_xmit()
4528 stmmac_set_tx_owner(priv, first); in stmmac_xmit()
4532 stmmac_enable_dma_transmission(priv, priv->ioaddr); in stmmac_xmit()
4534 stmmac_flush_tx_descriptors(priv, queue); in stmmac_xmit()
4535 stmmac_tx_timer_arm(priv, queue); in stmmac_xmit()
4540 netdev_err(priv->dev, "Tx DMA map failed\n"); in stmmac_xmit()
4542 priv->dev->stats.tx_dropped++; in stmmac_xmit()
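
Both xmit paths decide per packet whether to set the IC (interrupt-on-completion) bit with the same cascade: always interrupt for hardware-timestamped packets, always when coalescing is off, and otherwise when the accumulated frame count crosses a multiple of tx_coal_frames. As a standalone predicate (parameter names are illustrative):

    #include <linux/types.h>

    static bool want_tx_completion_irq(bool hw_tstamp,
                                       unsigned int coal_frames,
                                       unsigned int tx_packets,
                                       unsigned int count_frames)
    {
            if (hw_tstamp)
                    return true;   /* timestamp needs a completion event */
            if (!coal_frames)
                    return true;   /* coalescing disabled */
            if (tx_packets > coal_frames)
                    return true;   /* burst bigger than the threshold */

            /* did this burst cross a multiple of coal_frames? */
            return (count_frames % coal_frames) < tx_packets;
    }
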
4574 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) in stmmac_rx_refill() argument
4576 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill()
4577 int dirty = stmmac_rx_dirty(priv, queue); in stmmac_rx_refill()
4581 if (priv->dma_cap.addr64 <= 32) in stmmac_rx_refill()
4589 if (priv->extend_desc) in stmmac_rx_refill()
4600 if (priv->sph && !buf->sec_page) { in stmmac_rx_refill()
4610 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_rx_refill()
4611 if (priv->sph) in stmmac_rx_refill()
4612 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_rx_refill()
4614 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_rx_refill()
4615 stmmac_refill_desc3(priv, rx_q, p); in stmmac_rx_refill()
4618 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4619 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill()
4622 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4624 if (!priv->use_riwt) in stmmac_rx_refill()
4628 stmmac_set_rx_owner(priv, p, use_rx_wd); in stmmac_rx_refill()
4630 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill()
4635 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill()
4638 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, in stmmac_rx_buf1_len() argument
4643 int coe = priv->hw->rx_csum; in stmmac_rx_buf1_len()
4646 if (priv->sph && len) in stmmac_rx_buf1_len()
4650 stmmac_get_rx_header_len(priv, p, &hlen); in stmmac_rx_buf1_len()
4651 if (priv->sph && hlen) { in stmmac_rx_buf1_len()
4652 priv->xstats.rx_split_hdr_pkt_n++; in stmmac_rx_buf1_len()
4658 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf1_len()
4660 plen = stmmac_get_rx_frame_len(priv, p, coe); in stmmac_rx_buf1_len()
4663 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); in stmmac_rx_buf1_len()
4666 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, in stmmac_rx_buf2_len() argument
4670 int coe = priv->hw->rx_csum; in stmmac_rx_buf2_len()
4674 if (!priv->sph) in stmmac_rx_buf2_len()
4679 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf2_len()
4681 plen = stmmac_get_rx_frame_len(priv, p, coe); in stmmac_rx_buf2_len()
4687 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, in stmmac_xdp_xmit_xdpf() argument
4690 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_xdpf()
4696 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) in stmmac_xdp_xmit_xdpf()
4699 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_xdpf()
4707 dma_addr = dma_map_single(priv->device, xdpf->data, in stmmac_xdp_xmit_xdpf()
4709 if (dma_mapping_error(priv->device, dma_addr)) in stmmac_xdp_xmit_xdpf()
4718 dma_sync_single_for_device(priv->device, dma_addr, in stmmac_xdp_xmit_xdpf()
4732 stmmac_set_desc_addr(priv, tx_desc, dma_addr); in stmmac_xdp_xmit_xdpf()
4734 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, in stmmac_xdp_xmit_xdpf()
4735 true, priv->mode, true, true, in stmmac_xdp_xmit_xdpf()
4740 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_xdpf()
4747 stmmac_set_tx_ic(priv, tx_desc); in stmmac_xdp_xmit_xdpf()
4748 priv->xstats.tx_set_ic_bit++; in stmmac_xdp_xmit_xdpf()
4751 stmmac_enable_dma_transmission(priv, priv->ioaddr); in stmmac_xdp_xmit_xdpf()
4753 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_xdpf()
4759 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, in stmmac_xdp_get_tx_queue() argument
4767 while (index >= priv->plat->tx_queues_to_use) in stmmac_xdp_get_tx_queue()
4768 index -= priv->plat->tx_queues_to_use; in stmmac_xdp_get_tx_queue()
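
stmmac_xdp_get_tx_queue() folds the current CPU number onto the available TX queues by repeated subtraction, which for these small operands is simply index % tx_queues_to_use without a divide on the hot path. Equivalent form:

    static int xdp_tx_queue(int cpu, int tx_queues)
    {
            int index = cpu;

            /* same result as index % tx_queues for non-negative cpu */
            while (index >= tx_queues)
                    index -= tx_queues;

            return index;
    }
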
4773 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, in stmmac_xdp_xmit_back() argument
4785 queue = stmmac_xdp_get_tx_queue(priv, cpu); in stmmac_xdp_xmit_back()
4786 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_back()
4792 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); in stmmac_xdp_xmit_back()
4794 stmmac_flush_tx_descriptors(priv, queue); in stmmac_xdp_xmit_back()
4801 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, in __stmmac_xdp_run_prog() argument
4814 res = stmmac_xdp_xmit_back(priv, xdp); in __stmmac_xdp_run_prog()
4817 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) in __stmmac_xdp_run_prog()
4823 bpf_warn_invalid_xdp_action(priv->dev, prog, act); in __stmmac_xdp_run_prog()
4826 trace_xdp_exception(priv->dev, prog, act); in __stmmac_xdp_run_prog()
4836 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, in stmmac_xdp_run_prog() argument
4842 prog = READ_ONCE(priv->xdp_prog); in stmmac_xdp_run_prog()
4848 res = __stmmac_xdp_run_prog(priv, prog, xdp); in stmmac_xdp_run_prog()
4853 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, in stmmac_finalize_xdp_rx() argument
4859 queue = stmmac_xdp_get_tx_queue(priv, cpu); in stmmac_finalize_xdp_rx()
4862 stmmac_tx_timer_arm(priv, queue); in stmmac_finalize_xdp_rx()
4889 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, in stmmac_dispatch_skb_zc() argument
4893 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_dispatch_skb_zc()
4896 int coe = priv->hw->rx_csum; in stmmac_dispatch_skb_zc()
4902 priv->dev->stats.rx_dropped++; in stmmac_dispatch_skb_zc()
4906 stmmac_get_rx_hwtstamp(priv, p, np, skb); in stmmac_dispatch_skb_zc()
4907 stmmac_rx_vlan(priv->dev, skb); in stmmac_dispatch_skb_zc()
4908 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_dispatch_skb_zc()
4915 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) in stmmac_dispatch_skb_zc()
4921 priv->dev->stats.rx_packets++; in stmmac_dispatch_skb_zc()
4922 priv->dev->stats.rx_bytes += len; in stmmac_dispatch_skb_zc()
4925 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) in stmmac_rx_refill_zc() argument
4927 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill_zc()
4932 budget = min(budget, stmmac_rx_dirty(priv, queue)); in stmmac_rx_refill_zc()
4947 if (priv->extend_desc) in stmmac_rx_refill_zc()
4953 stmmac_set_desc_addr(priv, rx_desc, dma_addr); in stmmac_rx_refill_zc()
4954 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); in stmmac_rx_refill_zc()
4955 stmmac_refill_desc3(priv, rx_q, rx_desc); in stmmac_rx_refill_zc()
4958 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
4959 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill_zc()
4962 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
4964 if (!priv->use_riwt) in stmmac_rx_refill_zc()
4968 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); in stmmac_rx_refill_zc()
4970 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill_zc()
4977 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill_zc()
4983 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) in stmmac_rx_zc() argument
4985 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_zc()
4987 int dirty = stmmac_rx_dirty(priv, queue); in stmmac_rx_zc()
4995 if (netif_msg_rx_status(priv)) { in stmmac_rx_zc()
4998 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx_zc()
4999 if (priv->extend_desc) { in stmmac_rx_zc()
5007 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx_zc()
5036 !stmmac_rx_refill_zc(priv, queue, dirty); in stmmac_rx_zc()
5040 if (priv->extend_desc) in stmmac_rx_zc()
5046 status = stmmac_rx_status(priv, &priv->dev->stats, in stmmac_rx_zc()
5047 &priv->xstats, p); in stmmac_rx_zc()
5054 priv->dma_conf.dma_rx_size); in stmmac_rx_zc()
5057 if (priv->extend_desc) in stmmac_rx_zc()
5068 if (priv->extend_desc) in stmmac_rx_zc()
5069 stmmac_rx_extended_status(priv, &priv->dev->stats, in stmmac_rx_zc()
5070 &priv->xstats, in stmmac_rx_zc()
5077 if (!priv->hwts_rx_en) in stmmac_rx_zc()
5078 priv->dev->stats.rx_errors++; in stmmac_rx_zc()
5098 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); in stmmac_rx_zc()
5111 prog = READ_ONCE(priv->xdp_prog); in stmmac_rx_zc()
5112 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); in stmmac_rx_zc()
5116 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); in stmmac_rx_zc()
5121 priv->dev->stats.rx_dropped++; in stmmac_rx_zc()
5140 stmmac_finalize_xdp_rx(priv, xdp_status); in stmmac_rx_zc()
5142 priv->xstats.rx_pkt_n += count; in stmmac_rx_zc()
5143 priv->xstats.rxq_stats[queue].rx_pkt_n += count; in stmmac_rx_zc()
5146 if (failure || stmmac_rx_dirty(priv, queue) > 0) in stmmac_rx_zc()
5165 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) in stmmac_rx() argument
5167 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx()
5168 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_rx()
5170 int status = 0, coe = priv->hw->rx_csum; in stmmac_rx()
5180 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; in stmmac_rx()
5182 if (netif_msg_rx_status(priv)) { in stmmac_rx()
5185 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx()
5186 if (priv->extend_desc) { in stmmac_rx()
5194 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx()
5225 if (priv->extend_desc) in stmmac_rx()
5231 status = stmmac_rx_status(priv, &priv->dev->stats, in stmmac_rx()
5232 &priv->xstats, p); in stmmac_rx()
5238 priv->dma_conf.dma_rx_size); in stmmac_rx()
5241 if (priv->extend_desc) in stmmac_rx()
5248 if (priv->extend_desc) in stmmac_rx()
5249 stmmac_rx_extended_status(priv, &priv->dev->stats, in stmmac_rx()
5250 &priv->xstats, rx_q->dma_erx + entry); in stmmac_rx()
5255 if (!priv->hwts_rx_en) in stmmac_rx()
5256 priv->dev->stats.rx_errors++; in stmmac_rx()
5274 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); in stmmac_rx()
5276 buf2_len = stmmac_rx_buf2_len(priv, p, status, len); in stmmac_rx()
5293 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5302 skb = stmmac_xdp_run_prog(priv, &xdp); in stmmac_rx()
5319 priv->dev->stats.rx_dropped++; in stmmac_rx()
5348 priv->dev->stats.rx_dropped++; in stmmac_rx()
5361 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5365 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5373 dma_sync_single_for_cpu(priv->device, buf->sec_addr, in stmmac_rx()
5377 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5392 stmmac_get_rx_hwtstamp(priv, p, np, skb); in stmmac_rx()
5393 stmmac_rx_vlan(priv->dev, skb); in stmmac_rx()
5394 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_rx()
5401 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) in stmmac_rx()
5408 priv->dev->stats.rx_packets++; in stmmac_rx()
5409 priv->dev->stats.rx_bytes += len; in stmmac_rx()
5420 stmmac_finalize_xdp_rx(priv, xdp_status); in stmmac_rx()
5422 stmmac_rx_refill(priv, queue); in stmmac_rx()
5424 priv->xstats.rx_pkt_n += count; in stmmac_rx()
5425 priv->xstats.rxq_stats[queue].rx_pkt_n += count; in stmmac_rx()
5434 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rx() local
5438 priv->xstats.napi_poll++; in stmmac_napi_poll_rx()
5440 work_done = stmmac_rx(priv, budget, chan); in stmmac_napi_poll_rx()
5445 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_poll_rx()
5456 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_tx() local
5460 priv->xstats.napi_poll++; in stmmac_napi_poll_tx()
5462 work_done = stmmac_tx_clean(priv, budget, chan); in stmmac_napi_poll_tx()
5469 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_poll_tx()
5480 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rxtx() local
5484 priv->xstats.napi_poll++; in stmmac_napi_poll_rxtx()
5486 tx_done = stmmac_tx_clean(priv, budget, chan); in stmmac_napi_poll_rxtx()
5489 rx_done = stmmac_rx_zc(priv, budget, chan); in stmmac_napi_poll_rxtx()
5507 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_napi_poll_rxtx()
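
All three pollers (rx, tx, rxtx) follow the standard NAPI completion contract: do at most budget units of work, and only when work_done < budget call napi_complete_done(); if that returns true (no reschedule raced in), re-arm the channel's DMA interrupt. Generic sketch, with the driver's stmmac_enable_dma_irq() reduced to a stub:

    #include <linux/netdevice.h>

    static void enable_chan_irq(void)
    {
            /* stand-in for stmmac_enable_dma_irq(priv, ioaddr, chan, ...) */
    }

    static int napi_poll_sketch(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            /* work_done = clean_ring(budget); */

            if (work_done < budget && napi_complete_done(napi, work_done))
                    enable_chan_irq();  /* IRQ stays masked while polling */

            return work_done;
    }
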
5525 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_tx_timeout() local
5527 stmmac_global_err(priv); in stmmac_tx_timeout()
5541 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_set_rx_mode() local
5543 stmmac_set_filter(priv, priv->hw, dev); in stmmac_set_rx_mode()
5559 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_change_mtu() local
5560 int txfifosz = priv->plat->tx_fifo_size; in stmmac_change_mtu()
5566 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_change_mtu()
5568 txfifosz /= priv->plat->tx_queues_to_use; in stmmac_change_mtu()
5570 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { in stmmac_change_mtu()
5571 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); in stmmac_change_mtu()
5582 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); in stmmac_change_mtu()
5584 dma_conf = stmmac_setup_dma_desc(priv, mtu); in stmmac_change_mtu()
5586 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", in stmmac_change_mtu()
5596 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); in stmmac_change_mtu()
5612 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_fix_features() local
5614 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) in stmmac_fix_features()
5617 if (!priv->plat->tx_coe) in stmmac_fix_features()
5625 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) in stmmac_fix_features()
5629 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { in stmmac_fix_features()
5631 priv->tso = true; in stmmac_fix_features()
5633 priv->tso = false; in stmmac_fix_features()
5642 struct stmmac_priv *priv = netdev_priv(netdev); in stmmac_set_features() local
5646 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_set_features()
5648 priv->hw->rx_csum = 0; in stmmac_set_features()
5652 stmmac_rx_ipc(priv, priv->hw); in stmmac_set_features()
5654 if (priv->sph_cap) { in stmmac_set_features()
5655 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_set_features()
5658 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) in stmmac_set_features()
5659 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_set_features()
5665 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) in stmmac_fpe_event_status() argument
5667 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; in stmmac_fpe_event_status()
5682 stmmac_fpe_send_mpacket(priv, priv->ioaddr, in stmmac_fpe_event_status()
5700 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && in stmmac_fpe_event_status()
5701 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && in stmmac_fpe_event_status()
5702 priv->fpe_wq) { in stmmac_fpe_event_status()
5703 queue_work(priv->fpe_wq, &priv->fpe_task); in stmmac_fpe_event_status()
5707 static void stmmac_common_interrupt(struct stmmac_priv *priv) in stmmac_common_interrupt() argument
5709 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_common_interrupt()
5710 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_common_interrupt()
5715 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_common_interrupt()
5718 if (priv->irq_wake) in stmmac_common_interrupt()
5719 pm_wakeup_event(priv->device, 0); in stmmac_common_interrupt()
5721 if (priv->dma_cap.estsel) in stmmac_common_interrupt()
5722 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, in stmmac_common_interrupt()
5723 &priv->xstats, tx_cnt); in stmmac_common_interrupt()
5725 if (priv->dma_cap.fpesel) { in stmmac_common_interrupt()
5726 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, in stmmac_common_interrupt()
5727 priv->dev); in stmmac_common_interrupt()
5729 stmmac_fpe_event_status(priv, status); in stmmac_common_interrupt()
5733 if ((priv->plat->has_gmac) || xmac) { in stmmac_common_interrupt()
5734 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); in stmmac_common_interrupt()
5739 priv->tx_path_in_lpi_mode = true; in stmmac_common_interrupt()
5741 priv->tx_path_in_lpi_mode = false; in stmmac_common_interrupt()
5745 status = stmmac_host_mtl_irq_status(priv, priv->hw, in stmmac_common_interrupt()
5750 if (priv->hw->pcs) { in stmmac_common_interrupt()
5751 if (priv->xstats.pcs_link) in stmmac_common_interrupt()
5752 netif_carrier_on(priv->dev); in stmmac_common_interrupt()
5754 netif_carrier_off(priv->dev); in stmmac_common_interrupt()
5757 stmmac_timestamp_interrupt(priv, priv); in stmmac_common_interrupt()
5775 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_interrupt() local
5778 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_interrupt()
5782 if (stmmac_safety_feat_interrupt(priv)) in stmmac_interrupt()
5786 stmmac_common_interrupt(priv); in stmmac_interrupt()
5789 stmmac_dma_interrupt(priv); in stmmac_interrupt()
5797 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_mac_interrupt() local
5800 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); in stmmac_mac_interrupt()
5805 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_mac_interrupt()
5809 stmmac_common_interrupt(priv); in stmmac_mac_interrupt()
5817 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_safety_interrupt() local
5820 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); in stmmac_safety_interrupt()
5825 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_safety_interrupt()
5829 stmmac_safety_feat_interrupt(priv); in stmmac_safety_interrupt()
5839 struct stmmac_priv *priv; in stmmac_msi_intr_tx() local
5843 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); in stmmac_msi_intr_tx()
5846 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); in stmmac_msi_intr_tx()
5851 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_tx()
5854 status = stmmac_napi_check(priv, chan, DMA_DIR_TX); in stmmac_msi_intr_tx()
5858 stmmac_bump_dma_threshold(priv, chan); in stmmac_msi_intr_tx()
5860 stmmac_tx_err(priv, chan); in stmmac_msi_intr_tx()
5871 struct stmmac_priv *priv; in stmmac_msi_intr_rx() local
5874 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); in stmmac_msi_intr_rx()
5877 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); in stmmac_msi_intr_rx()
5882 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_rx()
5885 stmmac_napi_check(priv, chan, DMA_DIR_RX); in stmmac_msi_intr_rx()
5896 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_poll_controller() local
5900 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_poll_controller()
5903 if (priv->plat->multi_msi_en) { in stmmac_poll_controller()
5904 for (i = 0; i < priv->plat->rx_queues_to_use; i++) in stmmac_poll_controller()
5905 stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); in stmmac_poll_controller()
5907 for (i = 0; i < priv->plat->tx_queues_to_use; i++) in stmmac_poll_controller()
5908 stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); in stmmac_poll_controller()
5928 struct stmmac_priv *priv = netdev_priv (dev); in stmmac_ioctl() local
5938 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); in stmmac_ioctl()
5956 struct stmmac_priv *priv = cb_priv; in stmmac_setup_tc_block_cb() local
5959 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) in stmmac_setup_tc_block_cb()
5962 __stmmac_disable_all_queues(priv); in stmmac_setup_tc_block_cb()
5966 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); in stmmac_setup_tc_block_cb()
5969 ret = stmmac_tc_setup_cls(priv, priv, type_data); in stmmac_setup_tc_block_cb()
5975 stmmac_enable_all_queues(priv); in stmmac_setup_tc_block_cb()
5984 struct stmmac_priv *priv = netdev_priv(ndev); in stmmac_setup_tc() local
5991 priv, priv, true); in stmmac_setup_tc()
5993 return stmmac_tc_setup_cbs(priv, priv, type_data); in stmmac_setup_tc()
5995 return stmmac_tc_setup_taprio(priv, priv, type_data); in stmmac_setup_tc()
5997 return stmmac_tc_setup_etf(priv, priv, type_data); in stmmac_setup_tc()
6023 struct stmmac_priv *priv = netdev_priv(ndev); in stmmac_set_mac_address() local
6026 ret = pm_runtime_resume_and_get(priv->device); in stmmac_set_mac_address()
6034 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); in stmmac_set_mac_address()
6037 pm_runtime_put(priv->device); in stmmac_set_mac_address()
6078 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_rings_status_show() local
6079 u32 rx_count = priv->plat->rx_queues_to_use; in stmmac_rings_status_show()
6080 u32 tx_count = priv->plat->tx_queues_to_use; in stmmac_rings_status_show()
6087 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rings_status_show()
6091 if (priv->extend_desc) { in stmmac_rings_status_show()
6094 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6098 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6103 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_rings_status_show()
6107 if (priv->extend_desc) { in stmmac_rings_status_show()
6110 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6114 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6125 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_dma_cap_show() local
6127 if (!priv->hw_cap_support) { in stmmac_dma_cap_show()
6137 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); in stmmac_dma_cap_show()
6139 (priv->dma_cap.mbps_1000) ? "Y" : "N"); in stmmac_dma_cap_show()
6141 (priv->dma_cap.half_duplex) ? "Y" : "N"); in stmmac_dma_cap_show()
6143 (priv->dma_cap.hash_filter) ? "Y" : "N"); in stmmac_dma_cap_show()
6145 (priv->dma_cap.multi_addr) ? "Y" : "N"); in stmmac_dma_cap_show()
6147 (priv->dma_cap.pcs) ? "Y" : "N"); in stmmac_dma_cap_show()
6149 (priv->dma_cap.sma_mdio) ? "Y" : "N"); in stmmac_dma_cap_show()
6151 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); in stmmac_dma_cap_show()
6153 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); in stmmac_dma_cap_show()
6155 (priv->dma_cap.rmon) ? "Y" : "N"); in stmmac_dma_cap_show()
6157 (priv->dma_cap.time_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6159 (priv->dma_cap.atime_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6161 (priv->dma_cap.eee) ? "Y" : "N"); in stmmac_dma_cap_show()
6162 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); in stmmac_dma_cap_show()
6164 (priv->dma_cap.tx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6165 if (priv->synopsys_id >= DWMAC_CORE_4_00) { in stmmac_dma_cap_show()
6167 (priv->dma_cap.rx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6170 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); in stmmac_dma_cap_show()
6172 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); in stmmac_dma_cap_show()
6175 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); in stmmac_dma_cap_show()
6177 priv->dma_cap.number_rx_channel); in stmmac_dma_cap_show()
6179 priv->dma_cap.number_tx_channel); in stmmac_dma_cap_show()
6181 priv->dma_cap.number_rx_queues); in stmmac_dma_cap_show()
6183 priv->dma_cap.number_tx_queues); in stmmac_dma_cap_show()
6185 (priv->dma_cap.enh_desc) ? "Y" : "N"); in stmmac_dma_cap_show()
6186 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); in stmmac_dma_cap_show()
6187 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); in stmmac_dma_cap_show()
6188 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); in stmmac_dma_cap_show()
6189 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); in stmmac_dma_cap_show()
6191 priv->dma_cap.pps_out_num); in stmmac_dma_cap_show()
6193 priv->dma_cap.asp ? "Y" : "N"); in stmmac_dma_cap_show()
6195 priv->dma_cap.frpsel ? "Y" : "N"); in stmmac_dma_cap_show()
6197 priv->dma_cap.addr64); in stmmac_dma_cap_show()
6199 priv->dma_cap.rssen ? "Y" : "N"); in stmmac_dma_cap_show()
6201 priv->dma_cap.vlhash ? "Y" : "N"); in stmmac_dma_cap_show()
6203 priv->dma_cap.sphen ? "Y" : "N"); in stmmac_dma_cap_show()
6205 priv->dma_cap.vlins ? "Y" : "N"); in stmmac_dma_cap_show()
6207 priv->dma_cap.dvlan ? "Y" : "N"); in stmmac_dma_cap_show()
6209 priv->dma_cap.l3l4fnum); in stmmac_dma_cap_show()
6211 priv->dma_cap.arpoffsel ? "Y" : "N"); in stmmac_dma_cap_show()
6213 priv->dma_cap.estsel ? "Y" : "N"); in stmmac_dma_cap_show()
6215 priv->dma_cap.fpesel ? "Y" : "N"); in stmmac_dma_cap_show()
6217 priv->dma_cap.tbssel ? "Y" : "N"); in stmmac_dma_cap_show()
6228 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_device_event() local
6235 if (priv->dbgfs_dir) in stmmac_device_event()
6236 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, in stmmac_device_event()
6237 priv->dbgfs_dir, in stmmac_device_event()
6252 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_init_fs() local
6257 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); in stmmac_init_fs()
6260 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6264 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
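
The debugfs wiring is the stock seq_file pattern: one directory per device under the subsystem root (renamed on NETDEV_CHANGENAME, per stmmac_device_event() above), plus a 0444 file per show routine. A compact sketch using DEFINE_SHOW_ATTRIBUTE, with a placeholder show function:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int rings_show(struct seq_file *seq, void *v)
    {
            seq_puts(seq, "descriptor rings would be dumped here\n");
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(rings);   /* generates rings_fops */

    static struct dentry *init_fs(struct dentry *root, const char *name,
                                  void *dev)
    {
            struct dentry *dir = debugfs_create_dir(name, root);

            debugfs_create_file("descriptors_status", 0444, dir, dev,
                                &rings_fops);
            return dir;
    }
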
6272 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_exit_fs() local
6274 debugfs_remove_recursive(priv->dbgfs_dir); in stmmac_exit_fs()
6302 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) in stmmac_vlan_update() argument
6309 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { in stmmac_vlan_update()
6316 if (!priv->dma_cap.vlhash) { in stmmac_vlan_update()
6324 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); in stmmac_vlan_update()
6329 struct stmmac_priv *priv = netdev_priv(ndev); in stmmac_vlan_rx_add_vid() local
6336 set_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6337 ret = stmmac_vlan_update(priv, is_double); in stmmac_vlan_rx_add_vid()
6339 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6343 if (priv->hw->num_vlan) { in stmmac_vlan_rx_add_vid()
6344 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_add_vid()
6354 struct stmmac_priv *priv = netdev_priv(ndev); in stmmac_vlan_rx_kill_vid() local
6358 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_kill_vid()
6365 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_kill_vid()
6367 if (priv->hw->num_vlan) { in stmmac_vlan_rx_kill_vid()
6368 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_kill_vid()
6373 ret = stmmac_vlan_update(priv, is_double); in stmmac_vlan_rx_kill_vid()
6376 pm_runtime_put(priv->device); in stmmac_vlan_rx_kill_vid()
6383 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_bpf() local
6387 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); in stmmac_bpf()
6389 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, in stmmac_bpf()
6399 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_xdp_xmit() local
6405 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) in stmmac_xdp_xmit()
6411 queue = stmmac_xdp_get_tx_queue(priv, cpu); in stmmac_xdp_xmit()
6412 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit()
6421 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); in stmmac_xdp_xmit()
6429 stmmac_flush_tx_descriptors(priv, queue); in stmmac_xdp_xmit()
6430 stmmac_tx_timer_arm(priv, queue); in stmmac_xdp_xmit()
6438 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_disable_rx_queue() argument
6440 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_rx_queue()
6444 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_disable_rx_queue()
6447 stmmac_stop_rx_dma(priv, queue); in stmmac_disable_rx_queue()
6448 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_rx_queue()
6451 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_enable_rx_queue() argument
6453 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_enable_rx_queue()
6454 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_rx_queue()
6459 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6461 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); in stmmac_enable_rx_queue()
6465 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); in stmmac_enable_rx_queue()
6467 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6468 netdev_err(priv->dev, "Failed to init RX desc.\n"); in stmmac_enable_rx_queue()
6472 stmmac_reset_rx_queue(priv, queue); in stmmac_enable_rx_queue()
6473 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6475 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_rx_queue()
6480 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6485 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6489 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6490 priv->dma_conf.dma_buf_sz, in stmmac_enable_rx_queue()
6494 stmmac_start_rx_dma(priv, queue); in stmmac_enable_rx_queue()
6497 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_enable_rx_queue()
6501 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_disable_tx_queue() argument
6503 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_tx_queue()
6507 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_disable_tx_queue()
6510 stmmac_stop_tx_dma(priv, queue); in stmmac_disable_tx_queue()
6511 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_tx_queue()
6514 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_enable_tx_queue() argument
6516 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_tx_queue()
6517 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_tx_queue()
6521 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6523 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); in stmmac_enable_tx_queue()
6527 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6529 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6530 netdev_err(priv->dev, "Failed to init TX desc.\n"); in stmmac_enable_tx_queue()
6534 stmmac_reset_tx_queue(priv, queue); in stmmac_enable_tx_queue()
6535 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6537 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_tx_queue()
6541 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); in stmmac_enable_tx_queue()
6544 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_tx_queue()
6547 stmmac_start_tx_dma(priv, queue); in stmmac_enable_tx_queue()
6550 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_enable_tx_queue()
6556 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_xdp_release() local
6563 stmmac_disable_all_queues(priv); in stmmac_xdp_release()
6565 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_release()
6566 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_release()
6572 stmmac_stop_all_dma(priv); in stmmac_xdp_release()
6575 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_release()
6578 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_xdp_release()
6589 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_xdp_open() local
6590 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_xdp_open()
6591 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_xdp_open()
6600 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6607 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); in stmmac_xdp_open()
6616 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_xdp_open()
6617 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_xdp_open()
6621 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_xdp_open()
6625 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_xdp_open()
6627 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6633 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6638 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6642 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6643 priv->dma_conf.dma_buf_sz, in stmmac_xdp_open()
6647 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_xdp_open()
6652 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_xdp_open()
6654 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6658 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6666 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_xdp_open()
6669 stmmac_start_all_dma(priv); in stmmac_xdp_open()
6676 stmmac_enable_all_queues(priv); in stmmac_xdp_open()
6679 stmmac_enable_all_dma_irq(priv); in stmmac_xdp_open()
6684 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_open()
6685 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_open()
6689 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6696 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_xsk_wakeup() local
6701 if (test_bit(STMMAC_DOWN, &priv->state) || in stmmac_xsk_wakeup()
6702 !netif_carrier_ok(priv->dev)) in stmmac_xsk_wakeup()
6705 if (!stmmac_xdp_is_enabled(priv)) in stmmac_xsk_wakeup()
6708 if (queue >= priv->plat->rx_queues_to_use || in stmmac_xsk_wakeup()
6709 queue >= priv->plat->tx_queues_to_use) in stmmac_xsk_wakeup()
6712 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_xsk_wakeup()
6713 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xsk_wakeup()
6714 ch = &priv->channel[queue]; in stmmac_xsk_wakeup()
6753 static void stmmac_reset_subtask(struct stmmac_priv *priv) in stmmac_reset_subtask() argument
6755 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) in stmmac_reset_subtask()
6757 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_reset_subtask()
6760 netdev_err(priv->dev, "Reset adapter.\n"); in stmmac_reset_subtask()
6763 netif_trans_update(priv->dev); in stmmac_reset_subtask()
6764 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) in stmmac_reset_subtask()
6767 set_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
6768 dev_close(priv->dev); in stmmac_reset_subtask()
6769 dev_open(priv->dev, NULL); in stmmac_reset_subtask()
6770 clear_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
6771 clear_bit(STMMAC_RESETING, &priv->state); in stmmac_reset_subtask()
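
Error recovery is blunt on purpose: stmmac_reset_subtask() spins until it owns STMMAC_RESETING, marks the device down, and cycles dev_close()/dev_open(). Both of those require the RTNL lock; the driver's rtnl_lock()/rtnl_unlock() pair does not match this search term, so it is not visible above, but it brackets the cycle. Sketch of the cycle (the DOWN bit index here is illustrative):

    #include <linux/rtnetlink.h>
    #include <linux/netdevice.h>
    #include <linux/bitops.h>

    #define SK_DOWN_BIT 0   /* illustrative; the driver uses STMMAC_DOWN */

    static void reset_adapter(struct net_device *dev, unsigned long *state)
    {
            rtnl_lock();
            netif_trans_update(dev);       /* avoid a spurious tx timeout */
            set_bit(SK_DOWN_BIT, state);

            dev_close(dev);
            dev_open(dev, NULL);           /* NULL: no extack for errors */

            clear_bit(SK_DOWN_BIT, state);
            rtnl_unlock();
    }
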
6777 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, in stmmac_service_task() local
6780 stmmac_reset_subtask(priv); in stmmac_service_task()
6781 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); in stmmac_service_task()
6792 static int stmmac_hw_init(struct stmmac_priv *priv) in stmmac_hw_init() argument
6797 if (priv->plat->has_sun8i) in stmmac_hw_init()
6799 priv->chain_mode = chain_mode; in stmmac_hw_init()
6802 ret = stmmac_hwif_init(priv); in stmmac_hw_init()
6807 priv->hw_cap_support = stmmac_get_hw_features(priv); in stmmac_hw_init()
6808 if (priv->hw_cap_support) { in stmmac_hw_init()
6809 dev_info(priv->device, "DMA HW capability register supported\n"); in stmmac_hw_init()
6816 priv->plat->enh_desc = priv->dma_cap.enh_desc; in stmmac_hw_init()
6817 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && in stmmac_hw_init()
6818 !priv->plat->use_phy_wol; in stmmac_hw_init()
6819 priv->hw->pmt = priv->plat->pmt; in stmmac_hw_init()
6820 if (priv->dma_cap.hash_tb_sz) { in stmmac_hw_init()
6821 priv->hw->multicast_filter_bins = in stmmac_hw_init()
6822 (BIT(priv->dma_cap.hash_tb_sz) << 5); in stmmac_hw_init()
6823 priv->hw->mcast_bits_log2 = in stmmac_hw_init()
6824 ilog2(priv->hw->multicast_filter_bins); in stmmac_hw_init()
6828 if (priv->plat->force_thresh_dma_mode) in stmmac_hw_init()
6829 priv->plat->tx_coe = 0; in stmmac_hw_init()
6831 priv->plat->tx_coe = priv->dma_cap.tx_coe; in stmmac_hw_init()
6834 priv->plat->rx_coe = priv->dma_cap.rx_coe; in stmmac_hw_init()
6836 if (priv->dma_cap.rx_coe_type2) in stmmac_hw_init()
6837 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; in stmmac_hw_init()
6838 else if (priv->dma_cap.rx_coe_type1) in stmmac_hw_init()
6839 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; in stmmac_hw_init()
6842 dev_info(priv->device, "No HW DMA feature register supported\n"); in stmmac_hw_init()
6845 if (priv->plat->rx_coe) { in stmmac_hw_init()
6846 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_hw_init()
6847 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); in stmmac_hw_init()
6848 if (priv->synopsys_id < DWMAC_CORE_4_00) in stmmac_hw_init()
6849 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); in stmmac_hw_init()
6851 if (priv->plat->tx_coe) in stmmac_hw_init()
6852 dev_info(priv->device, "TX Checksum insertion supported\n"); in stmmac_hw_init()
6854 if (priv->plat->pmt) { in stmmac_hw_init()
6855 dev_info(priv->device, "Wake-Up On Lan supported\n"); in stmmac_hw_init()
6856 device_set_wakeup_capable(priv->device, 1); in stmmac_hw_init()
6859 if (priv->dma_cap.tsoen) in stmmac_hw_init()
6860 dev_info(priv->device, "TSO supported\n"); in stmmac_hw_init()
6862 priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; in stmmac_hw_init()
6863 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; in stmmac_hw_init()
6866 if (priv->hwif_quirks) { in stmmac_hw_init()
6867 ret = priv->hwif_quirks(priv); in stmmac_hw_init()
6877 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || in stmmac_hw_init()
6878 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { in stmmac_hw_init()
6879 priv->use_riwt = 1; in stmmac_hw_init()
6880 dev_info(priv->device, in stmmac_hw_init()
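One piece of arithmetic in the block above is worth unpacking: when the capability register reports a hash table (dma_cap.hash_tb_sz), the driver derives multicast_filter_bins as BIT(hash_tb_sz) << 5 and caches ilog2() of the result for later hash indexing. A standalone sketch of the computation, assuming hash_tb_sz behaves as a small encoded field; ilog2_u32() is a stand-in for the kernel's ilog2().

#include <stdio.h>

/* ilog2(): index of the highest set bit, as in the kernel helper */
static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;
        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        for (unsigned int hash_tb_sz = 1; hash_tb_sz <= 3; hash_tb_sz++) {
                unsigned int bins = (1U << hash_tb_sz) << 5;   /* BIT(sz) << 5 */
                printf("hash_tb_sz=%u -> %u bins, hash index is %u bits\n",
                       hash_tb_sz, bins, ilog2_u32(bins));
        }
        return 0;
}

So an encoded field value of 1 yields 64 bins addressed by a 6-bit hash, 2 yields 128 bins and 7 bits, and 3 yields 256 bins and 8 bits.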
6889 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_napi_add() local
6892 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_add()
6895 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_add()
6897 ch->priv_data = priv; in stmmac_napi_add()
6901 if (queue < priv->plat->rx_queues_to_use) { in stmmac_napi_add()
6904 if (queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
6908 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_add()
6909 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
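stmmac_napi_add() walks max(rx, tx) channels and registers up to three NAPI contexts per channel: RX-only, TX-only, and a combined one when the channel serves both directions (the combined context backs the zero-copy XDP path); stmmac_napi_del() below undoes the same three cases in the same order. A toy classification of that branching, where the queue counts are example assumptions:

#include <stdio.h>

int main(void)
{
        unsigned int rx_cnt = 4, tx_cnt = 2;               /* example counts */
        unsigned int maxq = rx_cnt > tx_cnt ? rx_cnt : tx_cnt;

        for (unsigned int q = 0; q < maxq; q++) {
                int has_rx = q < rx_cnt;                   /* gets an RX NAPI */
                int has_tx = q < tx_cnt;                   /* gets a TX NAPI */
                printf("channel %u: rx=%d tx=%d rxtx=%d\n",
                       q, has_rx, has_tx, has_rx && has_tx);
        }
        return 0;
}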
6918 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_napi_del() local
6921 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_del()
6924 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_del()
6926 if (queue < priv->plat->rx_queues_to_use) in stmmac_napi_del()
6928 if (queue < priv->plat->tx_queues_to_use) in stmmac_napi_del()
6930 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_del()
6931 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_del()
6939 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_reinit_queues() local
6947 priv->plat->rx_queues_to_use = rx_cnt; in stmmac_reinit_queues()
6948 priv->plat->tx_queues_to_use = tx_cnt; in stmmac_reinit_queues()
6960 struct stmmac_priv *priv = netdev_priv(dev); in stmmac_reinit_ringparam() local
6966 priv->dma_conf.dma_rx_size = rx_size; in stmmac_reinit_ringparam()
6967 priv->dma_conf.dma_tx_size = tx_size; in stmmac_reinit_ringparam()
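Both reinit helpers follow the same guarded-reconfigure pattern: tear the datapath down if the interface is running, overwrite the stored parameters (queues_to_use in one case, dma_rx_size/dma_tx_size in the other), then bring it back up so the new values take effect. A compressed sketch of that pattern; struct cfg, quiesce() and restart() are hypothetical stand-ins for the driver's release and open paths.

#include <stdio.h>

struct cfg { unsigned int rx_cnt, tx_cnt; };

static void quiesce(void) { puts("release queues"); }  /* close-path stand-in */
static void restart(void) { puts("reopen queues"); }   /* open-path stand-in */

static void reinit_queues(struct cfg *c, int running,
                          unsigned int rx_cnt, unsigned int tx_cnt)
{
        if (running)
                quiesce();
        c->rx_cnt = rx_cnt;       /* mirrors plat->rx_queues_to_use */
        c->tx_cnt = tx_cnt;       /* mirrors plat->tx_queues_to_use */
        if (running)
                restart();
}

int main(void)
{
        struct cfg c = { 1, 1 };
        reinit_queues(&c, 1, 4, 4);
        printf("now %u RX / %u TX queues\n", c.rx_cnt, c.tx_cnt);
        return 0;
}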
6978 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, in stmmac_fpe_lp_task() local
6980 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; in stmmac_fpe_lp_task()
6994 stmmac_fpe_configure(priv, priv->ioaddr, in stmmac_fpe_lp_task()
6995 priv->plat->tx_queues_to_use, in stmmac_fpe_lp_task()
6996 priv->plat->rx_queues_to_use, in stmmac_fpe_lp_task()
6999 netdev_info(priv->dev, "configured FPE\n"); in stmmac_fpe_lp_task()
7003 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n"); in stmmac_fpe_lp_task()
7010 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, in stmmac_fpe_lp_task()
7012 stmmac_fpe_send_mpacket(priv, priv->ioaddr, in stmmac_fpe_lp_task()
7019 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); in stmmac_fpe_lp_task()
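The link-partner task above only programs the preemption hardware once both verification state machines agree; everything else it does is mPacket traffic nudging them toward that point. A sketch of the gate, assuming the usual four-state FPE progression; the enum values here are illustrative, not copied from the driver headers.

#include <stdbool.h>
#include <stdio.h>

enum fpe_state { FPE_STATE_OFF, FPE_STATE_CAPABLE, FPE_STATE_ENTERING_ON, FPE_STATE_ON };

/* Preemption is enabled only when both the local (lo) and the
 * link-partner (lp) state machines have reached ON. */
static bool fpe_ready(enum fpe_state lo, enum fpe_state lp)
{
        return lo == FPE_STATE_ON && lp == FPE_STATE_ON;
}

int main(void)
{
        printf("%d\n", fpe_ready(FPE_STATE_ON, FPE_STATE_ENTERING_ON)); /* 0: keep waiting */
        printf("%d\n", fpe_ready(FPE_STATE_ON, FPE_STATE_ON));          /* 1: configure FPE */
        return 0;
}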
7022 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) in stmmac_fpe_handshake() argument
7024 if (priv->plat->fpe_cfg->hs_enable != enable) { in stmmac_fpe_handshake()
7026 stmmac_fpe_send_mpacket(priv, priv->ioaddr, in stmmac_fpe_handshake()
7029 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; in stmmac_fpe_handshake()
7030 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; in stmmac_fpe_handshake()
7033 priv->plat->fpe_cfg->hs_enable = enable; in stmmac_fpe_handshake()
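stmmac_fpe_handshake() is the entry point that flips the handshake on or off: enabling sends the verify mPacket that starts the exchange, disabling rewinds both state machines to OFF, and repeated calls with the same value are no-ops thanks to the hs_enable check. A minimal model with a hypothetical struct fpe_cfg:

#include <stdbool.h>
#include <stdio.h>

enum fpe_state { FPE_STATE_OFF, FPE_STATE_CAPABLE, FPE_STATE_ENTERING_ON, FPE_STATE_ON };
struct fpe_cfg { bool hs_enable; enum fpe_state lo, lp; };

static void fpe_handshake(struct fpe_cfg *cfg, bool enable)
{
        if (cfg->hs_enable == enable)
                return;                         /* idempotent, as in the driver */
        if (enable)
                puts("send verify mPacket");    /* kicks off the partner's verify */
        else
                cfg->lo = cfg->lp = FPE_STATE_OFF;
        cfg->hs_enable = enable;
}

int main(void)
{
        struct fpe_cfg cfg = { .hs_enable = false };
        fpe_handshake(&cfg, true);   /* starts verification */
        fpe_handshake(&cfg, true);   /* no-op: already enabled */
        fpe_handshake(&cfg, false);  /* rewinds both states to OFF */
        return 0;
}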
7052 struct stmmac_priv *priv; in stmmac_dvr_probe() local
7063 priv = netdev_priv(ndev); in stmmac_dvr_probe()
7064 priv->device = device; in stmmac_dvr_probe()
7065 priv->dev = ndev; in stmmac_dvr_probe()
7068 priv->pause = pause; in stmmac_dvr_probe()
7069 priv->plat = plat_dat; in stmmac_dvr_probe()
7070 priv->ioaddr = res->addr; in stmmac_dvr_probe()
7071 priv->dev->base_addr = (unsigned long)res->addr; in stmmac_dvr_probe()
7072 priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; in stmmac_dvr_probe()
7074 priv->dev->irq = res->irq; in stmmac_dvr_probe()
7075 priv->wol_irq = res->wol_irq; in stmmac_dvr_probe()
7076 priv->lpi_irq = res->lpi_irq; in stmmac_dvr_probe()
7077 priv->sfty_ce_irq = res->sfty_ce_irq; in stmmac_dvr_probe()
7078 priv->sfty_ue_irq = res->sfty_ue_irq; in stmmac_dvr_probe()
7080 priv->rx_irq[i] = res->rx_irq[i]; in stmmac_dvr_probe()
7082 priv->tx_irq[i] = res->tx_irq[i]; in stmmac_dvr_probe()
7085 eth_hw_addr_set(priv->dev, res->mac); in stmmac_dvr_probe()
7087 dev_set_drvdata(device, priv->dev); in stmmac_dvr_probe()
7092 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); in stmmac_dvr_probe()
7093 if (!priv->af_xdp_zc_qps) in stmmac_dvr_probe()
7097 priv->wq = create_singlethread_workqueue("stmmac_wq"); in stmmac_dvr_probe()
7098 if (!priv->wq) { in stmmac_dvr_probe()
7099 dev_err(priv->device, "failed to create workqueue\n"); in stmmac_dvr_probe()
7103 INIT_WORK(&priv->service_task, stmmac_service_task); in stmmac_dvr_probe()
7106 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); in stmmac_dvr_probe()
7112 priv->plat->phy_addr = phyaddr; in stmmac_dvr_probe()
7114 if (priv->plat->stmmac_rst) { in stmmac_dvr_probe()
7115 ret = reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7116 reset_control_deassert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7121 reset_control_reset(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7124 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_probe()
7126 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", in stmmac_dvr_probe()
7130 ret = stmmac_hw_init(priv); in stmmac_dvr_probe()
7136 if (priv->synopsys_id < DWMAC_CORE_5_20) in stmmac_dvr_probe()
7137 priv->plat->dma_cfg->dche = false; in stmmac_dvr_probe()
7139 stmmac_check_ether_addr(priv); in stmmac_dvr_probe()
7146 ret = stmmac_tc_init(priv, priv); in stmmac_dvr_probe()
7151 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { in stmmac_dvr_probe()
7153 if (priv->plat->has_gmac4) in stmmac_dvr_probe()
7155 priv->tso = true; in stmmac_dvr_probe()
7156 dev_info(priv->device, "TSO feature enabled\n"); in stmmac_dvr_probe()
7159 if (priv->dma_cap.sphen && !priv->plat->sph_disable) { in stmmac_dvr_probe()
7161 priv->sph_cap = true; in stmmac_dvr_probe()
7162 priv->sph = priv->sph_cap; in stmmac_dvr_probe()
7163 dev_info(priv->device, "SPH feature enabled\n"); in stmmac_dvr_probe()
7171 if (priv->plat->addr64) in stmmac_dvr_probe()
7172 priv->dma_cap.addr64 = priv->plat->addr64; in stmmac_dvr_probe()
7174 if (priv->dma_cap.addr64) { in stmmac_dvr_probe()
7176 DMA_BIT_MASK(priv->dma_cap.addr64)); in stmmac_dvr_probe()
7178 dev_info(priv->device, "Using %d bits DMA width\n", in stmmac_dvr_probe()
7179 priv->dma_cap.addr64); in stmmac_dvr_probe()
7186 priv->plat->dma_cfg->eame = true; in stmmac_dvr_probe()
7190 dev_err(priv->device, "Failed to set DMA Mask\n"); in stmmac_dvr_probe()
7194 priv->dma_cap.addr64 = 32; in stmmac_dvr_probe()
7203 if (priv->dma_cap.vlhash) { in stmmac_dvr_probe()
7207 if (priv->dma_cap.vlins) { in stmmac_dvr_probe()
7209 if (priv->dma_cap.dvlan) in stmmac_dvr_probe()
7213 priv->msg_enable = netif_msg_init(debug, default_msg_level); in stmmac_dvr_probe()
7216 rxq = priv->plat->rx_queues_to_use; in stmmac_dvr_probe()
7217 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); in stmmac_dvr_probe()
7218 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_dvr_probe()
7219 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); in stmmac_dvr_probe()
7221 if (priv->dma_cap.rssen && priv->plat->rss_en) in stmmac_dvr_probe()
7226 if (priv->plat->has_xgmac) in stmmac_dvr_probe()
7228 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) in stmmac_dvr_probe()
7235 if ((priv->plat->maxmtu < ndev->max_mtu) && in stmmac_dvr_probe()
7236 (priv->plat->maxmtu >= ndev->min_mtu)) in stmmac_dvr_probe()
7237 ndev->max_mtu = priv->plat->maxmtu; in stmmac_dvr_probe()
7238 else if (priv->plat->maxmtu < ndev->min_mtu) in stmmac_dvr_probe()
7239 dev_warn(priv->device, in stmmac_dvr_probe()
7241 __func__, priv->plat->maxmtu); in stmmac_dvr_probe()
7244 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ in stmmac_dvr_probe()
7249 mutex_init(&priv->lock); in stmmac_dvr_probe()
7257 if (priv->plat->clk_csr >= 0) in stmmac_dvr_probe()
7258 priv->clk_csr = priv->plat->clk_csr; in stmmac_dvr_probe()
7260 stmmac_clk_csr_set(priv); in stmmac_dvr_probe()
7262 stmmac_check_pcs_mode(priv); in stmmac_dvr_probe()
7269 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_probe()
7270 priv->hw->pcs != STMMAC_PCS_RTBI) { in stmmac_dvr_probe()
7274 dev_err_probe(priv->device, ret, in stmmac_dvr_probe()
7276 __func__, priv->plat->bus_id); in stmmac_dvr_probe()
7281 if (priv->plat->speed_mode_2500) in stmmac_dvr_probe()
7282 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); in stmmac_dvr_probe()
7284 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { in stmmac_dvr_probe()
7285 ret = stmmac_xpcs_setup(priv->mii); in stmmac_dvr_probe()
7290 ret = stmmac_phy_setup(priv); in stmmac_dvr_probe()
7298 dev_err(priv->device, "%s: ERROR %i registering the device\n", in stmmac_dvr_probe()
7307 if (priv->plat->dump_debug_regs) in stmmac_dvr_probe()
7308 priv->plat->dump_debug_regs(priv->plat->bsp_priv); in stmmac_dvr_probe()
7318 phylink_destroy(priv->phylink); in stmmac_dvr_probe()
7321 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_probe()
7322 priv->hw->pcs != STMMAC_PCS_RTBI) in stmmac_dvr_probe()
7327 destroy_workqueue(priv->wq); in stmmac_dvr_probe()
7328 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_probe()
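A notable probe detail is the DMA width negotiation: the driver first asks for the full addr64 width the hardware advertises and, if the platform rejects it, retries with a 32-bit mask before failing. A runnable sketch of that fallback, where set_dma_mask() is a stand-in for dma_set_mask_and_coherent() and the 40-bit platform limit is an arbitrary assumption:

#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* pretend the platform only supports 40-bit addressing */
static int set_dma_mask(unsigned long long mask)
{
        return mask <= DMA_BIT_MASK(40) ? 0 : -5 /* -EIO */;
}

int main(void)
{
        unsigned int addr64 = 48;                 /* width advertised in dma_cap */

        if (set_dma_mask(DMA_BIT_MASK(addr64)) != 0) {
                addr64 = 32;                      /* safe fallback, as in probe */
                if (set_dma_mask(DMA_BIT_MASK(addr64)) != 0)
                        return 1;                 /* "Failed to set DMA Mask" */
        }
        printf("Using %u bits DMA width\n", addr64);
        return 0;
}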
7343 struct stmmac_priv *priv = netdev_priv(ndev); in stmmac_dvr_remove() local
7345 netdev_info(priv->dev, "%s: removing driver", __func__); in stmmac_dvr_remove()
7349 stmmac_stop_all_dma(priv); in stmmac_dvr_remove()
7350 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_dvr_remove()
7357 if (priv->plat->serdes_powerdown) in stmmac_dvr_remove()
7358 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_dvr_remove()
7363 phylink_destroy(priv->phylink); in stmmac_dvr_remove()
7364 if (priv->plat->stmmac_rst) in stmmac_dvr_remove()
7365 reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_remove()
7366 reset_control_assert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_remove()
7367 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_remove()
7368 priv->hw->pcs != STMMAC_PCS_RTBI) in stmmac_dvr_remove()
7370 destroy_workqueue(priv->wq); in stmmac_dvr_remove()
7371 mutex_destroy(&priv->lock); in stmmac_dvr_remove()
7372 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_remove()
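Remove releases resources in roughly the reverse of the order probe acquired them: quiesce the DMA and MAC first, then detach phylink and the resets, and only then destroy the workqueue and free the XDP bitmap. It is the same LIFO discipline the probe error path expresses with its chain of goto labels; a generic illustration with hypothetical heap resources standing in for the workqueue and the bitmap:

#include <stdio.h>
#include <stdlib.h>

static int demo_probe(void)
{
        void *wq = malloc(16), *bitmap = NULL;

        if (!wq)
                goto err_wq;
        bitmap = malloc(16);
        if (!bitmap)
                goto err_bitmap;
        puts("probe ok");
        free(bitmap);     /* remove frees in this same reverse order */
        free(wq);
        return 0;

err_bitmap:
        free(wq);         /* unwind only what was already acquired */
err_wq:
        return -1;
}

int main(void) { return demo_probe() ? 1 : 0; }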
7391 struct stmmac_priv *priv = netdev_priv(ndev); in stmmac_suspend() local
7397 mutex_lock(&priv->lock); in stmmac_suspend()
7401 stmmac_disable_all_queues(priv); in stmmac_suspend()
7403 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_suspend()
7404 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_suspend()
7406 if (priv->eee_enabled) { in stmmac_suspend()
7407 priv->tx_path_in_lpi_mode = false; in stmmac_suspend()
7408 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_suspend()
7412 stmmac_stop_all_dma(priv); in stmmac_suspend()
7414 if (priv->plat->serdes_powerdown) in stmmac_suspend()
7415 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_suspend()
7418 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7419 stmmac_pmt(priv, priv->hw, priv->wolopts); in stmmac_suspend()
7420 priv->irq_wake = 1; in stmmac_suspend()
7422 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_suspend()
7423 pinctrl_pm_select_sleep_state(priv->device); in stmmac_suspend()
7426 mutex_unlock(&priv->lock); in stmmac_suspend()
7429 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7430 phylink_suspend(priv->phylink, true); in stmmac_suspend()
7432 if (device_may_wakeup(priv->device)) in stmmac_suspend()
7433 phylink_speed_down(priv->phylink, false); in stmmac_suspend()
7434 phylink_suspend(priv->phylink, false); in stmmac_suspend()
7438 if (priv->dma_cap.fpesel) { in stmmac_suspend()
7440 stmmac_fpe_configure(priv, priv->ioaddr, in stmmac_suspend()
7441 priv->plat->tx_queues_to_use, in stmmac_suspend()
7442 priv->plat->rx_queues_to_use, false); in stmmac_suspend()
7444 stmmac_fpe_handshake(priv, false); in stmmac_suspend()
7445 stmmac_fpe_stop_wq(priv); in stmmac_suspend()
7448 priv->speed = SPEED_UNKNOWN; in stmmac_suspend()
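Suspend picks one of two power-down strategies: with Wake-on-LAN armed (the device may wake and PMT is present) the MAC stays powered so it can match wake packets, otherwise the MAC is gated off and the pin controller moves to its sleep state. A simplified decision table; the strings describe the two branches rather than reproduce them:

#include <stdbool.h>
#include <stdio.h>

static const char *suspend_strategy(bool may_wakeup, bool has_pmt)
{
        if (may_wakeup && has_pmt)
                return "arm PMT wake-up, leave MAC powered, keep PHY up";
        return "disable MAC, select sleep pinctrl, let the PHY power down";
}

int main(void)
{
        printf("WoL on:  %s\n", suspend_strategy(true, true));
        printf("WoL off: %s\n", suspend_strategy(false, false));
        return 0;
}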
7453 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_reset_rx_queue() argument
7455 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_reset_rx_queue()
7461 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_reset_tx_queue() argument
7463 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_reset_tx_queue()
7469 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_reset_tx_queue()
7476 static void stmmac_reset_queues_param(struct stmmac_priv *priv) in stmmac_reset_queues_param() argument
7478 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_reset_queues_param()
7479 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_reset_queues_param()
7483 stmmac_reset_rx_queue(priv, queue); in stmmac_reset_queues_param()
7486 stmmac_reset_tx_queue(priv, queue); in stmmac_reset_queues_param()
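Resetting a queue boils down to rewinding its producer and consumer indices so the descriptor ring restarts from slot 0; the TX side additionally resets the netdev byte-queue-limit accounting, as the netdev_tx_reset_queue() call above shows. A minimal model using the driver's cur/dirty naming convention; the struct itself is hypothetical:

#include <stdio.h>

struct ring { unsigned int cur, dirty; };

static void reset_ring(struct ring *r)
{
        r->cur = 0;     /* next descriptor to produce into */
        r->dirty = 0;   /* next descriptor to reclaim */
}

int main(void)
{
        struct ring rx = { 37, 12 }, tx = { 99, 98 };
        reset_ring(&rx);
        reset_ring(&tx);
        printf("rx %u/%u tx %u/%u\n", rx.cur, rx.dirty, tx.cur, tx.dirty);
        return 0;
}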
7498 struct stmmac_priv *priv = netdev_priv(ndev); in stmmac_resume() local
7510 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7511 mutex_lock(&priv->lock); in stmmac_resume()
7512 stmmac_pmt(priv, priv->hw, 0); in stmmac_resume()
7513 mutex_unlock(&priv->lock); in stmmac_resume()
7514 priv->irq_wake = 0; in stmmac_resume()
7516 pinctrl_pm_select_default_state(priv->device); in stmmac_resume()
7518 if (priv->mii) in stmmac_resume()
7519 stmmac_mdio_reset(priv->mii); in stmmac_resume()
7522 if (priv->plat->serdes_powerup) { in stmmac_resume()
7523 ret = priv->plat->serdes_powerup(ndev, in stmmac_resume()
7524 priv->plat->bsp_priv); in stmmac_resume()
7531 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7532 phylink_resume(priv->phylink); in stmmac_resume()
7534 phylink_resume(priv->phylink); in stmmac_resume()
7535 if (device_may_wakeup(priv->device)) in stmmac_resume()
7536 phylink_speed_up(priv->phylink); in stmmac_resume()
7541 mutex_lock(&priv->lock); in stmmac_resume()
7543 stmmac_reset_queues_param(priv); in stmmac_resume()
7545 stmmac_free_tx_skbufs(priv); in stmmac_resume()
7546 stmmac_clear_descriptors(priv, &priv->dma_conf); in stmmac_resume()
7549 stmmac_init_coalesce(priv); in stmmac_resume()
7552 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); in stmmac_resume()
7554 stmmac_enable_all_queues(priv); in stmmac_resume()
7555 stmmac_enable_all_dma_irq(priv); in stmmac_resume()
7557 mutex_unlock(&priv->lock); in stmmac_resume()
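The resume sequence frees in-flight TX skbufs before clearing descriptors and rewinding the rings: any buffer sitting between the dirty (unreclaimed) and cur (last queued) indices was mid-flight at suspend time and would leak once the indices snap back to 0. A toy ring walk showing which slots that covers, with arbitrary index values:

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
        unsigned int dirty = 5, cur = 2;  /* ring wrapped: slots 5,6,7,0,1 in flight */

        for (unsigned int i = dirty; i != cur; i = (i + 1) % RING_SIZE)
                printf("free in-flight buffer at slot %u\n", i);
        printf("rewind: cur = dirty = 0\n");
        return 0;
}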