1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53
54 /* As long as the interface is active, we keep the timestamping counter enabled
55 * with fine resolution and binary rollover. This avoids non-monotonic behavior
56 * (clock jumps) when changing timestamping settings at runtime.
57 */
58 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 PTP_TCR_TSCTRLSSR)
60
61 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
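/* Note: a worked example of the alignment macro above, assuming a 64-byte
 * cache line (SMP_CACHE_BYTES == 64): STMMAC_ALIGN(1500) first rounds up to
 * 1536 for the cache line and then to a 16-byte boundary, which 1536 already
 * satisfies, so the result is 1536.
 */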
63
64 /* Module parameters */
65 #define TX_TIMEO 5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
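/* Note: the XDP verdicts above (except STMMAC_XDP_PASS) are bit flags, so
 * several outcomes can be OR-ed into a single value during one RX NAPI pass
 * and then tested individually once the pass completes.
 */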
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 #define STMMAC_RX_COPYBREAK 256
110
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114
115 #define STMMAC_DEFAULT_LPI_TIMER 1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122 * but allows the user to force use of chain mode instead of ring mode
123 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 u32 rxmode, u32 chan);
141
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
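/* Note: STMMAC_COAL_TIMER() converts a coalescing interval given in
 * microseconds into a ktime value, e.g. STMMAC_COAL_TIMER(1000) == 1 ms.
 */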
149
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 int ret = 0;
153
154 if (enabled) {
155 ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 if (ret)
157 return ret;
158 ret = clk_prepare_enable(priv->plat->pclk);
159 if (ret) {
160 clk_disable_unprepare(priv->plat->stmmac_clk);
161 return ret;
162 }
163 if (priv->plat->clks_config) {
164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 if (ret) {
166 clk_disable_unprepare(priv->plat->stmmac_clk);
167 clk_disable_unprepare(priv->plat->pclk);
168 return ret;
169 }
170 }
171 } else {
172 clk_disable_unprepare(priv->plat->stmmac_clk);
173 clk_disable_unprepare(priv->plat->pclk);
174 if (priv->plat->clks_config)
175 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 }
177
178 return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181
182 /**
183 * stmmac_verify_args - verify the driver parameters.
184 * Description: it checks the driver parameters and sets a default in case of
185 * errors.
186 */
187 static void stmmac_verify_args(void)
188 {
189 if (unlikely(watchdog < 0))
190 watchdog = TX_TIMEO;
191 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 buf_sz = DEFAULT_BUFSIZE;
193 if (unlikely(flow_ctrl > 1))
194 flow_ctrl = FLOW_AUTO;
195 else if (likely(flow_ctrl < 0))
196 flow_ctrl = FLOW_OFF;
197 if (unlikely((pause < 0) || (pause > 0xffff)))
198 pause = PAUSE_TIME;
199 if (eee_timer < 0)
200 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 u32 queue;
209
210 for (queue = 0; queue < maxq; queue++) {
211 struct stmmac_channel *ch = &priv->channel[queue];
212
213 if (stmmac_xdp_is_enabled(priv) &&
214 test_bit(queue, priv->af_xdp_zc_qps)) {
215 napi_disable(&ch->rxtx_napi);
216 continue;
217 }
218
219 if (queue < rx_queues_cnt)
220 napi_disable(&ch->rx_napi);
221 if (queue < tx_queues_cnt)
222 napi_disable(&ch->tx_napi);
223 }
224 }
225
226 /**
227 * stmmac_disable_all_queues - Disable all queues
228 * @priv: driver private structure
229 */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 struct stmmac_rx_queue *rx_q;
234 u32 queue;
235
236 /* synchronize_rcu() needed for pending XDP buffers to drain */
237 for (queue = 0; queue < rx_queues_cnt; queue++) {
238 rx_q = &priv->dma_conf.rx_queue[queue];
239 if (rx_q->xsk_pool) {
240 synchronize_rcu();
241 break;
242 }
243 }
244
245 __stmmac_disable_all_queues(priv);
246 }
247
248 /**
249 * stmmac_enable_all_queues - Enable all queues
250 * @priv: driver private structure
251 */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 u32 queue;
258
259 for (queue = 0; queue < maxq; queue++) {
260 struct stmmac_channel *ch = &priv->channel[queue];
261
262 if (stmmac_xdp_is_enabled(priv) &&
263 test_bit(queue, priv->af_xdp_zc_qps)) {
264 napi_enable(&ch->rxtx_napi);
265 continue;
266 }
267
268 if (queue < rx_queues_cnt)
269 napi_enable(&ch->rx_napi);
270 if (queue < tx_queues_cnt)
271 napi_enable(&ch->tx_napi);
272 }
273 }
274
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 queue_work(priv->wq, &priv->service_task);
280 }
281
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 netif_carrier_off(priv->dev);
285 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 stmmac_service_event_schedule(priv);
287 }
288
289 /**
290 * stmmac_clk_csr_set - dynamically set the MDC clock
291 * @priv: driver private structure
292 * Description: this is to dynamically set the MDC clock according to the csr
293 * clock input.
294 * Note:
295 * If a specific clk_csr value is passed from the platform
296 * this means that the CSR Clock Range selection cannot be
297 * changed at run-time and it is fixed (as reported in the driver
298 * documentation). Otherwise, the driver will try to set the MDC
299 * clock dynamically according to the actual clock input.
300 */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 u32 clk_rate;
304
305 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306
307 /* The platform-provided default clk_csr is assumed valid in all
308 * cases except the ones handled below.
309 * For values higher than the IEEE 802.3 specified frequency
310 * we cannot estimate the proper divider as the frequency of
311 * clk_csr_i is not known. So we do not change the default
312 * divider.
313 */
314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 if (clk_rate < CSR_F_35M)
316 priv->clk_csr = STMMAC_CSR_20_35M;
317 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 priv->clk_csr = STMMAC_CSR_35_60M;
319 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 priv->clk_csr = STMMAC_CSR_60_100M;
321 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 priv->clk_csr = STMMAC_CSR_100_150M;
323 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 priv->clk_csr = STMMAC_CSR_150_250M;
325 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 priv->clk_csr = STMMAC_CSR_250_300M;
327 }
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371
372 return avail;
373 }
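/* Note: a worked example of the ring arithmetic in stmmac_tx_avail(),
 * assuming dma_tx_size == 512: with dirty_tx == 5 and cur_tx == 10 the
 * second branch gives 512 - 10 + 5 - 1 = 506 free descriptors; with
 * dirty_tx == 10 and cur_tx == 5 the first branch gives 10 - 5 - 1 = 4.
 * The extra -1 keeps one slot unused (the usual full-vs-empty ring
 * convention).
 */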
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
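/* Note: stmmac_rx_dirty() counts descriptors still waiting to be refilled.
 * For example, with dma_rx_size == 512, dirty_rx == 100 and cur_rx == 110
 * the first branch gives 10; if cur_rx has wrapped around to 5, the second
 * branch gives 512 - 100 + 5 = 417.
 */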
392
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 int tx_lpi_timer;
396
397 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 priv->eee_sw_timer_en = en ? 0 : 1;
399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402
403 /**
404 * stmmac_enable_eee_mode - check and enter LPI mode
405 * @priv: driver private structure
406 * Description: this function verifies that all TX work is done and, if so,
407 * enters LPI mode for EEE.
408 */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 u32 tx_cnt = priv->plat->tx_queues_to_use;
412 u32 queue;
413
414 /* check if all TX queues have the work finished */
415 for (queue = 0; queue < tx_cnt; queue++) {
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417
418 if (tx_q->dirty_tx != tx_q->cur_tx)
419 return -EBUSY; /* still unfinished work */
420 }
421
422 /* Check and enter in LPI mode */
423 if (!priv->tx_path_in_lpi_mode)
424 stmmac_set_eee_mode(priv, priv->hw,
425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 return 0;
427 }
428
429 /**
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
431 * @priv: driver private structure
432 * Description: this function exits and disables EEE when the LPI state
433 * is true. It is called from the xmit path.
434 */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 if (!priv->eee_sw_timer_en) {
438 stmmac_lpi_entry_timer_config(priv, 0);
439 return;
440 }
441
442 stmmac_reset_eee_mode(priv, priv->hw);
443 del_timer_sync(&priv->eee_ctrl_timer);
444 priv->tx_path_in_lpi_mode = false;
445 }
446
447 /**
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
449 * @t: timer_list struct containing private info
450 * Description:
451 * if there is no data transfer and we are not in the LPI state,
452 * then the MAC transmitter can be moved to the LPI state.
453 */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457
458 if (stmmac_enable_eee_mode(priv))
459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461
462 /**
463 * stmmac_eee_init - init EEE
464 * @priv: driver private structure
465 * Description:
466 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
467 * can also manage EEE, this function enables the LPI state and starts the
468 * related timer.
469 */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 int eee_tw_timer = priv->eee_tw_timer;
473
474 /* Using PCS we cannot deal with the phy registers at this stage
475 * so we do not support extra features like EEE.
476 */
477 if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 priv->hw->pcs == STMMAC_PCS_RTBI)
479 return false;
480
481 /* Check if MAC core supports the EEE feature. */
482 if (!priv->dma_cap.eee)
483 return false;
484
485 mutex_lock(&priv->lock);
486
487 /* Check if it needs to be deactivated */
488 if (!priv->eee_active) {
489 if (priv->eee_enabled) {
490 netdev_dbg(priv->dev, "disable EEE\n");
491 stmmac_lpi_entry_timer_config(priv, 0);
492 del_timer_sync(&priv->eee_ctrl_timer);
493 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 if (priv->hw->xpcs)
495 xpcs_config_eee(priv->hw->xpcs,
496 priv->plat->mult_fact_100ns,
497 false);
498 }
499 mutex_unlock(&priv->lock);
500 return false;
501 }
502
503 if (priv->eee_active && !priv->eee_enabled) {
504 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 eee_tw_timer);
507 if (priv->hw->xpcs)
508 xpcs_config_eee(priv->hw->xpcs,
509 priv->plat->mult_fact_100ns,
510 true);
511 }
512
513 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 del_timer_sync(&priv->eee_ctrl_timer);
515 priv->tx_path_in_lpi_mode = false;
516 stmmac_lpi_entry_timer_config(priv, 1);
517 } else {
518 stmmac_lpi_entry_timer_config(priv, 0);
519 mod_timer(&priv->eee_ctrl_timer,
520 STMMAC_LPI_T(priv->tx_lpi_timer));
521 }
522
523 mutex_unlock(&priv->lock);
524 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 return true;
526 }
527
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529 * @priv: driver private structure
530 * @p : descriptor pointer
531 * @skb : the socket buffer
532 * Description :
533 * This function reads the timestamp from the descriptor, performs some
534 * sanity checks and then passes it to the stack.
535 */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 struct dma_desc *p, struct sk_buff *skb)
538 {
539 struct skb_shared_hwtstamps shhwtstamp;
540 bool found = false;
541 u64 ns = 0;
542
543 if (!priv->hwts_tx_en)
544 return;
545
546 /* exit if skb doesn't support hw tstamp */
547 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 return;
549
550 /* check tx tstamp status */
551 if (stmmac_get_tx_timestamp_status(priv, p)) {
552 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 found = true;
554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 found = true;
556 }
557
558 if (found) {
559 ns -= priv->plat->cdc_error_adj;
560
561 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 shhwtstamp.hwtstamp = ns_to_ktime(ns);
563
564 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 /* pass tstamp to stack */
566 skb_tstamp_tx(skb, &shhwtstamp);
567 }
568 }
569
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571 * @priv: driver private structure
572 * @p : descriptor pointer
573 * @np : next descriptor pointer
574 * @skb : the socket buffer
575 * Description :
576 * This function reads the received packet's timestamp from the descriptor
577 * and passes it to the stack. It also performs some sanity checks.
578 */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 struct dma_desc *np, struct sk_buff *skb)
581 {
582 struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 struct dma_desc *desc = p;
584 u64 ns = 0;
585
586 if (!priv->hwts_rx_en)
587 return;
588 /* For GMAC4, the valid timestamp is from CTX next desc. */
589 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 desc = np;
591
592 /* Check if timestamp is available */
593 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595
596 ns -= priv->plat->cdc_error_adj;
597
598 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 shhwtstamp = skb_hwtstamps(skb);
600 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 } else {
603 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 }
605 }
606
607 /**
608 * stmmac_hwtstamp_set - control hardware timestamping.
609 * @dev: device pointer.
610 * @ifr: An IOCTL-specific structure that can contain a pointer to
611 * a proprietary structure used to pass information to the driver.
612 * Description:
613 * This function configures the MAC to enable/disable both outgoing (TX)
614 * and incoming (RX) packet timestamping, based on user input.
615 * Return Value:
616 * 0 on success and an appropriate -ve integer on failure.
617 */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 struct stmmac_priv *priv = netdev_priv(dev);
621 struct hwtstamp_config config;
622 u32 ptp_v2 = 0;
623 u32 tstamp_all = 0;
624 u32 ptp_over_ipv4_udp = 0;
625 u32 ptp_over_ipv6_udp = 0;
626 u32 ptp_over_ethernet = 0;
627 u32 snap_type_sel = 0;
628 u32 ts_master_en = 0;
629 u32 ts_event_en = 0;
630
631 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 netdev_alert(priv->dev, "No support for HW time stamping\n");
633 priv->hwts_tx_en = 0;
634 priv->hwts_rx_en = 0;
635
636 return -EOPNOTSUPP;
637 }
638
639 if (copy_from_user(&config, ifr->ifr_data,
640 sizeof(config)))
641 return -EFAULT;
642
643 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 __func__, config.flags, config.tx_type, config.rx_filter);
645
646 if (config.tx_type != HWTSTAMP_TX_OFF &&
647 config.tx_type != HWTSTAMP_TX_ON)
648 return -ERANGE;
649
650 if (priv->adv_ts) {
651 switch (config.rx_filter) {
652 case HWTSTAMP_FILTER_NONE:
653 /* time stamp no incoming packet at all */
654 config.rx_filter = HWTSTAMP_FILTER_NONE;
655 break;
656
657 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 /* PTP v1, UDP, any kind of event packet */
659 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 /* 'xmac' hardware can support Sync, Pdelay_Req and
661 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 * This leaves Delay_Req timestamps out.
663 * Enable all events *and* general purpose message
664 * timestamping
665 */
666 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 break;
670
671 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 /* PTP v1, UDP, Sync packet */
673 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 /* take time stamp for SYNC messages only */
675 ts_event_en = PTP_TCR_TSEVNTENA;
676
677 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 break;
680
681 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 /* PTP v1, UDP, Delay_req packet */
683 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 /* take time stamp for Delay_Req messages only */
685 ts_master_en = PTP_TCR_TSMSTRENA;
686 ts_event_en = PTP_TCR_TSEVNTENA;
687
688 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 break;
691
692 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 /* PTP v2, UDP, any kind of event packet */
694 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 ptp_v2 = PTP_TCR_TSVER2ENA;
696 /* take time stamp for all event messages */
697 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698
699 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 break;
702
703 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 /* PTP v2, UDP, Sync packet */
705 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 ptp_v2 = PTP_TCR_TSVER2ENA;
707 /* take time stamp for SYNC messages only */
708 ts_event_en = PTP_TCR_TSEVNTENA;
709
710 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 break;
713
714 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 /* PTP v2, UDP, Delay_req packet */
716 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 ptp_v2 = PTP_TCR_TSVER2ENA;
718 /* take time stamp for Delay_Req messages only */
719 ts_master_en = PTP_TCR_TSMSTRENA;
720 ts_event_en = PTP_TCR_TSEVNTENA;
721
722 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 break;
725
726 case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 /* PTP v2/802.AS1 any layer, any kind of event packet */
728 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 ptp_v2 = PTP_TCR_TSVER2ENA;
730 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 if (priv->synopsys_id < DWMAC_CORE_4_10)
732 ts_event_en = PTP_TCR_TSEVNTENA;
733 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 ptp_over_ethernet = PTP_TCR_TSIPENA;
736 break;
737
738 case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 /* PTP v2/802.AS1, any layer, Sync packet */
740 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 ptp_v2 = PTP_TCR_TSVER2ENA;
742 /* take time stamp for SYNC messages only */
743 ts_event_en = PTP_TCR_TSEVNTENA;
744
745 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 ptp_over_ethernet = PTP_TCR_TSIPENA;
748 break;
749
750 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 /* PTP v2/802.AS1, any layer, Delay_req packet */
752 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 ptp_v2 = PTP_TCR_TSVER2ENA;
754 /* take time stamp for Delay_Req messages only */
755 ts_master_en = PTP_TCR_TSMSTRENA;
756 ts_event_en = PTP_TCR_TSEVNTENA;
757
758 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 ptp_over_ethernet = PTP_TCR_TSIPENA;
761 break;
762
763 case HWTSTAMP_FILTER_NTP_ALL:
764 case HWTSTAMP_FILTER_ALL:
765 /* time stamp any incoming packet */
766 config.rx_filter = HWTSTAMP_FILTER_ALL;
767 tstamp_all = PTP_TCR_TSENALL;
768 break;
769
770 default:
771 return -ERANGE;
772 }
773 } else {
774 switch (config.rx_filter) {
775 case HWTSTAMP_FILTER_NONE:
776 config.rx_filter = HWTSTAMP_FILTER_NONE;
777 break;
778 default:
779 /* PTP v1, UDP, any kind of event packet */
780 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 break;
782 }
783 }
784 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786
787 priv->systime_flags = STMMAC_HWTS_ACTIVE;
788
789 if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 priv->systime_flags |= tstamp_all | ptp_v2 |
791 ptp_over_ethernet | ptp_over_ipv6_udp |
792 ptp_over_ipv4_udp | ts_event_en |
793 ts_master_en | snap_type_sel;
794 }
795
796 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797
798 memcpy(&priv->tstamp_config, &config, sizeof(config));
799
800 return copy_to_user(ifr->ifr_data, &config,
801 sizeof(config)) ? -EFAULT : 0;
802 }
803
804 /**
805 * stmmac_hwtstamp_get - read hardware timestamping.
806 * @dev: device pointer.
807 * @ifr: An IOCTL-specific structure that can contain a pointer to
808 * a proprietary structure used to pass information to the driver.
809 * Description:
810 * This function obtains the current hardware timestamping settings
811 * as requested.
812 */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 struct stmmac_priv *priv = netdev_priv(dev);
816 struct hwtstamp_config *config = &priv->tstamp_config;
817
818 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 return -EOPNOTSUPP;
820
821 return copy_to_user(ifr->ifr_data, config,
822 sizeof(*config)) ? -EFAULT : 0;
823 }
824
825 /**
826 * stmmac_init_tstamp_counter - init hardware timestamping counter
827 * @priv: driver private structure
828 * @systime_flags: timestamping flags
829 * Description:
830 * Initialize hardware counter for packet timestamping.
831 * This is valid as long as the interface is open and not suspended.
832 * Will be rerun after resuming from suspend, in which case the timestamping
833 * flags updated by stmmac_hwtstamp_set() also need to be restored.
834 */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 struct timespec64 now;
839 u32 sec_inc = 0;
840 u64 temp = 0;
841
842 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 return -EOPNOTSUPP;
844
845 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 priv->systime_flags = systime_flags;
847
848 /* program Sub Second Increment reg */
849 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 priv->plat->clk_ptp_rate,
851 xmac, &sec_inc);
852 temp = div_u64(1000000000ULL, sec_inc);
853
854 /* Store sub second increment for later use */
855 priv->sub_second_inc = sec_inc;
856
857 /* calculate the default addend value:
858 * the formula is:
859 * addend = (2^32)/freq_div_ratio;
860 * where, freq_div_ratio = 1e9ns/sec_inc
861 */
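/* Note: a worked example with assumed numbers: if clk_ptp_rate is 62.5 MHz
 * and sec_inc works out to 20 ns, then freq_div_ratio = 1e9 / 20 = 5e7 and
 * addend = (5e7 * 2^32) / 62.5e6 = 0.8 * 2^32 ~= 0xCCCCCCCC, which matches
 * the div_u64() computation done in this function.
 */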
862 temp = (u64)(temp << 32);
863 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865
866 /* initialize system time */
867 ktime_get_real_ts64(&now);
868
869 /* lower 32 bits of tv_sec are safe until y2106 */
870 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871
872 return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875
876 /**
877 * stmmac_init_ptp - init PTP
878 * @priv: driver private structure
879 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880 * This is done by looking at the HW cap. register.
881 * This function also registers the ptp driver.
882 */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 int ret;
887
888 if (priv->plat->ptp_clk_freq_config)
889 priv->plat->ptp_clk_freq_config(priv);
890
891 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 if (ret)
893 return ret;
894
895 priv->adv_ts = 0;
896 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 if (xmac && priv->dma_cap.atime_stamp)
898 priv->adv_ts = 1;
899 /* Dwmac 3.x core with extend_desc can support adv_ts */
900 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 priv->adv_ts = 1;
902
903 if (priv->dma_cap.time_stamp)
904 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905
906 if (priv->adv_ts)
907 netdev_info(priv->dev,
908 "IEEE 1588-2008 Advanced Timestamp supported\n");
909
910 priv->hwts_tx_en = 0;
911 priv->hwts_rx_en = 0;
912
913 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 stmmac_hwtstamp_correct_latency(priv, priv);
915
916 return 0;
917 }
918
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 stmmac_ptp_unregister(priv);
923 }
924
925 /**
926 * stmmac_mac_flow_ctrl - Configure flow control in all queues
927 * @priv: driver private structure
928 * @duplex: duplex passed to the next function
929 * Description: It is used for configuring the flow control in all queues
930 */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 u32 tx_cnt = priv->plat->tx_queues_to_use;
934
935 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 priv->pause, tx_cnt);
937 }
938
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 phy_interface_t interface)
941 {
942 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943
944 if (priv->hw->xpcs)
945 return &priv->hw->xpcs->pcs;
946
947 if (priv->hw->lynx_pcs)
948 return priv->hw->lynx_pcs;
949
950 return NULL;
951 }
952
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 const struct phylink_link_state *state)
955 {
956 /* Nothing to do, xpcs_config() handles everything */
957 }
958
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 bool *hs_enable = &fpe_cfg->hs_enable;
965
966 if (is_up && *hs_enable) {
967 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 } else {
969 *lo_state = FPE_STATE_OFF;
970 *lp_state = FPE_STATE_OFF;
971 }
972 }
973
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 unsigned int mode, phy_interface_t interface)
976 {
977 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978
979 stmmac_mac_set(priv, priv->ioaddr, false);
980 priv->eee_active = false;
981 priv->tx_lpi_enabled = false;
982 priv->eee_enabled = stmmac_eee_init(priv);
983 stmmac_set_eee_pls(priv, priv->hw, false);
984
985 if (priv->dma_cap.fpesel)
986 stmmac_fpe_link_state_handle(priv, false);
987 }
988
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 struct phy_device *phy,
991 unsigned int mode, phy_interface_t interface,
992 int speed, int duplex,
993 bool tx_pause, bool rx_pause)
994 {
995 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 u32 old_ctrl, ctrl;
997
998 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 priv->plat->serdes_powerup)
1000 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001
1002 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004
1005 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 switch (speed) {
1007 case SPEED_10000:
1008 ctrl |= priv->hw->link.xgmii.speed10000;
1009 break;
1010 case SPEED_5000:
1011 ctrl |= priv->hw->link.xgmii.speed5000;
1012 break;
1013 case SPEED_2500:
1014 ctrl |= priv->hw->link.xgmii.speed2500;
1015 break;
1016 default:
1017 return;
1018 }
1019 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 switch (speed) {
1021 case SPEED_100000:
1022 ctrl |= priv->hw->link.xlgmii.speed100000;
1023 break;
1024 case SPEED_50000:
1025 ctrl |= priv->hw->link.xlgmii.speed50000;
1026 break;
1027 case SPEED_40000:
1028 ctrl |= priv->hw->link.xlgmii.speed40000;
1029 break;
1030 case SPEED_25000:
1031 ctrl |= priv->hw->link.xlgmii.speed25000;
1032 break;
1033 case SPEED_10000:
1034 ctrl |= priv->hw->link.xgmii.speed10000;
1035 break;
1036 case SPEED_2500:
1037 ctrl |= priv->hw->link.speed2500;
1038 break;
1039 case SPEED_1000:
1040 ctrl |= priv->hw->link.speed1000;
1041 break;
1042 default:
1043 return;
1044 }
1045 } else {
1046 switch (speed) {
1047 case SPEED_2500:
1048 ctrl |= priv->hw->link.speed2500;
1049 break;
1050 case SPEED_1000:
1051 ctrl |= priv->hw->link.speed1000;
1052 break;
1053 case SPEED_100:
1054 ctrl |= priv->hw->link.speed100;
1055 break;
1056 case SPEED_10:
1057 ctrl |= priv->hw->link.speed10;
1058 break;
1059 default:
1060 return;
1061 }
1062 }
1063
1064 priv->speed = speed;
1065
1066 if (priv->plat->fix_mac_speed)
1067 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068
1069 if (!duplex)
1070 ctrl &= ~priv->hw->link.duplex;
1071 else
1072 ctrl |= priv->hw->link.duplex;
1073
1074 /* Flow Control operation */
1075 if (rx_pause && tx_pause)
1076 priv->flow_ctrl = FLOW_AUTO;
1077 else if (rx_pause && !tx_pause)
1078 priv->flow_ctrl = FLOW_RX;
1079 else if (!rx_pause && tx_pause)
1080 priv->flow_ctrl = FLOW_TX;
1081 else
1082 priv->flow_ctrl = FLOW_OFF;
1083
1084 stmmac_mac_flow_ctrl(priv, duplex);
1085
1086 if (ctrl != old_ctrl)
1087 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088
1089 stmmac_mac_set(priv, priv->ioaddr, true);
1090 if (phy && priv->dma_cap.eee) {
1091 priv->eee_active =
1092 phy_init_eee(phy, !(priv->plat->flags &
1093 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 priv->eee_enabled = stmmac_eee_init(priv);
1095 priv->tx_lpi_enabled = priv->eee_enabled;
1096 stmmac_set_eee_pls(priv, priv->hw, true);
1097 }
1098
1099 if (priv->dma_cap.fpesel)
1100 stmmac_fpe_link_state_handle(priv, true);
1101
1102 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 .mac_select_pcs = stmmac_mac_select_pcs,
1108 .mac_config = stmmac_mac_config,
1109 .mac_link_down = stmmac_mac_link_down,
1110 .mac_link_up = stmmac_mac_link_up,
1111 };
1112
1113 /**
1114 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115 * @priv: driver private structure
1116 * Description: this is to verify if the HW supports the Physical Coding
1117 * Sublayer (PCS), which can be used when the MAC is configured for the
1118 * TBI, RTBI, or SGMII PHY interface.
1119 */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 int interface = priv->plat->mac_interface;
1123
1124 if (priv->dma_cap.pcs) {
1125 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 priv->hw->pcs = STMMAC_PCS_RGMII;
1131 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 priv->hw->pcs = STMMAC_PCS_SGMII;
1134 }
1135 }
1136 }
1137
1138 /**
1139 * stmmac_init_phy - PHY initialization
1140 * @dev: net device structure
1141 * Description: it initializes the driver's PHY state, and attaches the PHY
1142 * to the mac driver.
1143 * Return value:
1144 * 0 on success
1145 */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 struct stmmac_priv *priv = netdev_priv(dev);
1149 struct fwnode_handle *phy_fwnode;
1150 struct fwnode_handle *fwnode;
1151 int ret;
1152
1153 if (!phylink_expects_phy(priv->phylink))
1154 return 0;
1155
1156 fwnode = priv->plat->port_node;
1157 if (!fwnode)
1158 fwnode = dev_fwnode(priv->device);
1159
1160 if (fwnode)
1161 phy_fwnode = fwnode_get_phy_node(fwnode);
1162 else
1163 phy_fwnode = NULL;
1164
1165 /* Some DT bindings do not set up the PHY handle. Let's try to
1166 * parse it manually
1167 */
1168 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 int addr = priv->plat->phy_addr;
1170 struct phy_device *phydev;
1171
1172 if (addr < 0) {
1173 netdev_err(priv->dev, "no phy found\n");
1174 return -ENODEV;
1175 }
1176
1177 phydev = mdiobus_get_phy(priv->mii, addr);
1178 if (!phydev) {
1179 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 return -ENODEV;
1181 }
1182
1183 ret = phylink_connect_phy(priv->phylink, phydev);
1184 } else {
1185 fwnode_handle_put(phy_fwnode);
1186 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 }
1188
1189 if (!priv->plat->pmt) {
1190 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191
1192 phylink_ethtool_get_wol(priv->phylink, &wol);
1193 device_set_wakeup_capable(priv->device, !!wol.supported);
1194 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 }
1196
1197 return ret;
1198 }
1199
1200 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1201 {
1202 /* Half-Duplex can only work with single tx queue */
1203 if (priv->plat->tx_queues_to_use > 1)
1204 priv->phylink_config.mac_capabilities &=
1205 ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1206 else
1207 priv->phylink_config.mac_capabilities |=
1208 (MAC_10HD | MAC_100HD | MAC_1000HD);
1209 }
1210
1211 static int stmmac_phy_setup(struct stmmac_priv *priv)
1212 {
1213 struct stmmac_mdio_bus_data *mdio_bus_data;
1214 int mode = priv->plat->phy_interface;
1215 struct fwnode_handle *fwnode;
1216 struct phylink *phylink;
1217 int max_speed;
1218
1219 priv->phylink_config.dev = &priv->dev->dev;
1220 priv->phylink_config.type = PHYLINK_NETDEV;
1221 priv->phylink_config.mac_managed_pm = true;
1222
1223 mdio_bus_data = priv->plat->mdio_bus_data;
1224 if (mdio_bus_data)
1225 priv->phylink_config.ovr_an_inband =
1226 mdio_bus_data->xpcs_an_inband;
1227
1228 /* Set the platform/firmware specified interface mode. Note, phylink
1229 * deals with the PHY interface mode, not the MAC interface mode.
1230 */
1231 __set_bit(mode, priv->phylink_config.supported_interfaces);
1232
1233 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1234 if (priv->hw->xpcs)
1235 xpcs_get_interfaces(priv->hw->xpcs,
1236 priv->phylink_config.supported_interfaces);
1237
1238 priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1239 MAC_10FD | MAC_100FD |
1240 MAC_1000FD;
1241
1242 stmmac_set_half_duplex(priv);
1243
1244 /* Get the MAC specific capabilities */
1245 stmmac_mac_phylink_get_caps(priv);
1246
1247 max_speed = priv->plat->max_speed;
1248 if (max_speed)
1249 phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1250
1251 fwnode = priv->plat->port_node;
1252 if (!fwnode)
1253 fwnode = dev_fwnode(priv->device);
1254
1255 phylink = phylink_create(&priv->phylink_config, fwnode,
1256 mode, &stmmac_phylink_mac_ops);
1257 if (IS_ERR(phylink))
1258 return PTR_ERR(phylink);
1259
1260 priv->phylink = phylink;
1261 return 0;
1262 }
1263
1264 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1265 struct stmmac_dma_conf *dma_conf)
1266 {
1267 u32 rx_cnt = priv->plat->rx_queues_to_use;
1268 unsigned int desc_size;
1269 void *head_rx;
1270 u32 queue;
1271
1272 /* Display RX rings */
1273 for (queue = 0; queue < rx_cnt; queue++) {
1274 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1275
1276 pr_info("\tRX Queue %u rings\n", queue);
1277
1278 if (priv->extend_desc) {
1279 head_rx = (void *)rx_q->dma_erx;
1280 desc_size = sizeof(struct dma_extended_desc);
1281 } else {
1282 head_rx = (void *)rx_q->dma_rx;
1283 desc_size = sizeof(struct dma_desc);
1284 }
1285
1286 /* Display RX ring */
1287 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1288 rx_q->dma_rx_phy, desc_size);
1289 }
1290 }
1291
1292 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1293 struct stmmac_dma_conf *dma_conf)
1294 {
1295 u32 tx_cnt = priv->plat->tx_queues_to_use;
1296 unsigned int desc_size;
1297 void *head_tx;
1298 u32 queue;
1299
1300 /* Display TX rings */
1301 for (queue = 0; queue < tx_cnt; queue++) {
1302 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1303
1304 pr_info("\tTX Queue %d rings\n", queue);
1305
1306 if (priv->extend_desc) {
1307 head_tx = (void *)tx_q->dma_etx;
1308 desc_size = sizeof(struct dma_extended_desc);
1309 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1310 head_tx = (void *)tx_q->dma_entx;
1311 desc_size = sizeof(struct dma_edesc);
1312 } else {
1313 head_tx = (void *)tx_q->dma_tx;
1314 desc_size = sizeof(struct dma_desc);
1315 }
1316
1317 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1318 tx_q->dma_tx_phy, desc_size);
1319 }
1320 }
1321
1322 static void stmmac_display_rings(struct stmmac_priv *priv,
1323 struct stmmac_dma_conf *dma_conf)
1324 {
1325 /* Display RX ring */
1326 stmmac_display_rx_rings(priv, dma_conf);
1327
1328 /* Display TX ring */
1329 stmmac_display_tx_rings(priv, dma_conf);
1330 }
1331
1332 static int stmmac_set_bfsize(int mtu, int bufsize)
1333 {
1334 int ret = bufsize;
1335
1336 if (mtu >= BUF_SIZE_8KiB)
1337 ret = BUF_SIZE_16KiB;
1338 else if (mtu >= BUF_SIZE_4KiB)
1339 ret = BUF_SIZE_8KiB;
1340 else if (mtu >= BUF_SIZE_2KiB)
1341 ret = BUF_SIZE_4KiB;
1342 else if (mtu > DEFAULT_BUFSIZE)
1343 ret = BUF_SIZE_2KiB;
1344 else
1345 ret = DEFAULT_BUFSIZE;
1346
1347 return ret;
1348 }
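/* Note: the mapping implemented by stmmac_set_bfsize(), assuming the usual
 * 2K/4K/8K/16K values for the BUF_SIZE_* constants: mtu <= 1536 keeps
 * DEFAULT_BUFSIZE (1536), 1537..2047 -> BUF_SIZE_2KiB, 2048..4095 ->
 * BUF_SIZE_4KiB, 4096..8191 -> BUF_SIZE_8KiB, and mtu >= 8192 ->
 * BUF_SIZE_16KiB.
 */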
1349
1350 /**
1351 * stmmac_clear_rx_descriptors - clear RX descriptors
1352 * @priv: driver private structure
1353 * @dma_conf: structure to take the dma data
1354 * @queue: RX queue index
1355 * Description: this function is called to clear the RX descriptors
1356 * whether basic or extended descriptors are in use.
1357 */
1358 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1359 struct stmmac_dma_conf *dma_conf,
1360 u32 queue)
1361 {
1362 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1363 int i;
1364
1365 /* Clear the RX descriptors */
1366 for (i = 0; i < dma_conf->dma_rx_size; i++)
1367 if (priv->extend_desc)
1368 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1369 priv->use_riwt, priv->mode,
1370 (i == dma_conf->dma_rx_size - 1),
1371 dma_conf->dma_buf_sz);
1372 else
1373 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1374 priv->use_riwt, priv->mode,
1375 (i == dma_conf->dma_rx_size - 1),
1376 dma_conf->dma_buf_sz);
1377 }
1378
1379 /**
1380 * stmmac_clear_tx_descriptors - clear tx descriptors
1381 * @priv: driver private structure
1382 * @dma_conf: structure to take the dma data
1383 * @queue: TX queue index.
1384 * Description: this function is called to clear the TX descriptors
1385 * whether basic or extended descriptors are in use.
1386 */
1387 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1388 struct stmmac_dma_conf *dma_conf,
1389 u32 queue)
1390 {
1391 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1392 int i;
1393
1394 /* Clear the TX descriptors */
1395 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1396 int last = (i == (dma_conf->dma_tx_size - 1));
1397 struct dma_desc *p;
1398
1399 if (priv->extend_desc)
1400 p = &tx_q->dma_etx[i].basic;
1401 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1402 p = &tx_q->dma_entx[i].basic;
1403 else
1404 p = &tx_q->dma_tx[i];
1405
1406 stmmac_init_tx_desc(priv, p, priv->mode, last);
1407 }
1408 }
1409
1410 /**
1411 * stmmac_clear_descriptors - clear descriptors
1412 * @priv: driver private structure
1413 * @dma_conf: structure to take the dma data
1414 * Description: this function is called to clear the TX and RX descriptors
1415 * whether basic or extended descriptors are in use.
1416 */
1417 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1418 struct stmmac_dma_conf *dma_conf)
1419 {
1420 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1421 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1422 u32 queue;
1423
1424 /* Clear the RX descriptors */
1425 for (queue = 0; queue < rx_queue_cnt; queue++)
1426 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1427
1428 /* Clear the TX descriptors */
1429 for (queue = 0; queue < tx_queue_cnt; queue++)
1430 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1431 }
1432
1433 /**
1434 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1435 * @priv: driver private structure
1436 * @dma_conf: structure to take the dma data
1437 * @p: descriptor pointer
1438 * @i: descriptor index
1439 * @flags: gfp flag
1440 * @queue: RX queue index
1441 * Description: this function is called to allocate a receive buffer, perform
1442 * the DMA mapping and init the descriptor.
1443 */
1444 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1445 struct stmmac_dma_conf *dma_conf,
1446 struct dma_desc *p,
1447 int i, gfp_t flags, u32 queue)
1448 {
1449 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1450 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1451 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1452
1453 if (priv->dma_cap.host_dma_width <= 32)
1454 gfp |= GFP_DMA32;
1455
1456 if (!buf->page) {
1457 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1458 if (!buf->page)
1459 return -ENOMEM;
1460 buf->page_offset = stmmac_rx_offset(priv);
1461 }
1462
1463 if (priv->sph && !buf->sec_page) {
1464 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1465 if (!buf->sec_page)
1466 return -ENOMEM;
1467
1468 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1469 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1470 } else {
1471 buf->sec_page = NULL;
1472 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1473 }
1474
1475 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1476
1477 stmmac_set_desc_addr(priv, p, buf->addr);
1478 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1479 stmmac_init_desc3(priv, p);
1480
1481 return 0;
1482 }
1483
1484 /**
1485 * stmmac_free_rx_buffer - free RX dma buffers
1486 * @priv: private structure
1487 * @rx_q: RX queue
1488 * @i: buffer index.
1489 */
1490 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1491 struct stmmac_rx_queue *rx_q,
1492 int i)
1493 {
1494 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1495
1496 if (buf->page)
1497 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1498 buf->page = NULL;
1499
1500 if (buf->sec_page)
1501 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1502 buf->sec_page = NULL;
1503 }
1504
1505 /**
1506 * stmmac_free_tx_buffer - free TX dma buffers
1507 * @priv: private structure
1508 * @dma_conf: structure to take the dma data
1509 * @queue: TX queue index
1510 * @i: buffer index.
1511 */
1512 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1513 struct stmmac_dma_conf *dma_conf,
1514 u32 queue, int i)
1515 {
1516 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1517
1518 if (tx_q->tx_skbuff_dma[i].buf &&
1519 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1520 if (tx_q->tx_skbuff_dma[i].map_as_page)
1521 dma_unmap_page(priv->device,
1522 tx_q->tx_skbuff_dma[i].buf,
1523 tx_q->tx_skbuff_dma[i].len,
1524 DMA_TO_DEVICE);
1525 else
1526 dma_unmap_single(priv->device,
1527 tx_q->tx_skbuff_dma[i].buf,
1528 tx_q->tx_skbuff_dma[i].len,
1529 DMA_TO_DEVICE);
1530 }
1531
1532 if (tx_q->xdpf[i] &&
1533 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1534 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1535 xdp_return_frame(tx_q->xdpf[i]);
1536 tx_q->xdpf[i] = NULL;
1537 }
1538
1539 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1540 tx_q->xsk_frames_done++;
1541
1542 if (tx_q->tx_skbuff[i] &&
1543 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1544 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1545 tx_q->tx_skbuff[i] = NULL;
1546 }
1547
1548 tx_q->tx_skbuff_dma[i].buf = 0;
1549 tx_q->tx_skbuff_dma[i].map_as_page = false;
1550 }
1551
1552 /**
1553 * dma_free_rx_skbufs - free RX dma buffers
1554 * @priv: private structure
1555 * @dma_conf: structure to take the dma data
1556 * @queue: RX queue index
1557 */
1558 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1559 struct stmmac_dma_conf *dma_conf,
1560 u32 queue)
1561 {
1562 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1563 int i;
1564
1565 for (i = 0; i < dma_conf->dma_rx_size; i++)
1566 stmmac_free_rx_buffer(priv, rx_q, i);
1567 }
1568
1569 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1570 struct stmmac_dma_conf *dma_conf,
1571 u32 queue, gfp_t flags)
1572 {
1573 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1574 int i;
1575
1576 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1577 struct dma_desc *p;
1578 int ret;
1579
1580 if (priv->extend_desc)
1581 p = &((rx_q->dma_erx + i)->basic);
1582 else
1583 p = rx_q->dma_rx + i;
1584
1585 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1586 queue);
1587 if (ret)
1588 return ret;
1589
1590 rx_q->buf_alloc_num++;
1591 }
1592
1593 return 0;
1594 }
1595
1596 /**
1597 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1598 * @priv: private structure
1599 * @dma_conf: structure to take the dma data
1600 * @queue: RX queue index
1601 */
1602 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1603 struct stmmac_dma_conf *dma_conf,
1604 u32 queue)
1605 {
1606 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1607 int i;
1608
1609 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1610 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1611
1612 if (!buf->xdp)
1613 continue;
1614
1615 xsk_buff_free(buf->xdp);
1616 buf->xdp = NULL;
1617 }
1618 }
1619
1620 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1621 struct stmmac_dma_conf *dma_conf,
1622 u32 queue)
1623 {
1624 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1625 int i;
1626
1627 /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1628 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1629 * use this macro to make sure there are no size violations.
1630 */
1631 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1632
1633 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1634 struct stmmac_rx_buffer *buf;
1635 dma_addr_t dma_addr;
1636 struct dma_desc *p;
1637
1638 if (priv->extend_desc)
1639 p = (struct dma_desc *)(rx_q->dma_erx + i);
1640 else
1641 p = rx_q->dma_rx + i;
1642
1643 buf = &rx_q->buf_pool[i];
1644
1645 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1646 if (!buf->xdp)
1647 return -ENOMEM;
1648
1649 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1650 stmmac_set_desc_addr(priv, p, dma_addr);
1651 rx_q->buf_alloc_num++;
1652 }
1653
1654 return 0;
1655 }
1656
1657 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1658 {
1659 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1660 return NULL;
1661
1662 return xsk_get_pool_from_qid(priv->dev, queue);
1663 }
1664
1665 /**
1666 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1667 * @priv: driver private structure
1668 * @dma_conf: structure to take the dma data
1669 * @queue: RX queue index
1670 * @flags: gfp flag.
1671 * Description: this function initializes the DMA RX descriptors
1672 * and allocates the socket buffers. It supports the chained and ring
1673 * modes.
1674 */
1675 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1676 struct stmmac_dma_conf *dma_conf,
1677 u32 queue, gfp_t flags)
1678 {
1679 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1680 int ret;
1681
1682 netif_dbg(priv, probe, priv->dev,
1683 "(%s) dma_rx_phy=0x%08x\n", __func__,
1684 (u32)rx_q->dma_rx_phy);
1685
1686 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1687
1688 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1689
1690 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1691
1692 if (rx_q->xsk_pool) {
1693 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1694 MEM_TYPE_XSK_BUFF_POOL,
1695 NULL));
1696 netdev_info(priv->dev,
1697 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1698 rx_q->queue_index);
1699 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1700 } else {
1701 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1702 MEM_TYPE_PAGE_POOL,
1703 rx_q->page_pool));
1704 netdev_info(priv->dev,
1705 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1706 rx_q->queue_index);
1707 }
1708
1709 if (rx_q->xsk_pool) {
1710 /* RX XDP ZC buffer pool may not be populated, e.g.
1711 * xdpsock TX-only.
1712 */
1713 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1714 } else {
1715 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1716 if (ret < 0)
1717 return -ENOMEM;
1718 }
1719
1720 /* Setup the chained descriptor addresses */
1721 if (priv->mode == STMMAC_CHAIN_MODE) {
1722 if (priv->extend_desc)
1723 stmmac_mode_init(priv, rx_q->dma_erx,
1724 rx_q->dma_rx_phy,
1725 dma_conf->dma_rx_size, 1);
1726 else
1727 stmmac_mode_init(priv, rx_q->dma_rx,
1728 rx_q->dma_rx_phy,
1729 dma_conf->dma_rx_size, 0);
1730 }
1731
1732 return 0;
1733 }
1734
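/**
 * init_dma_rx_desc_rings - init the RX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initializes the RX descriptor ring of every RX queue in use;
 * on failure it frees the buffers already allocated for the lower queues.
 */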
1735 static int init_dma_rx_desc_rings(struct net_device *dev,
1736 struct stmmac_dma_conf *dma_conf,
1737 gfp_t flags)
1738 {
1739 struct stmmac_priv *priv = netdev_priv(dev);
1740 u32 rx_count = priv->plat->rx_queues_to_use;
1741 int queue;
1742 int ret;
1743
1744 /* RX INITIALIZATION */
1745 netif_dbg(priv, probe, priv->dev,
1746 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1747
1748 for (queue = 0; queue < rx_count; queue++) {
1749 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1750 if (ret)
1751 goto err_init_rx_buffers;
1752 }
1753
1754 return 0;
1755
1756 err_init_rx_buffers:
1757 while (queue >= 0) {
1758 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1759
1760 if (rx_q->xsk_pool)
1761 dma_free_rx_xskbufs(priv, dma_conf, queue);
1762 else
1763 dma_free_rx_skbufs(priv, dma_conf, queue);
1764
1765 rx_q->buf_alloc_num = 0;
1766 rx_q->xsk_pool = NULL;
1767
1768 queue--;
1769 }
1770
1771 return ret;
1772 }
1773
1774 /**
1775 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1776 * @priv: driver private structure
1777 * @dma_conf: structure to take the dma data
1778 * @queue: TX queue index
1779 * Description: this function initializes the DMA TX descriptors
1780 * and allocates the socket buffers. It supports the chained and ring
1781 * modes.
1782 */
1783 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1784 struct stmmac_dma_conf *dma_conf,
1785 u32 queue)
1786 {
1787 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1788 int i;
1789
1790 netif_dbg(priv, probe, priv->dev,
1791 "(%s) dma_tx_phy=0x%08x\n", __func__,
1792 (u32)tx_q->dma_tx_phy);
1793
1794 /* Setup the chained descriptor addresses */
1795 if (priv->mode == STMMAC_CHAIN_MODE) {
1796 if (priv->extend_desc)
1797 stmmac_mode_init(priv, tx_q->dma_etx,
1798 tx_q->dma_tx_phy,
1799 dma_conf->dma_tx_size, 1);
1800 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1801 stmmac_mode_init(priv, tx_q->dma_tx,
1802 tx_q->dma_tx_phy,
1803 dma_conf->dma_tx_size, 0);
1804 }
1805
1806 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1807
1808 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1809 struct dma_desc *p;
1810
1811 if (priv->extend_desc)
1812 p = &((tx_q->dma_etx + i)->basic);
1813 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1814 p = &((tx_q->dma_entx + i)->basic);
1815 else
1816 p = tx_q->dma_tx + i;
1817
1818 stmmac_clear_desc(priv, p);
1819
1820 tx_q->tx_skbuff_dma[i].buf = 0;
1821 tx_q->tx_skbuff_dma[i].map_as_page = false;
1822 tx_q->tx_skbuff_dma[i].len = 0;
1823 tx_q->tx_skbuff_dma[i].last_segment = false;
1824 tx_q->tx_skbuff[i] = NULL;
1825 }
1826
1827 return 0;
1828 }
1829
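/**
 * init_dma_tx_desc_rings - init the TX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initializes the TX descriptor ring of every TX queue in use.
 */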
1830 static int init_dma_tx_desc_rings(struct net_device *dev,
1831 struct stmmac_dma_conf *dma_conf)
1832 {
1833 struct stmmac_priv *priv = netdev_priv(dev);
1834 u32 tx_queue_cnt;
1835 u32 queue;
1836
1837 tx_queue_cnt = priv->plat->tx_queues_to_use;
1838
1839 for (queue = 0; queue < tx_queue_cnt; queue++)
1840 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1841
1842 return 0;
1843 }
1844
1845 /**
1846 * init_dma_desc_rings - init the RX/TX descriptor rings
1847 * @dev: net device structure
1848 * @dma_conf: structure to take the dma data
1849 * @flags: gfp flag.
1850 * Description: this function initializes the DMA RX/TX descriptors
1851 * and allocates the socket buffers. It supports the chained and ring
1852 * modes.
1853 */
1854 static int init_dma_desc_rings(struct net_device *dev,
1855 struct stmmac_dma_conf *dma_conf,
1856 gfp_t flags)
1857 {
1858 struct stmmac_priv *priv = netdev_priv(dev);
1859 int ret;
1860
1861 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1862 if (ret)
1863 return ret;
1864
1865 ret = init_dma_tx_desc_rings(dev, dma_conf);
1866
1867 stmmac_clear_descriptors(priv, dma_conf);
1868
1869 if (netif_msg_hw(priv))
1870 stmmac_display_rings(priv, dma_conf);
1871
1872 return ret;
1873 }
1874
1875 /**
1876 * dma_free_tx_skbufs - free TX dma buffers
1877 * @priv: private structure
1878 * @dma_conf: structure to take the dma data
1879 * @queue: TX queue index
1880 */
1881 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1882 struct stmmac_dma_conf *dma_conf,
1883 u32 queue)
1884 {
1885 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1886 int i;
1887
1888 tx_q->xsk_frames_done = 0;
1889
1890 for (i = 0; i < dma_conf->dma_tx_size; i++)
1891 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1892
1893 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1894 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1895 tx_q->xsk_frames_done = 0;
1896 tx_q->xsk_pool = NULL;
1897 }
1898 }
1899
1900 /**
1901 * stmmac_free_tx_skbufs - free TX skb buffers
1902 * @priv: private structure
1903 */
1904 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1905 {
1906 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1907 u32 queue;
1908
1909 for (queue = 0; queue < tx_queue_cnt; queue++)
1910 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1911 }
1912
1913 /**
1914 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1915 * @priv: private structure
1916 * @dma_conf: structure to take the dma data
1917 * @queue: RX queue index
1918 */
1919 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1920 struct stmmac_dma_conf *dma_conf,
1921 u32 queue)
1922 {
1923 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1924
1925 /* Release the DMA RX socket buffers */
1926 if (rx_q->xsk_pool)
1927 dma_free_rx_xskbufs(priv, dma_conf, queue);
1928 else
1929 dma_free_rx_skbufs(priv, dma_conf, queue);
1930
1931 rx_q->buf_alloc_num = 0;
1932 rx_q->xsk_pool = NULL;
1933
1934 /* Free DMA regions of consistent memory previously allocated */
1935 if (!priv->extend_desc)
1936 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1937 sizeof(struct dma_desc),
1938 rx_q->dma_rx, rx_q->dma_rx_phy);
1939 else
1940 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1941 sizeof(struct dma_extended_desc),
1942 rx_q->dma_erx, rx_q->dma_rx_phy);
1943
1944 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1945 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1946
1947 kfree(rx_q->buf_pool);
1948 if (rx_q->page_pool)
1949 page_pool_destroy(rx_q->page_pool);
1950 }
1951
1952 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1953 struct stmmac_dma_conf *dma_conf)
1954 {
1955 u32 rx_count = priv->plat->rx_queues_to_use;
1956 u32 queue;
1957
1958 /* Free RX queue resources */
1959 for (queue = 0; queue < rx_count; queue++)
1960 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1961 }
1962
1963 /**
1964 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1965 * @priv: private structure
1966 * @dma_conf: structure to take the dma data
1967 * @queue: TX queue index
1968 */
1969 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1970 struct stmmac_dma_conf *dma_conf,
1971 u32 queue)
1972 {
1973 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1974 size_t size;
1975 void *addr;
1976
1977 /* Release the DMA TX socket buffers */
1978 dma_free_tx_skbufs(priv, dma_conf, queue);
1979
1980 if (priv->extend_desc) {
1981 size = sizeof(struct dma_extended_desc);
1982 addr = tx_q->dma_etx;
1983 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1984 size = sizeof(struct dma_edesc);
1985 addr = tx_q->dma_entx;
1986 } else {
1987 size = sizeof(struct dma_desc);
1988 addr = tx_q->dma_tx;
1989 }
1990
1991 size *= dma_conf->dma_tx_size;
1992
1993 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1994
1995 kfree(tx_q->tx_skbuff_dma);
1996 kfree(tx_q->tx_skbuff);
1997 }
1998
1999 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2000 struct stmmac_dma_conf *dma_conf)
2001 {
2002 u32 tx_count = priv->plat->tx_queues_to_use;
2003 u32 queue;
2004
2005 /* Free TX queue resources */
2006 for (queue = 0; queue < tx_count; queue++)
2007 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2008 }
2009
2010 /**
2011 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2012 * @priv: private structure
2013 * @dma_conf: structure to take the dma data
2014 * @queue: RX queue index
2015  * Description: according to which descriptor can be used (extended or basic)
2016  * this function allocates the RX path resources for the given queue. The
2017  * RX buffers are pre-allocated from a page pool in order to allow the
2018  * zero-copy mechanism.
2019 */
2020 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2021 struct stmmac_dma_conf *dma_conf,
2022 u32 queue)
2023 {
2024 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2025 struct stmmac_channel *ch = &priv->channel[queue];
2026 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2027 struct page_pool_params pp_params = { 0 };
2028 unsigned int num_pages;
2029 unsigned int napi_id;
2030 int ret;
2031
2032 rx_q->queue_index = queue;
2033 rx_q->priv_data = priv;
2034
2035 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2036 pp_params.pool_size = dma_conf->dma_rx_size;
2037 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2038 pp_params.order = ilog2(num_pages);
2039 pp_params.nid = dev_to_node(priv->device);
2040 pp_params.dev = priv->device;
2041 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2042 pp_params.offset = stmmac_rx_offset(priv);
2043 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2044
2045 rx_q->page_pool = page_pool_create(&pp_params);
2046 if (IS_ERR(rx_q->page_pool)) {
2047 ret = PTR_ERR(rx_q->page_pool);
2048 rx_q->page_pool = NULL;
2049 return ret;
2050 }
2051
2052 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2053 sizeof(*rx_q->buf_pool),
2054 GFP_KERNEL);
2055 if (!rx_q->buf_pool)
2056 return -ENOMEM;
2057
2058 if (priv->extend_desc) {
2059 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2060 dma_conf->dma_rx_size *
2061 sizeof(struct dma_extended_desc),
2062 &rx_q->dma_rx_phy,
2063 GFP_KERNEL);
2064 if (!rx_q->dma_erx)
2065 return -ENOMEM;
2066
2067 } else {
2068 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2069 dma_conf->dma_rx_size *
2070 sizeof(struct dma_desc),
2071 &rx_q->dma_rx_phy,
2072 GFP_KERNEL);
2073 if (!rx_q->dma_rx)
2074 return -ENOMEM;
2075 }
2076
2077 if (stmmac_xdp_is_enabled(priv) &&
2078 test_bit(queue, priv->af_xdp_zc_qps))
2079 napi_id = ch->rxtx_napi.napi_id;
2080 else
2081 napi_id = ch->rx_napi.napi_id;
2082
2083 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2084 rx_q->queue_index,
2085 napi_id);
2086 if (ret) {
2087 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2088 return -EINVAL;
2089 }
2090
2091 return 0;
2092 }
2093
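/**
 * alloc_dma_rx_desc_resources - alloc RX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: calls __alloc_dma_rx_desc_resources() for every RX queue in
 * use and releases everything already allocated if one of them fails.
 */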
2094 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2095 struct stmmac_dma_conf *dma_conf)
2096 {
2097 u32 rx_count = priv->plat->rx_queues_to_use;
2098 u32 queue;
2099 int ret;
2100
2101 /* RX queues buffers and DMA */
2102 for (queue = 0; queue < rx_count; queue++) {
2103 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2104 if (ret)
2105 goto err_dma;
2106 }
2107
2108 return 0;
2109
2110 err_dma:
2111 free_dma_rx_desc_resources(priv, dma_conf);
2112
2113 return ret;
2114 }
2115
2116 /**
2117 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2118 * @priv: private structure
2119 * @dma_conf: structure to take the dma data
2120 * @queue: TX queue index
2121  * Description: according to which descriptor can be used (extended or basic)
2122  * this function allocates the TX path resources for the given queue: the
2123  * tx_skbuff/tx_skbuff_dma arrays and the coherent memory backing the TX
2124  * descriptor ring.
2125 */
2126 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2127 struct stmmac_dma_conf *dma_conf,
2128 u32 queue)
2129 {
2130 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2131 size_t size;
2132 void *addr;
2133
2134 tx_q->queue_index = queue;
2135 tx_q->priv_data = priv;
2136
2137 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2138 sizeof(*tx_q->tx_skbuff_dma),
2139 GFP_KERNEL);
2140 if (!tx_q->tx_skbuff_dma)
2141 return -ENOMEM;
2142
2143 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2144 sizeof(struct sk_buff *),
2145 GFP_KERNEL);
2146 if (!tx_q->tx_skbuff)
2147 return -ENOMEM;
2148
2149 if (priv->extend_desc)
2150 size = sizeof(struct dma_extended_desc);
2151 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152 size = sizeof(struct dma_edesc);
2153 else
2154 size = sizeof(struct dma_desc);
2155
2156 size *= dma_conf->dma_tx_size;
2157
2158 addr = dma_alloc_coherent(priv->device, size,
2159 &tx_q->dma_tx_phy, GFP_KERNEL);
2160 if (!addr)
2161 return -ENOMEM;
2162
2163 if (priv->extend_desc)
2164 tx_q->dma_etx = addr;
2165 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2166 tx_q->dma_entx = addr;
2167 else
2168 tx_q->dma_tx = addr;
2169
2170 return 0;
2171 }
2172
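/**
 * alloc_dma_tx_desc_resources - alloc TX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: calls __alloc_dma_tx_desc_resources() for every TX queue in
 * use and releases everything already allocated if one of them fails.
 */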
2173 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2174 struct stmmac_dma_conf *dma_conf)
2175 {
2176 u32 tx_count = priv->plat->tx_queues_to_use;
2177 u32 queue;
2178 int ret;
2179
2180 /* TX queues buffers and DMA */
2181 for (queue = 0; queue < tx_count; queue++) {
2182 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2183 if (ret)
2184 goto err_dma;
2185 }
2186
2187 return 0;
2188
2189 err_dma:
2190 free_dma_tx_desc_resources(priv, dma_conf);
2191 return ret;
2192 }
2193
2194 /**
2195 * alloc_dma_desc_resources - alloc TX/RX resources.
2196 * @priv: private structure
2197 * @dma_conf: structure to take the dma data
2198  * Description: according to which descriptor can be used (extended or basic)
2199  * this function allocates the resources for the TX and RX paths. In case of
2200  * reception, for example, it pre-allocates the RX socket buffers in order to
2201  * allow the zero-copy mechanism.
2202 */
2203 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2204 struct stmmac_dma_conf *dma_conf)
2205 {
2206 /* RX Allocation */
2207 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2208
2209 if (ret)
2210 return ret;
2211
2212 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2213
2214 return ret;
2215 }
2216
2217 /**
2218 * free_dma_desc_resources - free dma desc resources
2219 * @priv: private structure
2220 * @dma_conf: structure to take the dma data
2221 */
2222 static void free_dma_desc_resources(struct stmmac_priv *priv,
2223 struct stmmac_dma_conf *dma_conf)
2224 {
2225 /* Release the DMA TX socket buffers */
2226 free_dma_tx_desc_resources(priv, dma_conf);
2227
2228 /* Release the DMA RX socket buffers later
2229 * to ensure all pending XDP_TX buffers are returned.
2230 */
2231 free_dma_rx_desc_resources(priv, dma_conf);
2232 }
2233
2234 /**
2235 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2236 * @priv: driver private structure
2237 * Description: It is used for enabling the rx queues in the MAC
2238 */
2239 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2240 {
2241 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2242 int queue;
2243 u8 mode;
2244
2245 for (queue = 0; queue < rx_queues_count; queue++) {
2246 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2247 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2248 }
2249 }
2250
2251 /**
2252 * stmmac_start_rx_dma - start RX DMA channel
2253 * @priv: driver private structure
2254 * @chan: RX channel index
2255 * Description:
2256 * This starts a RX DMA channel
2257 */
2258 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2259 {
2260 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2261 stmmac_start_rx(priv, priv->ioaddr, chan);
2262 }
2263
2264 /**
2265 * stmmac_start_tx_dma - start TX DMA channel
2266 * @priv: driver private structure
2267 * @chan: TX channel index
2268 * Description:
2269 * This starts a TX DMA channel
2270 */
2271 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2272 {
2273 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2274 stmmac_start_tx(priv, priv->ioaddr, chan);
2275 }
2276
2277 /**
2278 * stmmac_stop_rx_dma - stop RX DMA channel
2279 * @priv: driver private structure
2280 * @chan: RX channel index
2281 * Description:
2282 * This stops a RX DMA channel
2283 */
2284 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2285 {
2286 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2287 stmmac_stop_rx(priv, priv->ioaddr, chan);
2288 }
2289
2290 /**
2291 * stmmac_stop_tx_dma - stop TX DMA channel
2292 * @priv: driver private structure
2293 * @chan: TX channel index
2294 * Description:
2295 * This stops a TX DMA channel
2296 */
2297 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2298 {
2299 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2300 stmmac_stop_tx(priv, priv->ioaddr, chan);
2301 }
2302
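/**
 * stmmac_enable_all_dma_irq - enable RX/TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: enables both RX and TX DMA interrupts for every DMA CSR
 * channel, taking the per-channel lock around each update.
 */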
2303 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2304 {
2305 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2306 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2307 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2308 u32 chan;
2309
2310 for (chan = 0; chan < dma_csr_ch; chan++) {
2311 struct stmmac_channel *ch = &priv->channel[chan];
2312 unsigned long flags;
2313
2314 spin_lock_irqsave(&ch->lock, flags);
2315 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2316 spin_unlock_irqrestore(&ch->lock, flags);
2317 }
2318 }
2319
2320 /**
2321 * stmmac_start_all_dma - start all RX and TX DMA channels
2322 * @priv: driver private structure
2323 * Description:
2324 * This starts all the RX and TX DMA channels
2325 */
2326 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2327 {
2328 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2329 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2330 u32 chan = 0;
2331
2332 for (chan = 0; chan < rx_channels_count; chan++)
2333 stmmac_start_rx_dma(priv, chan);
2334
2335 for (chan = 0; chan < tx_channels_count; chan++)
2336 stmmac_start_tx_dma(priv, chan);
2337 }
2338
2339 /**
2340 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2341 * @priv: driver private structure
2342 * Description:
2343 * This stops the RX and TX DMA channels
2344 */
2345 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2346 {
2347 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2348 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2349 u32 chan = 0;
2350
2351 for (chan = 0; chan < rx_channels_count; chan++)
2352 stmmac_stop_rx_dma(priv, chan);
2353
2354 for (chan = 0; chan < tx_channels_count; chan++)
2355 stmmac_stop_tx_dma(priv, chan);
2356 }
2357
2358 /**
2359 * stmmac_dma_operation_mode - HW DMA operation mode
2360 * @priv: driver private structure
2361 * Description: it is used for configuring the DMA operation mode register in
2362 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2363 */
2364 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2365 {
2366 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2367 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2368 int rxfifosz = priv->plat->rx_fifo_size;
2369 int txfifosz = priv->plat->tx_fifo_size;
2370 u32 txmode = 0;
2371 u32 rxmode = 0;
2372 u32 chan = 0;
2373 u8 qmode = 0;
2374
2375 if (rxfifosz == 0)
2376 rxfifosz = priv->dma_cap.rx_fifo_size;
2377 if (txfifosz == 0)
2378 txfifosz = priv->dma_cap.tx_fifo_size;
2379
2380 /* Adjust for real per queue fifo size */
2381 rxfifosz /= rx_channels_count;
2382 txfifosz /= tx_channels_count;
2383
2384 if (priv->plat->force_thresh_dma_mode) {
2385 txmode = tc;
2386 rxmode = tc;
2387 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2388 /*
2389 * In case of GMAC, SF mode can be enabled
2390 * to perform the TX COE in HW. This depends on:
2391 		 * 1) TX COE being actually supported
2392 		 * 2) there being no buggy Jumbo frame support
2393 		 * that requires not inserting the csum in the TDES.
2394 */
2395 txmode = SF_DMA_MODE;
2396 rxmode = SF_DMA_MODE;
2397 priv->xstats.threshold = SF_DMA_MODE;
2398 } else {
2399 txmode = tc;
2400 rxmode = SF_DMA_MODE;
2401 }
2402
2403 /* configure all channels */
2404 for (chan = 0; chan < rx_channels_count; chan++) {
2405 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2406 u32 buf_size;
2407
2408 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2409
2410 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2411 rxfifosz, qmode);
2412
2413 if (rx_q->xsk_pool) {
2414 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2415 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2416 buf_size,
2417 chan);
2418 } else {
2419 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2420 priv->dma_conf.dma_buf_sz,
2421 chan);
2422 }
2423 }
2424
2425 for (chan = 0; chan < tx_channels_count; chan++) {
2426 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2427
2428 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2429 txfifosz, qmode);
2430 }
2431 }
2432
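/**
 * stmmac_xdp_xmit_zc - transmit frames from the AF_XDP (XSK) TX ring
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: pulls descriptors from the XSK pool and places them on the
 * TX DMA ring that is shared with the regular slow path. Returns true when
 * there is still budget left and submission stopped because the XSK TX ring
 * had no more descriptors (rather than because the DMA ring ran low or the
 * link is down).
 */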
2433 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2434 {
2435 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2436 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2437 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2438 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2439 unsigned int entry = tx_q->cur_tx;
2440 struct dma_desc *tx_desc = NULL;
2441 struct xdp_desc xdp_desc;
2442 bool work_done = true;
2443 u32 tx_set_ic_bit = 0;
2444 unsigned long flags;
2445
2446 /* Avoids TX time-out as we are sharing with slow path */
2447 txq_trans_cond_update(nq);
2448
2449 budget = min(budget, stmmac_tx_avail(priv, queue));
2450
2451 while (budget-- > 0) {
2452 dma_addr_t dma_addr;
2453 bool set_ic;
2454
2455 		/* We are sharing with the slow path and stop XSK TX desc submission when
2456 		 * the available TX ring space is less than the threshold.
2457 */
2458 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2459 !netif_carrier_ok(priv->dev)) {
2460 work_done = false;
2461 break;
2462 }
2463
2464 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2465 break;
2466
2467 if (likely(priv->extend_desc))
2468 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2469 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2470 tx_desc = &tx_q->dma_entx[entry].basic;
2471 else
2472 tx_desc = tx_q->dma_tx + entry;
2473
2474 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2475 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2476
2477 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2478
2479 		/* To return the XDP buffer to the XSK pool, we simply call
2480 * xsk_tx_completed(), so we don't need to fill up
2481 * 'buf' and 'xdpf'.
2482 */
2483 tx_q->tx_skbuff_dma[entry].buf = 0;
2484 tx_q->xdpf[entry] = NULL;
2485
2486 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2487 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2488 tx_q->tx_skbuff_dma[entry].last_segment = true;
2489 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2490
2491 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2492
2493 tx_q->tx_count_frames++;
2494
2495 if (!priv->tx_coal_frames[queue])
2496 set_ic = false;
2497 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2498 set_ic = true;
2499 else
2500 set_ic = false;
2501
2502 if (set_ic) {
2503 tx_q->tx_count_frames = 0;
2504 stmmac_set_tx_ic(priv, tx_desc);
2505 tx_set_ic_bit++;
2506 }
2507
2508 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2509 true, priv->mode, true, true,
2510 xdp_desc.len);
2511
2512 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2513
2514 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2515 entry = tx_q->cur_tx;
2516 }
2517 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2518 txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2519 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2520
2521 if (tx_desc) {
2522 stmmac_flush_tx_descriptors(priv, queue);
2523 xsk_tx_release(pool);
2524 }
2525
2526 	/* Return true if both of the following conditions are met:
2527 	 *  a) TX budget is still available
2528 	 *  b) work_done = true, i.e. the XSK TX desc peek came up empty (no more
2529 	 *     pending XSK TX frames for transmission)
2530 */
2531 return !!budget && work_done;
2532 }
2533
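/**
 * stmmac_bump_dma_threshold - raise the TX DMA threshold after an error
 * @priv: driver private structure
 * @chan: channel index
 * Description: when running in threshold mode, bumps the threshold control
 * value (tc) by 64 while it is still <= 256 and reprograms the channel
 * operation mode accordingly.
 */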
2534 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2535 {
2536 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2537 tc += 64;
2538
2539 if (priv->plat->force_thresh_dma_mode)
2540 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2541 else
2542 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2543 chan);
2544
2545 priv->xstats.threshold = tc;
2546 }
2547 }
2548
2549 /**
2550 * stmmac_tx_clean - to manage the transmission completion
2551 * @priv: driver private structure
2552  * @budget: napi budget limiting this function's packet handling
2553 * @queue: TX queue index
2554 * Description: it reclaims the transmit resources after transmission completes.
2555 */
2556 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2557 {
2558 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2559 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2560 unsigned int bytes_compl = 0, pkts_compl = 0;
2561 unsigned int entry, xmits = 0, count = 0;
2562 u32 tx_packets = 0, tx_errors = 0;
2563 unsigned long flags;
2564
2565 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2566
2567 tx_q->xsk_frames_done = 0;
2568
2569 entry = tx_q->dirty_tx;
2570
2571 	/* Try to clean all TX complete frames in one shot */
2572 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2573 struct xdp_frame *xdpf;
2574 struct sk_buff *skb;
2575 struct dma_desc *p;
2576 int status;
2577
2578 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2579 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2580 xdpf = tx_q->xdpf[entry];
2581 skb = NULL;
2582 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2583 xdpf = NULL;
2584 skb = tx_q->tx_skbuff[entry];
2585 } else {
2586 xdpf = NULL;
2587 skb = NULL;
2588 }
2589
2590 if (priv->extend_desc)
2591 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2592 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2593 p = &tx_q->dma_entx[entry].basic;
2594 else
2595 p = tx_q->dma_tx + entry;
2596
2597 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2598 /* Check if the descriptor is owned by the DMA */
2599 if (unlikely(status & tx_dma_own))
2600 break;
2601
2602 count++;
2603
2604 /* Make sure descriptor fields are read after reading
2605 * the own bit.
2606 */
2607 dma_rmb();
2608
2609 		/* Just consider the last segment and ... */
2610 if (likely(!(status & tx_not_ls))) {
2611 /* ... verify the status error condition */
2612 if (unlikely(status & tx_err)) {
2613 tx_errors++;
2614 if (unlikely(status & tx_err_bump_tc))
2615 stmmac_bump_dma_threshold(priv, queue);
2616 } else {
2617 tx_packets++;
2618 }
2619 if (skb)
2620 stmmac_get_tx_hwtstamp(priv, p, skb);
2621 }
2622
2623 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2624 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2625 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2626 dma_unmap_page(priv->device,
2627 tx_q->tx_skbuff_dma[entry].buf,
2628 tx_q->tx_skbuff_dma[entry].len,
2629 DMA_TO_DEVICE);
2630 else
2631 dma_unmap_single(priv->device,
2632 tx_q->tx_skbuff_dma[entry].buf,
2633 tx_q->tx_skbuff_dma[entry].len,
2634 DMA_TO_DEVICE);
2635 tx_q->tx_skbuff_dma[entry].buf = 0;
2636 tx_q->tx_skbuff_dma[entry].len = 0;
2637 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2638 }
2639
2640 stmmac_clean_desc3(priv, tx_q, p);
2641
2642 tx_q->tx_skbuff_dma[entry].last_segment = false;
2643 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2644
2645 if (xdpf &&
2646 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2647 xdp_return_frame_rx_napi(xdpf);
2648 tx_q->xdpf[entry] = NULL;
2649 }
2650
2651 if (xdpf &&
2652 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2653 xdp_return_frame(xdpf);
2654 tx_q->xdpf[entry] = NULL;
2655 }
2656
2657 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2658 tx_q->xsk_frames_done++;
2659
2660 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2661 if (likely(skb)) {
2662 pkts_compl++;
2663 bytes_compl += skb->len;
2664 dev_consume_skb_any(skb);
2665 tx_q->tx_skbuff[entry] = NULL;
2666 }
2667 }
2668
2669 stmmac_release_tx_desc(priv, p, priv->mode);
2670
2671 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2672 }
2673 tx_q->dirty_tx = entry;
2674
2675 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2676 pkts_compl, bytes_compl);
2677
2678 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2679 queue))) &&
2680 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2681
2682 netif_dbg(priv, tx_done, priv->dev,
2683 "%s: restart transmit\n", __func__);
2684 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2685 }
2686
2687 if (tx_q->xsk_pool) {
2688 bool work_done;
2689
2690 if (tx_q->xsk_frames_done)
2691 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2692
2693 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2694 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2695
2696 /* For XSK TX, we try to send as many as possible.
2697 * If XSK work done (XSK TX desc empty and budget still
2698 * available), return "budget - 1" to reenable TX IRQ.
2699 * Else, return "budget" to make NAPI continue polling.
2700 */
2701 work_done = stmmac_xdp_xmit_zc(priv, queue,
2702 STMMAC_XSK_TX_BUDGET_MAX);
2703 if (work_done)
2704 xmits = budget - 1;
2705 else
2706 xmits = budget;
2707 }
2708
2709 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2710 priv->eee_sw_timer_en) {
2711 if (stmmac_enable_eee_mode(priv))
2712 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2713 }
2714
2715 /* We still have pending packets, let's call for a new scheduling */
2716 if (tx_q->dirty_tx != tx_q->cur_tx)
2717 stmmac_tx_timer_arm(priv, queue);
2718
2719 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2720 txq_stats->tx_packets += tx_packets;
2721 txq_stats->tx_pkt_n += tx_packets;
2722 txq_stats->tx_clean++;
2723 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2724
2725 priv->xstats.tx_errors += tx_errors;
2726
2727 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2728
2729 /* Combine decisions from TX clean and XSK TX */
2730 return max(count, xmits);
2731 }
2732
2733 /**
2734 * stmmac_tx_err - to manage the tx error
2735 * @priv: driver private structure
2736 * @chan: channel index
2737 * Description: it cleans the descriptors and restarts the transmission
2738 * in case of transmission errors.
2739 */
2740 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2741 {
2742 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2743
2744 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2745
2746 stmmac_stop_tx_dma(priv, chan);
2747 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2748 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2749 stmmac_reset_tx_queue(priv, chan);
2750 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2751 tx_q->dma_tx_phy, chan);
2752 stmmac_start_tx_dma(priv, chan);
2753
2754 priv->xstats.tx_errors++;
2755 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2756 }
2757
2758 /**
2759 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2760 * @priv: driver private structure
2761 * @txmode: TX operating mode
2762 * @rxmode: RX operating mode
2763 * @chan: channel index
2764  * Description: it is used for configuring the DMA operation mode at
2765  * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2766 * mode.
2767 */
2768 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2769 u32 rxmode, u32 chan)
2770 {
2771 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2772 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2773 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2774 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2775 int rxfifosz = priv->plat->rx_fifo_size;
2776 int txfifosz = priv->plat->tx_fifo_size;
2777
2778 if (rxfifosz == 0)
2779 rxfifosz = priv->dma_cap.rx_fifo_size;
2780 if (txfifosz == 0)
2781 txfifosz = priv->dma_cap.tx_fifo_size;
2782
2783 /* Adjust for real per queue fifo size */
2784 rxfifosz /= rx_channels_count;
2785 txfifosz /= tx_channels_count;
2786
2787 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2788 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2789 }
2790
2791 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2792 {
2793 int ret;
2794
2795 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2796 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2797 if (ret && (ret != -EINVAL)) {
2798 stmmac_global_err(priv);
2799 return true;
2800 }
2801
2802 return false;
2803 }
2804
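/**
 * stmmac_napi_check - check the DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: DMA channel index
 * @dir: DMA direction to check (RX, TX or both)
 * Description: reads the DMA interrupt status of @chan, masks the
 * corresponding DMA interrupt and schedules the RX/TX NAPI instance (or the
 * combined rxtx NAPI when an XSK pool is attached). Returns the DMA status.
 */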
2805 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2806 {
2807 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2808 &priv->xstats, chan, dir);
2809 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2810 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2811 struct stmmac_channel *ch = &priv->channel[chan];
2812 struct napi_struct *rx_napi;
2813 struct napi_struct *tx_napi;
2814 unsigned long flags;
2815
2816 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2817 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2818
2819 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2820 if (napi_schedule_prep(rx_napi)) {
2821 spin_lock_irqsave(&ch->lock, flags);
2822 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2823 spin_unlock_irqrestore(&ch->lock, flags);
2824 __napi_schedule(rx_napi);
2825 }
2826 }
2827
2828 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2829 if (napi_schedule_prep(tx_napi)) {
2830 spin_lock_irqsave(&ch->lock, flags);
2831 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2832 spin_unlock_irqrestore(&ch->lock, flags);
2833 __napi_schedule(tx_napi);
2834 }
2835 }
2836
2837 return status;
2838 }
2839
2840 /**
2841 * stmmac_dma_interrupt - DMA ISR
2842 * @priv: driver private structure
2843 * Description: this is the DMA ISR. It is called by the main ISR.
2844  * It calls the dwmac dma routine and schedules the poll method in case
2845  * some work can be done.
2846 */
2847 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2848 {
2849 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2850 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2851 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2852 tx_channel_count : rx_channel_count;
2853 u32 chan;
2854 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2855
2856 /* Make sure we never check beyond our status buffer. */
2857 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2858 channels_to_check = ARRAY_SIZE(status);
2859
2860 for (chan = 0; chan < channels_to_check; chan++)
2861 status[chan] = stmmac_napi_check(priv, chan,
2862 DMA_DIR_RXTX);
2863
2864 for (chan = 0; chan < tx_channel_count; chan++) {
2865 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2866 /* Try to bump up the dma threshold on this failure */
2867 stmmac_bump_dma_threshold(priv, chan);
2868 } else if (unlikely(status[chan] == tx_hard_error)) {
2869 stmmac_tx_err(priv, chan);
2870 }
2871 }
2872 }
2873
2874 /**
2875  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2876 * @priv: driver private structure
2877  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2878 */
2879 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2880 {
2881 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2882 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2883
2884 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2885
2886 if (priv->dma_cap.rmon) {
2887 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2888 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2889 } else
2890 netdev_info(priv->dev, "No MAC Management Counters available\n");
2891 }
2892
2893 /**
2894 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2895 * @priv: driver private structure
2896 * Description:
2897 * new GMAC chip generations have a new register to indicate the
2898 * presence of the optional feature/functions.
2899  * This can also be used to override the value passed through the
2900  * platform, which is necessary for old MAC10/100 and GMAC chips.
2901 */
2902 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2903 {
2904 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2905 }
2906
2907 /**
2908 * stmmac_check_ether_addr - check if the MAC addr is valid
2909 * @priv: driver private structure
2910 * Description:
2911  * it verifies that the MAC address is valid; in case it is not, it
2912  * generates a random MAC address
2913 */
2914 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2915 {
2916 u8 addr[ETH_ALEN];
2917
2918 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2919 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2920 if (is_valid_ether_addr(addr))
2921 eth_hw_addr_set(priv->dev, addr);
2922 else
2923 eth_hw_addr_random(priv->dev);
2924 dev_info(priv->device, "device MAC address %pM\n",
2925 priv->dev->dev_addr);
2926 }
2927 }
2928
2929 /**
2930 * stmmac_init_dma_engine - DMA init.
2931 * @priv: driver private structure
2932 * Description:
2933  * It inits the DMA by invoking the specific MAC/GMAC callback.
2934  * Some DMA parameters can be passed from the platform;
2935  * if they are not passed, a default is kept for the MAC or GMAC.
2936 */
2937 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2938 {
2939 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2940 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2941 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2942 struct stmmac_rx_queue *rx_q;
2943 struct stmmac_tx_queue *tx_q;
2944 u32 chan = 0;
2945 int atds = 0;
2946 int ret = 0;
2947
2948 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2949 dev_err(priv->device, "Invalid DMA configuration\n");
2950 return -EINVAL;
2951 }
2952
2953 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2954 atds = 1;
2955
2956 ret = stmmac_reset(priv, priv->ioaddr);
2957 if (ret) {
2958 dev_err(priv->device, "Failed to reset the dma\n");
2959 return ret;
2960 }
2961
2962 /* DMA Configuration */
2963 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2964
2965 if (priv->plat->axi)
2966 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2967
2968 /* DMA CSR Channel configuration */
2969 for (chan = 0; chan < dma_csr_ch; chan++) {
2970 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2971 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2972 }
2973
2974 /* DMA RX Channel Configuration */
2975 for (chan = 0; chan < rx_channels_count; chan++) {
2976 rx_q = &priv->dma_conf.rx_queue[chan];
2977
2978 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2979 rx_q->dma_rx_phy, chan);
2980
2981 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2982 (rx_q->buf_alloc_num *
2983 sizeof(struct dma_desc));
2984 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2985 rx_q->rx_tail_addr, chan);
2986 }
2987
2988 /* DMA TX Channel Configuration */
2989 for (chan = 0; chan < tx_channels_count; chan++) {
2990 tx_q = &priv->dma_conf.tx_queue[chan];
2991
2992 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2993 tx_q->dma_tx_phy, chan);
2994
2995 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2996 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2997 tx_q->tx_tail_addr, chan);
2998 }
2999
3000 return ret;
3001 }
3002
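/**
 * stmmac_tx_timer_arm - (re)arm the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: starts the TX mitigation hrtimer for @queue, unless the
 * coalescing timer of that queue is set to zero.
 */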
3003 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3004 {
3005 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3006 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3007
3008 if (!tx_coal_timer)
3009 return;
3010
3011 hrtimer_start(&tx_q->txtimer,
3012 STMMAC_COAL_TIMER(tx_coal_timer),
3013 HRTIMER_MODE_REL);
3014 }
3015
3016 /**
3017 * stmmac_tx_timer - mitigation sw timer for tx.
3018  * @t: pointer to the expired hrtimer (embedded in the TX queue)
3019 * Description:
3020  * This is the timer handler that schedules the TX NAPI poll (and thus stmmac_tx_clean).
3021 */
3022 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3023 {
3024 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3025 struct stmmac_priv *priv = tx_q->priv_data;
3026 struct stmmac_channel *ch;
3027 struct napi_struct *napi;
3028
3029 ch = &priv->channel[tx_q->queue_index];
3030 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3031
3032 if (likely(napi_schedule_prep(napi))) {
3033 unsigned long flags;
3034
3035 spin_lock_irqsave(&ch->lock, flags);
3036 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3037 spin_unlock_irqrestore(&ch->lock, flags);
3038 __napi_schedule(napi);
3039 }
3040
3041 return HRTIMER_NORESTART;
3042 }
3043
3044 /**
3045 * stmmac_init_coalesce - init mitigation options.
3046 * @priv: driver private structure
3047 * Description:
3048  * This inits the coalesce parameters, i.e. the timer rate, the
3049  * timer handler and the default threshold used for enabling the
3050  * interrupt-on-completion bit.
3051 */
3052 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3053 {
3054 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3055 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3056 u32 chan;
3057
3058 for (chan = 0; chan < tx_channel_count; chan++) {
3059 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3060
3061 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3062 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3063
3064 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3065 tx_q->txtimer.function = stmmac_tx_timer;
3066 }
3067
3068 for (chan = 0; chan < rx_channel_count; chan++)
3069 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3070 }
3071
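/**
 * stmmac_set_rings_length - program the TX/RX descriptor ring lengths
 * @priv: driver private structure
 * Description: writes the TX and RX ring lengths into the DMA channel
 * registers for every channel in use.
 */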
3072 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3073 {
3074 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3075 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3076 u32 chan;
3077
3078 /* set TX ring length */
3079 for (chan = 0; chan < tx_channels_count; chan++)
3080 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3081 (priv->dma_conf.dma_tx_size - 1), chan);
3082
3083 /* set RX ring length */
3084 for (chan = 0; chan < rx_channels_count; chan++)
3085 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3086 (priv->dma_conf.dma_rx_size - 1), chan);
3087 }
3088
3089 /**
3090 * stmmac_set_tx_queue_weight - Set TX queue weight
3091 * @priv: driver private structure
3092  * Description: It is used for setting the TX queue weights
3093 */
3094 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3095 {
3096 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3097 u32 weight;
3098 u32 queue;
3099
3100 for (queue = 0; queue < tx_queues_count; queue++) {
3101 weight = priv->plat->tx_queues_cfg[queue].weight;
3102 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3103 }
3104 }
3105
3106 /**
3107 * stmmac_configure_cbs - Configure CBS in TX queue
3108 * @priv: driver private structure
3109 * Description: It is used for configuring CBS in AVB TX queues
3110 */
3111 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3112 {
3113 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3114 u32 mode_to_use;
3115 u32 queue;
3116
3117 /* queue 0 is reserved for legacy traffic */
3118 for (queue = 1; queue < tx_queues_count; queue++) {
3119 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3120 if (mode_to_use == MTL_QUEUE_DCB)
3121 continue;
3122
3123 stmmac_config_cbs(priv, priv->hw,
3124 priv->plat->tx_queues_cfg[queue].send_slope,
3125 priv->plat->tx_queues_cfg[queue].idle_slope,
3126 priv->plat->tx_queues_cfg[queue].high_credit,
3127 priv->plat->tx_queues_cfg[queue].low_credit,
3128 queue);
3129 }
3130 }
3131
3132 /**
3133 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3134 * @priv: driver private structure
3135 * Description: It is used for mapping RX queues to RX dma channels
3136 */
3137 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3138 {
3139 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3140 u32 queue;
3141 u32 chan;
3142
3143 for (queue = 0; queue < rx_queues_count; queue++) {
3144 chan = priv->plat->rx_queues_cfg[queue].chan;
3145 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3146 }
3147 }
3148
3149 /**
3150 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3151 * @priv: driver private structure
3152 * Description: It is used for configuring the RX Queue Priority
3153 */
3154 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3155 {
3156 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3157 u32 queue;
3158 u32 prio;
3159
3160 for (queue = 0; queue < rx_queues_count; queue++) {
3161 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3162 continue;
3163
3164 prio = priv->plat->rx_queues_cfg[queue].prio;
3165 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3166 }
3167 }
3168
3169 /**
3170 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3171 * @priv: driver private structure
3172 * Description: It is used for configuring the TX Queue Priority
3173 */
3174 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3175 {
3176 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3177 u32 queue;
3178 u32 prio;
3179
3180 for (queue = 0; queue < tx_queues_count; queue++) {
3181 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3182 continue;
3183
3184 prio = priv->plat->tx_queues_cfg[queue].prio;
3185 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3186 }
3187 }
3188
3189 /**
3190 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3191 * @priv: driver private structure
3192 * Description: It is used for configuring the RX queue routing
3193 */
3194 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3195 {
3196 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3197 u32 queue;
3198 u8 packet;
3199
3200 for (queue = 0; queue < rx_queues_count; queue++) {
3201 /* no specific packet type routing specified for the queue */
3202 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3203 continue;
3204
3205 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3206 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3207 }
3208 }
3209
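/**
 * stmmac_mac_config_rss - configure Receive Side Scaling
 * @priv: driver private structure
 * Description: RSS is enabled only when supported by the HW, allowed by the
 * platform and NETIF_F_RXHASH is set; the RSS configuration is then
 * programmed for the RX queues in use.
 */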
3210 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3211 {
3212 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3213 priv->rss.enable = false;
3214 return;
3215 }
3216
3217 if (priv->dev->features & NETIF_F_RXHASH)
3218 priv->rss.enable = true;
3219 else
3220 priv->rss.enable = false;
3221
3222 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3223 priv->plat->rx_queues_to_use);
3224 }
3225
3226 /**
3227 * stmmac_mtl_configuration - Configure MTL
3228 * @priv: driver private structure
3229  * Description: It is used for configuring MTL
3230 */
3231 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3232 {
3233 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3234 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3235
3236 if (tx_queues_count > 1)
3237 stmmac_set_tx_queue_weight(priv);
3238
3239 /* Configure MTL RX algorithms */
3240 if (rx_queues_count > 1)
3241 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3242 priv->plat->rx_sched_algorithm);
3243
3244 /* Configure MTL TX algorithms */
3245 if (tx_queues_count > 1)
3246 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3247 priv->plat->tx_sched_algorithm);
3248
3249 /* Configure CBS in AVB TX queues */
3250 if (tx_queues_count > 1)
3251 stmmac_configure_cbs(priv);
3252
3253 /* Map RX MTL to DMA channels */
3254 stmmac_rx_queue_dma_chan_map(priv);
3255
3256 /* Enable MAC RX Queues */
3257 stmmac_mac_enable_rx_queues(priv);
3258
3259 /* Set RX priorities */
3260 if (rx_queues_count > 1)
3261 stmmac_mac_config_rx_queues_prio(priv);
3262
3263 /* Set TX priorities */
3264 if (tx_queues_count > 1)
3265 stmmac_mac_config_tx_queues_prio(priv);
3266
3267 /* Set RX routing */
3268 if (rx_queues_count > 1)
3269 stmmac_mac_config_rx_queues_routing(priv);
3270
3271 /* Receive Side Scaling */
3272 if (rx_queues_count > 1)
3273 stmmac_mac_config_rss(priv);
3274 }
3275
3276 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3277 {
3278 if (priv->dma_cap.asp) {
3279 netdev_info(priv->dev, "Enabling Safety Features\n");
3280 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3281 priv->plat->safety_feat_cfg);
3282 } else {
3283 netdev_info(priv->dev, "No Safety Features support found\n");
3284 }
3285 }
3286
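/**
 * stmmac_fpe_start_wq - create the Frame Preemption (FPE) workqueue
 * @priv: driver private structure
 * Description: allocates the single-threaded workqueue used to run the FPE
 * handshake task. Returns 0 on success, -ENOMEM otherwise.
 */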
3287 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3288 {
3289 char *name;
3290
3291 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3292 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3293
3294 name = priv->wq_name;
3295 sprintf(name, "%s-fpe", priv->dev->name);
3296
3297 priv->fpe_wq = create_singlethread_workqueue(name);
3298 if (!priv->fpe_wq) {
3299 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3300
3301 return -ENOMEM;
3302 }
3303 netdev_info(priv->dev, "FPE workqueue start");
3304
3305 return 0;
3306 }
3307
3308 /**
3309 * stmmac_hw_setup - setup mac in a usable state.
3310 * @dev : pointer to the device structure.
3311 * @ptp_register: register PTP if set
3312 * Description:
3313  * this is the main function to set up the HW in a usable state: the
3314  * dma engine is reset, the core registers are configured (e.g. AXI,
3315  * checksum features, timers) and the DMA is ready to start receiving and
3316  * transmitting.
3317 * Return value:
3318  * 0 on success and an appropriate negative error code (as defined in
3319  * errno.h) on failure.
3320 */
3321 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3322 {
3323 struct stmmac_priv *priv = netdev_priv(dev);
3324 u32 rx_cnt = priv->plat->rx_queues_to_use;
3325 u32 tx_cnt = priv->plat->tx_queues_to_use;
3326 bool sph_en;
3327 u32 chan;
3328 int ret;
3329
3330 /* DMA initialization and SW reset */
3331 ret = stmmac_init_dma_engine(priv);
3332 if (ret < 0) {
3333 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3334 __func__);
3335 return ret;
3336 }
3337
3338 /* Copy the MAC addr into the HW */
3339 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3340
3341 /* PS and related bits will be programmed according to the speed */
3342 if (priv->hw->pcs) {
3343 int speed = priv->plat->mac_port_sel_speed;
3344
3345 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3346 (speed == SPEED_1000)) {
3347 priv->hw->ps = speed;
3348 } else {
3349 dev_warn(priv->device, "invalid port speed\n");
3350 priv->hw->ps = 0;
3351 }
3352 }
3353
3354 /* Initialize the MAC Core */
3355 stmmac_core_init(priv, priv->hw, dev);
3356
3357 	/* Initialize MTL */
3358 stmmac_mtl_configuration(priv);
3359
3360 /* Initialize Safety Features */
3361 stmmac_safety_feat_configuration(priv);
3362
3363 ret = stmmac_rx_ipc(priv, priv->hw);
3364 if (!ret) {
3365 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3366 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3367 priv->hw->rx_csum = 0;
3368 }
3369
3370 /* Enable the MAC Rx/Tx */
3371 stmmac_mac_set(priv, priv->ioaddr, true);
3372
3373 /* Set the HW DMA mode and the COE */
3374 stmmac_dma_operation_mode(priv);
3375
3376 stmmac_mmc_setup(priv);
3377
3378 if (ptp_register) {
3379 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3380 if (ret < 0)
3381 netdev_warn(priv->dev,
3382 "failed to enable PTP reference clock: %pe\n",
3383 ERR_PTR(ret));
3384 }
3385
3386 ret = stmmac_init_ptp(priv);
3387 if (ret == -EOPNOTSUPP)
3388 netdev_info(priv->dev, "PTP not supported by HW\n");
3389 else if (ret)
3390 netdev_warn(priv->dev, "PTP init failed\n");
3391 else if (ptp_register)
3392 stmmac_ptp_register(priv);
3393
3394 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3395
3396 /* Convert the timer from msec to usec */
3397 if (!priv->tx_lpi_timer)
3398 priv->tx_lpi_timer = eee_timer * 1000;
3399
3400 if (priv->use_riwt) {
3401 u32 queue;
3402
3403 for (queue = 0; queue < rx_cnt; queue++) {
3404 if (!priv->rx_riwt[queue])
3405 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3406
3407 stmmac_rx_watchdog(priv, priv->ioaddr,
3408 priv->rx_riwt[queue], queue);
3409 }
3410 }
3411
3412 if (priv->hw->pcs)
3413 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3414
3415 /* set TX and RX rings length */
3416 stmmac_set_rings_length(priv);
3417
3418 /* Enable TSO */
3419 if (priv->tso) {
3420 for (chan = 0; chan < tx_cnt; chan++) {
3421 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3422
3423 /* TSO and TBS cannot co-exist */
3424 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3425 continue;
3426
3427 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3428 }
3429 }
3430
3431 /* Enable Split Header */
3432 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3433 for (chan = 0; chan < rx_cnt; chan++)
3434 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3435
3436
3437 /* VLAN Tag Insertion */
3438 if (priv->dma_cap.vlins)
3439 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3440
3441 /* TBS */
3442 for (chan = 0; chan < tx_cnt; chan++) {
3443 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3444 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3445
3446 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3447 }
3448
3449 /* Configure real RX and TX queues */
3450 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3451 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3452
3453 /* Start the ball rolling... */
3454 stmmac_start_all_dma(priv);
3455
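/* Frame Preemption: bring up the FPE workqueue and, if FPE is enabled in
 * the platform config, start the verification handshake.
 */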
3456 if (priv->dma_cap.fpesel) {
3457 stmmac_fpe_start_wq(priv);
3458
3459 if (priv->plat->fpe_cfg->enable)
3460 stmmac_fpe_handshake(priv, true);
3461 }
3462
3463 return 0;
3464 }
3465
static void stmmac_hw_teardown(struct net_device *dev)
3467 {
3468 struct stmmac_priv *priv = netdev_priv(dev);
3469
3470 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3471 }
3472
static void stmmac_free_irq(struct net_device *dev,
3474 enum request_irq_err irq_err, int irq_idx)
3475 {
3476 struct stmmac_priv *priv = netdev_priv(dev);
3477 int j;
3478
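/* Tear down in reverse order of request: each case falls through so that
 * every IRQ acquired before the failing one gets released.
 */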
3479 switch (irq_err) {
3480 case REQ_IRQ_ERR_ALL:
3481 irq_idx = priv->plat->tx_queues_to_use;
3482 fallthrough;
3483 case REQ_IRQ_ERR_TX:
3484 for (j = irq_idx - 1; j >= 0; j--) {
3485 if (priv->tx_irq[j] > 0) {
3486 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3487 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3488 }
3489 }
3490 irq_idx = priv->plat->rx_queues_to_use;
3491 fallthrough;
3492 case REQ_IRQ_ERR_RX:
3493 for (j = irq_idx - 1; j >= 0; j--) {
3494 if (priv->rx_irq[j] > 0) {
3495 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3496 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3497 }
3498 }
3499
3500 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3501 free_irq(priv->sfty_ue_irq, dev);
3502 fallthrough;
3503 case REQ_IRQ_ERR_SFTY_UE:
3504 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3505 free_irq(priv->sfty_ce_irq, dev);
3506 fallthrough;
3507 case REQ_IRQ_ERR_SFTY_CE:
3508 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3509 free_irq(priv->lpi_irq, dev);
3510 fallthrough;
3511 case REQ_IRQ_ERR_LPI:
3512 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3513 free_irq(priv->wol_irq, dev);
3514 fallthrough;
3515 case REQ_IRQ_ERR_WOL:
3516 free_irq(dev->irq, dev);
3517 fallthrough;
3518 case REQ_IRQ_ERR_MAC:
3519 case REQ_IRQ_ERR_NO:
/* If the MAC IRQ request failed, there are no more IRQs to free */
3521 break;
3522 }
3523 }
3524
static int stmmac_request_irq_multi_msi(struct net_device *dev)
3526 {
3527 struct stmmac_priv *priv = netdev_priv(dev);
3528 enum request_irq_err irq_err;
3529 cpumask_t cpu_mask;
3530 int irq_idx = 0;
3531 char *int_name;
3532 int ret;
3533 int i;
3534
3535 /* For common interrupt */
3536 int_name = priv->int_name_mac;
3537 sprintf(int_name, "%s:%s", dev->name, "mac");
3538 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3539 0, int_name, dev);
3540 if (unlikely(ret < 0)) {
3541 netdev_err(priv->dev,
3542 "%s: alloc mac MSI %d (error: %d)\n",
3543 __func__, dev->irq, ret);
3544 irq_err = REQ_IRQ_ERR_MAC;
3545 goto irq_error;
3546 }
3547
/* Request the Wake IRQ in case another line
 * is used for WoL
 */
3551 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3552 int_name = priv->int_name_wol;
3553 sprintf(int_name, "%s:%s", dev->name, "wol");
3554 ret = request_irq(priv->wol_irq,
3555 stmmac_mac_interrupt,
3556 0, int_name, dev);
3557 if (unlikely(ret < 0)) {
3558 netdev_err(priv->dev,
3559 "%s: alloc wol MSI %d (error: %d)\n",
3560 __func__, priv->wol_irq, ret);
3561 irq_err = REQ_IRQ_ERR_WOL;
3562 goto irq_error;
3563 }
3564 }
3565
/* Request the LPI IRQ in case another line
 * is used for LPI
 */
3569 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3570 int_name = priv->int_name_lpi;
3571 sprintf(int_name, "%s:%s", dev->name, "lpi");
3572 ret = request_irq(priv->lpi_irq,
3573 stmmac_mac_interrupt,
3574 0, int_name, dev);
3575 if (unlikely(ret < 0)) {
3576 netdev_err(priv->dev,
3577 "%s: alloc lpi MSI %d (error: %d)\n",
3578 __func__, priv->lpi_irq, ret);
3579 irq_err = REQ_IRQ_ERR_LPI;
3580 goto irq_error;
3581 }
3582 }
3583
/* Request the Safety Feature Correctable Error line in
 * case another line is used
 */
3587 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3588 int_name = priv->int_name_sfty_ce;
3589 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3590 ret = request_irq(priv->sfty_ce_irq,
3591 stmmac_safety_interrupt,
3592 0, int_name, dev);
3593 if (unlikely(ret < 0)) {
3594 netdev_err(priv->dev,
3595 "%s: alloc sfty ce MSI %d (error: %d)\n",
3596 __func__, priv->sfty_ce_irq, ret);
3597 irq_err = REQ_IRQ_ERR_SFTY_CE;
3598 goto irq_error;
3599 }
3600 }
3601
/* Request the Safety Feature Uncorrectable Error line in
 * case another line is used
 */
3605 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3606 int_name = priv->int_name_sfty_ue;
3607 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3608 ret = request_irq(priv->sfty_ue_irq,
3609 stmmac_safety_interrupt,
3610 0, int_name, dev);
3611 if (unlikely(ret < 0)) {
3612 netdev_err(priv->dev,
3613 "%s: alloc sfty ue MSI %d (error: %d)\n",
3614 __func__, priv->sfty_ue_irq, ret);
3615 irq_err = REQ_IRQ_ERR_SFTY_UE;
3616 goto irq_error;
3617 }
3618 }
3619
3620 /* Request Rx MSI irq */
3621 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3622 if (i >= MTL_MAX_RX_QUEUES)
3623 break;
3624 if (priv->rx_irq[i] == 0)
3625 continue;
3626
3627 int_name = priv->int_name_rx_irq[i];
3628 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3629 ret = request_irq(priv->rx_irq[i],
3630 stmmac_msi_intr_rx,
3631 0, int_name, &priv->dma_conf.rx_queue[i]);
3632 if (unlikely(ret < 0)) {
3633 netdev_err(priv->dev,
3634 "%s: alloc rx-%d MSI %d (error: %d)\n",
3635 __func__, i, priv->rx_irq[i], ret);
3636 irq_err = REQ_IRQ_ERR_RX;
3637 irq_idx = i;
3638 goto irq_error;
3639 }
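/* Spread the RX queue interrupts across the online CPUs via affinity hints */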
3640 cpumask_clear(&cpu_mask);
3641 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3642 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3643 }
3644
3645 /* Request Tx MSI irq */
3646 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3647 if (i >= MTL_MAX_TX_QUEUES)
3648 break;
3649 if (priv->tx_irq[i] == 0)
3650 continue;
3651
3652 int_name = priv->int_name_tx_irq[i];
3653 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3654 ret = request_irq(priv->tx_irq[i],
3655 stmmac_msi_intr_tx,
3656 0, int_name, &priv->dma_conf.tx_queue[i]);
3657 if (unlikely(ret < 0)) {
3658 netdev_err(priv->dev,
3659 "%s: alloc tx-%d MSI %d (error: %d)\n",
3660 __func__, i, priv->tx_irq[i], ret);
3661 irq_err = REQ_IRQ_ERR_TX;
3662 irq_idx = i;
3663 goto irq_error;
3664 }
3665 cpumask_clear(&cpu_mask);
3666 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3667 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3668 }
3669
3670 return 0;
3671
3672 irq_error:
3673 stmmac_free_irq(dev, irq_err, irq_idx);
3674 return ret;
3675 }
3676
static int stmmac_request_irq_single(struct net_device *dev)
3678 {
3679 struct stmmac_priv *priv = netdev_priv(dev);
3680 enum request_irq_err irq_err;
3681 int ret;
3682
3683 ret = request_irq(dev->irq, stmmac_interrupt,
3684 IRQF_SHARED, dev->name, dev);
3685 if (unlikely(ret < 0)) {
3686 netdev_err(priv->dev,
3687 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3688 __func__, dev->irq, ret);
3689 irq_err = REQ_IRQ_ERR_MAC;
3690 goto irq_error;
3691 }
3692
/* Request the Wake IRQ in case another line
 * is used for WoL
 */
3696 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3697 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3698 IRQF_SHARED, dev->name, dev);
3699 if (unlikely(ret < 0)) {
3700 netdev_err(priv->dev,
3701 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3702 __func__, priv->wol_irq, ret);
3703 irq_err = REQ_IRQ_ERR_WOL;
3704 goto irq_error;
3705 }
3706 }
3707
/* Request the LPI IRQ in case another line is used for LPI */
3709 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3710 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3711 IRQF_SHARED, dev->name, dev);
3712 if (unlikely(ret < 0)) {
3713 netdev_err(priv->dev,
3714 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3715 __func__, priv->lpi_irq, ret);
3716 irq_err = REQ_IRQ_ERR_LPI;
3717 goto irq_error;
3718 }
3719 }
3720
3721 return 0;
3722
3723 irq_error:
3724 stmmac_free_irq(dev, irq_err, 0);
3725 return ret;
3726 }
3727
static int stmmac_request_irq(struct net_device *dev)
3729 {
3730 struct stmmac_priv *priv = netdev_priv(dev);
3731 int ret;
3732
3733 /* Request the IRQ lines */
3734 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3735 ret = stmmac_request_irq_multi_msi(dev);
3736 else
3737 ret = stmmac_request_irq_single(dev);
3738
3739 return ret;
3740 }
3741
3742 /**
3743 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3744 * @priv: driver private structure
3745 * @mtu: MTU to setup the dma queue and buf with
 * Description: Allocate and generate a dma_conf based on the provided MTU.
 * Allocate the Tx/Rx DMA queues and initialize them.
 * Return value:
 * the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3750 */
3751 static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3753 {
3754 struct stmmac_dma_conf *dma_conf;
3755 int chan, bfsize, ret;
3756
3757 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3758 if (!dma_conf) {
3759 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3760 __func__);
3761 return ERR_PTR(-ENOMEM);
3762 }
3763
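/* Pick the DMA buffer size: use the 16KiB jumbo buffer when the MTU needs
 * it (and the HW mode supports it), otherwise derive a standard size from
 * the MTU.
 */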
3764 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3765 if (bfsize < 0)
3766 bfsize = 0;
3767
3768 if (bfsize < BUF_SIZE_16KiB)
3769 bfsize = stmmac_set_bfsize(mtu, 0);
3770
3771 dma_conf->dma_buf_sz = bfsize;
/* Choose the tx/rx ring sizes from the ones already defined in the
 * priv struct (if defined).
 */
3775 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3776 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3777
3778 if (!dma_conf->dma_tx_size)
3779 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3780 if (!dma_conf->dma_rx_size)
3781 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3782
3783 /* Earlier check for TBS */
3784 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3785 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3786 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3787
3788 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3789 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3790 }
3791
3792 ret = alloc_dma_desc_resources(priv, dma_conf);
3793 if (ret < 0) {
3794 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3795 __func__);
3796 goto alloc_error;
3797 }
3798
3799 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3800 if (ret < 0) {
3801 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3802 __func__);
3803 goto init_error;
3804 }
3805
3806 return dma_conf;
3807
3808 init_error:
3809 free_dma_desc_resources(priv, dma_conf);
3810 alloc_error:
3811 kfree(dma_conf);
3812 return ERR_PTR(ret);
3813 }
3814
3815 /**
3816 * __stmmac_open - open entry point of the driver
3817 * @dev : pointer to the device structure.
3818 * @dma_conf : structure to take the dma data
3819 * Description:
3820 * This function is the open entry point of the driver.
3821 * Return value:
3822 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3823 * file on failure.
3824 */
static int __stmmac_open(struct net_device *dev,
3826 struct stmmac_dma_conf *dma_conf)
3827 {
3828 struct stmmac_priv *priv = netdev_priv(dev);
3829 int mode = priv->plat->phy_interface;
3830 u32 chan;
3831 int ret;
3832
3833 ret = pm_runtime_resume_and_get(priv->device);
3834 if (ret < 0)
3835 return ret;
3836
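/* Attach a PHY unless the link is fully handled by a PCS: TBI/RTBI, an
 * XPCS running clause-73 autoneg, or a Lynx PCS.
 */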
3837 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3838 priv->hw->pcs != STMMAC_PCS_RTBI &&
3839 (!priv->hw->xpcs ||
3840 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3841 !priv->hw->lynx_pcs) {
3842 ret = stmmac_init_phy(dev);
3843 if (ret) {
3844 netdev_err(priv->dev,
3845 "%s: Cannot attach to PHY (error: %d)\n",
3846 __func__, ret);
3847 goto init_phy_error;
3848 }
3849 }
3850
3851 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3852
3853 buf_sz = dma_conf->dma_buf_sz;
3854 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3855
3856 stmmac_reset_queues_param(priv);
3857
3858 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3859 priv->plat->serdes_powerup) {
3860 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3861 if (ret < 0) {
3862 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3863 __func__);
3864 goto init_error;
3865 }
3866 }
3867
3868 ret = stmmac_hw_setup(dev, true);
3869 if (ret < 0) {
3870 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3871 goto init_error;
3872 }
3873
3874 stmmac_init_coalesce(priv);
3875
3876 phylink_start(priv->phylink);
3877 /* We may have called phylink_speed_down before */
3878 phylink_speed_up(priv->phylink);
3879
3880 ret = stmmac_request_irq(dev);
3881 if (ret)
3882 goto irq_error;
3883
3884 stmmac_enable_all_queues(priv);
3885 netif_tx_start_all_queues(priv->dev);
3886 stmmac_enable_all_dma_irq(priv);
3887
3888 return 0;
3889
3890 irq_error:
3891 phylink_stop(priv->phylink);
3892
3893 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3894 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3895
3896 stmmac_hw_teardown(dev);
3897 init_error:
3898 phylink_disconnect_phy(priv->phylink);
3899 init_phy_error:
3900 pm_runtime_put(priv->device);
3901 return ret;
3902 }
3903
static int stmmac_open(struct net_device *dev)
3905 {
3906 struct stmmac_priv *priv = netdev_priv(dev);
3907 struct stmmac_dma_conf *dma_conf;
3908 int ret;
3909
3910 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3911 if (IS_ERR(dma_conf))
3912 return PTR_ERR(dma_conf);
3913
3914 ret = __stmmac_open(dev, dma_conf);
3915 if (ret)
3916 free_dma_desc_resources(priv, dma_conf);
3917
3918 kfree(dma_conf);
3919 return ret;
3920 }
3921
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3923 {
3924 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3925
3926 if (priv->fpe_wq)
3927 destroy_workqueue(priv->fpe_wq);
3928
3929 netdev_info(priv->dev, "FPE workqueue stop");
3930 }
3931
3932 /**
3933 * stmmac_release - close entry point of the driver
3934 * @dev : device pointer.
3935 * Description:
3936 * This is the stop entry point of the driver.
3937 */
static int stmmac_release(struct net_device *dev)
3939 {
3940 struct stmmac_priv *priv = netdev_priv(dev);
3941 u32 chan;
3942
3943 if (device_may_wakeup(priv->device))
3944 phylink_speed_down(priv->phylink, false);
3945 /* Stop and disconnect the PHY */
3946 phylink_stop(priv->phylink);
3947 phylink_disconnect_phy(priv->phylink);
3948
3949 stmmac_disable_all_queues(priv);
3950
3951 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3952 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3953
3954 netif_tx_disable(dev);
3955
3956 /* Free the IRQ lines */
3957 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3958
3959 if (priv->eee_enabled) {
3960 priv->tx_path_in_lpi_mode = false;
3961 del_timer_sync(&priv->eee_ctrl_timer);
3962 }
3963
3964 /* Stop TX/RX DMA and clear the descriptors */
3965 stmmac_stop_all_dma(priv);
3966
3967 /* Release and free the Rx/Tx resources */
3968 free_dma_desc_resources(priv, &priv->dma_conf);
3969
3970 /* Disable the MAC Rx/Tx */
3971 stmmac_mac_set(priv, priv->ioaddr, false);
3972
/* Power down the SerDes if there is one */
3974 if (priv->plat->serdes_powerdown)
3975 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3976
3977 netif_carrier_off(dev);
3978
3979 stmmac_release_ptp(priv);
3980
3981 pm_runtime_put(priv->device);
3982
3983 if (priv->dma_cap.fpesel)
3984 stmmac_fpe_stop_wq(priv);
3985
3986 return 0;
3987 }
3988
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3990 struct stmmac_tx_queue *tx_q)
3991 {
3992 u16 tag = 0x0, inner_tag = 0x0;
3993 u32 inner_type = 0x0;
3994 struct dma_desc *p;
3995
3996 if (!priv->dma_cap.vlins)
3997 return false;
3998 if (!skb_vlan_tag_present(skb))
3999 return false;
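/* For 802.1AD (QinQ) frames, the tag carried in the skb is also programmed
 * as the inner VLAN tag.
 */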
4000 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4001 inner_tag = skb_vlan_tag_get(skb);
4002 inner_type = STMMAC_VLAN_INSERT;
4003 }
4004
4005 tag = skb_vlan_tag_get(skb);
4006
4007 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4008 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4009 else
4010 p = &tx_q->dma_tx[tx_q->cur_tx];
4011
4012 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4013 return false;
4014
4015 stmmac_set_tx_owner(priv, p);
4016 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4017 return true;
4018 }
4019
/**
 * stmmac_tso_allocator - fill the TX descriptors for a TSO payload
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills descriptors, requesting new ones as needed according
 * to the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4032 int total_len, bool last_segment, u32 queue)
4033 {
4034 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4035 struct dma_desc *desc;
4036 u32 buff_size;
4037 int tmp_len;
4038
4039 tmp_len = total_len;
4040
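/* Split the payload across descriptors, at most TSO_MAX_BUFF_SIZE bytes
 * per descriptor.
 */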
4041 while (tmp_len > 0) {
4042 dma_addr_t curr_addr;
4043
4044 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4045 priv->dma_conf.dma_tx_size);
4046 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4047
4048 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4049 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4050 else
4051 desc = &tx_q->dma_tx[tx_q->cur_tx];
4052
4053 curr_addr = des + (total_len - tmp_len);
4054 if (priv->dma_cap.addr64 <= 32)
4055 desc->des0 = cpu_to_le32(curr_addr);
4056 else
4057 stmmac_set_desc_addr(priv, desc, curr_addr);
4058
4059 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4060 TSO_MAX_BUFF_SIZE : tmp_len;
4061
4062 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4063 0, 1,
4064 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4065 0, 0);
4066
4067 tmp_len -= TSO_MAX_BUFF_SIZE;
4068 }
4069 }
4070
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4072 {
4073 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4074 int desc_size;
4075
4076 if (likely(priv->extend_desc))
4077 desc_size = sizeof(struct dma_extended_desc);
4078 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4079 desc_size = sizeof(struct dma_edesc);
4080 else
4081 desc_size = sizeof(struct dma_desc);
4082
4083 /* The own bit must be the latest setting done when prepare the
4084 * descriptor and then barrier is needed to make sure that
4085 * all is coherent before granting the DMA engine.
4086 */
4087 wmb();
4088
4089 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4090 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4091 }
4092
4093 /**
4094 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4095 * @skb : the socket buffer
4096 * @dev : device pointer
4097 * Description: this is the transmit function that is called on TSO frames
4098 * (support available on GMAC4 and newer chips).
 * The diagram below shows the ring programming in the case of TSO frames:
4100 *
4101 * First Descriptor
4102 * --------
4103 * | DES0 |---> buffer1 = L2/L3/L4 header
4104 * | DES1 |---> TCP Payload (can continue on next descr...)
4105 * | DES2 |---> buffer 1 and 2 len
4106 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4107 * --------
4108 * |
4109 * ...
4110 * |
4111 * --------
4112 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4113 * | DES1 | --|
4114 * | DES2 | --> buffer 1 and 2 len
4115 * | DES3 |
4116 * --------
4117 *
 * The MSS is fixed while TSO is in use, so the TDES3 context field only
 * needs to be programmed when the MSS changes.
4119 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4121 {
4122 struct dma_desc *desc, *first, *mss_desc = NULL;
4123 struct stmmac_priv *priv = netdev_priv(dev);
4124 int nfrags = skb_shinfo(skb)->nr_frags;
4125 u32 queue = skb_get_queue_mapping(skb);
4126 unsigned int first_entry, tx_packets;
4127 struct stmmac_txq_stats *txq_stats;
4128 int tmp_pay_len = 0, first_tx;
4129 struct stmmac_tx_queue *tx_q;
4130 bool has_vlan, set_ic;
4131 u8 proto_hdr_len, hdr;
4132 unsigned long flags;
4133 u32 pay_len, mss;
4134 dma_addr_t des;
4135 int i;
4136
4137 tx_q = &priv->dma_conf.tx_queue[queue];
4138 txq_stats = &priv->xstats.txq_stats[queue];
4139 first_tx = tx_q->cur_tx;
4140
4141 /* Compute header lengths */
4142 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4143 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4144 hdr = sizeof(struct udphdr);
4145 } else {
4146 proto_hdr_len = skb_tcp_all_headers(skb);
4147 hdr = tcp_hdrlen(skb);
4148 }
4149
/* Desc availability based on threshold should be safe enough */
4151 if (unlikely(stmmac_tx_avail(priv, queue) <
4152 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4153 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4154 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4155 queue));
4156 /* This is a hard error, log it. */
4157 netdev_err(priv->dev,
4158 "%s: Tx Ring full when queue awake\n",
4159 __func__);
4160 }
4161 return NETDEV_TX_BUSY;
4162 }
4163
4164 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4165
4166 mss = skb_shinfo(skb)->gso_size;
4167
4168 /* set new MSS value if needed */
4169 if (mss != tx_q->mss) {
4170 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4171 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4172 else
4173 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4174
4175 stmmac_set_mss(priv, mss_desc, mss);
4176 tx_q->mss = mss;
4177 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4178 priv->dma_conf.dma_tx_size);
4179 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4180 }
4181
4182 if (netif_msg_tx_queued(priv)) {
4183 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4184 __func__, hdr, proto_hdr_len, pay_len, mss);
4185 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4186 skb->data_len);
4187 }
4188
4189 /* Check if VLAN can be inserted by HW */
4190 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4191
4192 first_entry = tx_q->cur_tx;
4193 WARN_ON(tx_q->tx_skbuff[first_entry]);
4194
4195 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4196 desc = &tx_q->dma_entx[first_entry].basic;
4197 else
4198 desc = &tx_q->dma_tx[first_entry];
4199 first = desc;
4200
4201 if (has_vlan)
4202 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4203
4204 /* first descriptor: fill Headers on Buf1 */
4205 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4206 DMA_TO_DEVICE);
4207 if (dma_mapping_error(priv->device, des))
4208 goto dma_map_err;
4209
4210 tx_q->tx_skbuff_dma[first_entry].buf = des;
4211 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4212 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4213 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4214
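/* With a 32-bit descriptor address, buffer 1 of the first descriptor holds
 * the headers and buffer 2 the start of the payload; wider addresses use a
 * single buffer address and leave the payload to the following descriptors.
 */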
4215 if (priv->dma_cap.addr64 <= 32) {
4216 first->des0 = cpu_to_le32(des);
4217
4218 /* Fill start of payload in buff2 of first descriptor */
4219 if (pay_len)
4220 first->des1 = cpu_to_le32(des + proto_hdr_len);
4221
4222 /* If needed take extra descriptors to fill the remaining payload */
4223 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4224 } else {
4225 stmmac_set_desc_addr(priv, first, des);
4226 tmp_pay_len = pay_len;
4227 des += proto_hdr_len;
4228 pay_len = 0;
4229 }
4230
4231 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4232
4233 /* Prepare fragments */
4234 for (i = 0; i < nfrags; i++) {
4235 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4236
4237 des = skb_frag_dma_map(priv->device, frag, 0,
4238 skb_frag_size(frag),
4239 DMA_TO_DEVICE);
4240 if (dma_mapping_error(priv->device, des))
4241 goto dma_map_err;
4242
4243 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4244 (i == nfrags - 1), queue);
4245
4246 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4247 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4248 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4249 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4250 }
4251
4252 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4253
4254 /* Only the last descriptor gets to point to the skb. */
4255 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4256 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4257
4258 /* Manage tx mitigation */
4259 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4260 tx_q->tx_count_frames += tx_packets;
4261
4262 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4263 set_ic = true;
4264 else if (!priv->tx_coal_frames[queue])
4265 set_ic = false;
4266 else if (tx_packets > priv->tx_coal_frames[queue])
4267 set_ic = true;
4268 else if ((tx_q->tx_count_frames %
4269 priv->tx_coal_frames[queue]) < tx_packets)
4270 set_ic = true;
4271 else
4272 set_ic = false;
4273
4274 if (set_ic) {
4275 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4276 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4277 else
4278 desc = &tx_q->dma_tx[tx_q->cur_tx];
4279
4280 tx_q->tx_count_frames = 0;
4281 stmmac_set_tx_ic(priv, desc);
4282 }
4283
4284 /* We've used all descriptors we need for this skb, however,
4285 * advance cur_tx so that it references a fresh descriptor.
4286 * ndo_start_xmit will fill this descriptor the next time it's
4287 * called and stmmac_tx_clean may clean up to this descriptor.
4288 */
4289 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4290
4291 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4292 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4293 __func__);
4294 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4295 }
4296
4297 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4298 txq_stats->tx_bytes += skb->len;
4299 txq_stats->tx_tso_frames++;
4300 txq_stats->tx_tso_nfrags += nfrags;
4301 if (set_ic)
4302 txq_stats->tx_set_ic_bit++;
4303 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4304
4305 if (priv->sarc_type)
4306 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4307
4308 skb_tx_timestamp(skb);
4309
4310 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4311 priv->hwts_tx_en)) {
4312 /* declare that device is doing timestamping */
4313 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4314 stmmac_enable_tx_timestamp(priv, first);
4315 }
4316
4317 /* Complete the first descriptor before granting the DMA */
4318 stmmac_prepare_tso_tx_desc(priv, first, 1,
4319 proto_hdr_len,
4320 pay_len,
4321 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4322 hdr / 4, (skb->len - proto_hdr_len));
4323
4324 /* If context desc is used to change MSS */
4325 if (mss_desc) {
4326 /* Make sure that first descriptor has been completely
4327 * written, including its own bit. This is because MSS is
4328 * actually before first descriptor, so we need to make
4329 * sure that MSS's own bit is the last thing written.
4330 */
4331 dma_wmb();
4332 stmmac_set_tx_owner(priv, mss_desc);
4333 }
4334
4335 if (netif_msg_pktdata(priv)) {
4336 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4337 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4338 tx_q->cur_tx, first, nfrags);
4339 pr_info(">>> frame to be transmitted: ");
4340 print_pkt(skb->data, skb_headlen(skb));
4341 }
4342
4343 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4344
4345 stmmac_flush_tx_descriptors(priv, queue);
4346 stmmac_tx_timer_arm(priv, queue);
4347
4348 return NETDEV_TX_OK;
4349
4350 dma_map_err:
4351 dev_err(priv->device, "Tx dma map failed\n");
4352 dev_kfree_skb(skb);
4353 priv->xstats.tx_dropped++;
4354 return NETDEV_TX_OK;
4355 }
4356
4357 /**
4358 * stmmac_xmit - Tx entry point of the driver
4359 * @skb : the socket buffer
4360 * @dev : device pointer
4361 * Description : this is the tx entry point of the driver.
 * It programs the descriptor chain or ring and supports oversized frames
 * and the SG feature.
4364 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4366 {
4367 unsigned int first_entry, tx_packets, enh_desc;
4368 struct stmmac_priv *priv = netdev_priv(dev);
4369 unsigned int nopaged_len = skb_headlen(skb);
4370 int i, csum_insertion = 0, is_jumbo = 0;
4371 u32 queue = skb_get_queue_mapping(skb);
4372 int nfrags = skb_shinfo(skb)->nr_frags;
4373 int gso = skb_shinfo(skb)->gso_type;
4374 struct stmmac_txq_stats *txq_stats;
4375 struct dma_edesc *tbs_desc = NULL;
4376 struct dma_desc *desc, *first;
4377 struct stmmac_tx_queue *tx_q;
4378 bool has_vlan, set_ic;
4379 int entry, first_tx;
4380 unsigned long flags;
4381 dma_addr_t des;
4382
4383 tx_q = &priv->dma_conf.tx_queue[queue];
4384 txq_stats = &priv->xstats.txq_stats[queue];
4385 first_tx = tx_q->cur_tx;
4386
4387 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4388 stmmac_disable_eee_mode(priv);
4389
/* Manage oversized TCP/UDP frames for GMAC4 devices */
4391 if (skb_is_gso(skb) && priv->tso) {
4392 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4393 return stmmac_tso_xmit(skb, dev);
4394 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4395 return stmmac_tso_xmit(skb, dev);
4396 }
4397
4398 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4399 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4400 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4401 queue));
4402 /* This is a hard error, log it. */
4403 netdev_err(priv->dev,
4404 "%s: Tx Ring full when queue awake\n",
4405 __func__);
4406 }
4407 return NETDEV_TX_BUSY;
4408 }
4409
4410 /* Check if VLAN can be inserted by HW */
4411 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4412
4413 entry = tx_q->cur_tx;
4414 first_entry = entry;
4415 WARN_ON(tx_q->tx_skbuff[first_entry]);
4416
4417 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4418
4419 if (likely(priv->extend_desc))
4420 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4421 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4422 desc = &tx_q->dma_entx[entry].basic;
4423 else
4424 desc = tx_q->dma_tx + entry;
4425
4426 first = desc;
4427
4428 if (has_vlan)
4429 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4430
4431 enh_desc = priv->plat->enh_desc;
4432 /* To program the descriptors according to the size of the frame */
4433 if (enh_desc)
4434 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4435
4436 if (unlikely(is_jumbo)) {
4437 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4438 if (unlikely(entry < 0) && (entry != -EINVAL))
4439 goto dma_map_err;
4440 }
4441
4442 for (i = 0; i < nfrags; i++) {
4443 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4444 int len = skb_frag_size(frag);
4445 bool last_segment = (i == (nfrags - 1));
4446
4447 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4448 WARN_ON(tx_q->tx_skbuff[entry]);
4449
4450 if (likely(priv->extend_desc))
4451 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4452 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4453 desc = &tx_q->dma_entx[entry].basic;
4454 else
4455 desc = tx_q->dma_tx + entry;
4456
4457 des = skb_frag_dma_map(priv->device, frag, 0, len,
4458 DMA_TO_DEVICE);
4459 if (dma_mapping_error(priv->device, des))
4460 goto dma_map_err; /* should reuse desc w/o issues */
4461
4462 tx_q->tx_skbuff_dma[entry].buf = des;
4463
4464 stmmac_set_desc_addr(priv, desc, des);
4465
4466 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4467 tx_q->tx_skbuff_dma[entry].len = len;
4468 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4469 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4470
4471 /* Prepare the descriptor and set the own bit too */
4472 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4473 priv->mode, 1, last_segment, skb->len);
4474 }
4475
4476 /* Only the last descriptor gets to point to the skb. */
4477 tx_q->tx_skbuff[entry] = skb;
4478 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4479
/* According to the coalesce parameter, the IC bit for the latest
 * segment is reset and the timer is re-armed to clean the tx status.
 * This approach takes care of the fragments: desc is the first
 * element in the no-SG case.
 */
4485 tx_packets = (entry + 1) - first_tx;
4486 tx_q->tx_count_frames += tx_packets;
4487
4488 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4489 set_ic = true;
4490 else if (!priv->tx_coal_frames[queue])
4491 set_ic = false;
4492 else if (tx_packets > priv->tx_coal_frames[queue])
4493 set_ic = true;
4494 else if ((tx_q->tx_count_frames %
4495 priv->tx_coal_frames[queue]) < tx_packets)
4496 set_ic = true;
4497 else
4498 set_ic = false;
4499
4500 if (set_ic) {
4501 if (likely(priv->extend_desc))
4502 desc = &tx_q->dma_etx[entry].basic;
4503 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4504 desc = &tx_q->dma_entx[entry].basic;
4505 else
4506 desc = &tx_q->dma_tx[entry];
4507
4508 tx_q->tx_count_frames = 0;
4509 stmmac_set_tx_ic(priv, desc);
4510 }
4511
4512 /* We've used all descriptors we need for this skb, however,
4513 * advance cur_tx so that it references a fresh descriptor.
4514 * ndo_start_xmit will fill this descriptor the next time it's
4515 * called and stmmac_tx_clean may clean up to this descriptor.
4516 */
4517 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4518 tx_q->cur_tx = entry;
4519
4520 if (netif_msg_pktdata(priv)) {
4521 netdev_dbg(priv->dev,
4522 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4523 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4524 entry, first, nfrags);
4525
4526 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4527 print_pkt(skb->data, skb->len);
4528 }
4529
4530 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4531 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4532 __func__);
4533 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4534 }
4535
4536 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4537 txq_stats->tx_bytes += skb->len;
4538 if (set_ic)
4539 txq_stats->tx_set_ic_bit++;
4540 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4541
4542 if (priv->sarc_type)
4543 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4544
4545 skb_tx_timestamp(skb);
4546
4547 /* Ready to fill the first descriptor and set the OWN bit w/o any
4548 * problems because all the descriptors are actually ready to be
4549 * passed to the DMA engine.
4550 */
4551 if (likely(!is_jumbo)) {
4552 bool last_segment = (nfrags == 0);
4553
4554 des = dma_map_single(priv->device, skb->data,
4555 nopaged_len, DMA_TO_DEVICE);
4556 if (dma_mapping_error(priv->device, des))
4557 goto dma_map_err;
4558
4559 tx_q->tx_skbuff_dma[first_entry].buf = des;
4560 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4561 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4562
4563 stmmac_set_desc_addr(priv, first, des);
4564
4565 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4566 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4567
4568 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4569 priv->hwts_tx_en)) {
4570 /* declare that device is doing timestamping */
4571 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4572 stmmac_enable_tx_timestamp(priv, first);
4573 }
4574
4575 /* Prepare the first descriptor setting the OWN bit too */
4576 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4577 csum_insertion, priv->mode, 0, last_segment,
4578 skb->len);
4579 }
4580
4581 if (tx_q->tbs & STMMAC_TBS_EN) {
4582 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4583
4584 tbs_desc = &tx_q->dma_entx[first_entry];
4585 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4586 }
4587
4588 stmmac_set_tx_owner(priv, first);
4589
4590 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4591
4592 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4593
4594 stmmac_flush_tx_descriptors(priv, queue);
4595 stmmac_tx_timer_arm(priv, queue);
4596
4597 return NETDEV_TX_OK;
4598
4599 dma_map_err:
4600 netdev_err(priv->dev, "Tx DMA map failed\n");
4601 dev_kfree_skb(skb);
4602 priv->xstats.tx_dropped++;
4603 return NETDEV_TX_OK;
4604 }
4605
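/* Strip the VLAN tag left in the frame and hand it to the stack through the
 * HW-accelerated VLAN path when the matching RX offload feature is enabled.
 */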
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4607 {
4608 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4609 __be16 vlan_proto = veth->h_vlan_proto;
4610 u16 vlanid;
4611
4612 if ((vlan_proto == htons(ETH_P_8021Q) &&
4613 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4614 (vlan_proto == htons(ETH_P_8021AD) &&
4615 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4616 /* pop the vlan tag */
4617 vlanid = ntohs(veth->h_vlan_TCI);
4618 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4619 skb_pull(skb, VLAN_HLEN);
4620 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4621 }
4622 }
4623
4624 /**
 * stmmac_rx_refill - refill the used preallocated RX buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: reallocate the buffers used by the reception process, which
 * is based on zero-copy.
4630 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4632 {
4633 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4634 int dirty = stmmac_rx_dirty(priv, queue);
4635 unsigned int entry = rx_q->dirty_rx;
4636 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4637
4638 if (priv->dma_cap.host_dma_width <= 32)
4639 gfp |= GFP_DMA32;
4640
4641 while (dirty-- > 0) {
4642 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4643 struct dma_desc *p;
4644 bool use_rx_wd;
4645
4646 if (priv->extend_desc)
4647 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4648 else
4649 p = rx_q->dma_rx + entry;
4650
4651 if (!buf->page) {
4652 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4653 if (!buf->page)
4654 break;
4655 }
4656
4657 if (priv->sph && !buf->sec_page) {
4658 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4659 if (!buf->sec_page)
4660 break;
4661
4662 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4663 }
4664
4665 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4666
4667 stmmac_set_desc_addr(priv, p, buf->addr);
4668 if (priv->sph)
4669 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4670 else
4671 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4672 stmmac_refill_desc3(priv, rx_q, p);
4673
4674 rx_q->rx_count_frames++;
4675 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4676 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4677 rx_q->rx_count_frames = 0;
4678
4679 use_rx_wd = !priv->rx_coal_frames[queue];
4680 use_rx_wd |= rx_q->rx_count_frames > 0;
4681 if (!priv->use_riwt)
4682 use_rx_wd = false;
4683
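/* Hand the descriptor back to the DMA; when use_rx_wd is set, the RIWT
 * watchdog rather than per-descriptor completion raises the interrupt.
 */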
4684 dma_wmb();
4685 stmmac_set_rx_owner(priv, p, use_rx_wd);
4686
4687 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4688 }
4689 rx_q->dirty_rx = entry;
4690 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4691 (rx_q->dirty_rx * sizeof(struct dma_desc));
4692 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4693 }
4694
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4696 struct dma_desc *p,
4697 int status, unsigned int len)
4698 {
4699 unsigned int plen = 0, hlen = 0;
4700 int coe = priv->hw->rx_csum;
4701
4702 /* Not first descriptor, buffer is always zero */
4703 if (priv->sph && len)
4704 return 0;
4705
4706 /* First descriptor, get split header length */
4707 stmmac_get_rx_header_len(priv, p, &hlen);
4708 if (priv->sph && hlen) {
4709 priv->xstats.rx_split_hdr_pkt_n++;
4710 return hlen;
4711 }
4712
4713 /* First descriptor, not last descriptor and not split header */
4714 if (status & rx_not_ls)
4715 return priv->dma_conf.dma_buf_sz;
4716
4717 plen = stmmac_get_rx_frame_len(priv, p, coe);
4718
4719 /* First descriptor and last descriptor and not split header */
4720 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4721 }
4722
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4724 struct dma_desc *p,
4725 int status, unsigned int len)
4726 {
4727 int coe = priv->hw->rx_csum;
4728 unsigned int plen = 0;
4729
4730 /* Not split header, buffer is not available */
4731 if (!priv->sph)
4732 return 0;
4733
4734 /* Not last descriptor */
4735 if (status & rx_not_ls)
4736 return priv->dma_conf.dma_buf_sz;
4737
4738 plen = stmmac_get_rx_frame_len(priv, p, coe);
4739
4740 /* Last descriptor */
4741 return plen - len;
4742 }
4743
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4745 struct xdp_frame *xdpf, bool dma_map)
4746 {
4747 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4748 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4749 unsigned int entry = tx_q->cur_tx;
4750 struct dma_desc *tx_desc;
4751 dma_addr_t dma_addr;
4752 bool set_ic;
4753
4754 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4755 return STMMAC_XDP_CONSUMED;
4756
4757 if (likely(priv->extend_desc))
4758 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4759 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4760 tx_desc = &tx_q->dma_entx[entry].basic;
4761 else
4762 tx_desc = tx_q->dma_tx + entry;
4763
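/* dma_map is set on the ndo_xdp_xmit path, where the frame data must be
 * mapped; XDP_TX frames already live in the page pool, so only a DMA sync
 * is needed.
 */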
4764 if (dma_map) {
4765 dma_addr = dma_map_single(priv->device, xdpf->data,
4766 xdpf->len, DMA_TO_DEVICE);
4767 if (dma_mapping_error(priv->device, dma_addr))
4768 return STMMAC_XDP_CONSUMED;
4769
4770 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4771 } else {
4772 struct page *page = virt_to_page(xdpf->data);
4773
4774 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4775 xdpf->headroom;
4776 dma_sync_single_for_device(priv->device, dma_addr,
4777 xdpf->len, DMA_BIDIRECTIONAL);
4778
4779 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4780 }
4781
4782 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4783 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4784 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4785 tx_q->tx_skbuff_dma[entry].last_segment = true;
4786 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4787
4788 tx_q->xdpf[entry] = xdpf;
4789
4790 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4791
4792 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4793 true, priv->mode, true, true,
4794 xdpf->len);
4795
4796 tx_q->tx_count_frames++;
4797
4798 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4799 set_ic = true;
4800 else
4801 set_ic = false;
4802
4803 if (set_ic) {
4804 unsigned long flags;
4805 tx_q->tx_count_frames = 0;
4806 stmmac_set_tx_ic(priv, tx_desc);
4807 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4808 txq_stats->tx_set_ic_bit++;
4809 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4810 }
4811
4812 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4813
4814 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4815 tx_q->cur_tx = entry;
4816
4817 return STMMAC_XDP_TX;
4818 }
4819
static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4821 int cpu)
4822 {
4823 int index = cpu;
4824
4825 if (unlikely(index < 0))
4826 index = 0;
4827
4828 while (index >= priv->plat->tx_queues_to_use)
4829 index -= priv->plat->tx_queues_to_use;
4830
4831 return index;
4832 }
4833
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4835 struct xdp_buff *xdp)
4836 {
4837 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4838 int cpu = smp_processor_id();
4839 struct netdev_queue *nq;
4840 int queue;
4841 int res;
4842
4843 if (unlikely(!xdpf))
4844 return STMMAC_XDP_CONSUMED;
4845
4846 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4847 nq = netdev_get_tx_queue(priv->dev, queue);
4848
4849 __netif_tx_lock(nq, cpu);
4850 /* Avoids TX time-out as we are sharing with slow path */
4851 txq_trans_cond_update(nq);
4852
4853 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4854 if (res == STMMAC_XDP_TX)
4855 stmmac_flush_tx_descriptors(priv, queue);
4856
4857 __netif_tx_unlock(nq);
4858
4859 return res;
4860 }
4861
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4863 struct bpf_prog *prog,
4864 struct xdp_buff *xdp)
4865 {
4866 u32 act;
4867 int res;
4868
4869 act = bpf_prog_run_xdp(prog, xdp);
4870 switch (act) {
4871 case XDP_PASS:
4872 res = STMMAC_XDP_PASS;
4873 break;
4874 case XDP_TX:
4875 res = stmmac_xdp_xmit_back(priv, xdp);
4876 break;
4877 case XDP_REDIRECT:
4878 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4879 res = STMMAC_XDP_CONSUMED;
4880 else
4881 res = STMMAC_XDP_REDIRECT;
4882 break;
4883 default:
4884 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4885 fallthrough;
4886 case XDP_ABORTED:
4887 trace_xdp_exception(priv->dev, prog, act);
4888 fallthrough;
4889 case XDP_DROP:
4890 res = STMMAC_XDP_CONSUMED;
4891 break;
4892 }
4893
4894 return res;
4895 }
4896
static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4898 struct xdp_buff *xdp)
4899 {
4900 struct bpf_prog *prog;
4901 int res;
4902
4903 prog = READ_ONCE(priv->xdp_prog);
4904 if (!prog) {
4905 res = STMMAC_XDP_PASS;
4906 goto out;
4907 }
4908
4909 res = __stmmac_xdp_run_prog(priv, prog, xdp);
4910 out:
4911 return ERR_PTR(-res);
4912 }
4913
static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4915 int xdp_status)
4916 {
4917 int cpu = smp_processor_id();
4918 int queue;
4919
4920 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4921
4922 if (xdp_status & STMMAC_XDP_TX)
4923 stmmac_tx_timer_arm(priv, queue);
4924
4925 if (xdp_status & STMMAC_XDP_REDIRECT)
4926 xdp_do_flush();
4927 }
4928
static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4930 struct xdp_buff *xdp)
4931 {
4932 unsigned int metasize = xdp->data - xdp->data_meta;
4933 unsigned int datasize = xdp->data_end - xdp->data;
4934 struct sk_buff *skb;
4935
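/* XSK buffers have to go back to the pool, so copy the frame into a newly
 * allocated skb for the regular stack path.
 */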
4936 skb = __napi_alloc_skb(&ch->rxtx_napi,
4937 xdp->data_end - xdp->data_hard_start,
4938 GFP_ATOMIC | __GFP_NOWARN);
4939 if (unlikely(!skb))
4940 return NULL;
4941
4942 skb_reserve(skb, xdp->data - xdp->data_hard_start);
4943 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4944 if (metasize)
4945 skb_metadata_set(skb, metasize);
4946
4947 return skb;
4948 }
4949
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4951 struct dma_desc *p, struct dma_desc *np,
4952 struct xdp_buff *xdp)
4953 {
4954 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4955 struct stmmac_channel *ch = &priv->channel[queue];
4956 unsigned int len = xdp->data_end - xdp->data;
4957 enum pkt_hash_types hash_type;
4958 int coe = priv->hw->rx_csum;
4959 unsigned long flags;
4960 struct sk_buff *skb;
4961 u32 hash;
4962
4963 skb = stmmac_construct_skb_zc(ch, xdp);
4964 if (!skb) {
4965 priv->xstats.rx_dropped++;
4966 return;
4967 }
4968
4969 stmmac_get_rx_hwtstamp(priv, p, np, skb);
4970 stmmac_rx_vlan(priv->dev, skb);
4971 skb->protocol = eth_type_trans(skb, priv->dev);
4972
4973 if (unlikely(!coe))
4974 skb_checksum_none_assert(skb);
4975 else
4976 skb->ip_summed = CHECKSUM_UNNECESSARY;
4977
4978 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4979 skb_set_hash(skb, hash, hash_type);
4980
4981 skb_record_rx_queue(skb, queue);
4982 napi_gro_receive(&ch->rxtx_napi, skb);
4983
4984 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
4985 rxq_stats->rx_pkt_n++;
4986 rxq_stats->rx_bytes += len;
4987 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
4988 }
4989
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4991 {
4992 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4993 unsigned int entry = rx_q->dirty_rx;
4994 struct dma_desc *rx_desc = NULL;
4995 bool ret = true;
4996
4997 budget = min(budget, stmmac_rx_dirty(priv, queue));
4998
4999 while (budget-- > 0 && entry != rx_q->cur_rx) {
5000 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5001 dma_addr_t dma_addr;
5002 bool use_rx_wd;
5003
5004 if (!buf->xdp) {
5005 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5006 if (!buf->xdp) {
5007 ret = false;
5008 break;
5009 }
5010 }
5011
5012 if (priv->extend_desc)
5013 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5014 else
5015 rx_desc = rx_q->dma_rx + entry;
5016
5017 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5018 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5019 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5020 stmmac_refill_desc3(priv, rx_q, rx_desc);
5021
5022 rx_q->rx_count_frames++;
5023 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5024 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5025 rx_q->rx_count_frames = 0;
5026
5027 use_rx_wd = !priv->rx_coal_frames[queue];
5028 use_rx_wd |= rx_q->rx_count_frames > 0;
5029 if (!priv->use_riwt)
5030 use_rx_wd = false;
5031
5032 dma_wmb();
5033 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5034
5035 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5036 }
5037
5038 if (rx_desc) {
5039 rx_q->dirty_rx = entry;
5040 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5041 (rx_q->dirty_rx * sizeof(struct dma_desc));
5042 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5043 }
5044
5045 return ret;
5046 }
5047
static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5049 {
5050 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5051 * to represent incoming packet, whereas cb field in the same structure
5052 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5053 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5054 */
5055 return (struct stmmac_xdp_buff *)xdp;
5056 }
5057
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5059 {
5060 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5061 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5062 unsigned int count = 0, error = 0, len = 0;
5063 int dirty = stmmac_rx_dirty(priv, queue);
5064 unsigned int next_entry = rx_q->cur_rx;
5065 u32 rx_errors = 0, rx_dropped = 0;
5066 unsigned int desc_size;
5067 struct bpf_prog *prog;
5068 bool failure = false;
5069 unsigned long flags;
5070 int xdp_status = 0;
5071 int status = 0;
5072
5073 if (netif_msg_rx_status(priv)) {
5074 void *rx_head;
5075
5076 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5077 if (priv->extend_desc) {
5078 rx_head = (void *)rx_q->dma_erx;
5079 desc_size = sizeof(struct dma_extended_desc);
5080 } else {
5081 rx_head = (void *)rx_q->dma_rx;
5082 desc_size = sizeof(struct dma_desc);
5083 }
5084
5085 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5086 rx_q->dma_rx_phy, desc_size);
5087 }
5088 while (count < limit) {
5089 struct stmmac_rx_buffer *buf;
5090 struct stmmac_xdp_buff *ctx;
5091 unsigned int buf1_len = 0;
5092 struct dma_desc *np, *p;
5093 int entry;
5094 int res;
5095
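/* Resume a frame whose descriptors spanned the previous NAPI poll */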
5096 if (!count && rx_q->state_saved) {
5097 error = rx_q->state.error;
5098 len = rx_q->state.len;
5099 } else {
5100 rx_q->state_saved = false;
5101 error = 0;
5102 len = 0;
5103 }
5104
5105 if (count >= limit)
5106 break;
5107
5108 read_again:
5109 buf1_len = 0;
5110 entry = next_entry;
5111 buf = &rx_q->buf_pool[entry];
5112
5113 if (dirty >= STMMAC_RX_FILL_BATCH) {
5114 failure = failure ||
5115 !stmmac_rx_refill_zc(priv, queue, dirty);
5116 dirty = 0;
5117 }
5118
5119 if (priv->extend_desc)
5120 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5121 else
5122 p = rx_q->dma_rx + entry;
5123
5124 /* read the status of the incoming frame */
5125 status = stmmac_rx_status(priv, &priv->xstats, p);
5126 /* check if managed by the DMA otherwise go ahead */
5127 if (unlikely(status & dma_own))
5128 break;
5129
5130 /* Prefetch the next RX descriptor */
5131 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5132 priv->dma_conf.dma_rx_size);
5133 next_entry = rx_q->cur_rx;
5134
5135 if (priv->extend_desc)
5136 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5137 else
5138 np = rx_q->dma_rx + next_entry;
5139
5140 prefetch(np);
5141
5142 /* Ensure a valid XSK buffer before proceed */
5143 if (!buf->xdp)
5144 break;
5145
5146 if (priv->extend_desc)
5147 stmmac_rx_extended_status(priv, &priv->xstats,
5148 rx_q->dma_erx + entry);
5149 if (unlikely(status == discard_frame)) {
5150 xsk_buff_free(buf->xdp);
5151 buf->xdp = NULL;
5152 dirty++;
5153 error = 1;
5154 if (!priv->hwts_rx_en)
5155 rx_errors++;
5156 }
5157
5158 if (unlikely(error && (status & rx_not_ls)))
5159 goto read_again;
5160 if (unlikely(error)) {
5161 count++;
5162 continue;
5163 }
5164
5165 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5166 if (likely(status & rx_not_ls)) {
5167 xsk_buff_free(buf->xdp);
5168 buf->xdp = NULL;
5169 dirty++;
5170 count++;
5171 goto read_again;
5172 }
5173
5174 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5175 ctx->priv = priv;
5176 ctx->desc = p;
5177 ctx->ndesc = np;
5178
/* XDP ZC frames only support primary buffers for now */
5180 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5181 len += buf1_len;
5182
5183 /* ACS is disabled; strip manually. */
5184 if (likely(!(status & rx_not_ls))) {
5185 buf1_len -= ETH_FCS_LEN;
5186 len -= ETH_FCS_LEN;
5187 }
5188
5189 /* RX buffer is good and fit into a XSK pool buffer */
5190 buf->xdp->data_end = buf->xdp->data + buf1_len;
5191 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5192
5193 prog = READ_ONCE(priv->xdp_prog);
5194 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5195
5196 switch (res) {
5197 case STMMAC_XDP_PASS:
5198 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5199 xsk_buff_free(buf->xdp);
5200 break;
5201 case STMMAC_XDP_CONSUMED:
5202 xsk_buff_free(buf->xdp);
5203 rx_dropped++;
5204 break;
5205 case STMMAC_XDP_TX:
5206 case STMMAC_XDP_REDIRECT:
5207 xdp_status |= res;
5208 break;
5209 }
5210
5211 buf->xdp = NULL;
5212 dirty++;
5213 count++;
5214 }
5215
5216 if (status & rx_not_ls) {
5217 rx_q->state_saved = true;
5218 rx_q->state.error = error;
5219 rx_q->state.len = len;
5220 }
5221
5222 stmmac_finalize_xdp_rx(priv, xdp_status);
5223
5224 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5225 rxq_stats->rx_pkt_n += count;
5226 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5227
5228 priv->xstats.rx_dropped += rx_dropped;
5229 priv->xstats.rx_errors += rx_errors;
5230
5231 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5232 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5233 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5234 else
5235 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5236
5237 return (int)count;
5238 }
5239
5240 return failure ? limit : (int)count;
5241 }
5242
5243 /**
5244 * stmmac_rx - manage the receive process
5245 * @priv: driver private structure
5246 * @limit: napi budget
5247 * @queue: RX queue index.
5248 * Description : this is the function called by the napi poll method.
5249 * It gets all the frames inside the ring.
5250 */
5251 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5252 {
5253 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5254 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5255 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5256 struct stmmac_channel *ch = &priv->channel[queue];
5257 unsigned int count = 0, error = 0, len = 0;
5258 int status = 0, coe = priv->hw->rx_csum;
5259 unsigned int next_entry = rx_q->cur_rx;
5260 enum dma_data_direction dma_dir;
5261 unsigned int desc_size;
5262 struct sk_buff *skb = NULL;
5263 struct stmmac_xdp_buff ctx;
5264 unsigned long flags;
5265 int xdp_status = 0;
5266 int buf_sz;
5267
5268 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5269 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5270
5271 if (netif_msg_rx_status(priv)) {
5272 void *rx_head;
5273
5274 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5275 if (priv->extend_desc) {
5276 rx_head = (void *)rx_q->dma_erx;
5277 desc_size = sizeof(struct dma_extended_desc);
5278 } else {
5279 rx_head = (void *)rx_q->dma_rx;
5280 desc_size = sizeof(struct dma_desc);
5281 }
5282
5283 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5284 rx_q->dma_rx_phy, desc_size);
5285 }
5286 while (count < limit) {
5287 unsigned int buf1_len = 0, buf2_len = 0;
5288 enum pkt_hash_types hash_type;
5289 struct stmmac_rx_buffer *buf;
5290 struct dma_desc *np, *p;
5291 int entry;
5292 u32 hash;
5293
5294 if (!count && rx_q->state_saved) {
5295 skb = rx_q->state.skb;
5296 error = rx_q->state.error;
5297 len = rx_q->state.len;
5298 } else {
5299 rx_q->state_saved = false;
5300 skb = NULL;
5301 error = 0;
5302 len = 0;
5303 }
5304
5305 if (count >= limit)
5306 break;
5307
5308 read_again:
5309 buf1_len = 0;
5310 buf2_len = 0;
5311 entry = next_entry;
5312 buf = &rx_q->buf_pool[entry];
5313
5314 if (priv->extend_desc)
5315 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5316 else
5317 p = rx_q->dma_rx + entry;
5318
5319 /* read the status of the incoming frame */
5320 status = stmmac_rx_status(priv, &priv->xstats, p);
5321 /* check if managed by the DMA otherwise go ahead */
5322 if (unlikely(status & dma_own))
5323 break;
5324
5325 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5326 priv->dma_conf.dma_rx_size);
5327 next_entry = rx_q->cur_rx;
5328
5329 if (priv->extend_desc)
5330 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5331 else
5332 np = rx_q->dma_rx + next_entry;
5333
5334 prefetch(np);
5335
5336 if (priv->extend_desc)
5337 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5338 if (unlikely(status == discard_frame)) {
5339 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5340 buf->page = NULL;
5341 error = 1;
5342 if (!priv->hwts_rx_en)
5343 rx_errors++;
5344 }
5345
5346 if (unlikely(error && (status & rx_not_ls)))
5347 goto read_again;
5348 if (unlikely(error)) {
5349 dev_kfree_skb(skb);
5350 skb = NULL;
5351 count++;
5352 continue;
5353 }
5354
5355 /* Buffer is good. Go on. */
5356
5357 prefetch(page_address(buf->page) + buf->page_offset);
5358 if (buf->sec_page)
5359 prefetch(page_address(buf->sec_page));
5360
5361 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5362 len += buf1_len;
5363 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5364 len += buf2_len;
5365
5366 /* ACS is disabled; strip manually. */
5367 if (likely(!(status & rx_not_ls))) {
5368 if (buf2_len) {
5369 buf2_len -= ETH_FCS_LEN;
5370 len -= ETH_FCS_LEN;
5371 } else if (buf1_len) {
5372 buf1_len -= ETH_FCS_LEN;
5373 len -= ETH_FCS_LEN;
5374 }
5375 }
5376
5377 if (!skb) {
5378 unsigned int pre_len, sync_len;
5379
5380 dma_sync_single_for_cpu(priv->device, buf->addr,
5381 buf1_len, dma_dir);
5382
5383 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5384 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5385 buf->page_offset, buf1_len, true);
5386
5387 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5388 buf->page_offset;
5389
5390 ctx.priv = priv;
5391 ctx.desc = p;
5392 ctx.ndesc = np;
5393
5394 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5395 /* Due to xdp_adjust_tail: the DMA sync for_device
5396 * must cover the max length the CPU touched
5397 */
5398 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5399 buf->page_offset;
5400 sync_len = max(sync_len, pre_len);
5401
5402 /* For non-XDP_PASS verdicts */
5403 if (IS_ERR(skb)) {
5404 unsigned int xdp_res = -PTR_ERR(skb);
5405
5406 if (xdp_res & STMMAC_XDP_CONSUMED) {
5407 page_pool_put_page(rx_q->page_pool,
5408 virt_to_head_page(ctx.xdp.data),
5409 sync_len, true);
5410 buf->page = NULL;
5411 rx_dropped++;
5412
5413 /* Clear skb, as it encodes the XDP
5414 * verdict rather than a real buffer.
5415 */
5416 skb = NULL;
5417
5418 if (unlikely((status & rx_not_ls)))
5419 goto read_again;
5420
5421 count++;
5422 continue;
5423 } else if (xdp_res & (STMMAC_XDP_TX |
5424 STMMAC_XDP_REDIRECT)) {
5425 xdp_status |= xdp_res;
5426 buf->page = NULL;
5427 skb = NULL;
5428 count++;
5429 continue;
5430 }
5431 }
5432 }
5433
5434 if (!skb) {
5435 /* XDP program may expand or reduce tail */
5436 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5437
5438 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5439 if (!skb) {
5440 rx_dropped++;
5441 count++;
5442 goto drain_data;
5443 }
5444
5445 /* XDP program may adjust header */
5446 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5447 skb_put(skb, buf1_len);
5448
5449 /* Data payload copied into SKB, page ready for recycle */
5450 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5451 buf->page = NULL;
5452 } else if (buf1_len) {
5453 dma_sync_single_for_cpu(priv->device, buf->addr,
5454 buf1_len, dma_dir);
5455 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5456 buf->page, buf->page_offset, buf1_len,
5457 priv->dma_conf.dma_buf_sz);
5458
5459 /* Data payload appended into SKB */
5460 skb_mark_for_recycle(skb);
5461 buf->page = NULL;
5462 }
5463
5464 if (buf2_len) {
5465 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5466 buf2_len, dma_dir);
5467 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5468 buf->sec_page, 0, buf2_len,
5469 priv->dma_conf.dma_buf_sz);
5470
5471 /* Data payload appended into SKB */
5472 skb_mark_for_recycle(skb);
5473 buf->sec_page = NULL;
5474 }
5475
5476 drain_data:
5477 if (likely(status & rx_not_ls))
5478 goto read_again;
5479 if (!skb)
5480 continue;
5481
5482 /* Got entire packet into SKB. Finish it. */
5483
5484 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5485 stmmac_rx_vlan(priv->dev, skb);
5486 skb->protocol = eth_type_trans(skb, priv->dev);
5487
5488 if (unlikely(!coe))
5489 skb_checksum_none_assert(skb);
5490 else
5491 skb->ip_summed = CHECKSUM_UNNECESSARY;
5492
5493 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5494 skb_set_hash(skb, hash, hash_type);
5495
5496 skb_record_rx_queue(skb, queue);
5497 napi_gro_receive(&ch->rx_napi, skb);
5498 skb = NULL;
5499
5500 rx_packets++;
5501 rx_bytes += len;
5502 count++;
5503 }
5504
5505 if (status & rx_not_ls || skb) {
5506 rx_q->state_saved = true;
5507 rx_q->state.skb = skb;
5508 rx_q->state.error = error;
5509 rx_q->state.len = len;
5510 }
5511
5512 stmmac_finalize_xdp_rx(priv, xdp_status);
5513
5514 stmmac_rx_refill(priv, queue);
5515
5516 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5517 rxq_stats->rx_packets += rx_packets;
5518 rxq_stats->rx_bytes += rx_bytes;
5519 rxq_stats->rx_pkt_n += count;
5520 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5521
5522 priv->xstats.rx_dropped += rx_dropped;
5523 priv->xstats.rx_errors += rx_errors;
5524
5525 return count;
5526 }
5527
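/**
 * stmmac_napi_poll_rx - RX NAPI poll callback
 * @napi: NAPI context (embedded in the channel structure)
 * @budget: maximum number of packets this poll may process
 * Description: bumps the per-queue napi_poll counter, runs stmmac_rx() and,
 * if all pending work fitted within the budget, completes NAPI and re-arms
 * the RX DMA interrupt for this channel.
 */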
5528 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5529 {
5530 struct stmmac_channel *ch =
5531 container_of(napi, struct stmmac_channel, rx_napi);
5532 struct stmmac_priv *priv = ch->priv_data;
5533 struct stmmac_rxq_stats *rxq_stats;
5534 u32 chan = ch->index;
5535 unsigned long flags;
5536 int work_done;
5537
5538 rxq_stats = &priv->xstats.rxq_stats[chan];
5539 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5540 rxq_stats->napi_poll++;
5541 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5542
5543 work_done = stmmac_rx(priv, budget, chan);
5544 if (work_done < budget && napi_complete_done(napi, work_done)) {
5545 unsigned long flags;
5546
5547 spin_lock_irqsave(&ch->lock, flags);
5548 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5549 spin_unlock_irqrestore(&ch->lock, flags);
5550 }
5551
5552 return work_done;
5553 }
5554
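/**
 * stmmac_napi_poll_tx - TX NAPI poll callback
 * @napi: NAPI context (embedded in the channel structure)
 * @budget: maximum amount of TX work this poll may do
 * Description: reclaims completed TX descriptors via stmmac_tx_clean() and,
 * if all work fitted within the budget, completes NAPI and re-arms the TX
 * DMA interrupt for this channel.
 */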
5555 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5556 {
5557 struct stmmac_channel *ch =
5558 container_of(napi, struct stmmac_channel, tx_napi);
5559 struct stmmac_priv *priv = ch->priv_data;
5560 struct stmmac_txq_stats *txq_stats;
5561 u32 chan = ch->index;
5562 unsigned long flags;
5563 int work_done;
5564
5565 txq_stats = &priv->xstats.txq_stats[chan];
5566 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5567 txq_stats->napi_poll++;
5568 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5569
5570 work_done = stmmac_tx_clean(priv, budget, chan);
5571 work_done = min(work_done, budget);
5572
5573 if (work_done < budget && napi_complete_done(napi, work_done)) {
5574 unsigned long flags;
5575
5576 spin_lock_irqsave(&ch->lock, flags);
5577 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5578 spin_unlock_irqrestore(&ch->lock, flags);
5579 }
5580
5581 return work_done;
5582 }
5583
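/**
 * stmmac_napi_poll_rxtx - combined RX/TX NAPI poll used for XSK queues
 * @napi: NAPI context (embedded in the channel structure)
 * @budget: NAPI budget shared by the RX and TX work
 * Description: cleans the TX ring, processes zero-copy RX via stmmac_rx_zc()
 * and only re-enables both DMA interrupts once both directions are done.
 */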
5584 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5585 {
5586 struct stmmac_channel *ch =
5587 container_of(napi, struct stmmac_channel, rxtx_napi);
5588 struct stmmac_priv *priv = ch->priv_data;
5589 int rx_done, tx_done, rxtx_done;
5590 struct stmmac_rxq_stats *rxq_stats;
5591 struct stmmac_txq_stats *txq_stats;
5592 u32 chan = ch->index;
5593 unsigned long flags;
5594
5595 rxq_stats = &priv->xstats.rxq_stats[chan];
5596 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5597 rxq_stats->napi_poll++;
5598 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5599
5600 txq_stats = &priv->xstats.txq_stats[chan];
5601 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5602 txq_stats->napi_poll++;
5603 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5604
5605 tx_done = stmmac_tx_clean(priv, budget, chan);
5606 tx_done = min(tx_done, budget);
5607
5608 rx_done = stmmac_rx_zc(priv, budget, chan);
5609
5610 rxtx_done = max(tx_done, rx_done);
5611
5612 /* If either TX or RX work is not complete, return budget
5613 * and keep polling
5614 */
5615 if (rxtx_done >= budget)
5616 return budget;
5617
5618 /* all work done, exit the polling mode */
5619 if (napi_complete_done(napi, rxtx_done)) {
5620 unsigned long flags;
5621
5622 spin_lock_irqsave(&ch->lock, flags);
5623 /* Both RX and TX work are complete,
5624 * so enable both RX & TX IRQs.
5625 */
5626 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5627 spin_unlock_irqrestore(&ch->lock, flags);
5628 }
5629
5630 return min(rxtx_done, budget - 1);
5631 }
5632
5633 /**
5634 * stmmac_tx_timeout
5635 * @dev : Pointer to net device structure
5636 * @txqueue: the index of the hanging transmit queue
5637 * Description: this function is called when a packet transmission fails to
5638 * complete within a reasonable time. The driver will mark the error in the
5639 * netdev structure and arrange for the device to be reset to a sane state
5640 * in order to transmit a new packet.
5641 */
5642 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5643 {
5644 struct stmmac_priv *priv = netdev_priv(dev);
5645
5646 stmmac_global_err(priv);
5647 }
5648
5649 /**
5650 * stmmac_set_rx_mode - entry point for multicast addressing
5651 * @dev : pointer to the device structure
5652 * Description:
5653 * This function is a driver entry point which gets called by the kernel
5654 * whenever multicast addresses must be enabled/disabled.
5655 * Return value:
5656 * void.
5657 */
5658 static void stmmac_set_rx_mode(struct net_device *dev)
5659 {
5660 struct stmmac_priv *priv = netdev_priv(dev);
5661
5662 stmmac_set_filter(priv, priv->hw, dev);
5663 }
5664
5665 /**
5666 * stmmac_change_mtu - entry point to change MTU size for the device.
5667 * @dev : device pointer.
5668 * @new_mtu : the new MTU size for the device.
5669 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
5670 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5671 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5672 * Return value:
5673 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5674 * file on failure.
5675 */
5676 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5677 {
5678 struct stmmac_priv *priv = netdev_priv(dev);
5679 int txfifosz = priv->plat->tx_fifo_size;
5680 struct stmmac_dma_conf *dma_conf;
5681 const int mtu = new_mtu;
5682 int ret;
5683
5684 if (txfifosz == 0)
5685 txfifosz = priv->dma_cap.tx_fifo_size;
5686
5687 txfifosz /= priv->plat->tx_queues_to_use;
5688
5689 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5690 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5691 return -EINVAL;
5692 }
5693
5694 new_mtu = STMMAC_ALIGN(new_mtu);
5695
5696 /* If condition true, FIFO is too small or MTU too large */
5697 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5698 return -EINVAL;
5699
5700 if (netif_running(dev)) {
5701 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5702 /* Try to allocate the new DMA conf with the new mtu */
5703 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5704 if (IS_ERR(dma_conf)) {
5705 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5706 mtu);
5707 return PTR_ERR(dma_conf);
5708 }
5709
5710 stmmac_release(dev);
5711
5712 ret = __stmmac_open(dev, dma_conf);
5713 if (ret) {
5714 free_dma_desc_resources(priv, dma_conf);
5715 kfree(dma_conf);
5716 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5717 return ret;
5718 }
5719
5720 kfree(dma_conf);
5721
5722 stmmac_set_rx_mode(dev);
5723 }
5724
5725 dev->mtu = mtu;
5726 netdev_update_features(dev);
5727
5728 return 0;
5729 }
5730
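/* Mask out checksum features the platform cannot support and cache the
 * TSO state requested through ethtool.
 */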
5731 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5732 netdev_features_t features)
5733 {
5734 struct stmmac_priv *priv = netdev_priv(dev);
5735
5736 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5737 features &= ~NETIF_F_RXCSUM;
5738
5739 if (!priv->plat->tx_coe)
5740 features &= ~NETIF_F_CSUM_MASK;
5741
5742 /* Some GMAC devices have a bugged Jumbo frame support that
5743 * needs to have the Tx COE disabled for oversized frames
5744 * (due to limited buffer sizes). In this case we disable
5745 * TX csum insertion in the TDES and do not use SF.
5746 */
5747 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5748 features &= ~NETIF_F_CSUM_MASK;
5749
5750 /* Enable or disable TSO as requested via ethtool */
5751 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5752 if (features & NETIF_F_TSO)
5753 priv->tso = true;
5754 else
5755 priv->tso = false;
5756 }
5757
5758 return features;
5759 }
5760
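/* Propagate the requested RX checksum offload state to the hardware and
 * refresh Split Header enablement on all RX channels accordingly.
 */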
5761 static int stmmac_set_features(struct net_device *netdev,
5762 netdev_features_t features)
5763 {
5764 struct stmmac_priv *priv = netdev_priv(netdev);
5765
5766 /* Keep the COE Type if checksum offload is supported */
5767 if (features & NETIF_F_RXCSUM)
5768 priv->hw->rx_csum = priv->plat->rx_coe;
5769 else
5770 priv->hw->rx_csum = 0;
5771 /* No check needed because rx_coe has been set before and it will be
5772 * fixed in case of issue.
5773 */
5774 stmmac_rx_ipc(priv, priv->hw);
5775
5776 if (priv->sph_cap) {
5777 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5778 u32 chan;
5779
5780 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5781 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5782 }
5783
5784 return 0;
5785 }
5786
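/* Track the Frame Preemption (FPE) verify/response mPacket handshake for
 * both the local and link-partner side, and kick the FPE workqueue task
 * when a state change needs to be handled.
 */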
5787 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5788 {
5789 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5790 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5791 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5792 bool *hs_enable = &fpe_cfg->hs_enable;
5793
5794 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5795 return;
5796
5797 /* If LP has sent verify mPacket, LP is FPE capable */
5798 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5799 if (*lp_state < FPE_STATE_CAPABLE)
5800 *lp_state = FPE_STATE_CAPABLE;
5801
5802 /* If the user has requested FPE enable, respond quickly */
5803 if (*hs_enable)
5804 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5805 MPACKET_RESPONSE);
5806 }
5807
5808 /* If Local has sent verify mPacket, Local is FPE capable */
5809 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5810 if (*lo_state < FPE_STATE_CAPABLE)
5811 *lo_state = FPE_STATE_CAPABLE;
5812 }
5813
5814 /* If LP has sent response mPacket, LP is entering FPE ON */
5815 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5816 *lp_state = FPE_STATE_ENTERING_ON;
5817
5818 /* If Local has sent response mPacket, Local is entering FPE ON */
5819 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5820 *lo_state = FPE_STATE_ENTERING_ON;
5821
5822 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5823 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5824 priv->fpe_wq) {
5825 queue_work(priv->fpe_wq, &priv->fpe_task);
5826 }
5827 }
5828
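/* Handle interrupt sources shared by all interrupt lines: wake-up events,
 * EST and FPE status, LPI entry/exit, per-queue MTL status, PCS link state
 * and timestamp interrupts.
 */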
5829 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5830 {
5831 u32 rx_cnt = priv->plat->rx_queues_to_use;
5832 u32 tx_cnt = priv->plat->tx_queues_to_use;
5833 u32 queues_count;
5834 u32 queue;
5835 bool xmac;
5836
5837 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5838 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5839
5840 if (priv->irq_wake)
5841 pm_wakeup_event(priv->device, 0);
5842
5843 if (priv->dma_cap.estsel)
5844 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5845 &priv->xstats, tx_cnt);
5846
5847 if (priv->dma_cap.fpesel) {
5848 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5849 priv->dev);
5850
5851 stmmac_fpe_event_status(priv, status);
5852 }
5853
5854 /* To handle the GMAC's own interrupts */
5855 if ((priv->plat->has_gmac) || xmac) {
5856 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5857
5858 if (unlikely(status)) {
5859 /* For LPI we need to save the tx status */
5860 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5861 priv->tx_path_in_lpi_mode = true;
5862 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5863 priv->tx_path_in_lpi_mode = false;
5864 }
5865
5866 for (queue = 0; queue < queues_count; queue++) {
5867 status = stmmac_host_mtl_irq_status(priv, priv->hw,
5868 queue);
5869 }
5870
5871 /* PCS link status */
5872 if (priv->hw->pcs &&
5873 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5874 if (priv->xstats.pcs_link)
5875 netif_carrier_on(priv->dev);
5876 else
5877 netif_carrier_off(priv->dev);
5878 }
5879
5880 stmmac_timestamp_interrupt(priv, priv);
5881 }
5882 }
5883
5884 /**
5885 * stmmac_interrupt - main ISR
5886 * @irq: interrupt number.
5887 * @dev_id: to pass the net device pointer.
5888 * Description: this is the main driver interrupt service routine.
5889 * It can call:
5890 * o DMA service routine (to manage incoming frame reception and transmission
5891 * status)
5892 * o Core interrupts to manage: remote wake-up, management counter, LPI
5893 * interrupts.
5894 */
5895 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5896 {
5897 struct net_device *dev = (struct net_device *)dev_id;
5898 struct stmmac_priv *priv = netdev_priv(dev);
5899
5900 /* Check if adapter is up */
5901 if (test_bit(STMMAC_DOWN, &priv->state))
5902 return IRQ_HANDLED;
5903
5904 /* Check if a fatal error happened */
5905 if (stmmac_safety_feat_interrupt(priv))
5906 return IRQ_HANDLED;
5907
5908 /* To handle Common interrupts */
5909 stmmac_common_interrupt(priv);
5910
5911 /* To handle DMA interrupts */
5912 stmmac_dma_interrupt(priv);
5913
5914 return IRQ_HANDLED;
5915 }
5916
5917 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5918 {
5919 struct net_device *dev = (struct net_device *)dev_id;
5920 struct stmmac_priv *priv = netdev_priv(dev);
5921
5922 if (unlikely(!dev)) {
5923 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5924 return IRQ_NONE;
5925 }
5926
5927 /* Check if adapter is up */
5928 if (test_bit(STMMAC_DOWN, &priv->state))
5929 return IRQ_HANDLED;
5930
5931 /* To handle Common interrupts */
5932 stmmac_common_interrupt(priv);
5933
5934 return IRQ_HANDLED;
5935 }
5936
5937 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5938 {
5939 struct net_device *dev = (struct net_device *)dev_id;
5940 struct stmmac_priv *priv = netdev_priv(dev);
5941
5942 if (unlikely(!dev)) {
5943 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5944 return IRQ_NONE;
5945 }
5946
5947 /* Check if adapter is up */
5948 if (test_bit(STMMAC_DOWN, &priv->state))
5949 return IRQ_HANDLED;
5950
5951 /* Check if a fatal error happened */
5952 stmmac_safety_feat_interrupt(priv);
5953
5954 return IRQ_HANDLED;
5955 }
5956
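/* Per-channel TX interrupt handler used when dedicated MSI vectors are
 * available: schedules NAPI and reacts to TX DMA error statuses.
 */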
5957 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5958 {
5959 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5960 struct stmmac_dma_conf *dma_conf;
5961 int chan = tx_q->queue_index;
5962 struct stmmac_priv *priv;
5963 int status;
5964
5965 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5966 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5967
5968 if (unlikely(!data)) {
5969 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5970 return IRQ_NONE;
5971 }
5972
5973 /* Check if adapter is up */
5974 if (test_bit(STMMAC_DOWN, &priv->state))
5975 return IRQ_HANDLED;
5976
5977 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5978
5979 if (unlikely(status & tx_hard_error_bump_tc)) {
5980 /* Try to bump up the dma threshold on this failure */
5981 stmmac_bump_dma_threshold(priv, chan);
5982 } else if (unlikely(status == tx_hard_error)) {
5983 stmmac_tx_err(priv, chan);
5984 }
5985
5986 return IRQ_HANDLED;
5987 }
5988
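/* Per-channel RX interrupt handler used when dedicated MSI vectors are
 * available: it only needs to schedule the RX NAPI context.
 */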
5989 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5990 {
5991 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5992 struct stmmac_dma_conf *dma_conf;
5993 int chan = rx_q->queue_index;
5994 struct stmmac_priv *priv;
5995
5996 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5997 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5998
5999 if (unlikely(!data)) {
6000 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6001 return IRQ_NONE;
6002 }
6003
6004 /* Check if adapter is up */
6005 if (test_bit(STMMAC_DOWN, &priv->state))
6006 return IRQ_HANDLED;
6007
6008 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6009
6010 return IRQ_HANDLED;
6011 }
6012
6013 /**
6014 * stmmac_ioctl - Entry point for the Ioctl
6015 * @dev: Device pointer.
6016 * @rq: An IOCTL-specific structure that can contain a pointer to
6017 * a proprietary structure used to pass information to the driver.
6018 * @cmd: IOCTL command
6019 * Description:
6020 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6021 */
6022 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6023 {
6024 struct stmmac_priv *priv = netdev_priv(dev);
6025 int ret = -EOPNOTSUPP;
6026
6027 if (!netif_running(dev))
6028 return -EINVAL;
6029
6030 switch (cmd) {
6031 case SIOCGMIIPHY:
6032 case SIOCGMIIREG:
6033 case SIOCSMIIREG:
6034 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6035 break;
6036 case SIOCSHWTSTAMP:
6037 ret = stmmac_hwtstamp_set(dev, rq);
6038 break;
6039 case SIOCGHWTSTAMP:
6040 ret = stmmac_hwtstamp_get(dev, rq);
6041 break;
6042 default:
6043 break;
6044 }
6045
6046 return ret;
6047 }
6048
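/* tc block callback: queues are quiesced while a CLSU32 or FLOWER rule is
 * installed into the hardware and re-enabled afterwards.
 */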
6049 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6050 void *cb_priv)
6051 {
6052 struct stmmac_priv *priv = cb_priv;
6053 int ret = -EOPNOTSUPP;
6054
6055 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6056 return ret;
6057
6058 __stmmac_disable_all_queues(priv);
6059
6060 switch (type) {
6061 case TC_SETUP_CLSU32:
6062 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6063 break;
6064 case TC_SETUP_CLSFLOWER:
6065 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6066 break;
6067 default:
6068 break;
6069 }
6070
6071 stmmac_enable_all_queues(priv);
6072 return ret;
6073 }
6074
6075 static LIST_HEAD(stmmac_block_cb_list);
6076
6077 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6078 void *type_data)
6079 {
6080 struct stmmac_priv *priv = netdev_priv(ndev);
6081
6082 switch (type) {
6083 case TC_QUERY_CAPS:
6084 return stmmac_tc_query_caps(priv, priv, type_data);
6085 case TC_SETUP_BLOCK:
6086 return flow_block_cb_setup_simple(type_data,
6087 &stmmac_block_cb_list,
6088 stmmac_setup_tc_block_cb,
6089 priv, priv, true);
6090 case TC_SETUP_QDISC_CBS:
6091 return stmmac_tc_setup_cbs(priv, priv, type_data);
6092 case TC_SETUP_QDISC_TAPRIO:
6093 return stmmac_tc_setup_taprio(priv, priv, type_data);
6094 case TC_SETUP_QDISC_ETF:
6095 return stmmac_tc_setup_etf(priv, priv, type_data);
6096 default:
6097 return -EOPNOTSUPP;
6098 }
6099 }
6100
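/* ndo_select_queue: steer TSO/USO traffic to queue 0 (guaranteed to be
 * offload capable) and fall back to the generic queue picker otherwise.
 */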
6101 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6102 struct net_device *sb_dev)
6103 {
6104 int gso = skb_shinfo(skb)->gso_type;
6105
6106 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6107 /*
6108 * There is no way to determine the number of TSO/USO
6109 * capable Queues. Let's always use Queue 0
6110 * because if TSO/USO is supported then at least this
6111 * one will be capable.
6112 */
6113 return 0;
6114 }
6115
6116 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6117 }
6118
6119 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6120 {
6121 struct stmmac_priv *priv = netdev_priv(ndev);
6122 int ret = 0;
6123
6124 ret = pm_runtime_resume_and_get(priv->device);
6125 if (ret < 0)
6126 return ret;
6127
6128 ret = eth_mac_addr(ndev, addr);
6129 if (ret)
6130 goto set_mac_error;
6131
6132 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6133
6134 set_mac_error:
6135 pm_runtime_put(priv->device);
6136
6137 return ret;
6138 }
6139
6140 #ifdef CONFIG_DEBUG_FS
6141 static struct dentry *stmmac_fs_dir;
6142
6143 static void sysfs_display_ring(void *head, int size, int extend_desc,
6144 struct seq_file *seq, dma_addr_t dma_phy_addr)
6145 {
6146 int i;
6147 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6148 struct dma_desc *p = (struct dma_desc *)head;
6149 dma_addr_t dma_addr;
6150
6151 for (i = 0; i < size; i++) {
6152 if (extend_desc) {
6153 dma_addr = dma_phy_addr + i * sizeof(*ep);
6154 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6155 i, &dma_addr,
6156 le32_to_cpu(ep->basic.des0),
6157 le32_to_cpu(ep->basic.des1),
6158 le32_to_cpu(ep->basic.des2),
6159 le32_to_cpu(ep->basic.des3));
6160 ep++;
6161 } else {
6162 dma_addr = dma_phy_addr + i * sizeof(*p);
6163 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6164 i, &dma_addr,
6165 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6166 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6167 p++;
6168 }
6169 seq_printf(seq, "\n");
6170 }
6171 }
6172
6173 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6174 {
6175 struct net_device *dev = seq->private;
6176 struct stmmac_priv *priv = netdev_priv(dev);
6177 u32 rx_count = priv->plat->rx_queues_to_use;
6178 u32 tx_count = priv->plat->tx_queues_to_use;
6179 u32 queue;
6180
6181 if ((dev->flags & IFF_UP) == 0)
6182 return 0;
6183
6184 for (queue = 0; queue < rx_count; queue++) {
6185 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6186
6187 seq_printf(seq, "RX Queue %d:\n", queue);
6188
6189 if (priv->extend_desc) {
6190 seq_printf(seq, "Extended descriptor ring:\n");
6191 sysfs_display_ring((void *)rx_q->dma_erx,
6192 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6193 } else {
6194 seq_printf(seq, "Descriptor ring:\n");
6195 sysfs_display_ring((void *)rx_q->dma_rx,
6196 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6197 }
6198 }
6199
6200 for (queue = 0; queue < tx_count; queue++) {
6201 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6202
6203 seq_printf(seq, "TX Queue %d:\n", queue);
6204
6205 if (priv->extend_desc) {
6206 seq_printf(seq, "Extended descriptor ring:\n");
6207 sysfs_display_ring((void *)tx_q->dma_etx,
6208 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6209 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6210 seq_printf(seq, "Descriptor ring:\n");
6211 sysfs_display_ring((void *)tx_q->dma_tx,
6212 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6213 }
6214 }
6215
6216 return 0;
6217 }
6218 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6219
6220 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6221 {
6222 static const char * const dwxgmac_timestamp_source[] = {
6223 "None",
6224 "Internal",
6225 "External",
6226 "Both",
6227 };
6228 static const char * const dwxgmac_safety_feature_desc[] = {
6229 "No",
6230 "All Safety Features with ECC and Parity",
6231 "All Safety Features without ECC or Parity",
6232 "All Safety Features with Parity Only",
6233 "ECC Only",
6234 "UNDEFINED",
6235 "UNDEFINED",
6236 "UNDEFINED",
6237 };
6238 struct net_device *dev = seq->private;
6239 struct stmmac_priv *priv = netdev_priv(dev);
6240
6241 if (!priv->hw_cap_support) {
6242 seq_printf(seq, "DMA HW features not supported\n");
6243 return 0;
6244 }
6245
6246 seq_printf(seq, "==============================\n");
6247 seq_printf(seq, "\tDMA HW features\n");
6248 seq_printf(seq, "==============================\n");
6249
6250 seq_printf(seq, "\t10/100 Mbps: %s\n",
6251 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6252 seq_printf(seq, "\t1000 Mbps: %s\n",
6253 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6254 seq_printf(seq, "\tHalf duplex: %s\n",
6255 (priv->dma_cap.half_duplex) ? "Y" : "N");
6256 if (priv->plat->has_xgmac) {
6257 seq_printf(seq,
6258 "\tNumber of Additional MAC address registers: %d\n",
6259 priv->dma_cap.multi_addr);
6260 } else {
6261 seq_printf(seq, "\tHash Filter: %s\n",
6262 (priv->dma_cap.hash_filter) ? "Y" : "N");
6263 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6264 (priv->dma_cap.multi_addr) ? "Y" : "N");
6265 }
6266 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6267 (priv->dma_cap.pcs) ? "Y" : "N");
6268 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6269 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6270 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6271 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6272 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6273 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6274 seq_printf(seq, "\tRMON module: %s\n",
6275 (priv->dma_cap.rmon) ? "Y" : "N");
6276 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6277 (priv->dma_cap.time_stamp) ? "Y" : "N");
6278 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6279 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6280 if (priv->plat->has_xgmac)
6281 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6282 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6283 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6284 (priv->dma_cap.eee) ? "Y" : "N");
6285 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6286 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6287 (priv->dma_cap.tx_coe) ? "Y" : "N");
6288 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6289 priv->plat->has_xgmac) {
6290 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6291 (priv->dma_cap.rx_coe) ? "Y" : "N");
6292 } else {
6293 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6294 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6295 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6296 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6297 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6298 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6299 }
6300 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6301 priv->dma_cap.number_rx_channel);
6302 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6303 priv->dma_cap.number_tx_channel);
6304 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6305 priv->dma_cap.number_rx_queues);
6306 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6307 priv->dma_cap.number_tx_queues);
6308 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6309 (priv->dma_cap.enh_desc) ? "Y" : "N");
6310 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6311 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6312 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6313 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6314 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6315 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6316 priv->dma_cap.pps_out_num);
6317 seq_printf(seq, "\tSafety Features: %s\n",
6318 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6319 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6320 priv->dma_cap.frpsel ? "Y" : "N");
6321 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6322 priv->dma_cap.host_dma_width);
6323 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6324 priv->dma_cap.rssen ? "Y" : "N");
6325 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6326 priv->dma_cap.vlhash ? "Y" : "N");
6327 seq_printf(seq, "\tSplit Header: %s\n",
6328 priv->dma_cap.sphen ? "Y" : "N");
6329 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6330 priv->dma_cap.vlins ? "Y" : "N");
6331 seq_printf(seq, "\tDouble VLAN: %s\n",
6332 priv->dma_cap.dvlan ? "Y" : "N");
6333 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6334 priv->dma_cap.l3l4fnum);
6335 seq_printf(seq, "\tARP Offloading: %s\n",
6336 priv->dma_cap.arpoffsel ? "Y" : "N");
6337 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6338 priv->dma_cap.estsel ? "Y" : "N");
6339 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6340 priv->dma_cap.fpesel ? "Y" : "N");
6341 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6342 priv->dma_cap.tbssel ? "Y" : "N");
6343 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6344 priv->dma_cap.tbs_ch_num);
6345 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6346 priv->dma_cap.sgfsel ? "Y" : "N");
6347 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6348 BIT(priv->dma_cap.ttsfd) >> 1);
6349 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6350 priv->dma_cap.numtc);
6351 seq_printf(seq, "\tDCB Feature: %s\n",
6352 priv->dma_cap.dcben ? "Y" : "N");
6353 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6354 priv->dma_cap.advthword ? "Y" : "N");
6355 seq_printf(seq, "\tPTP Offload: %s\n",
6356 priv->dma_cap.ptoen ? "Y" : "N");
6357 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6358 priv->dma_cap.osten ? "Y" : "N");
6359 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6360 priv->dma_cap.pfcen ? "Y" : "N");
6361 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6362 BIT(priv->dma_cap.frpes) << 6);
6363 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6364 BIT(priv->dma_cap.frpbs) << 6);
6365 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6366 priv->dma_cap.frppipe_num);
6367 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6368 priv->dma_cap.nrvf_num ?
6369 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6370 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6371 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6372 seq_printf(seq, "\tDepth of GCL: %lu\n",
6373 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6374 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6375 priv->dma_cap.cbtisel ? "Y" : "N");
6376 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6377 priv->dma_cap.aux_snapshot_n);
6378 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6379 priv->dma_cap.pou_ost_en ? "Y" : "N");
6380 seq_printf(seq, "\tEnhanced DMA: %s\n",
6381 priv->dma_cap.edma ? "Y" : "N");
6382 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6383 priv->dma_cap.ediffc ? "Y" : "N");
6384 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6385 priv->dma_cap.vxn ? "Y" : "N");
6386 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6387 priv->dma_cap.dbgmem ? "Y" : "N");
6388 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6389 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6390 return 0;
6391 }
6392 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6393
6394 /* Use network device events to rename debugfs file entries.
6395 */
6396 static int stmmac_device_event(struct notifier_block *unused,
6397 unsigned long event, void *ptr)
6398 {
6399 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6400 struct stmmac_priv *priv = netdev_priv(dev);
6401
6402 if (dev->netdev_ops != &stmmac_netdev_ops)
6403 goto done;
6404
6405 switch (event) {
6406 case NETDEV_CHANGENAME:
6407 if (priv->dbgfs_dir)
6408 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6409 priv->dbgfs_dir,
6410 stmmac_fs_dir,
6411 dev->name);
6412 break;
6413 }
6414 done:
6415 return NOTIFY_DONE;
6416 }
6417
6418 static struct notifier_block stmmac_notifier = {
6419 .notifier_call = stmmac_device_event,
6420 };
6421
6422 static void stmmac_init_fs(struct net_device *dev)
6423 {
6424 struct stmmac_priv *priv = netdev_priv(dev);
6425
6426 rtnl_lock();
6427
6428 /* Create per netdev entries */
6429 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6430
6431 /* Entry to report DMA RX/TX rings */
6432 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6433 &stmmac_rings_status_fops);
6434
6435 /* Entry to report the DMA HW features */
6436 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6437 &stmmac_dma_cap_fops);
6438
6439 rtnl_unlock();
6440 }
6441
6442 static void stmmac_exit_fs(struct net_device *dev)
6443 {
6444 struct stmmac_priv *priv = netdev_priv(dev);
6445
6446 debugfs_remove_recursive(priv->dbgfs_dir);
6447 }
6448 #endif /* CONFIG_DEBUG_FS */
6449
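/* Compute the CRC-32 (polynomial 0xedb88320, LSB first) over the 12-bit
 * VLAN ID; the caller uses the top nibble of bitrev32(~crc) to select a bit
 * in the 16-bit VLAN hash filter.
 */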
6450 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6451 {
6452 unsigned char *data = (unsigned char *)&vid_le;
6453 unsigned char data_byte = 0;
6454 u32 crc = ~0x0;
6455 u32 temp = 0;
6456 int i, bits;
6457
6458 bits = get_bitmask_order(VLAN_VID_MASK);
6459 for (i = 0; i < bits; i++) {
6460 if ((i % 8) == 0)
6461 data_byte = data[i / 8];
6462
6463 temp = ((crc & 1) ^ data_byte) & 1;
6464 crc >>= 1;
6465 data_byte >>= 1;
6466
6467 if (temp)
6468 crc ^= 0xedb88320;
6469 }
6470
6471 return crc;
6472 }
6473
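/* Rebuild the 16-bit VLAN hash from the active VLAN bitmap. Without VLAN
 * hash support in hardware, fall back to perfect-match filtering on the
 * last VID (at most one real VID besides VID 0 can then be filtered).
 */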
6474 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6475 {
6476 u32 crc, hash = 0;
6477 __le16 pmatch = 0;
6478 int count = 0;
6479 u16 vid = 0;
6480
6481 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6482 __le16 vid_le = cpu_to_le16(vid);
6483 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6484 hash |= (1 << crc);
6485 count++;
6486 }
6487
6488 if (!priv->dma_cap.vlhash) {
6489 if (count > 2) /* VID = 0 always passes filter */
6490 return -EOPNOTSUPP;
6491
6492 pmatch = cpu_to_le16(vid);
6493 hash = 0;
6494 }
6495
6496 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6497 }
6498
6499 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6500 {
6501 struct stmmac_priv *priv = netdev_priv(ndev);
6502 bool is_double = false;
6503 int ret;
6504
6505 ret = pm_runtime_resume_and_get(priv->device);
6506 if (ret < 0)
6507 return ret;
6508
6509 if (be16_to_cpu(proto) == ETH_P_8021AD)
6510 is_double = true;
6511
6512 set_bit(vid, priv->active_vlans);
6513 ret = stmmac_vlan_update(priv, is_double);
6514 if (ret) {
6515 clear_bit(vid, priv->active_vlans);
6516 goto err_pm_put;
6517 }
6518
6519 if (priv->hw->num_vlan) {
6520 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6521 if (ret)
6522 goto err_pm_put;
6523 }
6524 err_pm_put:
6525 pm_runtime_put(priv->device);
6526
6527 return ret;
6528 }
6529
6530 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6531 {
6532 struct stmmac_priv *priv = netdev_priv(ndev);
6533 bool is_double = false;
6534 int ret;
6535
6536 ret = pm_runtime_resume_and_get(priv->device);
6537 if (ret < 0)
6538 return ret;
6539
6540 if (be16_to_cpu(proto) == ETH_P_8021AD)
6541 is_double = true;
6542
6543 clear_bit(vid, priv->active_vlans);
6544
6545 if (priv->hw->num_vlan) {
6546 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6547 if (ret)
6548 goto del_vlan_error;
6549 }
6550
6551 ret = stmmac_vlan_update(priv, is_double);
6552
6553 del_vlan_error:
6554 pm_runtime_put(priv->device);
6555
6556 return ret;
6557 }
6558
6559 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6560 {
6561 struct stmmac_priv *priv = netdev_priv(dev);
6562
6563 switch (bpf->command) {
6564 case XDP_SETUP_PROG:
6565 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6566 case XDP_SETUP_XSK_POOL:
6567 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6568 bpf->xsk.queue_id);
6569 default:
6570 return -EOPNOTSUPP;
6571 }
6572 }
6573
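/* ndo_xdp_xmit: transmit a batch of XDP frames on the TX queue mapped to
 * the current CPU, sharing the queue lock with the regular xmit path.
 */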
6574 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6575 struct xdp_frame **frames, u32 flags)
6576 {
6577 struct stmmac_priv *priv = netdev_priv(dev);
6578 int cpu = smp_processor_id();
6579 struct netdev_queue *nq;
6580 int i, nxmit = 0;
6581 int queue;
6582
6583 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6584 return -ENETDOWN;
6585
6586 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6587 return -EINVAL;
6588
6589 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6590 nq = netdev_get_tx_queue(priv->dev, queue);
6591
6592 __netif_tx_lock(nq, cpu);
6593 /* Avoids TX time-out as we are sharing with slow path */
6594 txq_trans_cond_update(nq);
6595
6596 for (i = 0; i < num_frames; i++) {
6597 int res;
6598
6599 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6600 if (res == STMMAC_XDP_CONSUMED)
6601 break;
6602
6603 nxmit++;
6604 }
6605
6606 if (flags & XDP_XMIT_FLUSH) {
6607 stmmac_flush_tx_descriptors(priv, queue);
6608 stmmac_tx_timer_arm(priv, queue);
6609 }
6610
6611 __netif_tx_unlock(nq);
6612
6613 return nxmit;
6614 }
6615
6616 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6617 {
6618 struct stmmac_channel *ch = &priv->channel[queue];
6619 unsigned long flags;
6620
6621 spin_lock_irqsave(&ch->lock, flags);
6622 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6623 spin_unlock_irqrestore(&ch->lock, flags);
6624
6625 stmmac_stop_rx_dma(priv, queue);
6626 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6627 }
6628
6629 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6630 {
6631 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6632 struct stmmac_channel *ch = &priv->channel[queue];
6633 unsigned long flags;
6634 u32 buf_size;
6635 int ret;
6636
6637 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6638 if (ret) {
6639 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6640 return;
6641 }
6642
6643 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6644 if (ret) {
6645 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6646 netdev_err(priv->dev, "Failed to init RX desc.\n");
6647 return;
6648 }
6649
6650 stmmac_reset_rx_queue(priv, queue);
6651 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6652
6653 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6654 rx_q->dma_rx_phy, rx_q->queue_index);
6655
6656 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6657 sizeof(struct dma_desc));
6658 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6659 rx_q->rx_tail_addr, rx_q->queue_index);
6660
6661 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6662 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6663 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6664 buf_size,
6665 rx_q->queue_index);
6666 } else {
6667 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6668 priv->dma_conf.dma_buf_sz,
6669 rx_q->queue_index);
6670 }
6671
6672 stmmac_start_rx_dma(priv, queue);
6673
6674 spin_lock_irqsave(&ch->lock, flags);
6675 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6676 spin_unlock_irqrestore(&ch->lock, flags);
6677 }
6678
6679 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6680 {
6681 struct stmmac_channel *ch = &priv->channel[queue];
6682 unsigned long flags;
6683
6684 spin_lock_irqsave(&ch->lock, flags);
6685 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6686 spin_unlock_irqrestore(&ch->lock, flags);
6687
6688 stmmac_stop_tx_dma(priv, queue);
6689 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6690 }
6691
6692 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6693 {
6694 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6695 struct stmmac_channel *ch = &priv->channel[queue];
6696 unsigned long flags;
6697 int ret;
6698
6699 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6700 if (ret) {
6701 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6702 return;
6703 }
6704
6705 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6706 if (ret) {
6707 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6708 netdev_err(priv->dev, "Failed to init TX desc.\n");
6709 return;
6710 }
6711
6712 stmmac_reset_tx_queue(priv, queue);
6713 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6714
6715 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6716 tx_q->dma_tx_phy, tx_q->queue_index);
6717
6718 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6719 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6720
6721 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6722 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6723 tx_q->tx_tail_addr, tx_q->queue_index);
6724
6725 stmmac_start_tx_dma(priv, queue);
6726
6727 spin_lock_irqsave(&ch->lock, flags);
6728 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6729 spin_unlock_irqrestore(&ch->lock, flags);
6730 }
6731
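/* Tear down the data path before changing the XDP/XSK setup: stop NAPI,
 * TX timers, IRQs and DMA, then release all descriptor resources.
 */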
6732 void stmmac_xdp_release(struct net_device *dev)
6733 {
6734 struct stmmac_priv *priv = netdev_priv(dev);
6735 u32 chan;
6736
6737 /* Ensure tx function is not running */
6738 netif_tx_disable(dev);
6739
6740 /* Disable NAPI process */
6741 stmmac_disable_all_queues(priv);
6742
6743 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6744 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6745
6746 /* Free the IRQ lines */
6747 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6748
6749 /* Stop TX/RX DMA channels */
6750 stmmac_stop_all_dma(priv);
6751
6752 /* Release and free the Rx/Tx resources */
6753 free_dma_desc_resources(priv, &priv->dma_conf);
6754
6755 /* Disable the MAC Rx/Tx */
6756 stmmac_mac_set(priv, priv->ioaddr, false);
6757
6758 /* set trans_start so we don't get spurious
6759 * watchdogs during reset
6760 */
6761 netif_trans_update(dev);
6762 netif_carrier_off(dev);
6763 }
6764
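/* Bring the data path back up with the current XDP/XSK configuration:
 * reallocate rings, reprogram the DMA channels and re-enable NAPI and IRQs.
 */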
6765 int stmmac_xdp_open(struct net_device *dev)
6766 {
6767 struct stmmac_priv *priv = netdev_priv(dev);
6768 u32 rx_cnt = priv->plat->rx_queues_to_use;
6769 u32 tx_cnt = priv->plat->tx_queues_to_use;
6770 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6771 struct stmmac_rx_queue *rx_q;
6772 struct stmmac_tx_queue *tx_q;
6773 u32 buf_size;
6774 bool sph_en;
6775 u32 chan;
6776 int ret;
6777
6778 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6779 if (ret < 0) {
6780 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6781 __func__);
6782 goto dma_desc_error;
6783 }
6784
6785 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6786 if (ret < 0) {
6787 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6788 __func__);
6789 goto init_error;
6790 }
6791
6792 stmmac_reset_queues_param(priv);
6793
6794 /* DMA CSR Channel configuration */
6795 for (chan = 0; chan < dma_csr_ch; chan++) {
6796 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6797 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6798 }
6799
6800 /* Adjust Split header */
6801 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6802
6803 /* DMA RX Channel Configuration */
6804 for (chan = 0; chan < rx_cnt; chan++) {
6805 rx_q = &priv->dma_conf.rx_queue[chan];
6806
6807 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6808 rx_q->dma_rx_phy, chan);
6809
6810 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6811 (rx_q->buf_alloc_num *
6812 sizeof(struct dma_desc));
6813 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6814 rx_q->rx_tail_addr, chan);
6815
6816 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6817 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6818 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6819 buf_size,
6820 rx_q->queue_index);
6821 } else {
6822 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6823 priv->dma_conf.dma_buf_sz,
6824 rx_q->queue_index);
6825 }
6826
6827 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6828 }
6829
6830 /* DMA TX Channel Configuration */
6831 for (chan = 0; chan < tx_cnt; chan++) {
6832 tx_q = &priv->dma_conf.tx_queue[chan];
6833
6834 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6835 tx_q->dma_tx_phy, chan);
6836
6837 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6838 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6839 tx_q->tx_tail_addr, chan);
6840
6841 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6842 tx_q->txtimer.function = stmmac_tx_timer;
6843 }
6844
6845 /* Enable the MAC Rx/Tx */
6846 stmmac_mac_set(priv, priv->ioaddr, true);
6847
6848 /* Start Rx & Tx DMA Channels */
6849 stmmac_start_all_dma(priv);
6850
6851 ret = stmmac_request_irq(dev);
6852 if (ret)
6853 goto irq_error;
6854
6855 /* Enable NAPI process */
6856 stmmac_enable_all_queues(priv);
6857 netif_carrier_on(dev);
6858 netif_tx_start_all_queues(dev);
6859 stmmac_enable_all_dma_irq(priv);
6860
6861 return 0;
6862
6863 irq_error:
6864 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6865 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6866
6867 stmmac_hw_teardown(dev);
6868 init_error:
6869 free_dma_desc_resources(priv, &priv->dma_conf);
6870 dma_desc_error:
6871 return ret;
6872 }
6873
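/* ndo_xsk_wakeup: the EQoS core has no per-channel software interrupt, so
 * userspace wakeups are serviced by scheduling the rxtx NAPI directly.
 */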
6874 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6875 {
6876 struct stmmac_priv *priv = netdev_priv(dev);
6877 struct stmmac_rx_queue *rx_q;
6878 struct stmmac_tx_queue *tx_q;
6879 struct stmmac_channel *ch;
6880
6881 if (test_bit(STMMAC_DOWN, &priv->state) ||
6882 !netif_carrier_ok(priv->dev))
6883 return -ENETDOWN;
6884
6885 if (!stmmac_xdp_is_enabled(priv))
6886 return -EINVAL;
6887
6888 if (queue >= priv->plat->rx_queues_to_use ||
6889 queue >= priv->plat->tx_queues_to_use)
6890 return -EINVAL;
6891
6892 rx_q = &priv->dma_conf.rx_queue[queue];
6893 tx_q = &priv->dma_conf.tx_queue[queue];
6894 ch = &priv->channel[queue];
6895
6896 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6897 return -EINVAL;
6898
6899 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6900 /* EQoS does not have per-DMA channel SW interrupt,
6901 * so we schedule RX Napi straight-away.
6902 */
6903 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6904 __napi_schedule(&ch->rxtx_napi);
6905 }
6906
6907 return 0;
6908 }
6909
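/* ndo_get_stats64: fold the per-queue u64_stats counters and the extended
 * error statistics into the standard rtnl_link_stats64 layout.
 */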
6910 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6911 {
6912 struct stmmac_priv *priv = netdev_priv(dev);
6913 u32 tx_cnt = priv->plat->tx_queues_to_use;
6914 u32 rx_cnt = priv->plat->rx_queues_to_use;
6915 unsigned int start;
6916 int q;
6917
6918 for (q = 0; q < tx_cnt; q++) {
6919 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6920 u64 tx_packets;
6921 u64 tx_bytes;
6922
6923 do {
6924 start = u64_stats_fetch_begin(&txq_stats->syncp);
6925 tx_packets = txq_stats->tx_packets;
6926 tx_bytes = txq_stats->tx_bytes;
6927 } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6928
6929 stats->tx_packets += tx_packets;
6930 stats->tx_bytes += tx_bytes;
6931 }
6932
6933 for (q = 0; q < rx_cnt; q++) {
6934 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6935 u64 rx_packets;
6936 u64 rx_bytes;
6937
6938 do {
6939 start = u64_stats_fetch_begin(&rxq_stats->syncp);
6940 rx_packets = rxq_stats->rx_packets;
6941 rx_bytes = rxq_stats->rx_bytes;
6942 } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6943
6944 stats->rx_packets += rx_packets;
6945 stats->rx_bytes += rx_bytes;
6946 }
6947
6948 stats->rx_dropped = priv->xstats.rx_dropped;
6949 stats->rx_errors = priv->xstats.rx_errors;
6950 stats->tx_dropped = priv->xstats.tx_dropped;
6951 stats->tx_errors = priv->xstats.tx_errors;
6952 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6953 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6954 stats->rx_length_errors = priv->xstats.rx_length;
6955 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6956 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6957 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6958 }
6959
6960 static const struct net_device_ops stmmac_netdev_ops = {
6961 .ndo_open = stmmac_open,
6962 .ndo_start_xmit = stmmac_xmit,
6963 .ndo_stop = stmmac_release,
6964 .ndo_change_mtu = stmmac_change_mtu,
6965 .ndo_fix_features = stmmac_fix_features,
6966 .ndo_set_features = stmmac_set_features,
6967 .ndo_set_rx_mode = stmmac_set_rx_mode,
6968 .ndo_tx_timeout = stmmac_tx_timeout,
6969 .ndo_eth_ioctl = stmmac_ioctl,
6970 .ndo_get_stats64 = stmmac_get_stats64,
6971 .ndo_setup_tc = stmmac_setup_tc,
6972 .ndo_select_queue = stmmac_select_queue,
6973 .ndo_set_mac_address = stmmac_set_mac_address,
6974 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6975 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6976 .ndo_bpf = stmmac_bpf,
6977 .ndo_xdp_xmit = stmmac_xdp_xmit,
6978 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6979 };
6980
6981 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6982 {
6983 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6984 return;
6985 if (test_bit(STMMAC_DOWN, &priv->state))
6986 return;
6987
6988 netdev_err(priv->dev, "Reset adapter.\n");
6989
6990 rtnl_lock();
6991 netif_trans_update(priv->dev);
6992 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6993 usleep_range(1000, 2000);
6994
6995 set_bit(STMMAC_DOWN, &priv->state);
6996 dev_close(priv->dev);
6997 dev_open(priv->dev, NULL);
6998 clear_bit(STMMAC_DOWN, &priv->state);
6999 clear_bit(STMMAC_RESETING, &priv->state);
7000 rtnl_unlock();
7001 }
7002
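/**
 * stmmac_service_task - deferred work for out-of-interrupt handling
 * @work: work_struct embedded in the driver private structure
 * Description: runs the reset subtask and then clears the
 * STMMAC_SERVICE_SCHED bit so the service task can be scheduled again.
 */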
7003 static void stmmac_service_task(struct work_struct *work)
7004 {
7005 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7006 service_task);
7007
7008 stmmac_reset_subtask(priv);
7009 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7010 }
7011
7012 /**
7013 * stmmac_hw_init - Init the MAC device
7014 * @priv: driver private structure
7015 * Description: this function is to configure the MAC device according to
7016 * some platform parameters or the HW capability register. It prepares the
7017 * driver to use either ring or chain modes and to setup either enhanced or
7018 * normal descriptors.
7019 */
7020 static int stmmac_hw_init(struct stmmac_priv *priv)
7021 {
7022 int ret;
7023
7024 /* dwmac-sun8i only works in chain mode */
7025 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7026 chain_mode = 1;
7027 priv->chain_mode = chain_mode;
7028
7029 /* Initialize HW Interface */
7030 ret = stmmac_hwif_init(priv);
7031 if (ret)
7032 return ret;
7033
7034 /* Get the HW capability (available on GMAC cores newer than 3.50a) */
7035 priv->hw_cap_support = stmmac_get_hw_features(priv);
7036 if (priv->hw_cap_support) {
7037 dev_info(priv->device, "DMA HW capability register supported\n");
7038
7039 /* Some gmac/dma configuration fields (e.g. enh_desc, tx_coe)
7040 * that are passed through the platform can be overridden
7041 * with the values from the HW capability register
7042 * (if supported).
7043 */
7044 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7045 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7046 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7047 priv->hw->pmt = priv->plat->pmt;
7048 if (priv->dma_cap.hash_tb_sz) {
7049 priv->hw->multicast_filter_bins =
7050 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7051 priv->hw->mcast_bits_log2 =
7052 ilog2(priv->hw->multicast_filter_bins);
7053 }
7054
7055 /* TXCOE doesn't work in thresh DMA mode */
7056 if (priv->plat->force_thresh_dma_mode)
7057 priv->plat->tx_coe = 0;
7058 else
7059 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7060
7061 /* In case of GMAC4, rx_coe comes from the HW capability register. */
7062 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7063
7064 if (priv->dma_cap.rx_coe_type2)
7065 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7066 else if (priv->dma_cap.rx_coe_type1)
7067 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7068
7069 } else {
7070 dev_info(priv->device, "No HW DMA feature register supported\n");
7071 }
7072
7073 if (priv->plat->rx_coe) {
7074 priv->hw->rx_csum = priv->plat->rx_coe;
7075 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7076 if (priv->synopsys_id < DWMAC_CORE_4_00)
7077 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7078 }
7079 if (priv->plat->tx_coe)
7080 dev_info(priv->device, "TX Checksum insertion supported\n");
7081
7082 if (priv->plat->pmt) {
7083 dev_info(priv->device, "Wake-Up On LAN supported\n");
7084 device_set_wakeup_capable(priv->device, 1);
7085 }
7086
7087 if (priv->dma_cap.tsoen)
7088 dev_info(priv->device, "TSO supported\n");
7089
7090 priv->hw->vlan_fail_q_en =
7091 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7092 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7093
7094 /* Run HW quirks, if any */
7095 if (priv->hwif_quirks) {
7096 ret = priv->hwif_quirks(priv);
7097 if (ret)
7098 return ret;
7099 }
7100
7101 /* Rx Watchdog is available in cores newer than 3.40.
7102 * In some cases, for example on buggy HW, this feature
7103 * has to be disabled; this can be done by passing the
7104 * riwt_off field from the platform.
7105 */
7106 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7107 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7108 priv->use_riwt = 1;
7109 dev_info(priv->device,
7110 "Enable RX Mitigation via HW Watchdog Timer\n");
7111 }
7112
7113 return 0;
7114 }
7115
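/**
 * stmmac_napi_add - register the NAPI contexts for each channel
 * @dev: net device pointer
 * Description: for every DMA channel, initialize the channel data and add
 * the RX, TX and combined RX/TX NAPI instances the channel needs,
 * depending on how many RX and TX queues are in use.
 */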
7116 static void stmmac_napi_add(struct net_device *dev)
7117 {
7118 struct stmmac_priv *priv = netdev_priv(dev);
7119 u32 queue, maxq;
7120
7121 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7122
7123 for (queue = 0; queue < maxq; queue++) {
7124 struct stmmac_channel *ch = &priv->channel[queue];
7125
7126 ch->priv_data = priv;
7127 ch->index = queue;
7128 spin_lock_init(&ch->lock);
7129
7130 if (queue < priv->plat->rx_queues_to_use) {
7131 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7132 }
7133 if (queue < priv->plat->tx_queues_to_use) {
7134 netif_napi_add_tx(dev, &ch->tx_napi,
7135 stmmac_napi_poll_tx);
7136 }
7137 if (queue < priv->plat->rx_queues_to_use &&
7138 queue < priv->plat->tx_queues_to_use) {
7139 netif_napi_add(dev, &ch->rxtx_napi,
7140 stmmac_napi_poll_rxtx);
7141 }
7142 }
7143 }
7144
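/**
 * stmmac_napi_del - unregister the NAPI contexts of each channel
 * @dev: net device pointer
 * Description: mirror of stmmac_napi_add(); deletes the RX, TX and
 * combined RX/TX NAPI instances previously added for each channel.
 */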
7145 static void stmmac_napi_del(struct net_device *dev)
7146 {
7147 struct stmmac_priv *priv = netdev_priv(dev);
7148 u32 queue, maxq;
7149
7150 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7151
7152 for (queue = 0; queue < maxq; queue++) {
7153 struct stmmac_channel *ch = &priv->channel[queue];
7154
7155 if (queue < priv->plat->rx_queues_to_use)
7156 netif_napi_del(&ch->rx_napi);
7157 if (queue < priv->plat->tx_queues_to_use)
7158 netif_napi_del(&ch->tx_napi);
7159 if (queue < priv->plat->rx_queues_to_use &&
7160 queue < priv->plat->tx_queues_to_use) {
7161 netif_napi_del(&ch->rxtx_napi);
7162 }
7163 }
7164 }
7165
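/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: net device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: tears down the NAPI contexts (and the interface, if it is
 * running), updates the queue counts and the default RSS table, then
 * rebuilds NAPI and re-opens the interface if needed.
 */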
7166 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7167 {
7168 struct stmmac_priv *priv = netdev_priv(dev);
7169 int ret = 0, i;
7170
7171 if (netif_running(dev))
7172 stmmac_release(dev);
7173
7174 stmmac_napi_del(dev);
7175
7176 priv->plat->rx_queues_to_use = rx_cnt;
7177 priv->plat->tx_queues_to_use = tx_cnt;
7178 if (!netif_is_rxfh_configured(dev))
7179 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7180 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7181 rx_cnt);
7182
7183 stmmac_set_half_duplex(priv);
7184 stmmac_napi_add(dev);
7185
7186 if (netif_running(dev))
7187 ret = stmmac_open(dev);
7188
7189 return ret;
7190 }
7191
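/**
 * stmmac_reinit_ringparam - change the DMA descriptor ring sizes
 * @dev: net device pointer
 * @rx_size: new RX descriptor ring size
 * @tx_size: new TX descriptor ring size
 * Description: closes the interface if it is running, updates the DMA
 * ring sizes and re-opens the interface so the new sizes take effect.
 */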
7192 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7193 {
7194 struct stmmac_priv *priv = netdev_priv(dev);
7195 int ret = 0;
7196
7197 if (netif_running(dev))
7198 stmmac_release(dev);
7199
7200 priv->dma_conf.dma_rx_size = rx_size;
7201 priv->dma_conf.dma_tx_size = tx_size;
7202
7203 if (netif_running(dev))
7204 ret = stmmac_open(dev);
7205
7206 return ret;
7207 }
7208
7209 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
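/**
 * stmmac_fpe_lp_task - FPE link-partner handshake task
 * @work: work_struct embedded in the driver private structure
 * Description: polls the local and link-partner FPE states for a limited
 * number of retries; sends verify mPackets while the handshake is in
 * progress and configures FPE once both stations report ENTERING_ON.
 */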
7210 static void stmmac_fpe_lp_task(struct work_struct *work)
7211 {
7212 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7213 fpe_task);
7214 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7215 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7216 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7217 bool *hs_enable = &fpe_cfg->hs_enable;
7218 bool *enable = &fpe_cfg->enable;
7219 int retries = 20;
7220
7221 while (retries-- > 0) {
7222 /* Bail out immediately if FPE handshake is OFF */
7223 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7224 break;
7225
7226 if (*lo_state == FPE_STATE_ENTERING_ON &&
7227 *lp_state == FPE_STATE_ENTERING_ON) {
7228 stmmac_fpe_configure(priv, priv->ioaddr,
7229 priv->plat->tx_queues_to_use,
7230 priv->plat->rx_queues_to_use,
7231 *enable);
7232
7233 netdev_info(priv->dev, "configured FPE\n");
7234
7235 *lo_state = FPE_STATE_ON;
7236 *lp_state = FPE_STATE_ON;
7237 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7238 break;
7239 }
7240
7241 if ((*lo_state == FPE_STATE_CAPABLE ||
7242 *lo_state == FPE_STATE_ENTERING_ON) &&
7243 *lp_state != FPE_STATE_ON) {
7244 netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7245 *lo_state, *lp_state);
7246 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7247 MPACKET_VERIFY);
7248 }
7249 /* Sleep then retry */
7250 msleep(500);
7251 }
7252
7253 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7254 }
7255
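/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 * Description: when enabling, sends a verify mPacket to the link partner;
 * when disabling, resets both FPE states to OFF. The hs_enable flag is
 * only updated when the requested state actually changes.
 */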
7256 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7257 {
7258 if (priv->plat->fpe_cfg->hs_enable != enable) {
7259 if (enable) {
7260 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7261 MPACKET_VERIFY);
7262 } else {
7263 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7264 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7265 }
7266
7267 priv->plat->fpe_cfg->hs_enable = enable;
7268 }
7269 }
7270
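/**
 * stmmac_xdp_rx_timestamp - XDP RX hardware timestamp hook
 * @_ctx: XDP metadata context (wraps the stmmac_xdp_buff)
 * @timestamp: where to store the timestamp on success
 * Description: xmo_rx_timestamp callback; returns the RX hardware
 * timestamp of the current descriptor, adjusted for the CDC error, or
 * -ENODATA when RX timestamping is disabled or no timestamp is available.
 */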
7271 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7272 {
7273 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7274 struct dma_desc *desc_contains_ts = ctx->desc;
7275 struct stmmac_priv *priv = ctx->priv;
7276 struct dma_desc *ndesc = ctx->ndesc;
7277 struct dma_desc *desc = ctx->desc;
7278 u64 ns = 0;
7279
7280 if (!priv->hwts_rx_en)
7281 return -ENODATA;
7282
7283 /* For GMAC4, the valid timestamp is held in the context (next) descriptor. */
7284 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7285 desc_contains_ts = ndesc;
7286
7287 /* Check if timestamp is available */
7288 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7289 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7290 ns -= priv->plat->cdc_error_adj;
7291 *timestamp = ns_to_ktime(ns);
7292 return 0;
7293 }
7294
7295 return -ENODATA;
7296 }
7297
7298 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7299 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7300 };
7301
7302 /**
7303 * stmmac_dvr_probe
7304 * @device: device pointer
7305 * @plat_dat: platform data pointer
7306 * @res: stmmac resource pointer
7307 * Description: this is the main probe function, used to allocate the
7308 * net_device via alloc_etherdev and set up the private structure.
7309 * Return:
7310 * returns 0 on success, otherwise errno.
7311 */
7312 int stmmac_dvr_probe(struct device *device,
7313 struct plat_stmmacenet_data *plat_dat,
7314 struct stmmac_resources *res)
7315 {
7316 struct net_device *ndev = NULL;
7317 struct stmmac_priv *priv;
7318 u32 rxq;
7319 int i, ret = 0;
7320
7321 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7322 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7323 if (!ndev)
7324 return -ENOMEM;
7325
7326 SET_NETDEV_DEV(ndev, device);
7327
7328 priv = netdev_priv(ndev);
7329 priv->device = device;
7330 priv->dev = ndev;
7331
7332 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7333 u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7334 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7335 u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7336
7337 stmmac_set_ethtool_ops(ndev);
7338 priv->pause = pause;
7339 priv->plat = plat_dat;
7340 priv->ioaddr = res->addr;
7341 priv->dev->base_addr = (unsigned long)res->addr;
7342 priv->plat->dma_cfg->multi_msi_en =
7343 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7344
7345 priv->dev->irq = res->irq;
7346 priv->wol_irq = res->wol_irq;
7347 priv->lpi_irq = res->lpi_irq;
7348 priv->sfty_ce_irq = res->sfty_ce_irq;
7349 priv->sfty_ue_irq = res->sfty_ue_irq;
7350 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7351 priv->rx_irq[i] = res->rx_irq[i];
7352 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7353 priv->tx_irq[i] = res->tx_irq[i];
7354
7355 if (!is_zero_ether_addr(res->mac))
7356 eth_hw_addr_set(priv->dev, res->mac);
7357
7358 dev_set_drvdata(device, priv->dev);
7359
7360 /* Verify driver arguments */
7361 stmmac_verify_args();
7362
7363 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7364 if (!priv->af_xdp_zc_qps)
7365 return -ENOMEM;
7366
7367 /* Allocate workqueue */
7368 priv->wq = create_singlethread_workqueue("stmmac_wq");
7369 if (!priv->wq) {
7370 dev_err(priv->device, "failed to create workqueue\n");
7371 ret = -ENOMEM;
7372 goto error_wq_init;
7373 }
7374
7375 INIT_WORK(&priv->service_task, stmmac_service_task);
7376
7377 /* Initialize Link Partner FPE workqueue */
7378 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7379
7380 /* Override with kernel parameters if supplied. XXX CRS XXX:
7381 * this needs to have multiple instances.
7382 */
7383 if ((phyaddr >= 0) && (phyaddr <= 31))
7384 priv->plat->phy_addr = phyaddr;
7385
7386 if (priv->plat->stmmac_rst) {
7387 ret = reset_control_assert(priv->plat->stmmac_rst);
7388 reset_control_deassert(priv->plat->stmmac_rst);
7389 /* Some reset controllers provide only a reset callback instead
7390 * of an assert + deassert callback pair.
7391 */
7392 if (ret == -ENOTSUPP)
7393 reset_control_reset(priv->plat->stmmac_rst);
7394 }
7395
7396 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7397 if (ret == -ENOTSUPP)
7398 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7399 ERR_PTR(ret));
7400
7401 /* Init MAC and get the capabilities */
7402 ret = stmmac_hw_init(priv);
7403 if (ret)
7404 goto error_hw_init;
7405
7406 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7407 */
7408 if (priv->synopsys_id < DWMAC_CORE_5_20)
7409 priv->plat->dma_cfg->dche = false;
7410
7411 stmmac_check_ether_addr(priv);
7412
7413 ndev->netdev_ops = &stmmac_netdev_ops;
7414
7415 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7416
7417 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7418 NETIF_F_RXCSUM;
7419 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7420 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7421
7422 ret = stmmac_tc_init(priv, priv);
7423 if (!ret) {
7424 ndev->hw_features |= NETIF_F_HW_TC;
7425 }
7426
7427 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7428 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7429 if (priv->plat->has_gmac4)
7430 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7431 priv->tso = true;
7432 dev_info(priv->device, "TSO feature enabled\n");
7433 }
7434
7435 if (priv->dma_cap.sphen &&
7436 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7437 ndev->hw_features |= NETIF_F_GRO;
7438 priv->sph_cap = true;
7439 priv->sph = priv->sph_cap;
7440 dev_info(priv->device, "SPH feature enabled\n");
7441 }
7442
7443 /* Ideally our host DMA address width is the same as for the
7444 * device. However, it may differ and then we have to use our
7445 * host DMA width for allocation and the device DMA width for
7446 * register handling.
7447 */
7448 if (priv->plat->host_dma_width)
7449 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7450 else
7451 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7452
7453 if (priv->dma_cap.host_dma_width) {
7454 ret = dma_set_mask_and_coherent(device,
7455 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7456 if (!ret) {
7457 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7458 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7459
7460 /*
7461 * If more than 32 bits can be addressed, make sure to
7462 * enable enhanced addressing mode.
7463 */
7464 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7465 priv->plat->dma_cfg->eame = true;
7466 } else {
7467 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7468 if (ret) {
7469 dev_err(priv->device, "Failed to set DMA Mask\n");
7470 goto error_hw_init;
7471 }
7472
7473 priv->dma_cap.host_dma_width = 32;
7474 }
7475 }
7476
7477 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7478 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7479 #ifdef STMMAC_VLAN_TAG_USED
7480 /* Both mac100 and gmac support receive VLAN tag detection */
7481 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7482 if (priv->dma_cap.vlhash) {
7483 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7484 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7485 }
7486 if (priv->dma_cap.vlins) {
7487 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7488 if (priv->dma_cap.dvlan)
7489 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7490 }
7491 #endif
7492 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7493
7494 priv->xstats.threshold = tc;
7495
7496 /* Initialize RSS */
7497 rxq = priv->plat->rx_queues_to_use;
7498 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7499 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7500 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7501
7502 if (priv->dma_cap.rssen && priv->plat->rss_en)
7503 ndev->features |= NETIF_F_RXHASH;
7504
7505 ndev->vlan_features |= ndev->features;
7506 /* TSO doesn't work on VLANs yet */
7507 ndev->vlan_features &= ~NETIF_F_TSO;
7508
7509 /* MTU range: 46 - hw-specific max */
7510 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7511 if (priv->plat->has_xgmac)
7512 ndev->max_mtu = XGMAC_JUMBO_LEN;
7513 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7514 ndev->max_mtu = JUMBO_LEN;
7515 else
7516 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7517 /* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
7518 * ndev->max_mtu or smaller than ndev->min_mtu, which is an invalid range.
7519 */
7520 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7521 (priv->plat->maxmtu >= ndev->min_mtu))
7522 ndev->max_mtu = priv->plat->maxmtu;
7523 else if (priv->plat->maxmtu < ndev->min_mtu)
7524 dev_warn(priv->device,
7525 "%s: warning: maxmtu having invalid value (%d)\n",
7526 __func__, priv->plat->maxmtu);
7527
7528 if (flow_ctrl)
7529 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7530
7531 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7532
7533 /* Setup channels NAPI */
7534 stmmac_napi_add(ndev);
7535
7536 mutex_init(&priv->lock);
7537
7538 /* If a specific clk_csr value is passed from the platform,
7539 * this means that the CSR Clock Range selection cannot be
7540 * changed at run-time and is fixed. Otherwise, the driver will
7541 * try to set the MDC clock dynamically according to the
7542 * actual CSR clock input.
7543 */
7544 if (priv->plat->clk_csr >= 0)
7545 priv->clk_csr = priv->plat->clk_csr;
7546 else
7547 stmmac_clk_csr_set(priv);
7548
7549 stmmac_check_pcs_mode(priv);
7550
7551 pm_runtime_get_noresume(device);
7552 pm_runtime_set_active(device);
7553 if (!pm_runtime_enabled(device))
7554 pm_runtime_enable(device);
7555
7556 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7557 priv->hw->pcs != STMMAC_PCS_RTBI) {
7558 /* MDIO bus Registration */
7559 ret = stmmac_mdio_register(ndev);
7560 if (ret < 0) {
7561 dev_err_probe(priv->device, ret,
7562 "%s: MDIO bus (id: %d) registration failed\n",
7563 __func__, priv->plat->bus_id);
7564 goto error_mdio_register;
7565 }
7566 }
7567
7568 if (priv->plat->speed_mode_2500)
7569 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7570
7571 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7572 ret = stmmac_xpcs_setup(priv->mii);
7573 if (ret)
7574 goto error_xpcs_setup;
7575 }
7576
7577 ret = stmmac_phy_setup(priv);
7578 if (ret) {
7579 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7580 goto error_phy_setup;
7581 }
7582
7583 ret = register_netdev(ndev);
7584 if (ret) {
7585 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7586 __func__, ret);
7587 goto error_netdev_register;
7588 }
7589
7590 #ifdef CONFIG_DEBUG_FS
7591 stmmac_init_fs(ndev);
7592 #endif
7593
7594 if (priv->plat->dump_debug_regs)
7595 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7596
7597 /* Let pm_runtime_put() disable the clocks.
7598 * If CONFIG_PM is not enabled, the clocks will stay powered.
7599 */
7600 pm_runtime_put(device);
7601
7602 return ret;
7603
7604 error_netdev_register:
7605 phylink_destroy(priv->phylink);
7606 error_xpcs_setup:
7607 error_phy_setup:
7608 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7609 priv->hw->pcs != STMMAC_PCS_RTBI)
7610 stmmac_mdio_unregister(ndev);
7611 error_mdio_register:
7612 stmmac_napi_del(ndev);
7613 error_hw_init:
7614 destroy_workqueue(priv->wq);
7615 error_wq_init:
7616 bitmap_free(priv->af_xdp_zc_qps);
7617
7618 return ret;
7619 }
7620 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7621
7622 /**
7623 * stmmac_dvr_remove
7624 * @dev: device pointer
7625 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7626 * changes the link status and releases the DMA descriptor rings.
7627 */
7628 void stmmac_dvr_remove(struct device *dev)
7629 {
7630 struct net_device *ndev = dev_get_drvdata(dev);
7631 struct stmmac_priv *priv = netdev_priv(ndev);
7632
7633 netdev_info(priv->dev, "%s: removing driver\n", __func__);
7634
7635 pm_runtime_get_sync(dev);
7636
7637 stmmac_stop_all_dma(priv);
7638 stmmac_mac_set(priv, priv->ioaddr, false);
7639 netif_carrier_off(ndev);
7640 unregister_netdev(ndev);
7641
7642 #ifdef CONFIG_DEBUG_FS
7643 stmmac_exit_fs(ndev);
7644 #endif
7645 phylink_destroy(priv->phylink);
7646 if (priv->plat->stmmac_rst)
7647 reset_control_assert(priv->plat->stmmac_rst);
7648 reset_control_assert(priv->plat->stmmac_ahb_rst);
7649 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7650 priv->hw->pcs != STMMAC_PCS_RTBI)
7651 stmmac_mdio_unregister(ndev);
7652 destroy_workqueue(priv->wq);
7653 mutex_destroy(&priv->lock);
7654 bitmap_free(priv->af_xdp_zc_qps);
7655
7656 pm_runtime_disable(dev);
7657 pm_runtime_put_noidle(dev);
7658 }
7659 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7660
7661 /**
7662 * stmmac_suspend - suspend callback
7663 * @dev: device pointer
7664 * Description: this is the function to suspend the device; it is called
7665 * by the platform driver to stop the network queue, release the resources,
7666 * program the PMT register (for WoL), and clean and release driver resources.
7667 */
7668 int stmmac_suspend(struct device *dev)
7669 {
7670 struct net_device *ndev = dev_get_drvdata(dev);
7671 struct stmmac_priv *priv = netdev_priv(ndev);
7672 u32 chan;
7673
7674 if (!ndev || !netif_running(ndev))
7675 return 0;
7676
7677 mutex_lock(&priv->lock);
7678
7679 netif_device_detach(ndev);
7680
7681 stmmac_disable_all_queues(priv);
7682
7683 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7684 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7685
7686 if (priv->eee_enabled) {
7687 priv->tx_path_in_lpi_mode = false;
7688 del_timer_sync(&priv->eee_ctrl_timer);
7689 }
7690
7691 /* Stop TX/RX DMA */
7692 stmmac_stop_all_dma(priv);
7693
7694 if (priv->plat->serdes_powerdown)
7695 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7696
7697 /* Enable Power down mode by programming the PMT regs */
7698 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7699 stmmac_pmt(priv, priv->hw, priv->wolopts);
7700 priv->irq_wake = 1;
7701 } else {
7702 stmmac_mac_set(priv, priv->ioaddr, false);
7703 pinctrl_pm_select_sleep_state(priv->device);
7704 }
7705
7706 mutex_unlock(&priv->lock);
7707
7708 rtnl_lock();
7709 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7710 phylink_suspend(priv->phylink, true);
7711 } else {
7712 if (device_may_wakeup(priv->device))
7713 phylink_speed_down(priv->phylink, false);
7714 phylink_suspend(priv->phylink, false);
7715 }
7716 rtnl_unlock();
7717
7718 if (priv->dma_cap.fpesel) {
7719 /* Disable FPE */
7720 stmmac_fpe_configure(priv, priv->ioaddr,
7721 priv->plat->tx_queues_to_use,
7722 priv->plat->rx_queues_to_use, false);
7723
7724 stmmac_fpe_handshake(priv, false);
7725 stmmac_fpe_stop_wq(priv);
7726 }
7727
7728 priv->speed = SPEED_UNKNOWN;
7729 return 0;
7730 }
7731 EXPORT_SYMBOL_GPL(stmmac_suspend);
7732
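/**
 * stmmac_reset_rx_queue - reset the software state of an RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 */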
7733 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7734 {
7735 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7736
7737 rx_q->cur_rx = 0;
7738 rx_q->dirty_rx = 0;
7739 }
7740
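/**
 * stmmac_reset_tx_queue - reset the software state of a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: clears the ring indexes and cached MSS and resets the
 * associated netdev TX queue (BQL) state.
 */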
7741 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7742 {
7743 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7744
7745 tx_q->cur_tx = 0;
7746 tx_q->dirty_tx = 0;
7747 tx_q->mss = 0;
7748
7749 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7750 }
7751
7752 /**
7753 * stmmac_reset_queues_param - reset queue parameters
7754 * @priv: device pointer
7755 */
7756 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7757 {
7758 u32 rx_cnt = priv->plat->rx_queues_to_use;
7759 u32 tx_cnt = priv->plat->tx_queues_to_use;
7760 u32 queue;
7761
7762 for (queue = 0; queue < rx_cnt; queue++)
7763 stmmac_reset_rx_queue(priv, queue);
7764
7765 for (queue = 0; queue < tx_cnt; queue++)
7766 stmmac_reset_tx_queue(priv, queue);
7767 }
7768
7769 /**
7770 * stmmac_resume - resume callback
7771 * @dev: device pointer
7772 * Description: on resume, this function is invoked to set up the DMA and CORE
7773 * in a usable state.
7774 */
7775 int stmmac_resume(struct device *dev)
7776 {
7777 struct net_device *ndev = dev_get_drvdata(dev);
7778 struct stmmac_priv *priv = netdev_priv(ndev);
7779 int ret;
7780
7781 if (!netif_running(ndev))
7782 return 0;
7783
7784 /* The Power Down bit in the PMT register is cleared
7785 * automatically as soon as a magic packet or a Wake-up frame
7786 * is received. Still, it's better to manually clear
7787 * this bit because it can cause problems when resuming
7788 * from other devices (e.g. serial console).
7789 */
7790 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7791 mutex_lock(&priv->lock);
7792 stmmac_pmt(priv, priv->hw, 0);
7793 mutex_unlock(&priv->lock);
7794 priv->irq_wake = 0;
7795 } else {
7796 pinctrl_pm_select_default_state(priv->device);
7797 /* reset the phy so that it's ready */
7798 if (priv->mii)
7799 stmmac_mdio_reset(priv->mii);
7800 }
7801
7802 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7803 priv->plat->serdes_powerup) {
7804 ret = priv->plat->serdes_powerup(ndev,
7805 priv->plat->bsp_priv);
7806
7807 if (ret < 0)
7808 return ret;
7809 }
7810
7811 rtnl_lock();
7812 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7813 phylink_resume(priv->phylink);
7814 } else {
7815 phylink_resume(priv->phylink);
7816 if (device_may_wakeup(priv->device))
7817 phylink_speed_up(priv->phylink);
7818 }
7819 rtnl_unlock();
7820
7821 rtnl_lock();
7822 mutex_lock(&priv->lock);
7823
7824 stmmac_reset_queues_param(priv);
7825
7826 stmmac_free_tx_skbufs(priv);
7827 stmmac_clear_descriptors(priv, &priv->dma_conf);
7828
7829 stmmac_hw_setup(ndev, false);
7830 stmmac_init_coalesce(priv);
7831 stmmac_set_rx_mode(ndev);
7832
7833 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7834
7835 stmmac_enable_all_queues(priv);
7836 stmmac_enable_all_dma_irq(priv);
7837
7838 mutex_unlock(&priv->lock);
7839 rtnl_unlock();
7840
7841 netif_device_attach(ndev);
7842
7843 return 0;
7844 }
7845 EXPORT_SYMBOL_GPL(stmmac_resume);
7846
7847 #ifndef MODULE
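/* Parse the "stmmaceth=" kernel command line, a comma-separated list of
 * opt:value pairs mirroring the module parameters, e.g.
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000
 * Unrecognized options are ignored; malformed values are reported.
 */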
7848 static int __init stmmac_cmdline_opt(char *str)
7849 {
7850 char *opt;
7851
7852 if (!str || !*str)
7853 return 1;
7854 while ((opt = strsep(&str, ",")) != NULL) {
7855 if (!strncmp(opt, "debug:", 6)) {
7856 if (kstrtoint(opt + 6, 0, &debug))
7857 goto err;
7858 } else if (!strncmp(opt, "phyaddr:", 8)) {
7859 if (kstrtoint(opt + 8, 0, &phyaddr))
7860 goto err;
7861 } else if (!strncmp(opt, "buf_sz:", 7)) {
7862 if (kstrtoint(opt + 7, 0, &buf_sz))
7863 goto err;
7864 } else if (!strncmp(opt, "tc:", 3)) {
7865 if (kstrtoint(opt + 3, 0, &tc))
7866 goto err;
7867 } else if (!strncmp(opt, "watchdog:", 9)) {
7868 if (kstrtoint(opt + 9, 0, &watchdog))
7869 goto err;
7870 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7871 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7872 goto err;
7873 } else if (!strncmp(opt, "pause:", 6)) {
7874 if (kstrtoint(opt + 6, 0, &pause))
7875 goto err;
7876 } else if (!strncmp(opt, "eee_timer:", 10)) {
7877 if (kstrtoint(opt + 10, 0, &eee_timer))
7878 goto err;
7879 } else if (!strncmp(opt, "chain_mode:", 11)) {
7880 if (kstrtoint(opt + 11, 0, &chain_mode))
7881 goto err;
7882 }
7883 }
7884 return 1;
7885
7886 err:
7887 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7888 return 1;
7889 }
7890
7891 __setup("stmmaceth=", stmmac_cmdline_opt);
7892 #endif /* MODULE */
7893
7894 static int __init stmmac_init(void)
7895 {
7896 #ifdef CONFIG_DEBUG_FS
7897 /* Create debugfs main directory if it doesn't exist yet */
7898 if (!stmmac_fs_dir)
7899 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7900 register_netdevice_notifier(&stmmac_notifier);
7901 #endif
7902
7903 return 0;
7904 }
7905
7906 static void __exit stmmac_exit(void)
7907 {
7908 #ifdef CONFIG_DEBUG_FS
7909 unregister_netdevice_notifier(&stmmac_notifier);
7910 debugfs_remove_recursive(stmmac_fs_dir);
7911 #endif
7912 }
7913
7914 module_init(stmmac_init)
7915 module_exit(stmmac_exit)
7916
7917 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7918 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7919 MODULE_LICENSE("GPL");
7920