1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4 * stmmac XGMAC support.
5 */
6
7 #include <linux/bitrev.h>
8 #include <linux/crc32.h>
9 #include <linux/iopoll.h>
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12 #include "dwxlgmac2.h"
13 #include "dwxgmac2.h"
14
/* Program the base TX/RX MAC configuration and unmask the default MAC
 * interrupts. If a PHY-based power-saving speed (hw->ps) is configured,
 * the transmitter is force-enabled at that fixed speed.
 */
static void dwxgmac2_core_init(struct mac_device_info *hw,
			       struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 tx, rx;

	tx = readl(ioaddr + XGMAC_TX_CONFIG);
	rx = readl(ioaddr + XGMAC_RX_CONFIG);

	tx |= XGMAC_CORE_INIT_TX;
	rx |= XGMAC_CORE_INIT_RX;

	if (hw->ps) {
		tx |= XGMAC_CONFIG_TE;
		/* Clear the speed selection field before setting a new one */
		tx &= ~hw->link.speed_mask;

		switch (hw->ps) {
		case SPEED_10000:
			tx |= hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			tx |= hw->link.speed2500;
			break;
		case SPEED_1000:
		default:
			/* Any other value falls back to 1G */
			tx |= hw->link.speed1000;
			break;
		}
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
	/* Unmask interrupts only after TX/RX config is in place */
	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
49
xgmac_phylink_get_caps(struct stmmac_priv * priv)50 static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
51 {
52 priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
53 MAC_10000FD | MAC_25000FD |
54 MAC_40000FD | MAC_50000FD |
55 MAC_100000FD;
56 }
57
/* Enable or disable both the transmitter and the receiver. */
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx_cfg = readl(ioaddr + XGMAC_TX_CONFIG);
	u32 rx_cfg = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable) {
		tx_cfg |= XGMAC_CONFIG_TE;
		rx_cfg |= XGMAC_CONFIG_RE;
	} else {
		tx_cfg &= ~XGMAC_CONFIG_TE;
		rx_cfg &= ~XGMAC_CONFIG_RE;
	}

	writel(tx_cfg, ioaddr + XGMAC_TX_CONFIG);
	writel(rx_cfg, ioaddr + XGMAC_RX_CONFIG);
}
74
dwxgmac2_rx_ipc(struct mac_device_info * hw)75 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
76 {
77 void __iomem *ioaddr = hw->pcsr;
78 u32 value;
79
80 value = readl(ioaddr + XGMAC_RX_CONFIG);
81 if (hw->rx_csum)
82 value |= XGMAC_CONFIG_IPC;
83 else
84 value &= ~XGMAC_CONFIG_IPC;
85 writel(value, ioaddr + XGMAC_RX_CONFIG);
86
87 return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
88 }
89
/* Enable an RX queue in AVB or DCB mode; any other mode leaves the
 * queue's enable field cleared (queue disabled).
 */
static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	ctrl = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);

	switch (mode) {
	case MTL_QUEUE_AVB:
		ctrl |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
		break;
	case MTL_QUEUE_DCB:
		ctrl |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
		break;
	default:
		break;
	}

	writel(ctrl, ioaddr + XGMAC_RXQ_CTRL0);
}
103
/* Map a priority bitmask onto an RX queue. Queues 0-3 live in
 * RXQ_CTRL2, queues 4+ in RXQ_CTRL3.
 */
static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl_reg, ctrl;

	ctrl_reg = queue < 4 ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	ctrl = readl(ioaddr + ctrl_reg);
	ctrl &= ~XGMAC_PSRQ(queue);
	ctrl |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);

	writel(ctrl, ioaddr + ctrl_reg);
}
120
/* Map a priority bitmask onto a TX traffic class. Classes 0-3 live in
 * TC_PRTY_MAP0, classes 4+ in TC_PRTY_MAP1.
 */
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 map_reg, map;

	map_reg = queue < 4 ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	map = readl(ioaddr + map_reg);
	map &= ~XGMAC_PSTC(queue);
	map |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

	writel(map, ioaddr + map_reg);
}
137
/* Route a packet class to a given RX queue.
 *
 * NOTE(review): 'packet' is used as a 1-based index into the table below
 * (packet - 1); callers must pass one of the PACKET_* values in range
 * [1..5] -- confirm against stmmac_rx_queue_routing users.
 */
static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw,
				      u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Mask/shift pairs for each routable packet class, in PACKET_*
	 * enumeration order.
	 */
	static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = {
		{ XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT },
		{ XGMAC_PTPQ, XGMAC_PTPQ_SHIFT },
		{ XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT },
		{ XGMAC_UPQ, XGMAC_UPQ_SHIFT },
		{ XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + XGMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~dwxgmac2_route_possibilities[packet - 1].reg_mask;
	value |= (queue << dwxgmac2_route_possibilities[packet - 1].reg_shift) &
		 dwxgmac2_route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ)
		value |= FIELD_PREP(XGMAC_TACPQE, 1);
	else if (packet == PACKET_MCBCQ)
		value |= FIELD_PREP(XGMAC_MCBCQEN, 1);

	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
}
167
/* Select the MTL RX arbitration algorithm: Weighted Strict Priority
 * (WSP) sets the RAA bit, everything else (including plain SP) clears
 * it.
 */
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 opmode;

	opmode = readl(ioaddr + XGMAC_MTL_OPMODE);
	opmode &= ~XGMAC_RAA;
	if (rx_alg == MTL_RX_ALGORITHM_WSP)
		opmode |= XGMAC_RAA;

	writel(opmode, ioaddr + XGMAC_MTL_OPMODE);
}
189
/* Select the MTL TX scheduling algorithm and program every traffic
 * class' TSA field accordingly (ETS for the weighted algorithms,
 * cleared for strict priority / unknown).
 */
static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool use_ets = true;
	u32 opmode;
	int tc;

	opmode = readl(ioaddr + XGMAC_MTL_OPMODE);
	opmode &= ~XGMAC_ETSALG;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		opmode |= XGMAC_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		opmode |= XGMAC_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		opmode |= XGMAC_DWRR;
		break;
	default:
		use_ets = false;
		break;
	}

	writel(opmode, ioaddr + XGMAC_MTL_OPMODE);

	/* Propagate the choice to each traffic class */
	for (tc = 0; tc < MTL_MAX_TX_QUEUES; tc++) {
		u32 ctrl = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(tc));

		ctrl &= ~XGMAC_TSA;
		if (use_ets)
			ctrl |= XGMAC_ETS;
		writel(ctrl, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(tc));
	}
}
227
/* Program a TX queue's scheduling weight (quantum) register. */
static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					     struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	writel(weight, hw->pcsr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
236
/* Map an MTL RX queue to a DMA channel. Queues 0-3 are configured in
 * RXQ_DMA_MAP0, queues 4+ in RXQ_DMA_MAP1.
 */
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 map_reg, map;

	map_reg = queue < 4 ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
	if (queue >= 4)
		queue -= 4;

	map = readl(ioaddr + map_reg);
	map &= ~XGMAC_QxMDMACH(queue);
	map |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

	writel(map, ioaddr + map_reg);
}
253
/* Program Credit Based Shaper parameters for a TX queue and switch the
 * queue's transmission selection algorithm to CBS with credit control.
 */
static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
				struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	/* Switch the TSA field to CBS only after the slopes/credits are set */
	ctrl = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	ctrl &= ~XGMAC_TSA;
	ctrl |= XGMAC_CC | XGMAC_CBS;
	writel(ctrl, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
272
/* Snapshot the whole MAC register window into reg_space (one u32 per
 * word offset) for ethtool register dumps.
 */
static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int word;

	for (word = 0; word < XGMAC_MAC_REGSIZE; word++)
		reg_space[word] = readl(ioaddr + word * 4);
}
281
/* Handle MAC-level interrupt sources (PMT wakeup and LPI transitions).
 * Updates the extra-stats counters and returns a CORE_IRQ_* bitmask
 * describing TX LPI entry/exit events; RX LPI events only bump counters.
 */
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	/* Only consider sources that are actually enabled */
	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		/* Dummy read: presumably acks the PMT wakeup event
		 * (read-to-clear) -- NOTE(review): confirm in databook.
		 */
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		/* A single read latches all four LPI event flags */
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}
318
/* Check and acknowledge per-queue MTL interrupts for one channel.
 * Returns CORE_IRQ_MTL_RX_OVERFLOW if the queue reported an RX FIFO
 * overflow, 0 otherwise.
 */
static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
					struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 chan_status;
	int ret = 0;

	if (!(readl(ioaddr + XGMAC_MTL_INT_STATUS) & BIT(chan)))
		return 0;

	chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	if (chan_status & XGMAC_RXOVFIS)
		ret |= CORE_IRQ_MTL_RX_OVERFLOW;

	/* Ack every interrupt source of this queue */
	writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));

	return ret;
}
338
/* Program RX/TX flow control. RX flow control is a single global
 * enable; TX flow control (PAUSE generation) is programmed per TX
 * queue.
 *
 * NOTE(review): the enable bits are only ever set here. If flow
 * control is later turned off (fc without FLOW_RX/FLOW_TX), previously
 * written RFE/TFE bits are not cleared -- confirm whether callers
 * depend on that.
 */
static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 i;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
	if (fc & FLOW_TX) {
		for (i = 0; i < tx_cnt; i++) {
			u32 value = XGMAC_TFE;

			/* Pause time only applies in full duplex */
			if (duplex)
				value |= pause_time << XGMAC_PT_SHIFT;

			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
		}
	}
}
359
/* Program the PMT register for Wake-on-LAN. 'mode' is a WAKE_*
 * bitmask; zero disables wakeup (and clears power-down).
 */
static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		/* Keep the receiver enabled so wakeup frames can be seen
		 * while the MAC is powered down.
		 */
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	writel(val, ioaddr + XGMAC_PMT);
}
377
dwxgmac2_set_umac_addr(struct mac_device_info * hw,const unsigned char * addr,unsigned int reg_n)378 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
379 const unsigned char *addr,
380 unsigned int reg_n)
381 {
382 void __iomem *ioaddr = hw->pcsr;
383 u32 value;
384
385 value = (addr[5] << 8) | addr[4];
386 writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
387
388 value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
389 writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
390 }
391
dwxgmac2_get_umac_addr(struct mac_device_info * hw,unsigned char * addr,unsigned int reg_n)392 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
393 unsigned char *addr, unsigned int reg_n)
394 {
395 void __iomem *ioaddr = hw->pcsr;
396 u32 hi_addr, lo_addr;
397
398 /* Read the MAC address from the hardware */
399 hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
400 lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
401
402 /* Extract the MAC address from the high and low words */
403 addr[0] = lo_addr & 0xff;
404 addr[1] = (lo_addr >> 8) & 0xff;
405 addr[2] = (lo_addr >> 16) & 0xff;
406 addr[3] = (lo_addr >> 24) & 0xff;
407 addr[4] = hi_addr & 0xff;
408 addr[5] = (hi_addr >> 8) & 0xff;
409 }
410
/* Enable EEE TX LPI (with automatic entry), optionally gating the TX
 * clock while in LPI.
 */
static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
				  bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 lpi_ctrl = readl(ioaddr + XGMAC_LPI_CTRL);

	lpi_ctrl |= XGMAC_LPITXEN | XGMAC_LPITXA;
	if (en_tx_lpi_clockgating)
		lpi_ctrl |= XGMAC_TXCGE;

	writel(lpi_ctrl, ioaddr + XGMAC_LPI_CTRL);
}
425
dwxgmac2_reset_eee_mode(struct mac_device_info * hw)426 static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
427 {
428 void __iomem *ioaddr = hw->pcsr;
429 u32 value;
430
431 value = readl(ioaddr + XGMAC_LPI_CTRL);
432 value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
433 writel(value, ioaddr + XGMAC_LPI_CTRL);
434 }
435
dwxgmac2_set_eee_pls(struct mac_device_info * hw,int link)436 static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
437 {
438 void __iomem *ioaddr = hw->pcsr;
439 u32 value;
440
441 value = readl(ioaddr + XGMAC_LPI_CTRL);
442 if (link)
443 value |= XGMAC_PLS;
444 else
445 value &= ~XGMAC_PLS;
446 writel(value, ioaddr + XGMAC_LPI_CTRL);
447 }
448
dwxgmac2_set_eee_timer(struct mac_device_info * hw,int ls,int tw)449 static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
450 {
451 void __iomem *ioaddr = hw->pcsr;
452 u32 value;
453
454 value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
455 writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
456 }
457
/* Write the multicast hash table. The number of 32-bit hash registers
 * depends on the hash width (2^mcbitslog2 bits total); unsupported
 * widths leave the table untouched.
 */
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int nregs, i;

	switch (mcbitslog2) {
	case 6:
		nregs = 2;
		break;
	case 7:
		nregs = 4;
		break;
	case 8:
		nregs = 8;
		break;
	default:
		return;
	}

	for (i = 0; i < nregs; i++)
		writel(mcfilterbits[i], ioaddr + XGMAC_HASH_TABLE(i));
}
480
/* Program the RX packet filter from the net_device state: promiscuous,
 * all-multi, multicast hash filtering, and perfect unicast filtering
 * with promiscuous fallback when there are too many unicast addresses.
 */
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	/* Start from a clean promisc/hash/pass-all-multi state */
	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multicast; hash table contents become don't-care
		 * but are filled anyway.
		 */
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		/* Hash index = top mcbitslog2 bits of the bit-reversed
		 * CRC32 of the MAC address.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Too many entries for perfect filtering: fall back to
		 * promiscuous mode.
		 */
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		/* Entry 0 is skipped -- presumably reserved for the
		 * device's own address (set elsewhere); TODO confirm.
		 */
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Invalidate the remaining (unused) perfect-filter slots */
		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
538
/* Toggle MAC-level loopback via the LM bit in RX_CONFIG. */
static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 rx_cfg = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable)
		rx_cfg |= XGMAC_CONFIG_LM;
	else
		rx_cfg &= ~XGMAC_CONFIG_LM;

	writel(rx_cfg, ioaddr + XGMAC_RX_CONFIG);
}
550
/* Write one RSS key word (is_key) or indirection table entry through
 * the indirect RSS_DATA/RSS_ADDR access mechanism. Returns 0 on
 * success or a poll-timeout error.
 */
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl;

	writel(val, ioaddr + XGMAC_RSS_DATA);

	ctrl = (idx << XGMAC_RSSIA_SHIFT) | XGMAC_OB;
	if (is_key)
		ctrl |= XGMAC_ADDRT;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	/* Wait for the operation-busy bit to self-clear */
	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}
565
/* Enable or disable RSS: program the hash key and indirection table,
 * route all RX queues to dynamic DMA channel mapping, then enable
 * hashing for IPv4/TCP/UDP. A NULL or disabled cfg turns RSS off.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl, *key;
	int i, ret;

	ctrl = readl(ioaddr + XGMAC_RSS_CTRL);

	if (!cfg || !cfg->enable) {
		writel(ctrl & ~XGMAC_RSSE, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* Load the hash key, one 32-bit word at a time */
	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	/* Load the indirection table */
	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	ctrl |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(ctrl, ioaddr + XGMAC_RSS_CTRL);

	return 0;
}
600
/* Program VLAN filtering in one of three modes:
 *  - hash != 0: hash-based VLAN filtering (VTHM + ETV);
 *  - hash == 0 but perfect_match != 0: single perfect VID match;
 *  - both zero: VLAN filtering disabled.
 * is_double selects double-VLAN (S-VLAN/QinQ) tag handling.
 */
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		/* VLAN tag filtering must be on in the packet filter */
		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Hash matching + tag comparison enabled */
		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		/* No direct VID comparison in hash mode */
		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Perfect match: hash matching off, tag comparison on */
		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		/* Replace the VID field with the requested perfect match */
		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		/* Disable VLAN filtering entirely */
		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}
670
/* Description of one bit in a safety interrupt status register:
 * whether the bit is a known error source, plus short and detailed
 * human-readable descriptions used when logging.
 */
struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

/* Byte offset of a counter field inside struct stmmac_safety_stats */
#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
678
/* Log every error bit set in 'value' using the per-bit description
 * table 'desc', and bump the matching counters inside 'stats'.
 *
 * field_offset is the byte offset of the module's counter block inside
 * struct stmmac_safety_stats; ptr[loc]++ assumes that block is an array
 * of at least 32 unsigned longs, one per status bit -- NOTE(review):
 * confirm against the stmmac_safety_stats layout.
 */
static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	/* One log line + counter increment per set status bit */
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
			   "correctable" : "uncorrectable", module_name,
			   desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}
701
/* Per-bit descriptions for the MAC DPP/FSM safety interrupt status
 * register (see dwxgmac3_handle_mac_err). Index == bit position.
 */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};
736
/* Read, acknowledge and log MAC DPP/FSM safety errors. */
static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	/* Write the status back to ack the latched bits */
	writel(status, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}
749
/* Per-bit descriptions for the MTL ECC safety interrupt status
 * register (see dwxgmac3_handle_mtl_err). Index == bit position.
 */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
784
/* Read, acknowledge and log MTL ECC safety errors. */
static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	/* Write the status back to ack the latched bits */
	writel(status, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}
797
/* Per-bit descriptions for the DMA ECC safety interrupt status
 * register (see dwxgmac3_handle_dma_err). Index == bit position.
 */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
832
/* Read, acknowledge and log DMA ECC safety errors. */
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	/* Write the status back to ack the latched bits */
	writel(status, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}
845
/* Enable the automotive safety package features advertised by the
 * hardware (asp). Returns 0 on success, -EINVAL if the core has no
 * safety features (asp == 0).
 */
static int
dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			    struct stmmac_safety_feature_cfg *safety_cfg)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* 0x2: Without ECC or Parity Ports on External Application Interface
	 * 0x4: Only ECC Protection for External Memory feature is selected
	 */
	if (asp == 0x2 || asp == 0x4)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}
886
/* Dispatch a safety interrupt to the MAC/MTL/DMA handlers.
 * Returns -EINVAL if the core has no safety features, otherwise a
 * non-zero value iff at least one UNcorrectable error was handled.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC-level errors are always treated as uncorrectable
	 * (corr = false below).
	 */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* MTL errors: uncorrectable (MEUIS/MSUIS) or correctable
	 * (MECIS/MSCIS) memory errors.
	 */
	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* DMA errors: uncorrectable (DEUIS) or correctable (DECIS) */
	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
926
/* Per-module error tables, indexed by module: 0 = MAC, 1 = MTL,
 * 2 = DMA. Consumed by dwxgmac3_safety_feat_dump(), which maps a flat
 * index onto (module, bit).
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};
934
dwxgmac3_safety_feat_dump(struct stmmac_safety_stats * stats,int index,unsigned long * count,const char ** desc)935 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
936 int index, unsigned long *count,
937 const char **desc)
938 {
939 int module = index / 32, offset = index % 32;
940 unsigned long *ptr = (unsigned long *)stats;
941
942 if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
943 return -EINVAL;
944 if (!dwxgmac3_all_errors[module].desc[offset].valid)
945 return -EINVAL;
946 if (count)
947 *count = *(ptr + index);
948 if (desc)
949 *desc = dwxgmac3_all_errors[module].desc[offset].desc;
950 return 0;
951 }
952
/* Disable the Flexible RX Parser. Always succeeds. */
static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 opmode = readl(ioaddr + XGMAC_MTL_OPMODE);

	writel(opmode & ~XGMAC_FRPE, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}
962
/* Enable the Flexible RX Parser. */
static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 opmode = readl(ioaddr + XGMAC_MTL_OPMODE);

	writel(opmode | XGMAC_FRPE, ioaddr + XGMAC_MTL_OPMODE);
}
971
dwxgmac3_rxp_update_single_entry(void __iomem * ioaddr,struct stmmac_tc_entry * entry,int pos)972 static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
973 struct stmmac_tc_entry *entry,
974 int pos)
975 {
976 int ret, i;
977
978 for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
979 int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
980 u32 val;
981
982 /* Wait for ready */
983 ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
984 val, !(val & XGMAC_STARTBUSY), 1, 10000);
985 if (ret)
986 return ret;
987
988 /* Write data */
989 val = *((u32 *)&entry->val + i);
990 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
991
992 /* Write pos */
993 val = real_pos & XGMAC_ADDR;
994 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
995
996 /* Write OP */
997 val |= XGMAC_WRRDN;
998 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
999
1000 /* Start Write */
1001 val |= XGMAC_STARTBUSY;
1002 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
1003
1004 /* Wait for done */
1005 ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
1006 val, !(val & XGMAC_STARTBUSY), 1, 10000);
1007 if (ret)
1008 return ret;
1009 }
1010
1011 return 0;
1012 }
1013
1014 static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry * entries,unsigned int count,u32 curr_prio)1015 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
1016 unsigned int count, u32 curr_prio)
1017 {
1018 struct stmmac_tc_entry *entry;
1019 u32 min_prio = ~0x0;
1020 int i, min_prio_idx;
1021 bool found = false;
1022
1023 for (i = count - 1; i >= 0; i--) {
1024 entry = &entries[i];
1025
1026 /* Do not update unused entries */
1027 if (!entry->in_use)
1028 continue;
1029 /* Do not update already updated entries (i.e. fragments) */
1030 if (entry->in_hw)
1031 continue;
1032 /* Let last entry be updated last */
1033 if (entry->is_last)
1034 continue;
1035 /* Do not return fragments */
1036 if (entry->is_frag)
1037 continue;
1038 /* Check if we already checked this prio */
1039 if (entry->prio < curr_prio)
1040 continue;
1041 /* Check if this is the minimum prio */
1042 if (entry->prio < min_prio) {
1043 min_prio = entry->prio;
1044 min_prio_idx = i;
1045 found = true;
1046 }
1047 }
1048
1049 if (found)
1050 return &entries[min_prio_idx];
1051 return NULL;
1052 }
1053
dwxgmac3_rxp_config(void __iomem * ioaddr,struct stmmac_tc_entry * entries,unsigned int count)1054 static int dwxgmac3_rxp_config(void __iomem *ioaddr,
1055 struct stmmac_tc_entry *entries,
1056 unsigned int count)
1057 {
1058 struct stmmac_tc_entry *entry, *frag;
1059 int i, ret, nve = 0;
1060 u32 curr_prio = 0;
1061 u32 old_val, val;
1062
1063 /* Force disable RX */
1064 old_val = readl(ioaddr + XGMAC_RX_CONFIG);
1065 val = old_val & ~XGMAC_CONFIG_RE;
1066 writel(val, ioaddr + XGMAC_RX_CONFIG);
1067
1068 /* Disable RX Parser */
1069 ret = dwxgmac3_rxp_disable(ioaddr);
1070 if (ret)
1071 goto re_enable;
1072
1073 /* Set all entries as NOT in HW */
1074 for (i = 0; i < count; i++) {
1075 entry = &entries[i];
1076 entry->in_hw = false;
1077 }
1078
1079 /* Update entries by reverse order */
1080 while (1) {
1081 entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
1082 if (!entry)
1083 break;
1084
1085 curr_prio = entry->prio;
1086 frag = entry->frag_ptr;
1087
1088 /* Set special fragment requirements */
1089 if (frag) {
1090 entry->val.af = 0;
1091 entry->val.rf = 0;
1092 entry->val.nc = 1;
1093 entry->val.ok_index = nve + 2;
1094 }
1095
1096 ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
1097 if (ret)
1098 goto re_enable;
1099
1100 entry->table_pos = nve++;
1101 entry->in_hw = true;
1102
1103 if (frag && !frag->in_hw) {
1104 ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
1105 if (ret)
1106 goto re_enable;
1107 frag->table_pos = nve++;
1108 frag->in_hw = true;
1109 }
1110 }
1111
1112 if (!nve)
1113 goto re_enable;
1114
1115 /* Update all pass entry */
1116 for (i = 0; i < count; i++) {
1117 entry = &entries[i];
1118 if (!entry->is_last)
1119 continue;
1120
1121 ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
1122 if (ret)
1123 goto re_enable;
1124
1125 entry->table_pos = nve++;
1126 }
1127
1128 /* Assume n. of parsable entries == n. of valid entries */
1129 val = (nve << 16) & XGMAC_NPE;
1130 val |= nve & XGMAC_NVE;
1131 writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
1132
1133 /* Enable RX Parser */
1134 dwxgmac3_rxp_enable(ioaddr);
1135
1136 re_enable:
1137 /* Re-enable RX */
1138 writel(old_val, ioaddr + XGMAC_RX_CONFIG);
1139 return ret;
1140 }
1141
dwxgmac2_get_mac_tx_timestamp(struct mac_device_info * hw,u64 * ts)1142 static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
1143 {
1144 void __iomem *ioaddr = hw->pcsr;
1145 u32 value;
1146
1147 if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
1148 value, value & XGMAC_TXTSC, 100, 10000))
1149 return -EBUSY;
1150
1151 *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
1152 *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
1153 return 0;
1154 }
1155
dwxgmac2_flex_pps_config(void __iomem * ioaddr,int index,struct stmmac_pps_cfg * cfg,bool enable,u32 sub_second_inc,u32 systime_flags)1156 static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
1157 struct stmmac_pps_cfg *cfg, bool enable,
1158 u32 sub_second_inc, u32 systime_flags)
1159 {
1160 u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
1161 u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
1162 u64 period;
1163
1164 if (!cfg->available)
1165 return -EINVAL;
1166 if (tnsec & XGMAC_TRGTBUSY0)
1167 return -EBUSY;
1168 if (!sub_second_inc || !systime_flags)
1169 return -EINVAL;
1170
1171 val &= ~XGMAC_PPSx_MASK(index);
1172
1173 if (!enable) {
1174 val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
1175 writel(val, ioaddr + XGMAC_PPS_CONTROL);
1176 return 0;
1177 }
1178
1179 val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
1180 val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
1181 val |= XGMAC_PPSEN0;
1182
1183 writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
1184
1185 if (!(systime_flags & PTP_TCR_TSCTRLSSR))
1186 cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
1187 writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
1188
1189 period = cfg->period.tv_sec * 1000000000;
1190 period += cfg->period.tv_nsec;
1191
1192 do_div(period, sub_second_inc);
1193
1194 if (period <= 1)
1195 return -EINVAL;
1196
1197 writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
1198
1199 period >>= 1;
1200 if (period <= 1)
1201 return -EINVAL;
1202
1203 writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
1204
1205 /* Finally, activate it */
1206 writel(val, ioaddr + XGMAC_PPS_CONTROL);
1207 return 0;
1208 }
1209
dwxgmac2_sarc_configure(void __iomem * ioaddr,int val)1210 static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
1211 {
1212 u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
1213
1214 value &= ~XGMAC_CONFIG_SARC;
1215 value |= val << XGMAC_CONFIG_SARC_SHIFT;
1216
1217 writel(value, ioaddr + XGMAC_TX_CONFIG);
1218 }
1219
dwxgmac2_enable_vlan(struct mac_device_info * hw,u32 type)1220 static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
1221 {
1222 void __iomem *ioaddr = hw->pcsr;
1223 u32 value;
1224
1225 value = readl(ioaddr + XGMAC_VLAN_INCL);
1226 value |= XGMAC_VLAN_VLTI;
1227 value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
1228 value &= ~XGMAC_VLAN_VLC;
1229 value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
1230 writel(value, ioaddr + XGMAC_VLAN_INCL);
1231 }
1232
dwxgmac2_filter_wait(struct mac_device_info * hw)1233 static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1234 {
1235 void __iomem *ioaddr = hw->pcsr;
1236 u32 value;
1237
1238 if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1239 !(value & XGMAC_XB), 100, 10000))
1240 return -EBUSY;
1241 return 0;
1242 }
1243
dwxgmac2_filter_read(struct mac_device_info * hw,u32 filter_no,u8 reg,u32 * data)1244 static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
1245 u8 reg, u32 *data)
1246 {
1247 void __iomem *ioaddr = hw->pcsr;
1248 u32 value;
1249 int ret;
1250
1251 ret = dwxgmac2_filter_wait(hw);
1252 if (ret)
1253 return ret;
1254
1255 value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1256 value |= XGMAC_TT | XGMAC_XB;
1257 writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1258
1259 ret = dwxgmac2_filter_wait(hw);
1260 if (ret)
1261 return ret;
1262
1263 *data = readl(ioaddr + XGMAC_L3L4_DATA);
1264 return 0;
1265 }
1266
dwxgmac2_filter_write(struct mac_device_info * hw,u32 filter_no,u8 reg,u32 data)1267 static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
1268 u8 reg, u32 data)
1269 {
1270 void __iomem *ioaddr = hw->pcsr;
1271 u32 value;
1272 int ret;
1273
1274 ret = dwxgmac2_filter_wait(hw);
1275 if (ret)
1276 return ret;
1277
1278 writel(data, ioaddr + XGMAC_L3L4_DATA);
1279
1280 value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1281 value |= XGMAC_XB;
1282 writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1283
1284 return dwxgmac2_filter_wait(hw);
1285 }
1286
dwxgmac2_config_l3_filter(struct mac_device_info * hw,u32 filter_no,bool en,bool ipv6,bool sa,bool inv,u32 match)1287 static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1288 bool en, bool ipv6, bool sa, bool inv,
1289 u32 match)
1290 {
1291 void __iomem *ioaddr = hw->pcsr;
1292 u32 value;
1293 int ret;
1294
1295 value = readl(ioaddr + XGMAC_PACKET_FILTER);
1296 value |= XGMAC_FILTER_IPFE;
1297 writel(value, ioaddr + XGMAC_PACKET_FILTER);
1298
1299 ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1300 if (ret)
1301 return ret;
1302
1303 /* For IPv6 not both SA/DA filters can be active */
1304 if (ipv6) {
1305 value |= XGMAC_L3PEN0;
1306 value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
1307 value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
1308 if (sa) {
1309 value |= XGMAC_L3SAM0;
1310 if (inv)
1311 value |= XGMAC_L3SAIM0;
1312 } else {
1313 value |= XGMAC_L3DAM0;
1314 if (inv)
1315 value |= XGMAC_L3DAIM0;
1316 }
1317 } else {
1318 value &= ~XGMAC_L3PEN0;
1319 if (sa) {
1320 value |= XGMAC_L3SAM0;
1321 if (inv)
1322 value |= XGMAC_L3SAIM0;
1323 } else {
1324 value |= XGMAC_L3DAM0;
1325 if (inv)
1326 value |= XGMAC_L3DAIM0;
1327 }
1328 }
1329
1330 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1331 if (ret)
1332 return ret;
1333
1334 if (sa) {
1335 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
1336 if (ret)
1337 return ret;
1338 } else {
1339 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
1340 if (ret)
1341 return ret;
1342 }
1343
1344 if (!en)
1345 return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1346
1347 return 0;
1348 }
1349
dwxgmac2_config_l4_filter(struct mac_device_info * hw,u32 filter_no,bool en,bool udp,bool sa,bool inv,u32 match)1350 static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1351 bool en, bool udp, bool sa, bool inv,
1352 u32 match)
1353 {
1354 void __iomem *ioaddr = hw->pcsr;
1355 u32 value;
1356 int ret;
1357
1358 value = readl(ioaddr + XGMAC_PACKET_FILTER);
1359 value |= XGMAC_FILTER_IPFE;
1360 writel(value, ioaddr + XGMAC_PACKET_FILTER);
1361
1362 ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1363 if (ret)
1364 return ret;
1365
1366 if (udp) {
1367 value |= XGMAC_L4PEN0;
1368 } else {
1369 value &= ~XGMAC_L4PEN0;
1370 }
1371
1372 value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
1373 value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
1374 if (sa) {
1375 value |= XGMAC_L4SPM0;
1376 if (inv)
1377 value |= XGMAC_L4SPIM0;
1378 } else {
1379 value |= XGMAC_L4DPM0;
1380 if (inv)
1381 value |= XGMAC_L4DPIM0;
1382 }
1383
1384 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1385 if (ret)
1386 return ret;
1387
1388 if (sa) {
1389 value = match & XGMAC_L4SP0;
1390
1391 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1392 if (ret)
1393 return ret;
1394 } else {
1395 value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
1396
1397 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1398 if (ret)
1399 return ret;
1400 }
1401
1402 if (!en)
1403 return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1404
1405 return 0;
1406 }
1407
dwxgmac2_set_arp_offload(struct mac_device_info * hw,bool en,u32 addr)1408 static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
1409 u32 addr)
1410 {
1411 void __iomem *ioaddr = hw->pcsr;
1412 u32 value;
1413
1414 writel(addr, ioaddr + XGMAC_ARP_ADDR);
1415
1416 value = readl(ioaddr + XGMAC_RX_CONFIG);
1417 if (en)
1418 value |= XGMAC_CONFIG_ARPEN;
1419 else
1420 value &= ~XGMAC_CONFIG_ARPEN;
1421 writel(value, ioaddr + XGMAC_RX_CONFIG);
1422 }
1423
dwxgmac3_est_write(void __iomem * ioaddr,u32 reg,u32 val,bool gcl)1424 static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
1425 {
1426 u32 ctrl;
1427
1428 writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
1429
1430 ctrl = (reg << XGMAC_ADDR_SHIFT);
1431 ctrl |= gcl ? 0 : XGMAC_GCRR;
1432
1433 writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
1434
1435 ctrl |= XGMAC_SRWO;
1436 writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
1437
1438 return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
1439 ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
1440 }
1441
dwxgmac3_est_configure(void __iomem * ioaddr,struct stmmac_est * cfg,unsigned int ptp_rate)1442 static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
1443 unsigned int ptp_rate)
1444 {
1445 int i, ret = 0x0;
1446 u32 ctrl;
1447
1448 ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
1449 ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
1450 ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
1451 ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
1452 ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
1453 ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
1454 if (ret)
1455 return ret;
1456
1457 for (i = 0; i < cfg->gcl_size; i++) {
1458 ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
1459 if (ret)
1460 return ret;
1461 }
1462
1463 ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
1464 ctrl &= ~XGMAC_PTOV;
1465 ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
1466 if (cfg->enable)
1467 ctrl |= XGMAC_EEST | XGMAC_SSWL;
1468 else
1469 ctrl &= ~XGMAC_EEST;
1470
1471 writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
1472 return 0;
1473 }
1474
dwxgmac3_fpe_configure(void __iomem * ioaddr,u32 num_txq,u32 num_rxq,bool enable)1475 static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
1476 u32 num_rxq, bool enable)
1477 {
1478 u32 value;
1479
1480 if (!enable) {
1481 value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1482
1483 value &= ~XGMAC_EFPE;
1484
1485 writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1486 return;
1487 }
1488
1489 value = readl(ioaddr + XGMAC_RXQ_CTRL1);
1490 value &= ~XGMAC_RQ;
1491 value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
1492 writel(value, ioaddr + XGMAC_RXQ_CTRL1);
1493
1494 value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1495 value |= XGMAC_EFPE;
1496 writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1497 }
1498
/* MAC operations for DesignWare XGMAC 2.10 cores. PCS and debug hooks are
 * explicitly NULL: this core variant provides no PCS control through this
 * interface.
 */
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.phylink_get_caps = xgmac_phylink_get_caps,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1546
dwxlgmac2_rx_queue_enable(struct mac_device_info * hw,u8 mode,u32 queue)1547 static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
1548 u32 queue)
1549 {
1550 void __iomem *ioaddr = hw->pcsr;
1551 u32 value;
1552
1553 value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
1554 if (mode == MTL_QUEUE_AVB)
1555 value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
1556 else if (mode == MTL_QUEUE_DCB)
1557 value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
1558 writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
1559 }
1560
/* MAC operations for DesignWare XLGMAC cores. Identical to
 * dwxgmac210_ops except for rx_queue_enable, which uses the XLGMAC
 * queue-enable register layout.
 */
const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.phylink_get_caps = xgmac_phylink_get_caps,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1608
dwxgmac2_setup(struct stmmac_priv * priv)1609 int dwxgmac2_setup(struct stmmac_priv *priv)
1610 {
1611 struct mac_device_info *mac = priv->hw;
1612
1613 dev_info(priv->device, "\tXGMAC2\n");
1614
1615 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1616 mac->pcsr = priv->ioaddr;
1617 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1618 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1619 mac->mcast_bits_log2 = 0;
1620
1621 if (mac->multicast_filter_bins)
1622 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1623
1624 mac->link.duplex = 0;
1625 mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
1626 mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
1627 mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
1628 mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
1629 mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
1630 mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
1631 mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
1632 mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
1633
1634 mac->mii.addr = XGMAC_MDIO_ADDR;
1635 mac->mii.data = XGMAC_MDIO_DATA;
1636 mac->mii.addr_shift = 16;
1637 mac->mii.addr_mask = GENMASK(20, 16);
1638 mac->mii.reg_shift = 0;
1639 mac->mii.reg_mask = GENMASK(15, 0);
1640 mac->mii.clk_csr_shift = 19;
1641 mac->mii.clk_csr_mask = GENMASK(21, 19);
1642
1643 return 0;
1644 }
1645
dwxlgmac2_setup(struct stmmac_priv * priv)1646 int dwxlgmac2_setup(struct stmmac_priv *priv)
1647 {
1648 struct mac_device_info *mac = priv->hw;
1649
1650 dev_info(priv->device, "\tXLGMAC\n");
1651
1652 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1653 mac->pcsr = priv->ioaddr;
1654 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1655 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1656 mac->mcast_bits_log2 = 0;
1657
1658 if (mac->multicast_filter_bins)
1659 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1660
1661 mac->link.duplex = 0;
1662 mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
1663 mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
1664 mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
1665 mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
1666 mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
1667 mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
1668 mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
1669 mac->link.speed_mask = XLGMAC_CONFIG_SS;
1670
1671 mac->mii.addr = XGMAC_MDIO_ADDR;
1672 mac->mii.data = XGMAC_MDIO_DATA;
1673 mac->mii.addr_shift = 16;
1674 mac->mii.addr_mask = GENMASK(20, 16);
1675 mac->mii.reg_shift = 0;
1676 mac->mii.reg_mask = GENMASK(15, 0);
1677 mac->mii.clk_csr_shift = 19;
1678 mac->mii.clk_csr_mask = GENMASK(21, 19);
1679
1680 return 0;
1681 }
1682