// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_ptp.h"
#include "dwxgmac2.h"

static void dwxgmac2_core_init(struct mac_device_info *hw,
			       struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 tx, rx;

	tx = readl(ioaddr + XGMAC_TX_CONFIG);
	rx = readl(ioaddr + XGMAC_RX_CONFIG);

	tx |= XGMAC_CORE_INIT_TX;
	rx |= XGMAC_CORE_INIT_RX;

	if (hw->ps) {
		tx |= XGMAC_CONFIG_TE;
		tx &= ~hw->link.speed_mask;

		switch (hw->ps) {
		case SPEED_10000:
			tx |= hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			tx |= hw->link.speed2500;
			break;
		case SPEED_1000:
		default:
			tx |= hw->link.speed1000;
			break;
		}
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}

static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable) {
		tx |= XGMAC_CONFIG_TE;
		rx |= XGMAC_CONFIG_RE;
	} else {
		tx &= ~XGMAC_CONFIG_TE;
		rx &= ~XGMAC_CONFIG_RE;
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (hw->rx_csum)
		value |= XGMAC_CONFIG_IPC;
	else
		value &= ~XGMAC_CONFIG_IPC;
	writel(value, ioaddr + XGMAC_RX_CONFIG);

	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
}

static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
}

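/* The RX queue priority map is split across two registers: queues 0-3 are
 * programmed through XGMAC_RXQ_CTRL2 and queues 4-7 through XGMAC_RXQ_CTRL3.
 */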
static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSRQ(queue);
	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSTC(queue);
	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_RAA;

	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= XGMAC_RAA;
		break;
	default:
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);
}

static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool ets = true;
	u32 value;
	int i;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_ETSALG;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= XGMAC_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= XGMAC_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= XGMAC_DWRR;
		break;
	default:
		ets = false;
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);

	/* Set ETS if desired */
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
		value &= ~XGMAC_TSA;
		if (ets)
			value |= XGMAC_ETS;
		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
	}
}

static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}

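/* RX queues 0-3 are mapped to DMA channels through MTL_RXQ_DMA_MAP0 and
 * queues 4-7 through MTL_RXQ_DMA_MAP1.
 */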
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_QxMDMACH(queue);
	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_config_cbs(struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	value &= ~XGMAC_TSA;
	value |= XGMAC_CC | XGMAC_CBS;
	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}

static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}

static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	int ret = 0;
	u32 status;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (status & BIT(chan)) {
		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

		if (chan_status & XGMAC_RXOVFIS)
			ret |= CORE_IRQ_MTL_RX_OVERFLOW;

		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	}

	return ret;
}

static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 i;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
	if (fc & FLOW_TX) {
		for (i = 0; i < tx_cnt; i++) {
			u32 value = XGMAC_TFE;

			if (duplex)
				value |= pause_time << XGMAC_PT_SHIFT;

			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
		}
	}
}

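/* If any wake-up mode is requested, the MAC receiver is kept enabled before
 * power-down so that incoming wake-up packets can still be received.
 */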
static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	writel(val, ioaddr + XGMAC_PMT);
}

static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (addr[5] << 8) | addr[4];
	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));

	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
}

static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
				  bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);

	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
	if (en_tx_lpi_clockgating)
		value |= XGMAC_TXCGE;

	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	if (link)
		value |= XGMAC_PLS;
	else
		value &= ~XGMAC_PLS;
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
}

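/* Write the multicast hash table. The number of 32-bit HASH_TABLE registers
 * to fill follows the hash width: 64 bits use 2 registers, 128 bits use 4
 * and 256 bits use 8.
 */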
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int numhashregs, regs;

	switch (mcbitslog2) {
	case 6:
		numhashregs = 2;
		break;
	case 7:
		numhashregs = 4;
		break;
	case 8:
		numhashregs = 8;
		break;
	default:
		return;
	}

	for (regs = 0; regs < numhashregs; regs++)
		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
}

static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}

static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable)
		value |= XGMAC_CONFIG_LM;
	else
		value &= ~XGMAC_CONFIG_LM;

	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

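/* The RSS key and indirection table are programmed indirectly: the value is
 * written to XGMAC_RSS_DATA, then an address/control word with the OB
 * (operation busy) bit set is written to XGMAC_RSS_ADDR and the caller polls
 * until the hardware clears OB.
 */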
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl = 0;

	writel(val, ioaddr + XGMAC_RSS_DATA);
	ctrl |= idx << XGMAC_RSSIA_SHIFT;
	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
	ctrl |= XGMAC_OB;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}

static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}

static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}

struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)

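/* Walk every bit set in the status value, print the matching entry from the
 * error descriptor table and bump the per-bit counter located at the given
 * field offset inside the stats structure.
 */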
static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}

static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};

static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}

static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* Only ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}

static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}

static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};

static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
				     int index, unsigned long *count,
				     const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
		return -EINVAL;
	if (!dwxgmac3_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
	return 0;
}

static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);

	val &= ~XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}

static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + XGMAC_MTL_OPMODE);
	val |= XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);
}

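/* Program one Flexible RX Parser entry. Each 32-bit word of the instruction
 * is written through the indirect MTL_RXP_IACC_{DATA,CTRL_ST} interface:
 * wait for STARTBUSY to clear, write the data word, the table position and
 * the write opcode, then kick STARTBUSY and wait for completion.
 */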
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}

static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
			    unsigned int count, u32 curr_prio)
{
	struct stmmac_tc_entry *entry;
	u32 min_prio = ~0x0;
	int i, min_prio_idx;
	bool found = false;

	for (i = count - 1; i >= 0; i--) {
		entry = &entries[i];

		/* Do not update unused entries */
		if (!entry->in_use)
			continue;
		/* Do not update already updated entries (i.e. fragments) */
		if (entry->in_hw)
			continue;
		/* Let last entry be updated last */
		if (entry->is_last)
			continue;
		/* Do not return fragments */
		if (entry->is_frag)
			continue;
		/* Check if we already checked this prio */
		if (entry->prio < curr_prio)
			continue;
		/* Check if this is the minimum prio */
		if (entry->prio < min_prio) {
			min_prio = entry->prio;
			min_prio_idx = i;
			found = true;
		}
	}

	if (found)
		return &entries[min_prio_idx];
	return NULL;
}

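/* Reprogram the whole RX Parser table: RX and the parser are temporarily
 * disabled, all entries are marked as not in hardware, each entry (and its
 * fragment, if any) is then written back, the final "last" entry is appended
 * and NVE/NPE are updated before the parser and RX are re-enabled.
 */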
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}

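/* Poll until a transmit timestamp has been captured, then combine the
 * seconds and nanoseconds snapshot registers into a single nanosecond value.
 */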
static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
				      value, value & XGMAC_TXTSC, 100, 10000))
		return -EBUSY;

	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
	return 0;
}

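/* Configure one flexible PPS output: the start time is loaded into the
 * target time registers, and the requested period, expressed in units of
 * the sub-second increment, is used to derive the interval and (half-period)
 * width registers before the output is started.
 */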
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_PPSEN0;

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}

static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);

	value &= ~XGMAC_CONFIG_SARC;
	value |= val << XGMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + XGMAC_TX_CONFIG);
}

static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_VLAN_INCL);
	value |= XGMAC_VLAN_VLTI;
	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~XGMAC_VLAN_VLC;
	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
	writel(value, ioaddr + XGMAC_VLAN_INCL);
}

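/* The L3/L4 filter registers are accessed indirectly through
 * XGMAC_L3L4_ADDR_CTRL and XGMAC_L3L4_DATA: the helpers below wait for the
 * XB (transfer busy) bit to clear, issue a read or write transaction for the
 * selected filter/register pair and then wait for completion.
 */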
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
			       !(value & XGMAC_XB), 100, 10000))
		return -EBUSY;
	return 0;
}

static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_TT | XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}

static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	writel(data, ioaddr + XGMAC_L3L4_DATA);

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}

static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		value &= ~XGMAC_L3PEN0;
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	if (udp) {
		value |= XGMAC_L4PEN0;
	} else {
		value &= ~XGMAC_L4PEN0;
	}

	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
	if (sa) {
		value |= XGMAC_L4SPM0;
		if (inv)
			value |= XGMAC_L4SPIM0;
	} else {
		value |= XGMAC_L4DPM0;
		if (inv)
			value |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		value = match & XGMAC_L4SP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	} else {
		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (en)
		value |= XGMAC_CONFIG_ARPEN;
	else
		value &= ~XGMAC_CONFIG_ARPEN;
	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
};

int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = 0;
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}