1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
4 * stmmac Selftests Support
5 *
6 * Author: Jose Abreu <joabreu@synopsys.com>
7 */
8
9 #include <linux/bitrev.h>
10 #include <linux/completion.h>
11 #include <linux/crc32.h>
12 #include <linux/ethtool.h>
13 #include <linux/ip.h>
14 #include <linux/phy.h>
15 #include <linux/udp.h>
16 #include <net/pkt_cls.h>
17 #include <net/tcp.h>
18 #include <net/udp.h>
19 #include <net/tc_act/tc_gact.h>
20 #include "stmmac.h"
21
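/*
 * Every generated test frame embeds a struct stmmachdr right after the
 * transport header. The magic value and the per-test id let the RX
 * validation callbacks tell looped-back test packets apart from any
 * unrelated traffic.
 */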
22 struct stmmachdr {
23 __be32 version;
24 __be64 magic;
25 u8 id;
26 } __packed;
27
28 #define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
29 sizeof(struct stmmachdr))
30 #define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
31 #define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)
32
33 struct stmmac_packet_attrs {
34 int vlan;
35 int vlan_id_in;
36 int vlan_id_out;
37 unsigned char *src;
38 unsigned char *dst;
39 u32 ip_src;
40 u32 ip_dst;
41 int tcp;
42 int sport;
43 int dport;
44 u32 exp_hash;
45 int dont_wait;
46 int timeout;
47 int size;
48 int max_size;
49 int remove_sa;
50 u8 id;
51 int sarc;
52 u16 queue_mapping;
53 };
54
55 static u8 stmmac_test_next_id;
56
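/*
 * Build a loopback test frame: Ethernet header (optionally with one or two
 * VLAN tags, or with the source address stripped), IPv4 header, UDP or TCP
 * header and the stmmachdr marker, padded up to attr->size / attr->max_size.
 * The L4 checksum is left to the offload path via CHECKSUM_PARTIAL.
 */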
57 static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
58 struct stmmac_packet_attrs *attr)
59 {
60 struct sk_buff *skb = NULL;
61 struct udphdr *uhdr = NULL;
62 struct tcphdr *thdr = NULL;
63 struct stmmachdr *shdr;
64 struct ethhdr *ehdr;
65 struct iphdr *ihdr;
66 int iplen, size;
67
68 size = attr->size + STMMAC_TEST_PKT_SIZE;
69 if (attr->vlan) {
70 size += 4;
71 if (attr->vlan > 1)
72 size += 4;
73 }
74
75 if (attr->tcp)
76 size += sizeof(struct tcphdr);
77 else
78 size += sizeof(struct udphdr);
79
80 if (attr->max_size && (attr->max_size > size))
81 size = attr->max_size;
82
83 skb = netdev_alloc_skb_ip_align(priv->dev, size);
84 if (!skb)
85 return NULL;
86
87 prefetchw(skb->data);
88
89 if (attr->vlan > 1)
90 ehdr = skb_push(skb, ETH_HLEN + 8);
91 else if (attr->vlan)
92 ehdr = skb_push(skb, ETH_HLEN + 4);
93 else if (attr->remove_sa)
94 ehdr = skb_push(skb, ETH_HLEN - 6);
95 else
96 ehdr = skb_push(skb, ETH_HLEN);
97 skb_reset_mac_header(skb);
98
99 skb_set_network_header(skb, skb->len);
100 ihdr = skb_put(skb, sizeof(*ihdr));
101
102 skb_set_transport_header(skb, skb->len);
103 if (attr->tcp)
104 thdr = skb_put(skb, sizeof(*thdr));
105 else
106 uhdr = skb_put(skb, sizeof(*uhdr));
107
108 if (!attr->remove_sa)
109 eth_zero_addr(ehdr->h_source);
110 eth_zero_addr(ehdr->h_dest);
111 if (attr->src && !attr->remove_sa)
112 ether_addr_copy(ehdr->h_source, attr->src);
113 if (attr->dst)
114 ether_addr_copy(ehdr->h_dest, attr->dst);
115
116 if (!attr->remove_sa) {
117 ehdr->h_proto = htons(ETH_P_IP);
118 } else {
119 __be16 *ptr = (__be16 *)ehdr;
120
121 /* HACK */
122 ptr[3] = htons(ETH_P_IP);
123 }
124
125 if (attr->vlan) {
126 __be16 *tag, *proto;
127
128 if (!attr->remove_sa) {
129 tag = (void *)ehdr + ETH_HLEN;
130 proto = (void *)ehdr + (2 * ETH_ALEN);
131 } else {
132 tag = (void *)ehdr + ETH_HLEN - 6;
133 proto = (void *)ehdr + ETH_ALEN;
134 }
135
136 proto[0] = htons(ETH_P_8021Q);
137 tag[0] = htons(attr->vlan_id_out);
138 tag[1] = htons(ETH_P_IP);
139 if (attr->vlan > 1) {
140 proto[0] = htons(ETH_P_8021AD);
141 tag[1] = htons(ETH_P_8021Q);
142 tag[2] = htons(attr->vlan_id_in);
143 tag[3] = htons(ETH_P_IP);
144 }
145 }
146
147 if (attr->tcp) {
148 thdr->source = htons(attr->sport);
149 thdr->dest = htons(attr->dport);
150 thdr->doff = sizeof(struct tcphdr) / 4;
151 thdr->check = 0;
152 } else {
153 uhdr->source = htons(attr->sport);
154 uhdr->dest = htons(attr->dport);
155 uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
156 if (attr->max_size)
157 uhdr->len = htons(attr->max_size -
158 (sizeof(*ihdr) + sizeof(*ehdr)));
159 uhdr->check = 0;
160 }
161
162 ihdr->ihl = 5;
163 ihdr->ttl = 32;
164 ihdr->version = 4;
165 if (attr->tcp)
166 ihdr->protocol = IPPROTO_TCP;
167 else
168 ihdr->protocol = IPPROTO_UDP;
169 iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
170 if (attr->tcp)
171 iplen += sizeof(*thdr);
172 else
173 iplen += sizeof(*uhdr);
174
175 if (attr->max_size)
176 iplen = attr->max_size - sizeof(*ehdr);
177
178 ihdr->tot_len = htons(iplen);
179 ihdr->frag_off = 0;
180 ihdr->saddr = htonl(attr->ip_src);
181 ihdr->daddr = htonl(attr->ip_dst);
182 ihdr->tos = 0;
183 ihdr->id = 0;
184 ip_send_check(ihdr);
185
186 shdr = skb_put(skb, sizeof(*shdr));
187 shdr->version = 0;
188 shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
189 attr->id = stmmac_test_next_id;
190 shdr->id = stmmac_test_next_id++;
191
192 if (attr->size)
193 skb_put(skb, attr->size);
194 if (attr->max_size && (attr->max_size > skb->len))
195 skb_put(skb, attr->max_size - skb->len);
196
197 skb->csum = 0;
198 skb->ip_summed = CHECKSUM_PARTIAL;
199 if (attr->tcp) {
200 thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
201 skb->csum_start = skb_transport_header(skb) - skb->head;
202 skb->csum_offset = offsetof(struct tcphdr, check);
203 } else {
204 udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
205 }
206
207 skb->protocol = htons(ETH_P_IP);
208 skb->pkt_type = PACKET_HOST;
209 skb->dev = priv->dev;
210
211 return skb;
212 }
213
214 static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
215 struct stmmac_packet_attrs *attr)
216 {
217 __be32 ip_src = htonl(attr->ip_src);
218 __be32 ip_dst = htonl(attr->ip_dst);
219 struct sk_buff *skb = NULL;
220
221 skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
222 NULL, attr->src, attr->dst);
223 if (!skb)
224 return NULL;
225
226 skb->pkt_type = PACKET_HOST;
227 skb->dev = priv->dev;
228
229 return skb;
230 }
231
232 struct stmmac_test_priv {
233 struct stmmac_packet_attrs *packet;
234 struct packet_type pt;
235 struct completion comp;
236 int double_vlan;
237 int vlan_id;
238 int ok;
239 };
240
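/*
 * packet_type handler for the generic loopback tests: check the received
 * frame against the expected attributes (MAC addresses, L4 protocol and
 * destination port, magic and per-test id) and signal the waiter on match.
 */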
241 static int stmmac_test_loopback_validate(struct sk_buff *skb,
242 struct net_device *ndev,
243 struct packet_type *pt,
244 struct net_device *orig_ndev)
245 {
246 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
247 struct stmmachdr *shdr;
248 struct ethhdr *ehdr;
249 struct udphdr *uhdr;
250 struct tcphdr *thdr;
251 struct iphdr *ihdr;
252
253 skb = skb_unshare(skb, GFP_ATOMIC);
254 if (!skb)
255 goto out;
256
257 if (skb_linearize(skb))
258 goto out;
259 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
260 goto out;
261
262 ehdr = (struct ethhdr *)skb_mac_header(skb);
263 if (tpriv->packet->dst) {
264 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
265 goto out;
266 }
267 if (tpriv->packet->sarc) {
268 if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
269 goto out;
270 } else if (tpriv->packet->src) {
271 if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
272 goto out;
273 }
274
275 ihdr = ip_hdr(skb);
276 if (tpriv->double_vlan)
277 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
278
279 if (tpriv->packet->tcp) {
280 if (ihdr->protocol != IPPROTO_TCP)
281 goto out;
282
283 thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
284 if (thdr->dest != htons(tpriv->packet->dport))
285 goto out;
286
287 shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
288 } else {
289 if (ihdr->protocol != IPPROTO_UDP)
290 goto out;
291
292 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
293 if (uhdr->dest != htons(tpriv->packet->dport))
294 goto out;
295
296 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
297 }
298
299 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
300 goto out;
301 if (tpriv->packet->exp_hash && !skb->hash)
302 goto out;
303 if (tpriv->packet->id != shdr->id)
304 goto out;
305
306 tpriv->ok = true;
307 complete(&tpriv->comp);
308 out:
309 kfree_skb(skb);
310 return 0;
311 }
312
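/*
 * Common loopback helper: register the validation handler (unless
 * attr->dont_wait), transmit one test frame on attr->queue_mapping and wait
 * up to attr->timeout for it to be received and validated.
 */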
313 static int __stmmac_test_loopback(struct stmmac_priv *priv,
314 struct stmmac_packet_attrs *attr)
315 {
316 struct stmmac_test_priv *tpriv;
317 struct sk_buff *skb = NULL;
318 int ret = 0;
319
320 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
321 if (!tpriv)
322 return -ENOMEM;
323
324 tpriv->ok = false;
325 init_completion(&tpriv->comp);
326
327 tpriv->pt.type = htons(ETH_P_IP);
328 tpriv->pt.func = stmmac_test_loopback_validate;
329 tpriv->pt.dev = priv->dev;
330 tpriv->pt.af_packet_priv = tpriv;
331 tpriv->packet = attr;
332
333 if (!attr->dont_wait)
334 dev_add_pack(&tpriv->pt);
335
336 skb = stmmac_test_get_udp_skb(priv, attr);
337 if (!skb) {
338 ret = -ENOMEM;
339 goto cleanup;
340 }
341
342 skb_set_queue_mapping(skb, attr->queue_mapping);
343 ret = dev_queue_xmit(skb);
344 if (ret)
345 goto cleanup;
346
347 if (attr->dont_wait)
348 goto cleanup;
349
350 if (!attr->timeout)
351 attr->timeout = STMMAC_LB_TIMEOUT;
352
353 wait_for_completion_timeout(&tpriv->comp, attr->timeout);
354 ret = tpriv->ok ? 0 : -ETIMEDOUT;
355
356 cleanup:
357 if (!attr->dont_wait)
358 dev_remove_pack(&tpriv->pt);
359 kfree(tpriv);
360 return ret;
361 }
362
363 static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
364 {
365 struct stmmac_packet_attrs attr = { };
366
367 attr.dst = priv->dev->dev_addr;
368 return __stmmac_test_loopback(priv, &attr);
369 }
370
371 static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
372 {
373 struct stmmac_packet_attrs attr = { };
374 int ret;
375
376 if (!priv->dev->phydev)
377 return -EBUSY;
378
379 ret = phy_loopback(priv->dev->phydev, true);
380 if (ret)
381 return ret;
382
383 attr.dst = priv->dev->dev_addr;
384 ret = __stmmac_test_loopback(priv, &attr);
385
386 phy_loopback(priv->dev->phydev, false);
387 return ret;
388 }
389
390 static int stmmac_test_mmc(struct stmmac_priv *priv)
391 {
392 struct stmmac_counters initial, final;
393 int ret;
394
395 memset(&initial, 0, sizeof(initial));
396 memset(&final, 0, sizeof(final));
397
398 if (!priv->dma_cap.rmon)
399 return -EOPNOTSUPP;
400
401 /* Save previous results into internal struct */
402 stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
403
404 ret = stmmac_test_mac_loopback(priv);
405 if (ret)
406 return ret;
407
408 /* These will be loopback results so no need to save them */
409 stmmac_mmc_read(priv, priv->mmcaddr, &final);
410
411 /*
412 * The number of MMC counters available depends on the HW configuration,
413 * so we just use this one to validate the feature. Hopefully there is
414 * no version without this counter.
415 */
416 if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
417 return -EINVAL;
418
419 return 0;
420 }
421
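/*
 * EEE test: run a MAC loopback, then poll the LPI IRQ counters for up to
 * one second and require that both the enter-LPI and exit-LPI counts have
 * increased.
 */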
422 static int stmmac_test_eee(struct stmmac_priv *priv)
423 {
424 struct stmmac_extra_stats *initial, *final;
425 int retries = 10;
426 int ret;
427
428 if (!priv->dma_cap.eee || !priv->eee_active)
429 return -EOPNOTSUPP;
430
431 initial = kzalloc(sizeof(*initial), GFP_KERNEL);
432 if (!initial)
433 return -ENOMEM;
434
435 final = kzalloc(sizeof(*final), GFP_KERNEL);
436 if (!final) {
437 ret = -ENOMEM;
438 goto out_free_initial;
439 }
440
441 memcpy(initial, &priv->xstats, sizeof(*initial));
442
443 ret = stmmac_test_mac_loopback(priv);
444 if (ret)
445 goto out_free_final;
446
447 /* There is no traffic on the line, so sooner or later the TX path will go LPI */
448 while (--retries) {
449 memcpy(final, &priv->xstats, sizeof(*final));
450
451 if (final->irq_tx_path_in_lpi_mode_n >
452 initial->irq_tx_path_in_lpi_mode_n)
453 break;
454 msleep(100);
455 }
456
457 if (!retries) {
458 ret = -ETIMEDOUT;
459 goto out_free_final;
460 }
461
462 if (final->irq_tx_path_in_lpi_mode_n <=
463 initial->irq_tx_path_in_lpi_mode_n) {
464 ret = -EINVAL;
465 goto out_free_final;
466 }
467
468 if (final->irq_tx_path_exit_lpi_mode_n <=
469 initial->irq_tx_path_exit_lpi_mode_n) {
470 ret = -EINVAL;
471 goto out_free_final;
472 }
473
474 out_free_final:
475 kfree(final);
476 out_free_initial:
477 kfree(initial);
478 return ret;
479 }
480
481 static int stmmac_filter_check(struct stmmac_priv *priv)
482 {
483 if (!(priv->dev->flags & IFF_PROMISC))
484 return 0;
485
486 netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
487 return -EOPNOTSUPP;
488 }
489
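/*
 * Replicate the MAC's multicast hash (bit-reversed CRC-32 of the address,
 * truncated to mcast_bits_log2 bits) and return false if the candidate
 * address would land in a hash bin already used by a configured multicast
 * address.
 */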
490 static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
491 {
492 int mc_offset = 32 - priv->hw->mcast_bits_log2;
493 struct netdev_hw_addr *ha;
494 u32 hash, hash_nr;
495
496 /* First compute the hash for desired addr */
497 hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
498 hash_nr = hash >> 5;
499 hash = 1 << (hash & 0x1f);
500
501 /* Now, check if it collides with any existing one */
502 netdev_for_each_mc_addr(ha, priv->dev) {
503 u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
504 if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
505 return false;
506 }
507
508 /* No collisions, address is good to go */
509 return true;
510 }
511
512 static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
513 {
514 struct netdev_hw_addr *ha;
515
516 /* Check if it collides with any existing one */
517 netdev_for_each_uc_addr(ha, priv->dev) {
518 if (!memcmp(ha->addr, addr, ETH_ALEN))
519 return false;
520 }
521
522 /* No collisions, address is good to go */
523 return true;
524 }
525
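/*
 * Hash filter test: gd_addr is added to the multicast list and must be
 * received; bd_addr is chosen so that it does not collide in the hash
 * table and must be dropped.
 */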
526 static int stmmac_test_hfilt(struct stmmac_priv *priv)
527 {
528 unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
529 unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
530 struct stmmac_packet_attrs attr = { };
531 int ret, tries = 256;
532
533 ret = stmmac_filter_check(priv);
534 if (ret)
535 return ret;
536
537 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
538 return -EOPNOTSUPP;
539
540 while (--tries) {
541 /* We only need to check the bd_addr for collisions */
542 bd_addr[ETH_ALEN - 1] = tries;
543 if (stmmac_hash_check(priv, bd_addr))
544 break;
545 }
546
547 if (!tries)
548 return -EOPNOTSUPP;
549
550 ret = dev_mc_add(priv->dev, gd_addr);
551 if (ret)
552 return ret;
553
554 attr.dst = gd_addr;
555
556 /* Shall receive packet */
557 ret = __stmmac_test_loopback(priv, &attr);
558 if (ret)
559 goto cleanup;
560
561 attr.dst = bd_addr;
562
563 /* Shall NOT receive packet */
564 ret = __stmmac_test_loopback(priv, &attr);
565 ret = ret ? 0 : -EINVAL;
566
567 cleanup:
568 dev_mc_del(priv->dev, gd_addr);
569 return ret;
570 }
571
572 static int stmmac_test_pfilt(struct stmmac_priv *priv)
573 {
574 unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
575 unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
576 struct stmmac_packet_attrs attr = { };
577 int ret, tries = 256;
578
579 if (stmmac_filter_check(priv))
580 return -EOPNOTSUPP;
581 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
582 return -EOPNOTSUPP;
583
584 while (--tries) {
585 /* We only need to check the bd_addr for collisions */
586 bd_addr[ETH_ALEN - 1] = tries;
587 if (stmmac_perfect_check(priv, bd_addr))
588 break;
589 }
590
591 if (!tries)
592 return -EOPNOTSUPP;
593
594 ret = dev_uc_add(priv->dev, gd_addr);
595 if (ret)
596 return ret;
597
598 attr.dst = gd_addr;
599
600 /* Shall receive packet */
601 ret = __stmmac_test_loopback(priv, &attr);
602 if (ret)
603 goto cleanup;
604
605 attr.dst = bd_addr;
606
607 /* Shall NOT receive packet */
608 ret = __stmmac_test_loopback(priv, &attr);
609 ret = ret ? 0 : -EINVAL;
610
611 cleanup:
612 dev_uc_del(priv->dev, gd_addr);
613 return ret;
614 }
615
616 static int stmmac_test_mcfilt(struct stmmac_priv *priv)
617 {
618 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
619 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
620 struct stmmac_packet_attrs attr = { };
621 int ret, tries = 256;
622
623 if (stmmac_filter_check(priv))
624 return -EOPNOTSUPP;
625 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
626 return -EOPNOTSUPP;
627
628 while (--tries) {
629 /* We only need to check the mc_addr for collisions */
630 mc_addr[ETH_ALEN - 1] = tries;
631 if (stmmac_hash_check(priv, mc_addr))
632 break;
633 }
634
635 if (!tries)
636 return -EOPNOTSUPP;
637
638 ret = dev_uc_add(priv->dev, uc_addr);
639 if (ret)
640 return ret;
641
642 attr.dst = uc_addr;
643
644 /* Shall receive packet */
645 ret = __stmmac_test_loopback(priv, &attr);
646 if (ret)
647 goto cleanup;
648
649 attr.dst = mc_addr;
650
651 /* Shall NOT receive packet */
652 ret = __stmmac_test_loopback(priv, &attr);
653 ret = ret ? 0 : -EINVAL;
654
655 cleanup:
656 dev_uc_del(priv->dev, uc_addr);
657 return ret;
658 }
659
660 static int stmmac_test_ucfilt(struct stmmac_priv *priv)
661 {
662 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
663 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
664 struct stmmac_packet_attrs attr = { };
665 int ret, tries = 256;
666
667 if (stmmac_filter_check(priv))
668 return -EOPNOTSUPP;
669 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
670 return -EOPNOTSUPP;
671
672 while (--tries) {
673 /* We only need to check the uc_addr for collisions */
674 uc_addr[ETH_ALEN - 1] = tries;
675 if (stmmac_perfect_check(priv, uc_addr))
676 break;
677 }
678
679 if (!tries)
680 return -EOPNOTSUPP;
681
682 ret = dev_mc_add(priv->dev, mc_addr);
683 if (ret)
684 return ret;
685
686 attr.dst = mc_addr;
687
688 /* Shall receive packet */
689 ret = __stmmac_test_loopback(priv, &attr);
690 if (ret)
691 goto cleanup;
692
693 attr.dst = uc_addr;
694
695 /* Shall NOT receive packet */
696 ret = __stmmac_test_loopback(priv, &attr);
697 ret = ret ? 0 : -EINVAL;
698
699 cleanup:
700 dev_mc_del(priv->dev, mc_addr);
701 return ret;
702 }
703
704 static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
705 struct net_device *ndev,
706 struct packet_type *pt,
707 struct net_device *orig_ndev)
708 {
709 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
710 struct ethhdr *ehdr;
711
712 ehdr = (struct ethhdr *)skb_mac_header(skb);
713 if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
714 goto out;
715 if (ehdr->h_proto != htons(ETH_P_PAUSE))
716 goto out;
717
718 tpriv->ok = true;
719 complete(&tpriv->comp);
720 out:
721 kfree_skb(skb);
722 return 0;
723 }
724
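/*
 * Flow control test: stop the RX DMA channels, queue enough 1400-byte
 * frames to fill the RX FIFO so that the MAC is expected to emit a PAUSE
 * frame, then restart RX and wait for that PAUSE frame to reach the
 * validation callback.
 */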
725 static int stmmac_test_flowctrl(struct stmmac_priv *priv)
726 {
727 unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
728 struct phy_device *phydev = priv->dev->phydev;
729 u32 rx_cnt = priv->plat->rx_queues_to_use;
730 struct stmmac_test_priv *tpriv;
731 unsigned int pkt_count;
732 int i, ret = 0;
733
734 if (!phydev || (!phydev->pause && !phydev->asym_pause))
735 return -EOPNOTSUPP;
736
737 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
738 if (!tpriv)
739 return -ENOMEM;
740
741 tpriv->ok = false;
742 init_completion(&tpriv->comp);
743 tpriv->pt.type = htons(ETH_P_PAUSE);
744 tpriv->pt.func = stmmac_test_flowctrl_validate;
745 tpriv->pt.dev = priv->dev;
746 tpriv->pt.af_packet_priv = tpriv;
747 dev_add_pack(&tpriv->pt);
748
749 /* Compute minimum number of packets to make FIFO full */
750 pkt_count = priv->plat->rx_fifo_size;
751 if (!pkt_count)
752 pkt_count = priv->dma_cap.rx_fifo_size;
753 pkt_count /= 1400;
754 pkt_count *= 2;
755
756 for (i = 0; i < rx_cnt; i++)
757 stmmac_stop_rx(priv, priv->ioaddr, i);
758
759 ret = dev_set_promiscuity(priv->dev, 1);
760 if (ret)
761 goto cleanup;
762
763 ret = dev_mc_add(priv->dev, paddr);
764 if (ret)
765 goto cleanup;
766
767 for (i = 0; i < pkt_count; i++) {
768 struct stmmac_packet_attrs attr = { };
769
770 attr.dst = priv->dev->dev_addr;
771 attr.dont_wait = true;
772 attr.size = 1400;
773
774 ret = __stmmac_test_loopback(priv, &attr);
775 if (ret)
776 goto cleanup;
777 if (tpriv->ok)
778 break;
779 }
780
781 /* Wait for some time in case RX Watchdog is enabled */
782 msleep(200);
783
784 for (i = 0; i < rx_cnt; i++) {
785 struct stmmac_channel *ch = &priv->channel[i];
786 u32 tail;
787
788 tail = priv->rx_queue[i].dma_rx_phy +
789 (DMA_RX_SIZE * sizeof(struct dma_desc));
790
791 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
792 stmmac_start_rx(priv, priv->ioaddr, i);
793
794 local_bh_disable();
795 napi_reschedule(&ch->rx_napi);
796 local_bh_enable();
797 }
798
799 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
800 ret = tpriv->ok ? 0 : -ETIMEDOUT;
801
802 cleanup:
803 dev_mc_del(priv->dev, paddr);
804 dev_set_promiscuity(priv->dev, -1);
805 dev_remove_pack(&tpriv->pt);
806 kfree(tpriv);
807 return ret;
808 }
809
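/*
 * RSS test: send a UDP frame with non-zero ports and require that the
 * looped-back skb carries a hardware RX hash (checked via exp_hash in the
 * validation callback).
 */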
810 static int stmmac_test_rss(struct stmmac_priv *priv)
811 {
812 struct stmmac_packet_attrs attr = { };
813
814 if (!priv->dma_cap.rssen || !priv->rss.enable)
815 return -EOPNOTSUPP;
816
817 attr.dst = priv->dev->dev_addr;
818 attr.exp_hash = true;
819 attr.sport = 0x321;
820 attr.dport = 0x123;
821
822 return __stmmac_test_loopback(priv, &attr);
823 }
824
825 static int stmmac_test_vlan_validate(struct sk_buff *skb,
826 struct net_device *ndev,
827 struct packet_type *pt,
828 struct net_device *orig_ndev)
829 {
830 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
831 struct stmmachdr *shdr;
832 struct ethhdr *ehdr;
833 struct udphdr *uhdr;
834 struct iphdr *ihdr;
835 u16 proto;
836
837 proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
838
839 skb = skb_unshare(skb, GFP_ATOMIC);
840 if (!skb)
841 goto out;
842
843 if (skb_linearize(skb))
844 goto out;
845 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
846 goto out;
847 if (tpriv->vlan_id) {
848 if (skb->vlan_proto != htons(proto))
849 goto out;
850 if (skb->vlan_tci != tpriv->vlan_id)
851 goto out;
852 }
853
854 ehdr = (struct ethhdr *)skb_mac_header(skb);
855 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
856 goto out;
857
858 ihdr = ip_hdr(skb);
859 if (tpriv->double_vlan)
860 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
861 if (ihdr->protocol != IPPROTO_UDP)
862 goto out;
863
864 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
865 if (uhdr->dest != htons(tpriv->packet->dport))
866 goto out;
867
868 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
869 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
870 goto out;
871
872 tpriv->ok = true;
873 complete(&tpriv->comp);
874
875 out:
876 kfree_skb(skb);
877 return 0;
878 }
879
880 static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
881 {
882 struct stmmac_packet_attrs attr = { };
883 struct stmmac_test_priv *tpriv;
884 struct sk_buff *skb = NULL;
885 int ret = 0, i;
886
887 if (!priv->dma_cap.vlhash)
888 return -EOPNOTSUPP;
889
890 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
891 if (!tpriv)
892 return -ENOMEM;
893
894 tpriv->ok = false;
895 init_completion(&tpriv->comp);
896
897 tpriv->pt.type = htons(ETH_P_IP);
898 tpriv->pt.func = stmmac_test_vlan_validate;
899 tpriv->pt.dev = priv->dev;
900 tpriv->pt.af_packet_priv = tpriv;
901 tpriv->packet = &attr;
902
903 /*
904 * As we use HASH filtering, false positives may appear. This is a
905 * specially chosen ID so that adjacent IDs (+4) have different
906 * HASH values.
907 */
908 tpriv->vlan_id = 0x123;
909 dev_add_pack(&tpriv->pt);
910
911 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
912 if (ret)
913 goto cleanup;
914
915 for (i = 0; i < 4; i++) {
916 attr.vlan = 1;
917 attr.vlan_id_out = tpriv->vlan_id + i;
918 attr.dst = priv->dev->dev_addr;
919 attr.sport = 9;
920 attr.dport = 9;
921
922 skb = stmmac_test_get_udp_skb(priv, &attr);
923 if (!skb) {
924 ret = -ENOMEM;
925 goto vlan_del;
926 }
927
928 skb_set_queue_mapping(skb, 0);
929 ret = dev_queue_xmit(skb);
930 if (ret)
931 goto vlan_del;
932
933 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
934 ret = tpriv->ok ? 0 : -ETIMEDOUT;
935 if (ret && !i) {
936 goto vlan_del;
937 } else if (!ret && i) {
938 ret = -EINVAL;
939 goto vlan_del;
940 } else {
941 ret = 0;
942 }
943
944 tpriv->ok = false;
945 }
946
947 vlan_del:
948 vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
949 cleanup:
950 dev_remove_pack(&tpriv->pt);
951 kfree(tpriv);
952 return ret;
953 }
954
955 static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
956 {
957 struct stmmac_packet_attrs attr = { };
958 struct stmmac_test_priv *tpriv;
959 struct sk_buff *skb = NULL;
960 int ret = 0, i;
961
962 if (!priv->dma_cap.vlhash)
963 return -EOPNOTSUPP;
964
965 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
966 if (!tpriv)
967 return -ENOMEM;
968
969 tpriv->ok = false;
970 tpriv->double_vlan = true;
971 init_completion(&tpriv->comp);
972
973 tpriv->pt.type = htons(ETH_P_8021Q);
974 tpriv->pt.func = stmmac_test_vlan_validate;
975 tpriv->pt.dev = priv->dev;
976 tpriv->pt.af_packet_priv = tpriv;
977 tpriv->packet = &attr;
978
979 /*
980 * As we use HASH filtering, false positives may appear. This is a
981 * specially chosen ID so that adjacent IDs (+4) have different
982 * HASH values.
983 */
984 tpriv->vlan_id = 0x123;
985 dev_add_pack(&tpriv->pt);
986
987 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
988 if (ret)
989 goto cleanup;
990
991 for (i = 0; i < 4; i++) {
992 attr.vlan = 2;
993 attr.vlan_id_out = tpriv->vlan_id + i;
994 attr.dst = priv->dev->dev_addr;
995 attr.sport = 9;
996 attr.dport = 9;
997
998 skb = stmmac_test_get_udp_skb(priv, &attr);
999 if (!skb) {
1000 ret = -ENOMEM;
1001 goto vlan_del;
1002 }
1003
1004 skb_set_queue_mapping(skb, 0);
1005 ret = dev_queue_xmit(skb);
1006 if (ret)
1007 goto vlan_del;
1008
1009 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1010 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1011 if (ret && !i) {
1012 goto vlan_del;
1013 } else if (!ret && i) {
1014 ret = -EINVAL;
1015 goto vlan_del;
1016 } else {
1017 ret = 0;
1018 }
1019
1020 tpriv->ok = false;
1021 }
1022
1023 vlan_del:
1024 vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1025 cleanup:
1026 dev_remove_pack(&tpriv->pt);
1027 kfree(tpriv);
1028 return ret;
1029 }
1030
1031 #ifdef CONFIG_NET_CLS_ACT
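/*
 * Flexible RX Parser test: install a cls_u32 drop rule matching 0xdeadbeef
 * at frame offset 6 (the first four bytes of the source MAC address) and
 * verify that a frame sent from that address is no longer received.
 */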
1032 static int stmmac_test_rxp(struct stmmac_priv *priv)
1033 {
1034 unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
1035 struct tc_cls_u32_offload cls_u32 = { };
1036 struct stmmac_packet_attrs attr = { };
1037 struct tc_action **actions, *act;
1038 struct tc_u32_sel *sel;
1039 struct tcf_exts *exts;
1040 int ret, i, nk = 1;
1041
1042 if (!tc_can_offload(priv->dev))
1043 return -EOPNOTSUPP;
1044 if (!priv->dma_cap.frpsel)
1045 return -EOPNOTSUPP;
1046
1047 sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
1048 if (!sel)
1049 return -ENOMEM;
1050
1051 exts = kzalloc(sizeof(*exts), GFP_KERNEL);
1052 if (!exts) {
1053 ret = -ENOMEM;
1054 goto cleanup_sel;
1055 }
1056
1057 actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
1058 if (!actions) {
1059 ret = -ENOMEM;
1060 goto cleanup_exts;
1061 }
1062
1063 act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
1064 if (!act) {
1065 ret = -ENOMEM;
1066 goto cleanup_actions;
1067 }
1068
1069 cls_u32.command = TC_CLSU32_NEW_KNODE;
1070 cls_u32.common.chain_index = 0;
1071 cls_u32.common.protocol = htons(ETH_P_ALL);
1072 cls_u32.knode.exts = exts;
1073 cls_u32.knode.sel = sel;
1074 cls_u32.knode.handle = 0x123;
1075
1076 exts->nr_actions = nk;
1077 exts->actions = actions;
1078 for (i = 0; i < nk; i++) {
1079 struct tcf_gact *gact = to_gact(&act[i]);
1080
1081 actions[i] = &act[i];
1082 gact->tcf_action = TC_ACT_SHOT;
1083 }
1084
1085 sel->nkeys = nk;
1086 sel->offshift = 0;
1087 sel->keys[0].off = 6;
1088 sel->keys[0].val = htonl(0xdeadbeef);
1089 sel->keys[0].mask = ~0x0;
1090
1091 ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1092 if (ret)
1093 goto cleanup_act;
1094
1095 attr.dst = priv->dev->dev_addr;
1096 attr.src = addr;
1097
1098 ret = __stmmac_test_loopback(priv, &attr);
1099 ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
1100
1101 cls_u32.command = TC_CLSU32_DELETE_KNODE;
1102 stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1103
1104 cleanup_act:
1105 kfree(act);
1106 cleanup_actions:
1107 kfree(actions);
1108 cleanup_exts:
1109 kfree(exts);
1110 cleanup_sel:
1111 kfree(sel);
1112 return ret;
1113 }
1114 #else
1115 static int stmmac_test_rxp(struct stmmac_priv *priv)
1116 {
1117 return -EOPNOTSUPP;
1118 }
1119 #endif
1120
1121 static int stmmac_test_desc_sai(struct stmmac_priv *priv)
1122 {
1123 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1124 struct stmmac_packet_attrs attr = { };
1125 int ret;
1126
1127 if (!priv->dma_cap.vlins)
1128 return -EOPNOTSUPP;
1129
1130 attr.remove_sa = true;
1131 attr.sarc = true;
1132 attr.src = src;
1133 attr.dst = priv->dev->dev_addr;
1134
1135 priv->sarc_type = 0x1;
1136
1137 ret = __stmmac_test_loopback(priv, &attr);
1138
1139 priv->sarc_type = 0x0;
1140 return ret;
1141 }
1142
1143 static int stmmac_test_desc_sar(struct stmmac_priv *priv)
1144 {
1145 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1146 struct stmmac_packet_attrs attr = { };
1147 int ret;
1148
1149 if (!priv->dma_cap.vlins)
1150 return -EOPNOTSUPP;
1151
1152 attr.sarc = true;
1153 attr.src = src;
1154 attr.dst = priv->dev->dev_addr;
1155
1156 priv->sarc_type = 0x2;
1157
1158 ret = __stmmac_test_loopback(priv, &attr);
1159
1160 priv->sarc_type = 0x0;
1161 return ret;
1162 }
1163
1164 static int stmmac_test_reg_sai(struct stmmac_priv *priv)
1165 {
1166 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1167 struct stmmac_packet_attrs attr = { };
1168 int ret;
1169
1170 if (!priv->dma_cap.vlins)
1171 return -EOPNOTSUPP;
1172
1173 attr.remove_sa = true;
1174 attr.sarc = true;
1175 attr.src = src;
1176 attr.dst = priv->dev->dev_addr;
1177
1178 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
1179 return -EOPNOTSUPP;
1180
1181 ret = __stmmac_test_loopback(priv, &attr);
1182
1183 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
1184 return ret;
1185 }
1186
1187 static int stmmac_test_reg_sar(struct stmmac_priv *priv)
1188 {
1189 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1190 struct stmmac_packet_attrs attr = { };
1191 int ret;
1192
1193 if (!priv->dma_cap.vlins)
1194 return -EOPNOTSUPP;
1195
1196 attr.sarc = true;
1197 attr.src = src;
1198 attr.dst = priv->dev->dev_addr;
1199
1200 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
1201 return -EOPNOTSUPP;
1202
1203 ret = __stmmac_test_loopback(priv, &attr);
1204
1205 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
1206 return ret;
1207 }
1208
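/*
 * VLAN TX insertion test: build an untagged UDP frame, request (S-)VLAN tag
 * insertion through the hwaccel VLAN fields and check that the looped-back
 * frame carries the expected tag.
 */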
1209 static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
1210 {
1211 struct stmmac_packet_attrs attr = { };
1212 struct stmmac_test_priv *tpriv;
1213 struct sk_buff *skb = NULL;
1214 int ret = 0;
1215 u16 proto;
1216
1217 if (!priv->dma_cap.vlins)
1218 return -EOPNOTSUPP;
1219
1220 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1221 if (!tpriv)
1222 return -ENOMEM;
1223
1224 proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
1225
1226 tpriv->ok = false;
1227 tpriv->double_vlan = svlan;
1228 init_completion(&tpriv->comp);
1229
1230 tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
1231 tpriv->pt.func = stmmac_test_vlan_validate;
1232 tpriv->pt.dev = priv->dev;
1233 tpriv->pt.af_packet_priv = tpriv;
1234 tpriv->packet = &attr;
1235 tpriv->vlan_id = 0x123;
1236 dev_add_pack(&tpriv->pt);
1237
1238 ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
1239 if (ret)
1240 goto cleanup;
1241
1242 attr.dst = priv->dev->dev_addr;
1243
1244 skb = stmmac_test_get_udp_skb(priv, &attr);
1245 if (!skb) {
1246 ret = -ENOMEM;
1247 goto vlan_del;
1248 }
1249
1250 __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
1251 skb->protocol = htons(proto);
1252
1253 skb_set_queue_mapping(skb, 0);
1254 ret = dev_queue_xmit(skb);
1255 if (ret)
1256 goto vlan_del;
1257
1258 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1259 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1260
1261 vlan_del:
1262 vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
1263 cleanup:
1264 dev_remove_pack(&tpriv->pt);
1265 kfree(tpriv);
1266 return ret;
1267 }
1268
1269 static int stmmac_test_vlanoff(struct stmmac_priv *priv)
1270 {
1271 return stmmac_test_vlanoff_common(priv, false);
1272 }
1273
1274 static int stmmac_test_svlanoff(struct stmmac_priv *priv)
1275 {
1276 if (!priv->dma_cap.dvlan)
1277 return -EOPNOTSUPP;
1278 return stmmac_test_vlanoff_common(priv, true);
1279 }
1280
1281 #ifdef CONFIG_NET_CLS_ACT
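/*
 * L3 filter test helper: build a flow_cls_offload DROP rule on the given
 * IPv4 source/destination addresses and check that a matching frame is
 * received before the rule is installed and dropped afterwards. RSS is
 * temporarily disabled while the rule is under test.
 */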
1282 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1283 u32 dst_mask, u32 src_mask)
1284 {
1285 struct flow_dissector_key_ipv4_addrs key, mask;
1286 unsigned long dummy_cookie = 0xdeadbeef;
1287 struct stmmac_packet_attrs attr = { };
1288 struct flow_dissector *dissector;
1289 struct flow_cls_offload *cls;
1290 struct flow_rule *rule;
1291 int ret;
1292
1293 if (!tc_can_offload(priv->dev))
1294 return -EOPNOTSUPP;
1295 if (!priv->dma_cap.l3l4fnum)
1296 return -EOPNOTSUPP;
1297 if (priv->rss.enable)
1298 stmmac_rss_configure(priv, priv->hw, NULL,
1299 priv->plat->rx_queues_to_use);
1300
1301 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1302 if (!dissector) {
1303 ret = -ENOMEM;
1304 goto cleanup_rss;
1305 }
1306
1307 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
1308 dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
1309
1310 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1311 if (!cls) {
1312 ret = -ENOMEM;
1313 goto cleanup_dissector;
1314 }
1315
1316 cls->common.chain_index = 0;
1317 cls->command = FLOW_CLS_REPLACE;
1318 cls->cookie = dummy_cookie;
1319
1320 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1321 if (!rule) {
1322 ret = -ENOMEM;
1323 goto cleanup_cls;
1324 }
1325
1326 rule->match.dissector = dissector;
1327 rule->match.key = (void *)&key;
1328 rule->match.mask = (void *)&mask;
1329
1330 key.src = htonl(src);
1331 key.dst = htonl(dst);
1332 mask.src = src_mask;
1333 mask.dst = dst_mask;
1334
1335 cls->rule = rule;
1336
1337 rule->action.entries[0].id = FLOW_ACTION_DROP;
1338 rule->action.num_entries = 1;
1339
1340 attr.dst = priv->dev->dev_addr;
1341 attr.ip_dst = dst;
1342 attr.ip_src = src;
1343
1344 /* Shall receive packet */
1345 ret = __stmmac_test_loopback(priv, &attr);
1346 if (ret)
1347 goto cleanup_rule;
1348
1349 ret = stmmac_tc_setup_cls(priv, priv, cls);
1350 if (ret)
1351 goto cleanup_rule;
1352
1353 /* Shall NOT receive packet */
1354 ret = __stmmac_test_loopback(priv, &attr);
1355 ret = ret ? 0 : -EINVAL;
1356
1357 cls->command = FLOW_CLS_DESTROY;
1358 stmmac_tc_setup_cls(priv, priv, cls);
1359 cleanup_rule:
1360 kfree(rule);
1361 cleanup_cls:
1362 kfree(cls);
1363 cleanup_dissector:
1364 kfree(dissector);
1365 cleanup_rss:
1366 if (priv->rss.enable) {
1367 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1368 priv->plat->rx_queues_to_use);
1369 }
1370
1371 return ret;
1372 }
1373 #else
1374 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1375 u32 dst_mask, u32 src_mask)
1376 {
1377 return -EOPNOTSUPP;
1378 }
1379 #endif
1380
1381 static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
1382 {
1383 u32 addr = 0x10203040;
1384
1385 return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
1386 }
1387
1388 static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
1389 {
1390 u32 addr = 0x10203040;
1391
1392 return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
1393 }
1394
1395 #ifdef CONFIG_NET_CLS_ACT
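/*
 * L4 filter test helper: same approach as the L3 tests, but the DROP rule
 * matches on the TCP or UDP source/destination port instead of the IPv4
 * addresses.
 */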
1396 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1397 u32 dst_mask, u32 src_mask, bool udp)
1398 {
1399 struct {
1400 struct flow_dissector_key_basic bkey;
1401 struct flow_dissector_key_ports key;
1402 } __aligned(BITS_PER_LONG / 8) keys;
1403 struct {
1404 struct flow_dissector_key_basic bmask;
1405 struct flow_dissector_key_ports mask;
1406 } __aligned(BITS_PER_LONG / 8) masks;
1407 unsigned long dummy_cookie = 0xdeadbeef;
1408 struct stmmac_packet_attrs attr = { };
1409 struct flow_dissector *dissector;
1410 struct flow_cls_offload *cls;
1411 struct flow_rule *rule;
1412 int ret;
1413
1414 if (!tc_can_offload(priv->dev))
1415 return -EOPNOTSUPP;
1416 if (!priv->dma_cap.l3l4fnum)
1417 return -EOPNOTSUPP;
1418 if (priv->rss.enable)
1419 stmmac_rss_configure(priv, priv->hw, NULL,
1420 priv->plat->rx_queues_to_use);
1421
1422 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1423 if (!dissector) {
1424 ret = -ENOMEM;
1425 goto cleanup_rss;
1426 }
1427
1428 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
1429 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
1430 dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
1431 dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
1432
1433 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1434 if (!cls) {
1435 ret = -ENOMEM;
1436 goto cleanup_dissector;
1437 }
1438
1439 cls->common.chain_index = 0;
1440 cls->command = FLOW_CLS_REPLACE;
1441 cls->cookie = dummy_cookie;
1442
1443 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1444 if (!rule) {
1445 ret = -ENOMEM;
1446 goto cleanup_cls;
1447 }
1448
1449 rule->match.dissector = dissector;
1450 rule->match.key = (void *)&keys;
1451 rule->match.mask = (void *)&masks;
1452
1453 keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
1454 keys.key.src = htons(src);
1455 keys.key.dst = htons(dst);
1456 masks.mask.src = src_mask;
1457 masks.mask.dst = dst_mask;
1458
1459 cls->rule = rule;
1460
1461 rule->action.entries[0].id = FLOW_ACTION_DROP;
1462 rule->action.num_entries = 1;
1463
1464 attr.dst = priv->dev->dev_addr;
1465 attr.tcp = !udp;
1466 attr.sport = src;
1467 attr.dport = dst;
1468 attr.ip_dst = 0;
1469
1470 /* Shall receive packet */
1471 ret = __stmmac_test_loopback(priv, &attr);
1472 if (ret)
1473 goto cleanup_rule;
1474
1475 ret = stmmac_tc_setup_cls(priv, priv, cls);
1476 if (ret)
1477 goto cleanup_rule;
1478
1479 /* Shall NOT receive packet */
1480 ret = __stmmac_test_loopback(priv, &attr);
1481 ret = ret ? 0 : -EINVAL;
1482
1483 cls->command = FLOW_CLS_DESTROY;
1484 stmmac_tc_setup_cls(priv, priv, cls);
1485 cleanup_rule:
1486 kfree(rule);
1487 cleanup_cls:
1488 kfree(cls);
1489 cleanup_dissector:
1490 kfree(dissector);
1491 cleanup_rss:
1492 if (priv->rss.enable) {
1493 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1494 priv->plat->rx_queues_to_use);
1495 }
1496
1497 return ret;
1498 }
1499 #else
1500 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1501 u32 dst_mask, u32 src_mask, bool udp)
1502 {
1503 return -EOPNOTSUPP;
1504 }
1505 #endif
1506
1507 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
1508 {
1509 u16 dummy_port = 0x123;
1510
1511 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
1512 }
1513
1514 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
1515 {
1516 u16 dummy_port = 0x123;
1517
1518 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
1519 }
1520
1521 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
1522 {
1523 u16 dummy_port = 0x123;
1524
1525 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
1526 }
1527
1528 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
1529 {
1530 u16 dummy_port = 0x123;
1531
1532 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
1533 }
1534
1535 static int stmmac_test_arp_validate(struct sk_buff *skb,
1536 struct net_device *ndev,
1537 struct packet_type *pt,
1538 struct net_device *orig_ndev)
1539 {
1540 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
1541 struct ethhdr *ehdr;
1542 struct arphdr *ahdr;
1543
1544 ehdr = (struct ethhdr *)skb_mac_header(skb);
1545 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
1546 goto out;
1547
1548 ahdr = arp_hdr(skb);
1549 if (ahdr->ar_op != htons(ARPOP_REPLY))
1550 goto out;
1551
1552 tpriv->ok = true;
1553 complete(&tpriv->comp);
1554 out:
1555 kfree_skb(skb);
1556 return 0;
1557 }
1558
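/*
 * ARP offload test: program ip_addr into the ARP offload engine, transmit
 * an ARP request for that address and expect the hardware to generate the
 * ARP reply that the validation callback is waiting for.
 */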
1559 static int stmmac_test_arpoffload(struct stmmac_priv *priv)
1560 {
1561 unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
1562 unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1563 struct stmmac_packet_attrs attr = { };
1564 struct stmmac_test_priv *tpriv;
1565 struct sk_buff *skb = NULL;
1566 u32 ip_addr = 0xdeadcafe;
1567 u32 ip_src = 0xdeadbeef;
1568 int ret;
1569
1570 if (!priv->dma_cap.arpoffsel)
1571 return -EOPNOTSUPP;
1572
1573 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1574 if (!tpriv)
1575 return -ENOMEM;
1576
1577 tpriv->ok = false;
1578 init_completion(&tpriv->comp);
1579
1580 tpriv->pt.type = htons(ETH_P_ARP);
1581 tpriv->pt.func = stmmac_test_arp_validate;
1582 tpriv->pt.dev = priv->dev;
1583 tpriv->pt.af_packet_priv = tpriv;
1584 tpriv->packet = &attr;
1585 dev_add_pack(&tpriv->pt);
1586
1587 attr.src = src;
1588 attr.ip_src = ip_src;
1589 attr.dst = dst;
1590 attr.ip_dst = ip_addr;
1591
1592 skb = stmmac_test_get_arp_skb(priv, &attr);
1593 if (!skb) {
1594 ret = -ENOMEM;
1595 goto cleanup;
1596 }
1597
1598 ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
1599 if (ret)
1600 goto cleanup;
1601
1602 ret = dev_set_promiscuity(priv->dev, 1);
1603 if (ret)
1604 goto cleanup;
1605
1606 skb_set_queue_mapping(skb, 0);
1607 ret = dev_queue_xmit(skb);
1608 if (ret)
1609 goto cleanup_promisc;
1610
1611 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1612 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1613
1614 cleanup_promisc:
1615 dev_set_promiscuity(priv->dev, -1);
1616 cleanup:
1617 stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
1618 dev_remove_pack(&tpriv->pt);
1619 kfree(tpriv);
1620 return ret;
1621 }
1622
1623 static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
1624 {
1625 struct stmmac_packet_attrs attr = { };
1626 int size = priv->dma_buf_sz;
1627
1628 attr.dst = priv->dev->dev_addr;
1629 attr.max_size = size - ETH_FCS_LEN;
1630 attr.queue_mapping = queue;
1631
1632 return __stmmac_test_loopback(priv, &attr);
1633 }
1634
1635 static int stmmac_test_jumbo(struct stmmac_priv *priv)
1636 {
1637 return __stmmac_test_jumbo(priv, 0);
1638 }
1639
1640 static int stmmac_test_mjumbo(struct stmmac_priv *priv)
1641 {
1642 u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
1643 int ret;
1644
1645 if (tx_cnt <= 1)
1646 return -EOPNOTSUPP;
1647
1648 for (chan = 0; chan < tx_cnt; chan++) {
1649 ret = __stmmac_test_jumbo(priv, chan);
1650 if (ret)
1651 return ret;
1652 }
1653
1654 return 0;
1655 }
1656
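/*
 * Split Header test: run one UDP and one TCP loopback and require that the
 * rx_split_hdr_pkt_n counter increased after each of them, which should
 * only happen when header/payload splitting is active.
 */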
1657 static int stmmac_test_sph(struct stmmac_priv *priv)
1658 {
1659 unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
1660 struct stmmac_packet_attrs attr = { };
1661 int ret;
1662
1663 if (!priv->sph)
1664 return -EOPNOTSUPP;
1665
1666 /* Check for UDP first */
1667 attr.dst = priv->dev->dev_addr;
1668 attr.tcp = false;
1669
1670 ret = __stmmac_test_loopback(priv, &attr);
1671 if (ret)
1672 return ret;
1673
1674 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1675 if (cnt_end <= cnt_start)
1676 return -EINVAL;
1677
1678 /* Check for TCP now */
1679 cnt_start = cnt_end;
1680
1681 attr.dst = priv->dev->dev_addr;
1682 attr.tcp = true;
1683
1684 ret = __stmmac_test_loopback(priv, &attr);
1685 if (ret)
1686 return ret;
1687
1688 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1689 if (cnt_end <= cnt_start)
1690 return -EINVAL;
1691
1692 return 0;
1693 }
1694
1695 #define STMMAC_LOOPBACK_NONE 0
1696 #define STMMAC_LOOPBACK_MAC 1
1697 #define STMMAC_LOOPBACK_PHY 2
1698
1699 static const struct stmmac_test {
1700 char name[ETH_GSTRING_LEN];
1701 int lb;
1702 int (*fn)(struct stmmac_priv *priv);
1703 } stmmac_selftests[] = {
1704 {
1705 .name = "MAC Loopback ",
1706 .lb = STMMAC_LOOPBACK_MAC,
1707 .fn = stmmac_test_mac_loopback,
1708 }, {
1709 .name = "PHY Loopback ",
1710 .lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
1711 .fn = stmmac_test_phy_loopback,
1712 }, {
1713 .name = "MMC Counters ",
1714 .lb = STMMAC_LOOPBACK_PHY,
1715 .fn = stmmac_test_mmc,
1716 }, {
1717 .name = "EEE ",
1718 .lb = STMMAC_LOOPBACK_PHY,
1719 .fn = stmmac_test_eee,
1720 }, {
1721 .name = "Hash Filter MC ",
1722 .lb = STMMAC_LOOPBACK_PHY,
1723 .fn = stmmac_test_hfilt,
1724 }, {
1725 .name = "Perfect Filter UC ",
1726 .lb = STMMAC_LOOPBACK_PHY,
1727 .fn = stmmac_test_pfilt,
1728 }, {
1729 .name = "MC Filter ",
1730 .lb = STMMAC_LOOPBACK_PHY,
1731 .fn = stmmac_test_mcfilt,
1732 }, {
1733 .name = "UC Filter ",
1734 .lb = STMMAC_LOOPBACK_PHY,
1735 .fn = stmmac_test_ucfilt,
1736 }, {
1737 .name = "Flow Control ",
1738 .lb = STMMAC_LOOPBACK_PHY,
1739 .fn = stmmac_test_flowctrl,
1740 }, {
1741 .name = "RSS ",
1742 .lb = STMMAC_LOOPBACK_PHY,
1743 .fn = stmmac_test_rss,
1744 }, {
1745 .name = "VLAN Filtering ",
1746 .lb = STMMAC_LOOPBACK_PHY,
1747 .fn = stmmac_test_vlanfilt,
1748 }, {
1749 .name = "Double VLAN Filtering",
1750 .lb = STMMAC_LOOPBACK_PHY,
1751 .fn = stmmac_test_dvlanfilt,
1752 }, {
1753 .name = "Flexible RX Parser ",
1754 .lb = STMMAC_LOOPBACK_PHY,
1755 .fn = stmmac_test_rxp,
1756 }, {
1757 .name = "SA Insertion (desc) ",
1758 .lb = STMMAC_LOOPBACK_PHY,
1759 .fn = stmmac_test_desc_sai,
1760 }, {
1761 .name = "SA Replacement (desc)",
1762 .lb = STMMAC_LOOPBACK_PHY,
1763 .fn = stmmac_test_desc_sar,
1764 }, {
1765 .name = "SA Insertion (reg) ",
1766 .lb = STMMAC_LOOPBACK_PHY,
1767 .fn = stmmac_test_reg_sai,
1768 }, {
1769 .name = "SA Replacement (reg)",
1770 .lb = STMMAC_LOOPBACK_PHY,
1771 .fn = stmmac_test_reg_sar,
1772 }, {
1773 .name = "VLAN TX Insertion ",
1774 .lb = STMMAC_LOOPBACK_PHY,
1775 .fn = stmmac_test_vlanoff,
1776 }, {
1777 .name = "SVLAN TX Insertion ",
1778 .lb = STMMAC_LOOPBACK_PHY,
1779 .fn = stmmac_test_svlanoff,
1780 }, {
1781 .name = "L3 DA Filtering ",
1782 .lb = STMMAC_LOOPBACK_PHY,
1783 .fn = stmmac_test_l3filt_da,
1784 }, {
1785 .name = "L3 SA Filtering ",
1786 .lb = STMMAC_LOOPBACK_PHY,
1787 .fn = stmmac_test_l3filt_sa,
1788 }, {
1789 .name = "L4 DA TCP Filtering ",
1790 .lb = STMMAC_LOOPBACK_PHY,
1791 .fn = stmmac_test_l4filt_da_tcp,
1792 }, {
1793 .name = "L4 SA TCP Filtering ",
1794 .lb = STMMAC_LOOPBACK_PHY,
1795 .fn = stmmac_test_l4filt_sa_tcp,
1796 }, {
1797 .name = "L4 DA UDP Filtering ",
1798 .lb = STMMAC_LOOPBACK_PHY,
1799 .fn = stmmac_test_l4filt_da_udp,
1800 }, {
1801 .name = "L4 SA UDP Filtering ",
1802 .lb = STMMAC_LOOPBACK_PHY,
1803 .fn = stmmac_test_l4filt_sa_udp,
1804 }, {
1805 .name = "ARP Offload ",
1806 .lb = STMMAC_LOOPBACK_PHY,
1807 .fn = stmmac_test_arpoffload,
1808 }, {
1809 .name = "Jumbo Frame ",
1810 .lb = STMMAC_LOOPBACK_PHY,
1811 .fn = stmmac_test_jumbo,
1812 }, {
1813 .name = "Multichannel Jumbo ",
1814 .lb = STMMAC_LOOPBACK_PHY,
1815 .fn = stmmac_test_mjumbo,
1816 }, {
1817 .name = "Split Header ",
1818 .lb = STMMAC_LOOPBACK_PHY,
1819 .fn = stmmac_test_sph,
1820 },
1821 };
1822
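/*
 * ethtool self-test entry point: only offline tests with a valid link are
 * accepted. The carrier is taken down, each test runs with the MAC or PHY
 * loopback requested by its descriptor, and the per-test result is stored
 * in buf[].
 */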
1823 void stmmac_selftest_run(struct net_device *dev,
1824 struct ethtool_test *etest, u64 *buf)
1825 {
1826 struct stmmac_priv *priv = netdev_priv(dev);
1827 int count = stmmac_selftest_get_count(priv);
1828 int carrier = netif_carrier_ok(dev);
1829 int i, ret;
1830
1831 memset(buf, 0, sizeof(*buf) * count);
1832 stmmac_test_next_id = 0;
1833
1834 if (etest->flags != ETH_TEST_FL_OFFLINE) {
1835 netdev_err(priv->dev, "Only offline tests are supported\n");
1836 etest->flags |= ETH_TEST_FL_FAILED;
1837 return;
1838 } else if (!carrier) {
1839 netdev_err(priv->dev, "A valid link is needed to execute the tests\n");
1840 etest->flags |= ETH_TEST_FL_FAILED;
1841 return;
1842 }
1843
1844 /* We don't want extra traffic */
1845 netif_carrier_off(dev);
1846
1847 /* Wait for the queues to drain */
1848 msleep(200);
1849
1850 for (i = 0; i < count; i++) {
1851 ret = 0;
1852
1853 switch (stmmac_selftests[i].lb) {
1854 case STMMAC_LOOPBACK_PHY:
1855 ret = -EOPNOTSUPP;
1856 if (dev->phydev)
1857 ret = phy_loopback(dev->phydev, true);
1858 if (!ret)
1859 break;
1860 /* Fallthrough */
1861 case STMMAC_LOOPBACK_MAC:
1862 ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
1863 break;
1864 case STMMAC_LOOPBACK_NONE:
1865 break;
1866 default:
1867 ret = -EOPNOTSUPP;
1868 break;
1869 }
1870
1871 /*
1872 * The first tests will always be MAC / PHY loopback. If any of
1873 * them is not supported, we abort early.
1874 */
1875 if (ret) {
1876 netdev_err(priv->dev, "Loopback is not supported\n");
1877 etest->flags |= ETH_TEST_FL_FAILED;
1878 break;
1879 }
1880
1881 ret = stmmac_selftests[i].fn(priv);
1882 if (ret && (ret != -EOPNOTSUPP))
1883 etest->flags |= ETH_TEST_FL_FAILED;
1884 buf[i] = ret;
1885
1886 switch (stmmac_selftests[i].lb) {
1887 case STMMAC_LOOPBACK_PHY:
1888 ret = -EOPNOTSUPP;
1889 if (dev->phydev)
1890 ret = phy_loopback(dev->phydev, false);
1891 if (!ret)
1892 break;
1893 /* Fallthrough */
1894 case STMMAC_LOOPBACK_MAC:
1895 stmmac_set_mac_loopback(priv, priv->ioaddr, false);
1896 break;
1897 default:
1898 break;
1899 }
1900 }
1901
1902 /* Restart everything */
1903 if (carrier)
1904 netif_carrier_on(dev);
1905 }
1906
1907 void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
1908 {
1909 u8 *p = data;
1910 int i;
1911
1912 for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
1913 snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
1914 stmmac_selftests[i].name);
1915 p += ETH_GSTRING_LEN;
1916 }
1917 }
1918
1919 int stmmac_selftest_get_count(struct stmmac_priv *priv)
1920 {
1921 return ARRAY_SIZE(stmmac_selftests);
1922 }
1923