1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
2 /*
3 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
4 * Copyright 2020 NXP
5 */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/of_platform.h>
12 #include <linux/of_mdio.h>
13 #include <linux/of_net.h>
14 #include <linux/io.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/icmp.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/udp.h>
21 #include <linux/tcp.h>
22 #include <linux/net.h>
23 #include <linux/skbuff.h>
24 #include <linux/etherdevice.h>
25 #include <linux/if_ether.h>
26 #include <linux/highmem.h>
27 #include <linux/percpu.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/sort.h>
30 #include <linux/phy_fixed.h>
31 #include <linux/bpf.h>
32 #include <linux/bpf_trace.h>
33 #include <soc/fsl/bman.h>
34 #include <soc/fsl/qman.h>
35 #include "fman.h"
36 #include "fman_port.h"
37 #include "mac.h"
38 #include "dpaa_eth.h"
39
40 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
41 * using trace events only need to #include <trace/events/sched.h>
42 */
43 #define CREATE_TRACE_POINTS
44 #include "dpaa_eth_trace.h"
45
46 static int debug = -1;
47 module_param(debug, int, 0444);
48 MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
49
50 static u16 tx_timeout = 1000;
51 module_param(tx_timeout, ushort, 0444);
52 MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
53
54 #define FM_FD_STAT_RX_ERRORS \
55 (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
56 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
57 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
58 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
59 FM_FD_ERR_PRS_HDR_ERR)
60
61 #define FM_FD_STAT_TX_ERRORS \
62 (FM_FD_ERR_UNSUPPORTED_FORMAT | \
63 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
64
65 #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
66 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
67 NETIF_MSG_IFDOWN | NETIF_MSG_HW)
68
69 #define DPAA_INGRESS_CS_THRESHOLD 0x10000000
70 /* Ingress congestion threshold on FMan ports
71 * The size in bytes of the ingress tail-drop threshold on FMan ports.
72 * Traffic piling up above this value will be rejected by QMan and discarded
73 * by FMan.
74 */
75
76 /* Size in bytes of the FQ taildrop threshold */
77 #define DPAA_FQ_TD 0x200000
78
79 #define DPAA_CS_THRESHOLD_1G 0x06000000
80 /* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
81 * The size in bytes of the egress Congestion State notification threshold on
82 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
83 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
84 * and the larger the frame size, the more acute the problem.
85 * So we have to find a balance between these factors:
86 * - avoiding the device staying congested for a prolonged time (risking
87 * that the netdev watchdog fires - see also the tx_timeout module param);
88 * - affecting performance of protocols such as TCP, which otherwise
89 * behave well under the congestion notification mechanism;
90 * - preventing the Tx cores from tightly-looping (as if the congestion
91 * threshold was too low to be effective);
92 * - running out of memory if the CS threshold is set too high.
93 */
94
95 #define DPAA_CS_THRESHOLD_10G 0x10000000
96 /* The size in bytes of the egress Congestion State notification threshold on
97 * 10G ports, range 0x1000 .. 0x10000000
98 */
99
100 /* Largest value that the FQD's OAL field can hold */
101 #define FSL_QMAN_MAX_OAL 127
102
103 /* Default alignment for start of data in an Rx FD */
104 #ifdef CONFIG_DPAA_ERRATUM_A050385
105 /* aligning data start to 64 avoids DMA transaction splits, unless the buffer
106 * is crossing a 4k page boundary
107 */
108 #define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
109 /* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
110 * crossings; also, all SG fragments except the last must have a size multiple
111 * of 256 to avoid DMA transaction splits
112 */
113 #define DPAA_A050385_ALIGN 256
114 #define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
115 DPAA_A050385_ALIGN : 16)
116 #else
117 #define DPAA_FD_DATA_ALIGNMENT 16
118 #define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
119 #endif
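/* A back-of-the-envelope check of the workaround (a sketch, not a hardware
 * guarantee): since 4096 is a multiple of 256, an access that starts on a
 * 256-byte boundary and stays within a 256-byte chunk cannot straddle a 4 KiB
 * page. Keeping the data start 256-aligned and all but the last S/G fragment
 * a multiple of 256 bytes preserves that property along the whole chain,
 * which is why the larger alignment avoids the DMA transaction splits
 * described above.
 */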
120
121 /* The DPAA requires 256 bytes reserved and mapped for the SGT */
122 #define DPAA_SGT_SIZE 256
123
124 /* Values for the L3R field of the FM Parse Results
125 */
126 /* L3 Type field: First IP Present IPv4 */
127 #define FM_L3_PARSE_RESULT_IPV4 0x8000
128 /* L3 Type field: First IP Present IPv6 */
129 #define FM_L3_PARSE_RESULT_IPV6 0x4000
130 /* Values for the L4R field of the FM Parse Results */
131 /* L4 Type field: UDP */
132 #define FM_L4_PARSE_RESULT_UDP 0x40
133 /* L4 Type field: TCP */
134 #define FM_L4_PARSE_RESULT_TCP 0x20
135
136 /* FD status field indicating whether the FM Parser has attempted to validate
137 * the L4 csum of the frame.
138 * Note that having this bit set doesn't necessarily imply that the checksum
139 * is valid. One would have to check the parse results to find that out.
140 */
141 #define FM_FD_STAT_L4CV 0x00000004
142
143 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
144 #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
145
146 #define FSL_DPAA_BPID_INV 0xff
147 #define FSL_DPAA_ETH_MAX_BUF_COUNT 128
148 #define FSL_DPAA_ETH_REFILL_THRESHOLD 80
149
150 #define DPAA_TX_PRIV_DATA_SIZE 16
151 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
152 #define DPAA_TIME_STAMP_SIZE 8
153 #define DPAA_HASH_RESULTS_SIZE 8
154 #define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
155 + DPAA_HASH_RESULTS_SIZE)
156 #define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
157 XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
158 #ifdef CONFIG_DPAA_ERRATUM_A050385
159 #define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
160 #define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
161 DPAA_RX_PRIV_DATA_A050385_SIZE : \
162 DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
163 #else
164 #define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
165 #endif
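/* Worked example of the headroom arithmetic above (assumed values: the FMan
 * parse results area, sizeof(struct fman_prs_result), is taken as 32 bytes
 * and XDP_PACKET_HEADROOM as 256 bytes):
 *   DPAA_HWA_SIZE                  = 32 + 8 + 8    = 48
 *   DPAA_RX_PRIV_DATA_DEFAULT_SIZE = 16 + 256 - 48 = 224
 *   DPAA_RX_PRIV_DATA_A050385_SIZE = 256 - 48      = 208
 * i.e. the private data area is sized so that private data plus the hardware
 * annotations (parse results, timestamp, hash) add up to the XDP headroom,
 * or to the A050385 alignment when the erratum workaround is active.
 */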
166
167 #define DPAA_ETH_PCD_RXQ_NUM 128
168
169 #define DPAA_ENQUEUE_RETRIES 100000
170
171 enum port_type {RX, TX};
172
173 struct fm_port_fqs {
174 struct dpaa_fq *tx_defq;
175 struct dpaa_fq *tx_errq;
176 struct dpaa_fq *rx_defq;
177 struct dpaa_fq *rx_errq;
178 struct dpaa_fq *rx_pcdq;
179 };
180
181 /* All the dpa bps in use at any moment */
182 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
183
184 #define DPAA_BP_RAW_SIZE 4096
185
186 #ifdef CONFIG_DPAA_ERRATUM_A050385
187 #define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
188 ~(DPAA_A050385_ALIGN - 1))
189 #else
190 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
191 #endif
192
193 static int dpaa_max_frm;
194
195 static int dpaa_rx_extra_headroom;
196
197 #define dpaa_get_max_mtu() \
198 (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
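/* Example (assuming the common FMan default maximum frame size of 1522
 * bytes): dpaa_get_max_mtu() = 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN)
 *                            = 1522 - (18 + 4) = 1500,
 * the standard Ethernet MTU once the VLAN-tagged header and FCS are
 * subtracted from the maximum frame length.
 */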
199
200 static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
201
202 static int dpaa_netdev_init(struct net_device *net_dev,
203 const struct net_device_ops *dpaa_ops,
204 u16 tx_timeout)
205 {
206 struct dpaa_priv *priv = netdev_priv(net_dev);
207 struct device *dev = net_dev->dev.parent;
208 struct mac_device *mac_dev = priv->mac_dev;
209 struct dpaa_percpu_priv *percpu_priv;
210 const u8 *mac_addr;
211 int i, err;
212
213 /* Although we access another CPU's private data here
214 * we do it at initialization so it is safe
215 */
216 for_each_possible_cpu(i) {
217 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
218 percpu_priv->net_dev = net_dev;
219 }
220
221 net_dev->netdev_ops = dpaa_ops;
222 mac_addr = mac_dev->addr;
223
224 net_dev->mem_start = (unsigned long)priv->mac_dev->res->start;
225 net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;
226
227 net_dev->min_mtu = ETH_MIN_MTU;
228 net_dev->max_mtu = dpaa_get_max_mtu();
229
230 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
231 NETIF_F_LLTX | NETIF_F_RXHASH);
232
233 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
234 /* The kernel enables GSO automatically, if we declare NETIF_F_SG.
235 * For conformity, we'll still declare GSO explicitly.
236 */
237 net_dev->features |= NETIF_F_GSO;
238 net_dev->features |= NETIF_F_RXCSUM;
239
240 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
241 /* we do not want shared skbs on TX */
242 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
243
244 net_dev->features |= net_dev->hw_features;
245 net_dev->vlan_features = net_dev->features;
246
247 if (is_valid_ether_addr(mac_addr)) {
248 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
249 eth_hw_addr_set(net_dev, mac_addr);
250 } else {
251 eth_hw_addr_random(net_dev);
252 err = mac_dev->change_addr(mac_dev->fman_mac,
253 (const enet_addr_t *)net_dev->dev_addr);
254 if (err) {
255 dev_err(dev, "Failed to set random MAC address\n");
256 return -EINVAL;
257 }
258 dev_info(dev, "Using random MAC address: %pM\n",
259 net_dev->dev_addr);
260 }
261
262 net_dev->ethtool_ops = &dpaa_ethtool_ops;
263
264 net_dev->needed_headroom = priv->tx_headroom;
265 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
266
267 mac_dev->net_dev = net_dev;
268 mac_dev->update_speed = dpaa_eth_cgr_set_speed;
269
270 /* start without the RUNNING flag, phylib controls it later */
271 netif_carrier_off(net_dev);
272
273 err = register_netdev(net_dev);
274 if (err < 0) {
275 dev_err(dev, "register_netdev() = %d\n", err);
276 return err;
277 }
278
279 return 0;
280 }
281
282 static int dpaa_stop(struct net_device *net_dev)
283 {
284 struct mac_device *mac_dev;
285 struct dpaa_priv *priv;
286 int i, err = 0, error;
287
288 priv = netdev_priv(net_dev);
289 mac_dev = priv->mac_dev;
290
291 netif_tx_stop_all_queues(net_dev);
292 /* Allow the Fman (Tx) port to process in-flight frames before we
293 * try switching it off.
294 */
295 msleep(200);
296
297 if (mac_dev->phy_dev)
298 phy_stop(mac_dev->phy_dev);
299 mac_dev->disable(mac_dev->fman_mac);
300
301 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
302 error = fman_port_disable(mac_dev->port[i]);
303 if (error)
304 err = error;
305 }
306
307 if (net_dev->phydev)
308 phy_disconnect(net_dev->phydev);
309 net_dev->phydev = NULL;
310
311 msleep(200);
312
313 return err;
314 }
315
316 static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
317 {
318 struct dpaa_percpu_priv *percpu_priv;
319 const struct dpaa_priv *priv;
320
321 priv = netdev_priv(net_dev);
322 percpu_priv = this_cpu_ptr(priv->percpu_priv);
323
324 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
325 jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
326
327 percpu_priv->stats.tx_errors++;
328 }
329
330 /* Calculates the statistics for the given device by adding the statistics
331 * collected by each CPU.
332 */
333 static void dpaa_get_stats64(struct net_device *net_dev,
334 struct rtnl_link_stats64 *s)
335 {
336 int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
337 struct dpaa_priv *priv = netdev_priv(net_dev);
338 struct dpaa_percpu_priv *percpu_priv;
339 u64 *netstats = (u64 *)s;
340 u64 *cpustats;
341 int i, j;
342
343 for_each_possible_cpu(i) {
344 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
345
346 cpustats = (u64 *)&percpu_priv->stats;
347
348 /* add stats from all CPUs */
349 for (j = 0; j < numstats; j++)
350 netstats[j] += cpustats[j];
351 }
352 }
353
354 static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
355 void *type_data)
356 {
357 struct dpaa_priv *priv = netdev_priv(net_dev);
358 struct tc_mqprio_qopt *mqprio = type_data;
359 u8 num_tc;
360 int i;
361
362 if (type != TC_SETUP_QDISC_MQPRIO)
363 return -EOPNOTSUPP;
364
365 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
366 num_tc = mqprio->num_tc;
367
368 if (num_tc == priv->num_tc)
369 return 0;
370
371 if (!num_tc) {
372 netdev_reset_tc(net_dev);
373 goto out;
374 }
375
376 if (num_tc > DPAA_TC_NUM) {
377 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
378 DPAA_TC_NUM);
379 return -EINVAL;
380 }
381
382 netdev_set_num_tc(net_dev, num_tc);
383
384 for (i = 0; i < num_tc; i++)
385 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
386 i * DPAA_TC_TXQ_NUM);
387
388 out:
389 priv->num_tc = num_tc ? : 1;
390 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
391 return 0;
392 }
393
394 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
395 {
396 struct dpaa_eth_data *eth_data;
397 struct device *dpaa_dev;
398 struct mac_device *mac_dev;
399
400 dpaa_dev = &pdev->dev;
401 eth_data = dpaa_dev->platform_data;
402 if (!eth_data) {
403 dev_err(dpaa_dev, "eth_data missing\n");
404 return ERR_PTR(-ENODEV);
405 }
406 mac_dev = eth_data->mac_dev;
407 if (!mac_dev) {
408 dev_err(dpaa_dev, "mac_dev missing\n");
409 return ERR_PTR(-EINVAL);
410 }
411
412 return mac_dev;
413 }
414
415 static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
416 {
417 const struct dpaa_priv *priv;
418 struct mac_device *mac_dev;
419 struct sockaddr old_addr;
420 int err;
421
422 priv = netdev_priv(net_dev);
423
424 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
425
426 err = eth_mac_addr(net_dev, addr);
427 if (err < 0) {
428 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
429 return err;
430 }
431
432 mac_dev = priv->mac_dev;
433
434 err = mac_dev->change_addr(mac_dev->fman_mac,
435 (const enet_addr_t *)net_dev->dev_addr);
436 if (err < 0) {
437 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
438 err);
439 /* reverting to previous address */
440 eth_mac_addr(net_dev, &old_addr);
441
442 return err;
443 }
444
445 return 0;
446 }
447
448 static void dpaa_set_rx_mode(struct net_device *net_dev)
449 {
450 const struct dpaa_priv *priv;
451 int err;
452
453 priv = netdev_priv(net_dev);
454
455 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
456 priv->mac_dev->promisc = !priv->mac_dev->promisc;
457 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
458 priv->mac_dev->promisc);
459 if (err < 0)
460 netif_err(priv, drv, net_dev,
461 "mac_dev->set_promisc() = %d\n",
462 err);
463 }
464
465 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
466 priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
467 err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
468 priv->mac_dev->allmulti);
469 if (err < 0)
470 netif_err(priv, drv, net_dev,
471 "mac_dev->set_allmulti() = %d\n",
472 err);
473 }
474
475 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
476 if (err < 0)
477 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
478 err);
479 }
480
481 static struct dpaa_bp *dpaa_bpid2pool(int bpid)
482 {
483 if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
484 return NULL;
485
486 return dpaa_bp_array[bpid];
487 }
488
489 /* checks if this bpool is already allocated */
490 static bool dpaa_bpid2pool_use(int bpid)
491 {
492 if (dpaa_bpid2pool(bpid)) {
493 refcount_inc(&dpaa_bp_array[bpid]->refs);
494 return true;
495 }
496
497 return false;
498 }
499
500 /* called only once per bpid by dpaa_bp_alloc_pool() */
501 static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
502 {
503 dpaa_bp_array[bpid] = dpaa_bp;
504 refcount_set(&dpaa_bp->refs, 1);
505 }
506
507 static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
508 {
509 int err;
510
511 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
512 pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
513 __func__);
514 return -EINVAL;
515 }
516
517 /* If the pool is already specified, we only create one per bpid */
518 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
519 dpaa_bpid2pool_use(dpaa_bp->bpid))
520 return 0;
521
522 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
523 dpaa_bp->pool = bman_new_pool();
524 if (!dpaa_bp->pool) {
525 pr_err("%s: bman_new_pool() failed\n",
526 __func__);
527 return -ENODEV;
528 }
529
530 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
531 }
532
533 if (dpaa_bp->seed_cb) {
534 err = dpaa_bp->seed_cb(dpaa_bp);
535 if (err)
536 goto pool_seed_failed;
537 }
538
539 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
540
541 return 0;
542
543 pool_seed_failed:
544 pr_err("%s: pool seeding failed\n", __func__);
545 bman_free_pool(dpaa_bp->pool);
546
547 return err;
548 }
549
550 /* remove and free all the buffers from the given buffer pool */
551 static void dpaa_bp_drain(struct dpaa_bp *bp)
552 {
553 u8 num = 8;
554 int ret;
555
556 do {
557 struct bm_buffer bmb[8];
558 int i;
559
560 ret = bman_acquire(bp->pool, bmb, num);
561 if (ret < 0) {
562 if (num == 8) {
563 /* we have less than 8 buffers left;
564 * drain them one by one
565 */
566 num = 1;
567 ret = 1;
568 continue;
569 } else {
570 /* Pool is fully drained */
571 break;
572 }
573 }
574
575 if (bp->free_buf_cb)
576 for (i = 0; i < num; i++)
577 bp->free_buf_cb(bp, &bmb[i]);
578 } while (ret > 0);
579 }
580
581 static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
582 {
583 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
584
585 /* the mapping between bpid and dpaa_bp is done very late in the
586 * allocation procedure; if something failed before the mapping, the bp
587 * was not configured, therefore we don't need the below instructions
588 */
589 if (!bp)
590 return;
591
592 if (!refcount_dec_and_test(&bp->refs))
593 return;
594
595 if (bp->free_buf_cb)
596 dpaa_bp_drain(bp);
597
598 dpaa_bp_array[bp->bpid] = NULL;
599 bman_free_pool(bp->pool);
600 }
601
602 static void dpaa_bps_free(struct dpaa_priv *priv)
603 {
604 dpaa_bp_free(priv->dpaa_bp);
605 }
606
607 /* Use multiple WQs for FQ assignment:
608 * - Tx Confirmation queues go to WQ1.
609 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
610 * to be scheduled, in case there are many more FQs in WQ6).
611 * - Rx Default goes to WQ6.
612 * - Tx queues go to different WQs depending on their priority. Equal
613 * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
614 * WQ0 (highest priority).
615 * This ensures that Tx-confirmed buffers are timely released. In particular,
616 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
617 * are greatly outnumbered by other FQs in the system, while
618 * dequeue scheduling is round-robin.
619 */
620 static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
621 {
622 switch (fq->fq_type) {
623 case FQ_TYPE_TX_CONFIRM:
624 case FQ_TYPE_TX_CONF_MQ:
625 fq->wq = 1;
626 break;
627 case FQ_TYPE_RX_ERROR:
628 case FQ_TYPE_TX_ERROR:
629 fq->wq = 5;
630 break;
631 case FQ_TYPE_RX_DEFAULT:
632 case FQ_TYPE_RX_PCD:
633 fq->wq = 6;
634 break;
635 case FQ_TYPE_TX:
636 switch (idx / DPAA_TC_TXQ_NUM) {
637 case 0:
638 /* Low priority (best effort) */
639 fq->wq = 6;
640 break;
641 case 1:
642 /* Medium priority */
643 fq->wq = 2;
644 break;
645 case 2:
646 /* High priority */
647 fq->wq = 1;
648 break;
649 case 3:
650 /* Very high priority */
651 fq->wq = 0;
652 break;
653 default:
654 WARN(1, "Too many TX FQs: more than %d!\n",
655 DPAA_ETH_TXQ_NUM);
656 }
657 break;
658 default:
659 WARN(1, "Invalid FQ type %d for FQID %d!\n",
660 fq->fq_type, fq->fqid);
661 }
662 }
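/* Illustrative mapping (assuming DPAA_TC_TXQ_NUM == 8, e.g. one Tx queue per
 * core on an eight-core part):
 *   Tx FQ idx  0..7  -> WQ6 (lowest priority, TC 0)
 *   Tx FQ idx  8..15 -> WQ2 (TC 1)
 *   Tx FQ idx 16..23 -> WQ1 (TC 2)
 *   Tx FQ idx 24..31 -> WQ0 (highest priority, TC 3)
 * while Tx Confirmation FQs always land on WQ1 and the Rx/Tx error FQs on
 * WQ5, as per dpaa_assign_wq() above.
 */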
663
664 static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
665 u32 start, u32 count,
666 struct list_head *list,
667 enum dpaa_fq_type fq_type)
668 {
669 struct dpaa_fq *dpaa_fq;
670 int i;
671
672 dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
673 GFP_KERNEL);
674 if (!dpaa_fq)
675 return NULL;
676
677 for (i = 0; i < count; i++) {
678 dpaa_fq[i].fq_type = fq_type;
679 dpaa_fq[i].fqid = start ? start + i : 0;
680 list_add_tail(&dpaa_fq[i].list, list);
681 }
682
683 for (i = 0; i < count; i++)
684 dpaa_assign_wq(dpaa_fq + i, i);
685
686 return dpaa_fq;
687 }
688
689 static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
690 struct fm_port_fqs *port_fqs)
691 {
692 struct dpaa_fq *dpaa_fq;
693 u32 fq_base, fq_base_aligned, i;
694
695 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
696 if (!dpaa_fq)
697 goto fq_alloc_failed;
698
699 port_fqs->rx_errq = &dpaa_fq[0];
700
701 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
702 if (!dpaa_fq)
703 goto fq_alloc_failed;
704
705 port_fqs->rx_defq = &dpaa_fq[0];
706
707 /* the PCD FQIDs range needs to be aligned for correct operation */
708 if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
709 goto fq_alloc_failed;
710
711 fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);
712
713 for (i = fq_base; i < fq_base_aligned; i++)
714 qman_release_fqid(i);
715
716 for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
717 i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
718 qman_release_fqid(i);
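/* Example of the alignment dance above, with hypothetical FQIDs: if
 * qman_alloc_fqid_range() returns fq_base = 300 for 2 * 128 = 256 FQIDs
 * (300..555), then fq_base_aligned = ALIGN(300, 128) = 384; FQIDs 300..383
 * and 512..555 are handed back to QMan and only the aligned block 384..511
 * is kept for the DPAA_ETH_PCD_RXQ_NUM (128) PCD Rx queues.
 */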
719
720 dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
721 list, FQ_TYPE_RX_PCD);
722 if (!dpaa_fq)
723 goto fq_alloc_failed;
724
725 port_fqs->rx_pcdq = &dpaa_fq[0];
726
727 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
728 goto fq_alloc_failed;
729
730 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
731 if (!dpaa_fq)
732 goto fq_alloc_failed;
733
734 port_fqs->tx_errq = &dpaa_fq[0];
735
736 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
737 if (!dpaa_fq)
738 goto fq_alloc_failed;
739
740 port_fqs->tx_defq = &dpaa_fq[0];
741
742 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
743 goto fq_alloc_failed;
744
745 return 0;
746
747 fq_alloc_failed:
748 dev_err(dev, "dpaa_fq_alloc() failed\n");
749 return -ENOMEM;
750 }
751
752 static u32 rx_pool_channel;
753 static DEFINE_SPINLOCK(rx_pool_channel_init);
754
755 static int dpaa_get_channel(void)
756 {
757 spin_lock(&rx_pool_channel_init);
758 if (!rx_pool_channel) {
759 u32 pool;
760 int ret;
761
762 ret = qman_alloc_pool(&pool);
763
764 if (!ret)
765 rx_pool_channel = pool;
766 }
767 spin_unlock(&rx_pool_channel_init);
768 if (!rx_pool_channel)
769 return -ENOMEM;
770 return rx_pool_channel;
771 }
772
773 static void dpaa_release_channel(void)
774 {
775 qman_release_pool(rx_pool_channel);
776 }
777
778 static void dpaa_eth_add_channel(u16 channel, struct device *dev)
779 {
780 u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
781 const cpumask_t *cpus = qman_affine_cpus();
782 struct qman_portal *portal;
783 int cpu;
784
785 for_each_cpu_and(cpu, cpus, cpu_online_mask) {
786 portal = qman_get_affine_portal(cpu);
787 qman_p_static_dequeue_add(portal, pool);
788 qman_start_using_portal(portal, dev);
789 }
790 }
791
792 /* Congestion group state change notification callback.
793 * Stops the device's egress queues while they are congested and
794 * wakes them upon exiting congested state.
795 * Also updates some CGR-related stats.
796 */
797 static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
798 int congested)
799 {
800 struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
801 struct dpaa_priv, cgr_data.cgr);
802
803 if (congested) {
804 priv->cgr_data.congestion_start_jiffies = jiffies;
805 netif_tx_stop_all_queues(priv->net_dev);
806 priv->cgr_data.cgr_congested_count++;
807 } else {
808 priv->cgr_data.congested_jiffies +=
809 (jiffies - priv->cgr_data.congestion_start_jiffies);
810 netif_tx_wake_all_queues(priv->net_dev);
811 }
812 }
813
814 static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
815 {
816 struct qm_mcc_initcgr initcgr;
817 u32 cs_th;
818 int err;
819
820 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
821 if (err < 0) {
822 if (netif_msg_drv(priv))
823 pr_err("%s: Error %d allocating CGR ID\n",
824 __func__, err);
825 goto out_error;
826 }
827 priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
828
829 /* Enable Congestion State Change Notifications and CS taildrop */
830 memset(&initcgr, 0, sizeof(initcgr));
831 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
832 initcgr.cgr.cscn_en = QM_CGR_EN;
833
834 /* Set different thresholds based on the configured MAC speed.
835 * This may turn suboptimal if the MAC is reconfigured at another
836 * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link
837 * callback.
838 */
839 if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
840 cs_th = DPAA_CS_THRESHOLD_10G;
841 else
842 cs_th = DPAA_CS_THRESHOLD_1G;
843 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
844
845 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
846 initcgr.cgr.cstd_en = QM_CGR_EN;
847
848 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
849 &initcgr);
850 if (err < 0) {
851 if (netif_msg_drv(priv))
852 pr_err("%s: Error %d creating CGR with ID %d\n",
853 __func__, err, priv->cgr_data.cgr.cgrid);
854 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
855 goto out_error;
856 }
857 if (netif_msg_drv(priv))
858 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
859 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
860 priv->cgr_data.cgr.chan);
861
862 out_error:
863 return err;
864 }
865
866 static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
867 {
868 struct net_device *net_dev = mac_dev->net_dev;
869 struct dpaa_priv *priv = netdev_priv(net_dev);
870 struct qm_mcc_initcgr opts = { };
871 u32 cs_th;
872 int err;
873
874 opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
875 switch (speed) {
876 case SPEED_10000:
877 cs_th = DPAA_CS_THRESHOLD_10G;
878 break;
879 case SPEED_1000:
880 default:
881 cs_th = DPAA_CS_THRESHOLD_1G;
882 break;
883 }
884 qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
885
886 err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
887 if (err)
888 netdev_err(net_dev, "could not update speed: %d\n", err);
889 }
890
891 static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
892 struct dpaa_fq *fq,
893 const struct qman_fq *template)
894 {
895 fq->fq_base = *template;
896 fq->net_dev = priv->net_dev;
897
898 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
899 fq->channel = priv->channel;
900 }
901
902 static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
903 struct dpaa_fq *fq,
904 struct fman_port *port,
905 const struct qman_fq *template)
906 {
907 fq->fq_base = *template;
908 fq->net_dev = priv->net_dev;
909
910 if (port) {
911 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
912 fq->channel = (u16)fman_port_get_qman_channel_id(port);
913 } else {
914 fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
915 }
916 }
917
918 static void dpaa_fq_setup(struct dpaa_priv *priv,
919 const struct dpaa_fq_cbs *fq_cbs,
920 struct fman_port *tx_port)
921 {
922 int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
923 const cpumask_t *affine_cpus = qman_affine_cpus();
924 u16 channels[NR_CPUS];
925 struct dpaa_fq *fq;
926
927 for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
928 channels[num_portals++] = qman_affine_channel(cpu);
929
930 if (num_portals == 0)
931 dev_err(priv->net_dev->dev.parent,
932 "No Qman software (affine) channels found\n");
933
934 /* Initialize each FQ in the list */
935 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
936 switch (fq->fq_type) {
937 case FQ_TYPE_RX_DEFAULT:
938 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
939 break;
940 case FQ_TYPE_RX_ERROR:
941 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
942 break;
943 case FQ_TYPE_RX_PCD:
944 if (!num_portals)
945 continue;
946 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
947 fq->channel = channels[portal_cnt++ % num_portals];
948 break;
949 case FQ_TYPE_TX:
950 dpaa_setup_egress(priv, fq, tx_port,
951 &fq_cbs->egress_ern);
952 /* If we have more Tx queues than the number of cores,
953 * just ignore the extra ones.
954 */
955 if (egress_cnt < DPAA_ETH_TXQ_NUM)
956 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
957 break;
958 case FQ_TYPE_TX_CONF_MQ:
959 priv->conf_fqs[conf_cnt++] = &fq->fq_base;
960 fallthrough;
961 case FQ_TYPE_TX_CONFIRM:
962 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
963 break;
964 case FQ_TYPE_TX_ERROR:
965 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
966 break;
967 default:
968 dev_warn(priv->net_dev->dev.parent,
969 "Unknown FQ type detected!\n");
970 break;
971 }
972 }
973
974 /* Make sure all CPUs receive a corresponding Tx queue. */
975 while (egress_cnt < DPAA_ETH_TXQ_NUM) {
976 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
977 if (fq->fq_type != FQ_TYPE_TX)
978 continue;
979 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
980 if (egress_cnt == DPAA_ETH_TXQ_NUM)
981 break;
982 }
983 }
984 }
985
986 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
987 struct qman_fq *tx_fq)
988 {
989 int i;
990
991 for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
992 if (priv->egress_fqs[i] == tx_fq)
993 return i;
994
995 return -EINVAL;
996 }
997
998 static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
999 {
1000 const struct dpaa_priv *priv;
1001 struct qman_fq *confq = NULL;
1002 struct qm_mcc_initfq initfq;
1003 struct device *dev;
1004 struct qman_fq *fq;
1005 int queue_id;
1006 int err;
1007
1008 priv = netdev_priv(dpaa_fq->net_dev);
1009 dev = dpaa_fq->net_dev->dev.parent;
1010
1011 if (dpaa_fq->fqid == 0)
1012 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
1013
1014 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
1015
1016 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
1017 if (err) {
1018 dev_err(dev, "qman_create_fq() failed\n");
1019 return err;
1020 }
1021 fq = &dpaa_fq->fq_base;
1022
1023 if (dpaa_fq->init) {
1024 memset(&initfq, 0, sizeof(initfq));
1025
1026 initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
1027 /* Note: we may get to keep an empty FQ in cache */
1028 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
1029
1030 /* Try to reduce the number of portal interrupts for
1031 * Tx Confirmation FQs.
1032 */
1033 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
1034 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
1035
1036 /* FQ placement */
1037 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1038
1039 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
1040
1041 /* Put all egress queues in a congestion group of their own.
1042 * Sensu stricto, the Tx confirmation queues are Rx FQs,
1043 * rather than Tx - but they nonetheless account for the
1044 * memory footprint on behalf of egress traffic. We therefore
1045 * place them in the netdev's CGR, along with the Tx FQs.
1046 */
1047 if (dpaa_fq->fq_type == FQ_TYPE_TX ||
1048 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
1049 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
1050 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1051 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1052 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
1053 /* Set a fixed overhead accounting, in an attempt to
1054 * reduce the impact of fixed-size skb shells and the
1055 * driver's needed headroom on system memory. This is
1056 * especially the case when the egress traffic is
1057 * composed of small datagrams.
1058 * Unfortunately, QMan's OAL value is capped to an
1059 * insufficient value, but even that is better than
1060 * no overhead accounting at all.
1061 */
1062 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1063 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1064 qm_fqd_set_oal(&initfq.fqd,
1065 min(sizeof(struct sk_buff) +
1066 priv->tx_headroom,
1067 (size_t)FSL_QMAN_MAX_OAL));
1068 }
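/* Rough sizing note (an observation, not a guarantee for every config):
 * sizeof(struct sk_buff) alone exceeds 127 bytes on typical 64-bit builds,
 * so the min() above usually saturates and the OAL ends up clamped at
 * FSL_QMAN_MAX_OAL (127) bytes of overhead accounted per frame.
 */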
1069
1070 if (td_enable) {
1071 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
1072 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
1073 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
1074 }
1075
1076 if (dpaa_fq->fq_type == FQ_TYPE_TX) {
1077 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
1078 if (queue_id >= 0)
1079 confq = priv->conf_fqs[queue_id];
1080 if (confq) {
1081 initfq.we_mask |=
1082 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1083 /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
1084 * A2V=1 (contextA A2 field is valid)
1085 * A0V=1 (contextA A0 field is valid)
1086 * B0V=1 (contextB field is valid)
1087 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
1088 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
1089 */
1090 qm_fqd_context_a_set64(&initfq.fqd,
1091 0x1e00000080000000ULL);
1092 }
1093 }
1094
1095 /* Put all the ingress queues in our "ingress CGR". */
1096 if (priv->use_ingress_cgr &&
1097 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1098 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
1099 dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
1100 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1101 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1102 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
1103 /* Set a fixed overhead accounting, just like for the
1104 * egress CGR.
1105 */
1106 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1107 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1108 qm_fqd_set_oal(&initfq.fqd,
1109 min(sizeof(struct sk_buff) +
1110 priv->tx_headroom,
1111 (size_t)FSL_QMAN_MAX_OAL));
1112 }
1113
1114 /* Initialization common to all ingress queues */
1115 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
1116 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1117 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
1118 QM_FQCTRL_CTXASTASHING);
1119 initfq.fqd.context_a.stashing.exclusive =
1120 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
1121 QM_STASHING_EXCL_ANNOTATION;
1122 qm_fqd_set_stashing(&initfq.fqd, 1, 2,
1123 DIV_ROUND_UP(sizeof(struct qman_fq),
1124 64));
1125 }
1126
1127 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1128 if (err < 0) {
1129 dev_err(dev, "qman_init_fq(%u) = %d\n",
1130 qman_fq_fqid(fq), err);
1131 qman_destroy_fq(fq);
1132 return err;
1133 }
1134 }
1135
1136 dpaa_fq->fqid = qman_fq_fqid(fq);
1137
1138 if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1139 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) {
1140 err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev,
1141 dpaa_fq->fqid, 0);
1142 if (err) {
1143 dev_err(dev, "xdp_rxq_info_reg() = %d\n", err);
1144 return err;
1145 }
1146
1147 err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq,
1148 MEM_TYPE_PAGE_ORDER0, NULL);
1149 if (err) {
1150 dev_err(dev, "xdp_rxq_info_reg_mem_model() = %d\n",
1151 err);
1152 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
1153 return err;
1154 }
1155 }
1156
1157 return 0;
1158 }
1159
1160 static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1161 {
1162 const struct dpaa_priv *priv;
1163 struct dpaa_fq *dpaa_fq;
1164 int err, error;
1165
1166 err = 0;
1167
1168 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1169 priv = netdev_priv(dpaa_fq->net_dev);
1170
1171 if (dpaa_fq->init) {
1172 err = qman_retire_fq(fq, NULL);
1173 if (err < 0 && netif_msg_drv(priv))
1174 dev_err(dev, "qman_retire_fq(%u) = %d\n",
1175 qman_fq_fqid(fq), err);
1176
1177 error = qman_oos_fq(fq);
1178 if (error < 0 && netif_msg_drv(priv)) {
1179 dev_err(dev, "qman_oos_fq(%u) = %d\n",
1180 qman_fq_fqid(fq), error);
1181 if (err >= 0)
1182 err = error;
1183 }
1184 }
1185
1186 if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1187 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) &&
1188 xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq))
1189 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
1190
1191 qman_destroy_fq(fq);
1192 list_del(&dpaa_fq->list);
1193
1194 return err;
1195 }
1196
1197 static int dpaa_fq_free(struct device *dev, struct list_head *list)
1198 {
1199 struct dpaa_fq *dpaa_fq, *tmp;
1200 int err, error;
1201
1202 err = 0;
1203 list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1204 error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1205 if (error < 0 && err >= 0)
1206 err = error;
1207 }
1208
1209 return err;
1210 }
1211
1212 static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1213 struct dpaa_fq *defq,
1214 struct dpaa_buffer_layout *buf_layout)
1215 {
1216 struct fman_buffer_prefix_content buf_prefix_content;
1217 struct fman_port_params params;
1218 int err;
1219
1220 memset(&params, 0, sizeof(params));
1221 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1222
1223 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1224 buf_prefix_content.pass_prs_result = true;
1225 buf_prefix_content.pass_hash_result = true;
1226 buf_prefix_content.pass_time_stamp = true;
1227 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1228
1229 params.specific_params.non_rx_params.err_fqid = errq->fqid;
1230 params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1231
1232 err = fman_port_config(port, &params);
1233 if (err) {
1234 pr_err("%s: fman_port_config failed\n", __func__);
1235 return err;
1236 }
1237
1238 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1239 if (err) {
1240 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1241 __func__);
1242 return err;
1243 }
1244
1245 err = fman_port_init(port);
1246 if (err)
1247 pr_err("%s: fm_port_init failed\n", __func__);
1248
1249 return err;
1250 }
1251
1252 static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
1253 struct dpaa_fq *errq,
1254 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
1255 struct dpaa_buffer_layout *buf_layout)
1256 {
1257 struct fman_buffer_prefix_content buf_prefix_content;
1258 struct fman_port_rx_params *rx_p;
1259 struct fman_port_params params;
1260 int err;
1261
1262 memset(&params, 0, sizeof(params));
1263 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1264
1265 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1266 buf_prefix_content.pass_prs_result = true;
1267 buf_prefix_content.pass_hash_result = true;
1268 buf_prefix_content.pass_time_stamp = true;
1269 buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
1270
1271 rx_p = ¶ms.specific_params.rx_params;
1272 rx_p->err_fqid = errq->fqid;
1273 rx_p->dflt_fqid = defq->fqid;
1274 if (pcdq) {
1275 rx_p->pcd_base_fqid = pcdq->fqid;
1276 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
1277 }
1278
1279 rx_p->ext_buf_pools.num_of_pools_used = 1;
1280 rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
1281 rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
1282
1283 err = fman_port_config(port, &params);
1284 if (err) {
1285 pr_err("%s: fman_port_config failed\n", __func__);
1286 return err;
1287 }
1288
1289 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1290 if (err) {
1291 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1292 __func__);
1293 return err;
1294 }
1295
1296 err = fman_port_init(port);
1297 if (err)
1298 pr_err("%s: fm_port_init failed\n", __func__);
1299
1300 return err;
1301 }
1302
1303 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
1304 struct dpaa_bp *bp,
1305 struct fm_port_fqs *port_fqs,
1306 struct dpaa_buffer_layout *buf_layout,
1307 struct device *dev)
1308 {
1309 struct fman_port *rxport = mac_dev->port[RX];
1310 struct fman_port *txport = mac_dev->port[TX];
1311 int err;
1312
1313 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1314 port_fqs->tx_defq, &buf_layout[TX]);
1315 if (err)
1316 return err;
1317
1318 err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
1319 port_fqs->rx_defq, port_fqs->rx_pcdq,
1320 &buf_layout[RX]);
1321
1322 return err;
1323 }
1324
1325 static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1326 struct bm_buffer *bmb, int cnt)
1327 {
1328 int err;
1329
1330 err = bman_release(dpaa_bp->pool, bmb, cnt);
1331 /* Should never occur, address anyway to avoid leaking the buffers */
1332 if (WARN_ON(err) && dpaa_bp->free_buf_cb)
1333 while (cnt-- > 0)
1334 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
1335
1336 return cnt;
1337 }
1338
1339 static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1340 {
1341 struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1342 struct dpaa_bp *dpaa_bp;
1343 int i = 0, j;
1344
1345 memset(bmb, 0, sizeof(bmb));
1346
1347 do {
1348 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1349 if (!dpaa_bp)
1350 return;
1351
1352 j = 0;
1353 do {
1354 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1355
1356 bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1357
1358 j++; i++;
1359 } while (j < ARRAY_SIZE(bmb) &&
1360 !qm_sg_entry_is_final(&sgt[i - 1]) &&
1361 sgt[i - 1].bpid == sgt[i].bpid);
1362
1363 dpaa_bman_release(dpaa_bp, bmb, j);
1364 } while (!qm_sg_entry_is_final(&sgt[i - 1]));
1365 }
1366
1367 static void dpaa_fd_release(const struct net_device *net_dev,
1368 const struct qm_fd *fd)
1369 {
1370 struct qm_sg_entry *sgt;
1371 struct dpaa_bp *dpaa_bp;
1372 struct bm_buffer bmb;
1373 dma_addr_t addr;
1374 void *vaddr;
1375
1376 bmb.data = 0;
1377 bm_buffer_set64(&bmb, qm_fd_addr(fd));
1378
1379 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1380 if (!dpaa_bp)
1381 return;
1382
1383 if (qm_fd_get_format(fd) == qm_fd_sg) {
1384 vaddr = phys_to_virt(qm_fd_addr(fd));
1385 sgt = vaddr + qm_fd_get_offset(fd);
1386
1387 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
1388 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1389
1390 dpaa_release_sgt_members(sgt);
1391
1392 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
1393 virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
1394 DMA_FROM_DEVICE);
1395 if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
1396 netdev_err(net_dev, "DMA mapping failed\n");
1397 return;
1398 }
1399 bm_buffer_set64(&bmb, addr);
1400 }
1401
1402 dpaa_bman_release(dpaa_bp, &bmb, 1);
1403 }
1404
1405 static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1406 const union qm_mr_entry *msg)
1407 {
1408 switch (msg->ern.rc & QM_MR_RC_MASK) {
1409 case QM_MR_RC_CGR_TAILDROP:
1410 percpu_priv->ern_cnt.cg_tdrop++;
1411 break;
1412 case QM_MR_RC_WRED:
1413 percpu_priv->ern_cnt.wred++;
1414 break;
1415 case QM_MR_RC_ERROR:
1416 percpu_priv->ern_cnt.err_cond++;
1417 break;
1418 case QM_MR_RC_ORPWINDOW_EARLY:
1419 percpu_priv->ern_cnt.early_window++;
1420 break;
1421 case QM_MR_RC_ORPWINDOW_LATE:
1422 percpu_priv->ern_cnt.late_window++;
1423 break;
1424 case QM_MR_RC_FQ_TAILDROP:
1425 percpu_priv->ern_cnt.fq_tdrop++;
1426 break;
1427 case QM_MR_RC_ORPWINDOW_RETIRED:
1428 percpu_priv->ern_cnt.fq_retired++;
1429 break;
1430 case QM_MR_RC_ORP_ZERO:
1431 percpu_priv->ern_cnt.orp_zero++;
1432 break;
1433 }
1434 }
1435
1436 /* Turn on HW checksum computation for this outgoing frame.
1437 * If the current protocol is not something we support in this regard
1438 * (or if the stack has already computed the SW checksum), we do nothing.
1439 *
1440 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
1441 * otherwise.
1442 *
1443 * Note that this function may modify the fd->cmd field and the skb data buffer
1444 * (the Parse Results area).
1445 */
1446 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1447 struct sk_buff *skb,
1448 struct qm_fd *fd,
1449 void *parse_results)
1450 {
1451 struct fman_prs_result *parse_result;
1452 u16 ethertype = ntohs(skb->protocol);
1453 struct ipv6hdr *ipv6h = NULL;
1454 struct iphdr *iph;
1455 int retval = 0;
1456 u8 l4_proto;
1457
1458 if (skb->ip_summed != CHECKSUM_PARTIAL)
1459 return 0;
1460
1461 /* Note: L3 csum seems to be already computed in sw, but we can't choose
1462 * L4 alone from the FM configuration anyway.
1463 */
1464
1465 /* Fill in some fields of the Parse Results array, so the FMan
1466 * can find them as if they came from the FMan Parser.
1467 */
1468 parse_result = (struct fman_prs_result *)parse_results;
1469
1470 /* If we're dealing with VLAN, get the real Ethernet type */
1471 if (ethertype == ETH_P_8021Q) {
1472 /* We can't always assume the MAC header is set correctly
1473 * by the stack, so reset to beginning of skb->data
1474 */
1475 skb_reset_mac_header(skb);
1476 ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1477 }
1478
1479 /* Fill in the relevant L3 parse result fields
1480 * and read the L4 protocol type
1481 */
1482 switch (ethertype) {
1483 case ETH_P_IP:
1484 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1485 iph = ip_hdr(skb);
1486 WARN_ON(!iph);
1487 l4_proto = iph->protocol;
1488 break;
1489 case ETH_P_IPV6:
1490 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1491 ipv6h = ipv6_hdr(skb);
1492 WARN_ON(!ipv6h);
1493 l4_proto = ipv6h->nexthdr;
1494 break;
1495 default:
1496 /* We shouldn't even be here */
1497 if (net_ratelimit())
1498 netif_alert(priv, tx_err, priv->net_dev,
1499 "Can't compute HW csum for L3 proto 0x%x\n",
1500 ntohs(skb->protocol));
1501 retval = -EIO;
1502 goto return_error;
1503 }
1504
1505 /* Fill in the relevant L4 parse result fields */
1506 switch (l4_proto) {
1507 case IPPROTO_UDP:
1508 parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1509 break;
1510 case IPPROTO_TCP:
1511 parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1512 break;
1513 default:
1514 if (net_ratelimit())
1515 netif_alert(priv, tx_err, priv->net_dev,
1516 "Can't compute HW csum for L4 proto 0x%x\n",
1517 l4_proto);
1518 retval = -EIO;
1519 goto return_error;
1520 }
1521
1522 /* At index 0 is IPOffset_1 as defined in the Parse Results */
1523 parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1524 parse_result->l4_off = (u8)skb_transport_offset(skb);
1525
1526 /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1527 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1528
1529 /* On P1023 and similar platforms fd->cmd interpretation could
1530 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
1531 * is not set so we do not need to check; in the future, if/when
1532 * using context_a we need to check this bit
1533 */
1534
1535 return_error:
1536 return retval;
1537 }
1538
1539 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1540 {
1541 struct net_device *net_dev = dpaa_bp->priv->net_dev;
1542 struct bm_buffer bmb[8];
1543 dma_addr_t addr;
1544 struct page *p;
1545 u8 i;
1546
1547 for (i = 0; i < 8; i++) {
1548 p = dev_alloc_pages(0);
1549 if (unlikely(!p)) {
1550 netdev_err(net_dev, "dev_alloc_pages() failed\n");
1551 goto release_previous_buffs;
1552 }
1553
1554 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
1555 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1556 if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
1557 addr))) {
1558 netdev_err(net_dev, "DMA map failed\n");
1559 goto release_previous_buffs;
1560 }
1561
1562 bmb[i].data = 0;
1563 bm_buffer_set64(&bmb[i], addr);
1564 }
1565
1566 release_bufs:
1567 return dpaa_bman_release(dpaa_bp, bmb, i);
1568
1569 release_previous_buffs:
1570 WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1571
1572 bm_buffer_set64(&bmb[i], 0);
1573 /* Avoid releasing a completely null buffer; bman_release() requires
1574 * at least one buffer.
1575 */
1576 if (likely(i))
1577 goto release_bufs;
1578
1579 return 0;
1580 }
1581
1582 static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1583 {
1584 int i;
1585
1586 /* Give each CPU an allotment of "config_count" buffers */
1587 for_each_possible_cpu(i) {
1588 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1589 int j;
1590
1591 /* Although we access another CPU's counters here
1592 * we do it at boot time so it is safe
1593 */
1594 for (j = 0; j < dpaa_bp->config_count; j += 8)
1595 *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1596 }
1597 return 0;
1598 }
1599
1600 /* Add buffers/(pages) for Rx processing whenever bpool count falls below
1601 * REFILL_THRESHOLD.
1602 */
1603 static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1604 {
1605 int count = *countptr;
1606 int new_bufs;
1607
1608 if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1609 do {
1610 new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1611 if (unlikely(!new_bufs)) {
1612 /* Avoid looping forever if we've temporarily
1613 * run out of memory. We'll try again at the
1614 * next NAPI cycle.
1615 */
1616 break;
1617 }
1618 count += new_bufs;
1619 } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1620
1621 *countptr = count;
1622 if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1623 return -ENOMEM;
1624 }
1625
1626 return 0;
1627 }
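/* Example with the defaults above: with FSL_DPAA_ETH_REFILL_THRESHOLD == 80
 * and FSL_DPAA_ETH_MAX_BUF_COUNT == 128, a per-CPU count that has dropped to
 * 75 is refilled 8 pages at a time (75 -> 83 -> ... -> 131) until it is back
 * at or above 128, stopping early only if page allocation temporarily fails.
 */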
1628
1629 static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1630 {
1631 struct dpaa_bp *dpaa_bp;
1632 int *countptr;
1633
1634 dpaa_bp = priv->dpaa_bp;
1635 if (!dpaa_bp)
1636 return -EINVAL;
1637 countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1638
1639 return dpaa_eth_refill_bpool(dpaa_bp, countptr);
1640 }
1641
1642 /* Cleanup function for outgoing frame descriptors that were built on Tx path,
1643 * either contiguous frames or scatter/gather ones.
1644 * Skb freeing is not handled here.
1645 *
1646 * This function may be called on error paths in the Tx function, so guard
1647 * against cases when not all fd relevant fields were filled in. To avoid
1648 * reading the invalid transmission timestamp for the error paths set ts to
1649 * false.
1650 *
1651 * Return the skb backpointer, since for S/G frames the buffer containing it
1652 * gets freed here.
1653 *
1654 * No skb backpointer is set when transmitting XDP frames. Cleanup the buffer
1655 * and return NULL in this case.
1656 */
1657 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1658 const struct qm_fd *fd, bool ts)
1659 {
1660 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1661 struct device *dev = priv->net_dev->dev.parent;
1662 struct skb_shared_hwtstamps shhwtstamps;
1663 dma_addr_t addr = qm_fd_addr(fd);
1664 void *vaddr = phys_to_virt(addr);
1665 const struct qm_sg_entry *sgt;
1666 struct dpaa_eth_swbp *swbp;
1667 struct sk_buff *skb;
1668 u64 ns;
1669 int i;
1670
1671 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1672 dma_unmap_page(priv->tx_dma_dev, addr,
1673 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
1674 dma_dir);
1675
1676 /* The sgt buffer has been allocated with netdev_alloc_frag(),
1677 * it's from lowmem.
1678 */
1679 sgt = vaddr + qm_fd_get_offset(fd);
1680
1681 /* sgt[0] is from lowmem, was dma_map_single()-ed */
1682 dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
1683 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1684
1685 /* remaining pages were mapped with skb_frag_dma_map() */
1686 for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
1687 !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
1688 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1689
1690 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
1691 qm_sg_entry_get_len(&sgt[i]), dma_dir);
1692 }
1693 } else {
1694 dma_unmap_single(priv->tx_dma_dev, addr,
1695 qm_fd_get_offset(fd) + qm_fd_get_length(fd),
1696 dma_dir);
1697 }
1698
1699 swbp = (struct dpaa_eth_swbp *)vaddr;
1700 skb = swbp->skb;
1701
1702 /* No skb backpointer is set when running XDP. An xdp_frame
1703 * backpointer is saved instead.
1704 */
1705 if (!skb) {
1706 xdp_return_frame(swbp->xdpf);
1707 return NULL;
1708 }
1709
1710 /* DMA unmapping is required before accessing the HW provided info */
1711 if (ts && priv->tx_tstamp &&
1712 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1713 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1714
1715 if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
1716 &ns)) {
1717 shhwtstamps.hwtstamp = ns_to_ktime(ns);
1718 skb_tstamp_tx(skb, &shhwtstamps);
1719 } else {
1720 dev_warn(dev, "fman_port_get_tstamp failed!\n");
1721 }
1722 }
1723
1724 if (qm_fd_get_format(fd) == qm_fd_sg)
1725 /* Free the page that we allocated on Tx for the SGT */
1726 free_pages((unsigned long)vaddr, 0);
1727
1728 return skb;
1729 }
1730
1731 static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
1732 {
1733 /* The parser has run and performed L4 checksum validation.
1734 * We know there were no parser errors (and implicitly no
1735 * L4 csum error), otherwise we wouldn't be here.
1736 */
1737 if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1738 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
1739 return CHECKSUM_UNNECESSARY;
1740
1741 /* We're here because either the parser didn't run or the L4 checksum
1742 * was not verified. This may include the case of a UDP frame with
1743 * checksum zero or an L4 proto other than TCP/UDP
1744 */
1745 return CHECKSUM_NONE;
1746 }
1747
1748 #define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
1749
1750 /* Build a linear skb around the received buffer.
1751 * We are guaranteed there is enough room at the end of the data buffer to
1752 * accommodate the shared info area of the skb.
1753 */
1754 static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1755 const struct qm_fd *fd)
1756 {
1757 ssize_t fd_off = qm_fd_get_offset(fd);
1758 dma_addr_t addr = qm_fd_addr(fd);
1759 struct dpaa_bp *dpaa_bp;
1760 struct sk_buff *skb;
1761 void *vaddr;
1762
1763 vaddr = phys_to_virt(addr);
1764 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1765
1766 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1767 if (!dpaa_bp)
1768 goto free_buffer;
1769
1770 skb = build_skb(vaddr, dpaa_bp->size +
1771 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1772 if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
1773 goto free_buffer;
1774 skb_reserve(skb, fd_off);
1775 skb_put(skb, qm_fd_get_length(fd));
1776
1777 skb->ip_summed = rx_csum_offload(priv, fd);
1778
1779 return skb;
1780
1781 free_buffer:
1782 free_pages((unsigned long)vaddr, 0);
1783 return NULL;
1784 }
1785
1786 /* Build an skb with the data of the first S/G entry in the linear portion and
1787 * the rest of the frame as skb fragments.
1788 *
1789 * The page fragment holding the S/G Table is recycled here.
1790 */
1791 static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1792 const struct qm_fd *fd)
1793 {
1794 ssize_t fd_off = qm_fd_get_offset(fd);
1795 dma_addr_t addr = qm_fd_addr(fd);
1796 const struct qm_sg_entry *sgt;
1797 struct page *page, *head_page;
1798 struct dpaa_bp *dpaa_bp;
1799 void *vaddr, *sg_vaddr;
1800 int frag_off, frag_len;
1801 struct sk_buff *skb;
1802 dma_addr_t sg_addr;
1803 int page_offset;
1804 unsigned int sz;
1805 int *count_ptr;
1806 int i, j;
1807
1808 vaddr = phys_to_virt(addr);
1809 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1810
1811 /* Iterate through the SGT entries and add data buffers to the skb */
1812 sgt = vaddr + fd_off;
1813 skb = NULL;
1814 for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1815 /* Extension bit is not supported */
1816 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1817
1818 sg_addr = qm_sg_addr(&sgt[i]);
1819 sg_vaddr = phys_to_virt(sg_addr);
1820 WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
1821
1822 dma_unmap_page(priv->rx_dma_dev, sg_addr,
1823 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1824
1825 /* We may use multiple Rx pools */
1826 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1827 if (!dpaa_bp)
1828 goto free_buffers;
1829
1830 if (!skb) {
1831 sz = dpaa_bp->size +
1832 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1833 skb = build_skb(sg_vaddr, sz);
1834 if (WARN_ON(!skb))
1835 goto free_buffers;
1836
1837 skb->ip_summed = rx_csum_offload(priv, fd);
1838
1839 /* Make sure forwarded skbs will have enough space
1840 * on Tx, if extra headers are added.
1841 */
1842 WARN_ON(fd_off != priv->rx_headroom);
1843 skb_reserve(skb, fd_off);
1844 skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1845 } else {
1846 /* Not the first S/G entry; all data from the buffer will
1847 * be added in an skb fragment; the fragment index is offset
1848 * by one since the first S/G entry was incorporated in the
1849 * linear part of the skb.
1850 *
1851 * Caution: 'page' may be a tail page.
1852 */
1853 page = virt_to_page(sg_vaddr);
1854 head_page = virt_to_head_page(sg_vaddr);
1855
1856 /* Compute offset in (possibly tail) page */
1857 page_offset = ((unsigned long)sg_vaddr &
1858 (PAGE_SIZE - 1)) +
1859 (page_address(page) - page_address(head_page));
1860 /* page_offset only refers to the beginning of sgt[i];
1861 * but the buffer itself may have an internal offset.
1862 */
1863 frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1864 frag_len = qm_sg_entry_get_len(&sgt[i]);
1865 /* skb_add_rx_frag() does no checking on the page; if
1866 * we pass it a tail page, we'll end up with
1867 * bad page accounting and eventually with segfaults.
1868 */
1869 skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1870 frag_len, dpaa_bp->size);
1871 }
1872
1873 /* Update the pool count for the current {cpu x bpool} */
1874 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1875 (*count_ptr)--;
1876
1877 if (qm_sg_entry_is_final(&sgt[i]))
1878 break;
1879 }
1880 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1881
1882 /* free the SG table buffer */
1883 free_pages((unsigned long)vaddr, 0);
1884
1885 return skb;
1886
1887 free_buffers:
1888 /* free all the SG entries */
1889 for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
1890 sg_addr = qm_sg_addr(&sgt[j]);
1891 sg_vaddr = phys_to_virt(sg_addr);
1892 /* all pages 0..i were unmapped */
1893 if (j > i)
1894 dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
1895 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1896 free_pages((unsigned long)sg_vaddr, 0);
1897 /* counters 0..i-1 were decremented */
1898 if (j >= i) {
1899 dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
1900 if (dpaa_bp) {
1901 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1902 (*count_ptr)--;
1903 }
1904 }
1905
1906 if (qm_sg_entry_is_final(&sgt[j]))
1907 break;
1908 }
1909 /* free the SGT fragment */
1910 free_pages((unsigned long)vaddr, 0);
1911
1912 return NULL;
1913 }
1914
1915 static int skb_to_contig_fd(struct dpaa_priv *priv,
1916 struct sk_buff *skb, struct qm_fd *fd,
1917 int *offset)
1918 {
1919 struct net_device *net_dev = priv->net_dev;
1920 enum dma_data_direction dma_dir;
1921 struct dpaa_eth_swbp *swbp;
1922 unsigned char *buff_start;
1923 dma_addr_t addr;
1924 int err;
1925
1926 /* We are guaranteed to have at least tx_headroom bytes
1927 * available, so just use that for offset.
1928 */
1929 fd->bpid = FSL_DPAA_BPID_INV;
1930 buff_start = skb->data - priv->tx_headroom;
1931 dma_dir = DMA_TO_DEVICE;
1932
1933 swbp = (struct dpaa_eth_swbp *)buff_start;
1934 swbp->skb = skb;
1935
1936 /* Enable L3/L4 hardware checksum computation.
1937 *
1938 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1939 * need to write into the skb.
1940 */
1941 err = dpaa_enable_tx_csum(priv, skb, fd,
1942 buff_start + DPAA_TX_PRIV_DATA_SIZE);
1943 if (unlikely(err < 0)) {
1944 if (net_ratelimit())
1945 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1946 err);
1947 return err;
1948 }
1949
1950 /* Fill in the rest of the FD fields */
1951 qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1952 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1953
1954 /* Map the entire buffer size that may be seen by FMan, but no more */
1955 addr = dma_map_single(priv->tx_dma_dev, buff_start,
1956 priv->tx_headroom + skb->len, dma_dir);
1957 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
1958 if (net_ratelimit())
1959 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1960 return -EINVAL;
1961 }
1962 qm_fd_addr_set64(fd, addr);
1963
1964 return 0;
1965 }
1966
1967 static int skb_to_sg_fd(struct dpaa_priv *priv,
1968 struct sk_buff *skb, struct qm_fd *fd)
1969 {
1970 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1971 const int nr_frags = skb_shinfo(skb)->nr_frags;
1972 struct net_device *net_dev = priv->net_dev;
1973 struct dpaa_eth_swbp *swbp;
1974 struct qm_sg_entry *sgt;
1975 void *buff_start;
1976 skb_frag_t *frag;
1977 dma_addr_t addr;
1978 size_t frag_len;
1979 struct page *p;
1980 int i, j, err;
1981
1982 /* get a page to store the SGTable */
1983 p = dev_alloc_pages(0);
1984 if (unlikely(!p)) {
1985 netdev_err(net_dev, "dev_alloc_pages() failed\n");
1986 return -ENOMEM;
1987 }
1988 buff_start = page_address(p);
1989
1990 /* Enable L3/L4 hardware checksum computation.
1991 *
1992 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1993 * need to write into the skb.
1994 */
1995 err = dpaa_enable_tx_csum(priv, skb, fd,
1996 buff_start + DPAA_TX_PRIV_DATA_SIZE);
1997 if (unlikely(err < 0)) {
1998 if (net_ratelimit())
1999 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
2000 err);
2001 goto csum_failed;
2002 }
2003
2004 /* SGT[0] is used by the linear part */
2005 sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
2006 frag_len = skb_headlen(skb);
2007 qm_sg_entry_set_len(&sgt[0], frag_len);
2008 sgt[0].bpid = FSL_DPAA_BPID_INV;
2009 sgt[0].offset = 0;
2010 addr = dma_map_single(priv->tx_dma_dev, skb->data,
2011 skb_headlen(skb), dma_dir);
2012 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2013 netdev_err(priv->net_dev, "DMA mapping failed\n");
2014 err = -EINVAL;
2015 goto sg0_map_failed;
2016 }
2017 qm_sg_entry_set64(&sgt[0], addr);
2018
2019 /* populate the rest of SGT entries */
2020 for (i = 0; i < nr_frags; i++) {
2021 frag = &skb_shinfo(skb)->frags[i];
2022 frag_len = skb_frag_size(frag);
2023 WARN_ON(!skb_frag_page(frag));
2024 addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
2025 frag_len, dma_dir);
2026 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2027 netdev_err(priv->net_dev, "DMA mapping failed\n");
2028 err = -EINVAL;
2029 goto sg_map_failed;
2030 }
2031
2032 qm_sg_entry_set_len(&sgt[i + 1], frag_len);
2033 sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
2034 sgt[i + 1].offset = 0;
2035
2036 /* keep the offset in the address */
2037 qm_sg_entry_set64(&sgt[i + 1], addr);
2038 }
2039
2040 /* Set the final bit in the last used entry of the SGT */
2041 qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
2042
2043 /* set fd offset to priv->tx_headroom */
2044 qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
2045
2046 /* DMA map the SGT page */
2047 swbp = (struct dpaa_eth_swbp *)buff_start;
2048 swbp->skb = skb;
2049
2050 addr = dma_map_page(priv->tx_dma_dev, p, 0,
2051 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
2052 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2053 netdev_err(priv->net_dev, "DMA mapping failed\n");
2054 err = -EINVAL;
2055 goto sgt_map_failed;
2056 }
2057
2058 fd->bpid = FSL_DPAA_BPID_INV;
2059 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2060 qm_fd_addr_set64(fd, addr);
2061
2062 return 0;
2063
2064 sgt_map_failed:
2065 sg_map_failed:
2066 for (j = 0; j < i; j++)
2067 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
2068 qm_sg_entry_get_len(&sgt[j]), dma_dir);
2069 sg0_map_failed:
2070 csum_failed:
2071 free_pages((unsigned long)buff_start, 0);
2072
2073 return err;
2074 }
2075
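/* Enqueue the frame descriptor on the egress frame queue selected by
 * 'queue'. When the FD does not come from a buffer pool (bpid invalid),
 * the Tx confirmation FQ for this queue is encoded in the FD command
 * word. The enqueue is retried a bounded number of times while the
 * portal reports -EBUSY; any other failure is counted as a Tx FIFO error.
 */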
2076 static inline int dpaa_xmit(struct dpaa_priv *priv,
2077 struct rtnl_link_stats64 *percpu_stats,
2078 int queue,
2079 struct qm_fd *fd)
2080 {
2081 struct qman_fq *egress_fq;
2082 int err, i;
2083
2084 egress_fq = priv->egress_fqs[queue];
2085 if (fd->bpid == FSL_DPAA_BPID_INV)
2086 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
2087
2088 /* Trace this Tx fd */
2089 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
2090
2091 for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
2092 err = qman_enqueue(egress_fq, fd);
2093 if (err != -EBUSY)
2094 break;
2095 }
2096
2097 if (unlikely(err < 0)) {
2098 percpu_stats->tx_fifo_errors++;
2099 return err;
2100 }
2101
2102 percpu_stats->tx_packets++;
2103 percpu_stats->tx_bytes += qm_fd_get_length(fd);
2104
2105 return 0;
2106 }
2107
2108 #ifdef CONFIG_DPAA_ERRATUM_A050385
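/* Erratum A050385 workaround for the skb Tx path: the data start (and,
 * for S/G frames, the fragment boundaries) must be aligned to
 * DPAA_A050385_ALIGN bytes. When the skb does not already satisfy these
 * constraints, its contents are copied into a freshly allocated, aligned
 * linear buffer and the original skb is freed.
 */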
2109 static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
2110 {
2111 struct dpaa_priv *priv = netdev_priv(net_dev);
2112 struct sk_buff *new_skb, *skb = *s;
2113 unsigned char *start, i;
2114
2115 /* check linear buffer alignment */
2116 if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
2117 goto workaround;
2118
2119 /* linear buffers just need to have an aligned start */
2120 if (!skb_is_nonlinear(skb))
2121 return 0;
2122
2123 /* linear data size for nonlinear skbs needs to be aligned */
2124 if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
2125 goto workaround;
2126
2127 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2128 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2129
2130 /* all fragments need to have aligned start addresses */
2131 if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
2132 goto workaround;
2133
2134 /* all but last fragment need to have aligned sizes */
2135 if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
2136 (i < skb_shinfo(skb)->nr_frags - 1))
2137 goto workaround;
2138 }
2139
2140 return 0;
2141
2142 workaround:
2143 /* copy all the skb content into a new linear buffer */
2144 new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
2145 priv->tx_headroom);
2146 if (!new_skb)
2147 return -ENOMEM;
2148
2149 /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
2150 skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
2151
2152 /* Workaround for DPAA_A050385 requires data start to be aligned */
2153 start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
2154 if (start - new_skb->data)
2155 skb_reserve(new_skb, start - new_skb->data);
2156
2157 skb_put(new_skb, skb->len);
2158 skb_copy_bits(skb, 0, new_skb->data, skb->len);
2159 skb_copy_header(new_skb, skb);
2160 new_skb->dev = skb->dev;
2161
2162 /* Copy relevant timestamp info from the old skb to the new */
2163 if (priv->tx_tstamp) {
2164 skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
2165 skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
2166 skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
2167 if (skb->sk)
2168 skb_set_owner_w(new_skb, skb->sk);
2169 }
2170
2171 /* We move the headroom when we align it so we have to reset the
2172 * network and transport header offsets relative to the new data
2173 * pointer. The checksum offload relies on these offsets.
2174 */
2175 skb_set_network_header(new_skb, skb_network_offset(skb));
2176 skb_set_transport_header(new_skb, skb_transport_offset(skb));
2177
2178 dev_kfree_skb(skb);
2179 *s = new_skb;
2180
2181 return 0;
2182 }
2183
2184 static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
2185 struct xdp_frame **init_xdpf)
2186 {
2187 struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
2188 void *new_buff, *aligned_data;
2189 struct page *p;
2190 u32 data_shift;
2191 int headroom;
2192
2193 /* Check the data alignment and make sure the headroom is large
2194 * enough to store the xdpf backpointer. Use an aligned headroom
2195 * value.
2196 *
2197 * Due to alignment constraints, we give XDP access to the full 256
2198 * byte frame headroom. If the XDP program uses all of it, copy the
2199 * data to a new buffer and make room for storing the backpointer.
2200 */
2201 if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
2202 xdpf->headroom >= priv->tx_headroom) {
2203 xdpf->headroom = priv->tx_headroom;
2204 return 0;
2205 }
2206
2207 /* Try to move the data inside the buffer just enough to align it and
2208 * store the xdpf backpointer. If the available headroom isn't large
2209 * enough, resort to allocating a new buffer and copying the data.
2210 */
2211 aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
2212 data_shift = xdpf->data - aligned_data;
2213
2214 /* The XDP frame's headroom needs to be large enough to accommodate
2215 * shifting the data as well as storing the xdpf backpointer.
2216 */
2217 if (xdpf->headroom >= data_shift + priv->tx_headroom) {
2218 memmove(aligned_data, xdpf->data, xdpf->len);
2219 xdpf->data = aligned_data;
2220 xdpf->headroom = priv->tx_headroom;
2221 return 0;
2222 }
2223
2224 /* The new xdp_frame is stored in the new buffer. Reserve enough space
2225 * in the headroom for storing it along with the driver's private
2226 * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
2227 * guarantee the data's alignment in the buffer.
2228 */
2229 headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
2230 DPAA_FD_DATA_ALIGNMENT);
2231
2232 /* Ensure the extended headroom and data don't overflow the buffer,
2233 * while maintaining the mandatory tailroom.
2234 */
2235 if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
2236 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
2237 return -ENOMEM;
2238
2239 p = dev_alloc_pages(0);
2240 if (unlikely(!p))
2241 return -ENOMEM;
2242
2243 /* Copy the data to the new buffer at a properly aligned offset */
2244 new_buff = page_address(p);
2245 memcpy(new_buff + headroom, xdpf->data, xdpf->len);
2246
2247 /* Create an XDP frame around the new buffer in a similar fashion
2248 * to xdp_convert_buff_to_frame.
2249 */
2250 new_xdpf = new_buff;
2251 new_xdpf->data = new_buff + headroom;
2252 new_xdpf->len = xdpf->len;
2253 new_xdpf->headroom = priv->tx_headroom;
2254 new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
2255 new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
2256
2257 /* Release the initial buffer */
2258 xdp_return_frame_rx_napi(xdpf);
2259
2260 *init_xdpf = new_xdpf;
2261 return 0;
2262 }
2263 #endif
2264
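/* Main transmit entry point. Builds a contiguous or S/G frame descriptor
 * around the skb (linearizing it first if it carries more fragments than
 * the SGT can hold and applying the A050385 workaround where needed),
 * flags the FD for hardware timestamping when requested, then hands it to
 * dpaa_xmit(). On failure the skb is dropped and counted as a Tx error.
 */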
2265 static netdev_tx_t
2266 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2267 {
2268 const int queue_mapping = skb_get_queue_mapping(skb);
2269 bool nonlinear = skb_is_nonlinear(skb);
2270 struct rtnl_link_stats64 *percpu_stats;
2271 struct dpaa_percpu_priv *percpu_priv;
2272 struct netdev_queue *txq;
2273 struct dpaa_priv *priv;
2274 struct qm_fd fd;
2275 int offset = 0;
2276 int err = 0;
2277
2278 priv = netdev_priv(net_dev);
2279 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2280 percpu_stats = &percpu_priv->stats;
2281
2282 qm_fd_clear_fd(&fd);
2283
2284 if (!nonlinear) {
2285 /* We're going to store the skb backpointer at the beginning
2286 * of the data buffer, so we need a privately owned skb
2287 *
2288 * We've made sure skb is not shared in dev->priv_flags,
2289 * we need to verify the skb head is not cloned
2290 */
2291 if (skb_cow_head(skb, priv->tx_headroom))
2292 goto enomem;
2293
2294 WARN_ON(skb_is_nonlinear(skb));
2295 }
2296
2297 /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
2298 * make sure we don't feed FMan with more fragments than it supports.
2299 */
2300 if (unlikely(nonlinear &&
2301 (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
2302 /* If the egress skb contains more fragments than we support
2303 * we have no choice but to linearize it ourselves.
2304 */
2305 if (__skb_linearize(skb))
2306 goto enomem;
2307
2308 nonlinear = skb_is_nonlinear(skb);
2309 }
2310
2311 #ifdef CONFIG_DPAA_ERRATUM_A050385
2312 if (unlikely(fman_has_errata_a050385())) {
2313 if (dpaa_a050385_wa_skb(net_dev, &skb))
2314 goto enomem;
2315 nonlinear = skb_is_nonlinear(skb);
2316 }
2317 #endif
2318
2319 if (nonlinear) {
2320 /* Just create a S/G fd based on the skb */
2321 err = skb_to_sg_fd(priv, skb, &fd);
2322 percpu_priv->tx_frag_skbuffs++;
2323 } else {
2324 /* Create a contig FD from this skb */
2325 err = skb_to_contig_fd(priv, skb, &fd, &offset);
2326 }
2327 if (unlikely(err < 0))
2328 goto skb_to_fd_failed;
2329
2330 txq = netdev_get_tx_queue(net_dev, queue_mapping);
2331
2332 /* LLTX requires to do our own update of trans_start */
2333 txq_trans_cond_update(txq);
2334
2335 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2336 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
2337 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2338 }
2339
2340 if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
2341 return NETDEV_TX_OK;
2342
2343 dpaa_cleanup_tx_fd(priv, &fd, false);
2344 skb_to_fd_failed:
2345 enomem:
2346 percpu_stats->tx_errors++;
2347 dev_kfree_skb(skb);
2348 return NETDEV_TX_OK;
2349 }
2350
2351 static void dpaa_rx_error(struct net_device *net_dev,
2352 const struct dpaa_priv *priv,
2353 struct dpaa_percpu_priv *percpu_priv,
2354 const struct qm_fd *fd,
2355 u32 fqid)
2356 {
2357 if (net_ratelimit())
2358 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2359 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
2360
2361 percpu_priv->stats.rx_errors++;
2362
2363 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
2364 percpu_priv->rx_errors.dme++;
2365 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
2366 percpu_priv->rx_errors.fpe++;
2367 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
2368 percpu_priv->rx_errors.fse++;
2369 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
2370 percpu_priv->rx_errors.phe++;
2371
2372 dpaa_fd_release(net_dev, fd);
2373 }
2374
2375 static void dpaa_tx_error(struct net_device *net_dev,
2376 const struct dpaa_priv *priv,
2377 struct dpaa_percpu_priv *percpu_priv,
2378 const struct qm_fd *fd,
2379 u32 fqid)
2380 {
2381 struct sk_buff *skb;
2382
2383 if (net_ratelimit())
2384 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2385 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
2386
2387 percpu_priv->stats.tx_errors++;
2388
2389 skb = dpaa_cleanup_tx_fd(priv, fd, false);
2390 dev_kfree_skb(skb);
2391 }
2392
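/* NAPI poll: drain up to 'budget' entries from the affine QMan portal's
 * DQRR. Portal dequeue interrupts are re-armed when the budget was not
 * exhausted or when the interface is going down, and any XDP_REDIRECT
 * work batched during this run is flushed before returning.
 */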
2393 static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2394 {
2395 struct dpaa_napi_portal *np =
2396 container_of(napi, struct dpaa_napi_portal, napi);
2397 int cleaned;
2398
2399 np->xdp_act = 0;
2400
2401 cleaned = qman_p_poll_dqrr(np->p, budget);
2402
2403 if (cleaned < budget) {
2404 napi_complete_done(napi, cleaned);
2405 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2406 } else if (np->down) {
2407 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2408 }
2409
2410 if (np->xdp_act & XDP_REDIRECT)
2411 xdp_do_flush();
2412
2413 return cleaned;
2414 }
2415
2416 static void dpaa_tx_conf(struct net_device *net_dev,
2417 const struct dpaa_priv *priv,
2418 struct dpaa_percpu_priv *percpu_priv,
2419 const struct qm_fd *fd,
2420 u32 fqid)
2421 {
2422 struct sk_buff *skb;
2423
2424 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2425 if (net_ratelimit())
2426 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2427 be32_to_cpu(fd->status) &
2428 FM_FD_STAT_TX_ERRORS);
2429
2430 percpu_priv->stats.tx_errors++;
2431 }
2432
2433 percpu_priv->tx_confirm++;
2434
2435 skb = dpaa_cleanup_tx_fd(priv, fd, true);
2436
2437 consume_skb(skb);
2438 }
2439
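/* Called from the DQRR callbacks. When invoked in interrupt context
 * (sched_napi set), dequeue interrupts are masked on the portal and
 * further processing is deferred to NAPI; returns 1 in that case so the
 * caller can stop dequeueing, 0 otherwise.
 */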
2440 static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2441 struct qman_portal *portal, bool sched_napi)
2442 {
2443 if (sched_napi) {
2444 /* Disable QMan IRQ and invoke NAPI */
2445 qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2446
2447 percpu_priv->np.p = portal;
2448 napi_schedule(&percpu_priv->np.napi);
2449 percpu_priv->in_interrupt++;
2450 return 1;
2451 }
2452 return 0;
2453 }
2454
2455 static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2456 struct qman_fq *fq,
2457 const struct qm_dqrr_entry *dq,
2458 bool sched_napi)
2459 {
2460 struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2461 struct dpaa_percpu_priv *percpu_priv;
2462 struct net_device *net_dev;
2463 struct dpaa_bp *dpaa_bp;
2464 struct dpaa_priv *priv;
2465
2466 net_dev = dpaa_fq->net_dev;
2467 priv = netdev_priv(net_dev);
2468 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2469 if (!dpaa_bp)
2470 return qman_cb_dqrr_consume;
2471
2472 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2473
2474 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2475 return qman_cb_dqrr_stop;
2476
2477 dpaa_eth_refill_bpools(priv);
2478 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2479
2480 return qman_cb_dqrr_consume;
2481 }
2482
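/* Transmit a single xdp_frame. The xdpf backpointer is stored in the
 * buffer headroom (with the skb backpointer left NULL so the confirmation
 * path can tell the two apart), then a contiguous FD is built around the
 * frame and enqueued on the Tx queue of the current CPU.
 */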
2483 static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
2484 struct xdp_frame *xdpf)
2485 {
2486 struct dpaa_priv *priv = netdev_priv(net_dev);
2487 struct rtnl_link_stats64 *percpu_stats;
2488 struct dpaa_percpu_priv *percpu_priv;
2489 struct dpaa_eth_swbp *swbp;
2490 struct netdev_queue *txq;
2491 void *buff_start;
2492 struct qm_fd fd;
2493 dma_addr_t addr;
2494 int err;
2495
2496 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2497 percpu_stats = &percpu_priv->stats;
2498
2499 #ifdef CONFIG_DPAA_ERRATUM_A050385
2500 if (unlikely(fman_has_errata_a050385())) {
2501 if (dpaa_a050385_wa_xdpf(priv, &xdpf)) {
2502 err = -ENOMEM;
2503 goto out_error;
2504 }
2505 }
2506 #endif
2507
2508 if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) {
2509 err = -EINVAL;
2510 goto out_error;
2511 }
2512
2513 buff_start = xdpf->data - xdpf->headroom;
2514
2515 /* Leave empty the skb backpointer at the start of the buffer.
2516 * Save the XDP frame for easy cleanup on confirmation.
2517 */
2518 swbp = (struct dpaa_eth_swbp *)buff_start;
2519 swbp->skb = NULL;
2520 swbp->xdpf = xdpf;
2521
2522 qm_fd_clear_fd(&fd);
2523 fd.bpid = FSL_DPAA_BPID_INV;
2524 fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2525 qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len);
2526
2527 addr = dma_map_single(priv->tx_dma_dev, buff_start,
2528 xdpf->headroom + xdpf->len,
2529 DMA_TO_DEVICE);
2530 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2531 err = -EINVAL;
2532 goto out_error;
2533 }
2534
2535 qm_fd_addr_set64(&fd, addr);
2536
2537 /* Bump the trans_start */
2538 txq = netdev_get_tx_queue(net_dev, smp_processor_id());
2539 txq_trans_cond_update(txq);
2540
2541 err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
2542 if (err) {
2543 dma_unmap_single(priv->tx_dma_dev, addr,
2544 qm_fd_get_offset(&fd) + qm_fd_get_length(&fd),
2545 DMA_TO_DEVICE);
2546 goto out_error;
2547 }
2548
2549 return 0;
2550
2551 out_error:
2552 percpu_stats->tx_errors++;
2553 return err;
2554 }
2555
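/* Run the attached XDP program, if any, on a contiguous Rx frame and act
 * on its verdict (pass, Tx back out, redirect or drop). The FD offset and
 * length are updated to reflect any head/tail adjustments made by the
 * program, so a subsequent skb build sees the modified frame.
 */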
2556 static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
2557 struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len)
2558 {
2559 ssize_t fd_off = qm_fd_get_offset(fd);
2560 struct bpf_prog *xdp_prog;
2561 struct xdp_frame *xdpf;
2562 struct xdp_buff xdp;
2563 u32 xdp_act;
2564 int err;
2565
2566 xdp_prog = READ_ONCE(priv->xdp_prog);
2567 if (!xdp_prog)
2568 return XDP_PASS;
2569
2570 xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
2571 &dpaa_fq->xdp_rxq);
2572 xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
2573 XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
2574
2575 /* We reserve a fixed headroom of 256 bytes under the erratum and we
2576 * offer it all to XDP programs to use. If no room is left for the
2577 * xdpf backpointer on TX, we will need to copy the data.
2578 * Disable metadata support since data realignments might be required
2579 * and the information can be lost.
2580 */
2581 #ifdef CONFIG_DPAA_ERRATUM_A050385
2582 if (unlikely(fman_has_errata_a050385())) {
2583 xdp_set_data_meta_invalid(&xdp);
2584 xdp.data_hard_start = vaddr;
2585 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2586 }
2587 #endif
2588
2589 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
2590
2591 /* Update the length and the offset of the FD */
2592 qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data);
2593
2594 switch (xdp_act) {
2595 case XDP_PASS:
2596 #ifdef CONFIG_DPAA_ERRATUM_A050385
2597 *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 0 :
2598 xdp.data - xdp.data_meta;
2599 #else
2600 *xdp_meta_len = xdp.data - xdp.data_meta;
2601 #endif
2602 break;
2603 case XDP_TX:
2604 /* We can access the full headroom when sending the frame
2605 * back out
2606 */
2607 xdp.data_hard_start = vaddr;
2608 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2609 xdpf = xdp_convert_buff_to_frame(&xdp);
2610 if (unlikely(!xdpf)) {
2611 free_pages((unsigned long)vaddr, 0);
2612 break;
2613 }
2614
2615 if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf))
2616 xdp_return_frame_rx_napi(xdpf);
2617
2618 break;
2619 case XDP_REDIRECT:
2620 /* Allow redirect to use the full headroom */
2621 xdp.data_hard_start = vaddr;
2622 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2623
2624 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
2625 if (err) {
2626 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2627 free_pages((unsigned long)vaddr, 0);
2628 }
2629 break;
2630 default:
2631 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
2632 fallthrough;
2633 case XDP_ABORTED:
2634 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2635 fallthrough;
2636 case XDP_DROP:
2637 /* Free the buffer */
2638 free_pages((unsigned long)vaddr, 0);
2639 break;
2640 }
2641
2642 return xdp_act;
2643 }
2644
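/* Default Rx path, invoked once per dequeued frame. After refilling the
 * buffer pools and weeding out errored frames, the timestamp and hash are
 * extracted from the headroom, XDP is run on contiguous frames, and the
 * frame is finally turned into an skb and passed up the stack.
 */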
2645 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2646 struct qman_fq *fq,
2647 const struct qm_dqrr_entry *dq,
2648 bool sched_napi)
2649 {
2650 bool ts_valid = false, hash_valid = false;
2651 struct skb_shared_hwtstamps *shhwtstamps;
2652 unsigned int skb_len, xdp_meta_len = 0;
2653 struct rtnl_link_stats64 *percpu_stats;
2654 struct dpaa_percpu_priv *percpu_priv;
2655 const struct qm_fd *fd = &dq->fd;
2656 dma_addr_t addr = qm_fd_addr(fd);
2657 struct dpaa_napi_portal *np;
2658 enum qm_fd_format fd_format;
2659 struct net_device *net_dev;
2660 u32 fd_status, hash_offset;
2661 struct qm_sg_entry *sgt;
2662 struct dpaa_bp *dpaa_bp;
2663 struct dpaa_fq *dpaa_fq;
2664 struct dpaa_priv *priv;
2665 struct sk_buff *skb;
2666 int *count_ptr;
2667 u32 xdp_act;
2668 void *vaddr;
2669 u32 hash;
2670 u64 ns;
2671
2672 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2673 fd_status = be32_to_cpu(fd->status);
2674 fd_format = qm_fd_get_format(fd);
2675 net_dev = dpaa_fq->net_dev;
2676 priv = netdev_priv(net_dev);
2677 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2678 if (!dpaa_bp)
2679 return qman_cb_dqrr_consume;
2680
2681 /* Trace the Rx fd */
2682 trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2683
2684 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2685 percpu_stats = &percpu_priv->stats;
2686 np = &percpu_priv->np;
2687
2688 if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
2689 return qman_cb_dqrr_stop;
2690
2691 /* Make sure we didn't run out of buffers */
2692 if (unlikely(dpaa_eth_refill_bpools(priv))) {
2693 /* Unable to refill the buffer pool due to insufficient
2694 * system memory. Just release the frame back into the pool,
2695 * otherwise we'll soon end up with an empty buffer pool.
2696 */
2697 dpaa_fd_release(net_dev, &dq->fd);
2698 return qman_cb_dqrr_consume;
2699 }
2700
2701 if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
2702 if (net_ratelimit())
2703 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2704 fd_status & FM_FD_STAT_RX_ERRORS);
2705
2706 percpu_stats->rx_errors++;
2707 dpaa_fd_release(net_dev, fd);
2708 return qman_cb_dqrr_consume;
2709 }
2710
2711 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
2712 DMA_FROM_DEVICE);
2713
2714 /* prefetch the first 64 bytes of the frame or the SGT start */
2715 vaddr = phys_to_virt(addr);
2716 prefetch(vaddr + qm_fd_get_offset(fd));
2717
2718 /* The only FD types that we may receive are contig and S/G */
2719 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2720
2721 /* Account for either the contig buffer or the SGT buffer (depending on
2722 * which case we were in) having been removed from the pool.
2723 */
2724 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2725 (*count_ptr)--;
2726
2727 /* Extract the timestamp stored in the headroom before running XDP */
2728 if (priv->rx_tstamp) {
2729 if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
2730 ts_valid = true;
2731 else
2732 WARN_ONCE(1, "fman_port_get_tstamp failed!\n");
2733 }
2734
2735 /* Extract the hash stored in the headroom before running XDP */
2736 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
2737 !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
2738 &hash_offset)) {
2739 hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
2740 hash_valid = true;
2741 }
2742
2743 if (likely(fd_format == qm_fd_contig)) {
2744 xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
2745 dpaa_fq, &xdp_meta_len);
2746 np->xdp_act |= xdp_act;
2747 if (xdp_act != XDP_PASS) {
2748 percpu_stats->rx_packets++;
2749 percpu_stats->rx_bytes += qm_fd_get_length(fd);
2750 return qman_cb_dqrr_consume;
2751 }
2752 skb = contig_fd_to_skb(priv, fd);
2753 } else {
2754 /* XDP doesn't support S/G frames. Return the fragments to the
2755 * buffer pool and release the SGT.
2756 */
2757 if (READ_ONCE(priv->xdp_prog)) {
2758 WARN_ONCE(1, "S/G frames not supported under XDP\n");
2759 sgt = vaddr + qm_fd_get_offset(fd);
2760 dpaa_release_sgt_members(sgt);
2761 free_pages((unsigned long)vaddr, 0);
2762 return qman_cb_dqrr_consume;
2763 }
2764 skb = sg_fd_to_skb(priv, fd);
2765 }
2766 if (!skb)
2767 return qman_cb_dqrr_consume;
2768
2769 if (xdp_meta_len)
2770 skb_metadata_set(skb, xdp_meta_len);
2771
2772 /* Set the previously extracted timestamp */
2773 if (ts_valid) {
2774 shhwtstamps = skb_hwtstamps(skb);
2775 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2776 shhwtstamps->hwtstamp = ns_to_ktime(ns);
2777 }
2778
2779 skb->protocol = eth_type_trans(skb, net_dev);
2780
2781 /* Set the previously extracted hash */
2782 if (hash_valid) {
2783 enum pkt_hash_types type;
2784
2785 /* if L4 exists, it was used in the hash generation */
2786 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
2787 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
2788 skb_set_hash(skb, hash, type);
2789 }
2790
2791 skb_len = skb->len;
2792
2793 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
2794 percpu_stats->rx_dropped++;
2795 return qman_cb_dqrr_consume;
2796 }
2797
2798 percpu_stats->rx_packets++;
2799 percpu_stats->rx_bytes += skb_len;
2800
2801 return qman_cb_dqrr_consume;
2802 }
2803
2804 static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2805 struct qman_fq *fq,
2806 const struct qm_dqrr_entry *dq,
2807 bool sched_napi)
2808 {
2809 struct dpaa_percpu_priv *percpu_priv;
2810 struct net_device *net_dev;
2811 struct dpaa_priv *priv;
2812
2813 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2814 priv = netdev_priv(net_dev);
2815
2816 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2817
2818 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2819 return qman_cb_dqrr_stop;
2820
2821 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2822
2823 return qman_cb_dqrr_consume;
2824 }
2825
2826 static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
2827 struct qman_fq *fq,
2828 const struct qm_dqrr_entry *dq,
2829 bool sched_napi)
2830 {
2831 struct dpaa_percpu_priv *percpu_priv;
2832 struct net_device *net_dev;
2833 struct dpaa_priv *priv;
2834
2835 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2836 priv = netdev_priv(net_dev);
2837
2838 /* Trace the fd */
2839 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2840
2841 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2842
2843 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2844 return qman_cb_dqrr_stop;
2845
2846 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2847
2848 return qman_cb_dqrr_consume;
2849 }
2850
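/* Enqueue rejection notifications on egress FQs (e.g. due to congestion):
 * account the drop and release the resources still attached to the FD.
 */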
2851 static void egress_ern(struct qman_portal *portal,
2852 struct qman_fq *fq,
2853 const union qm_mr_entry *msg)
2854 {
2855 const struct qm_fd *fd = &msg->ern.fd;
2856 struct dpaa_percpu_priv *percpu_priv;
2857 const struct dpaa_priv *priv;
2858 struct net_device *net_dev;
2859 struct sk_buff *skb;
2860
2861 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2862 priv = netdev_priv(net_dev);
2863 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2864
2865 percpu_priv->stats.tx_dropped++;
2866 percpu_priv->stats.tx_fifo_errors++;
2867 count_ern(percpu_priv, msg);
2868
2869 skb = dpaa_cleanup_tx_fd(priv, fd, false);
2870 dev_kfree_skb_any(skb);
2871 }
2872
2873 static const struct dpaa_fq_cbs dpaa_fq_cbs = {
2874 .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
2875 .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
2876 .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
2877 .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
2878 .egress_ern = { .cb = { .ern = egress_ern } }
2879 };
2880
2881 static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
2882 {
2883 struct dpaa_percpu_priv *percpu_priv;
2884 int i;
2885
2886 for_each_online_cpu(i) {
2887 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2888
2889 percpu_priv->np.down = false;
2890 napi_enable(&percpu_priv->np.napi);
2891 }
2892 }
2893
2894 static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
2895 {
2896 struct dpaa_percpu_priv *percpu_priv;
2897 int i;
2898
2899 for_each_online_cpu(i) {
2900 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2901
2902 percpu_priv->np.down = true;
2903 napi_disable(&percpu_priv->np.napi);
2904 }
2905 }
2906
2907 static void dpaa_adjust_link(struct net_device *net_dev)
2908 {
2909 struct mac_device *mac_dev;
2910 struct dpaa_priv *priv;
2911
2912 priv = netdev_priv(net_dev);
2913 mac_dev = priv->mac_dev;
2914 mac_dev->adjust_link(mac_dev);
2915 }
2916
2917 /* The Aquantia PHYs are capable of performing rate adaptation */
2918 #define PHY_VEND_AQUANTIA 0x03a1b400
2919 #define PHY_VEND_AQUANTIA2 0x31c31c00
2920
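/* Connect to the PHY described in the device tree and restrict its
 * advertised modes to what the controller supports, except for XGMII
 * links to Aquantia PHYs, which can rate-adapt and are therefore left
 * unrestricted.
 */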
2921 static int dpaa_phy_init(struct net_device *net_dev)
2922 {
2923 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2924 struct mac_device *mac_dev;
2925 struct phy_device *phy_dev;
2926 struct dpaa_priv *priv;
2927 u32 phy_vendor;
2928
2929 priv = netdev_priv(net_dev);
2930 mac_dev = priv->mac_dev;
2931
2932 phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
2933 &dpaa_adjust_link, 0,
2934 mac_dev->phy_if);
2935 if (!phy_dev) {
2936 netif_err(priv, ifup, net_dev, "init_phy() failed\n");
2937 return -ENODEV;
2938 }
2939
2940 phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
2941 /* Unless the PHY is capable of rate adaptation */
2942 if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
2943 (phy_vendor != PHY_VEND_AQUANTIA &&
2944 phy_vendor != PHY_VEND_AQUANTIA2)) {
2945 /* remove any features not supported by the controller */
2946 ethtool_convert_legacy_u32_to_link_mode(mask,
2947 mac_dev->if_support);
2948 linkmode_and(phy_dev->supported, phy_dev->supported, mask);
2949 }
2950
2951 phy_support_asym_pause(phy_dev);
2952
2953 mac_dev->phy_dev = phy_dev;
2954 net_dev->phydev = phy_dev;
2955
2956 return 0;
2957 }
2958
2959 static int dpaa_open(struct net_device *net_dev)
2960 {
2961 struct mac_device *mac_dev;
2962 struct dpaa_priv *priv;
2963 int err, i;
2964
2965 priv = netdev_priv(net_dev);
2966 mac_dev = priv->mac_dev;
2967 dpaa_eth_napi_enable(priv);
2968
2969 err = dpaa_phy_init(net_dev);
2970 if (err)
2971 goto phy_init_failed;
2972
2973 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
2974 err = fman_port_enable(mac_dev->port[i]);
2975 if (err)
2976 goto mac_start_failed;
2977 }
2978
2979 err = priv->mac_dev->enable(mac_dev->fman_mac);
2980 if (err < 0) {
2981 netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
2982 goto mac_start_failed;
2983 }
2984 phy_start(priv->mac_dev->phy_dev);
2985
2986 netif_tx_start_all_queues(net_dev);
2987
2988 return 0;
2989
2990 mac_start_failed:
2991 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
2992 fman_port_disable(mac_dev->port[i]);
2993
2994 phy_init_failed:
2995 dpaa_eth_napi_disable(priv);
2996
2997 return err;
2998 }
2999
3000 static int dpaa_eth_stop(struct net_device *net_dev)
3001 {
3002 struct dpaa_priv *priv;
3003 int err;
3004
3005 err = dpaa_stop(net_dev);
3006
3007 priv = netdev_priv(net_dev);
3008 dpaa_eth_napi_disable(priv);
3009
3010 return err;
3011 }
3012
3013 static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu)
3014 {
3015 int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom;
3016
3017 /* We do not support S/G fragments when XDP is enabled.
3018 * Limit the MTU in relation to the buffer size.
3019 */
3020 if (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data) {
3021 dev_warn(priv->net_dev->dev.parent,
3022 "The maximum MTU for XDP is %d\n",
3023 max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN);
3024 return false;
3025 }
3026
3027 return true;
3028 }
3029
3030 static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
3031 {
3032 struct dpaa_priv *priv = netdev_priv(net_dev);
3033
3034 if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
3035 return -EINVAL;
3036
3037 net_dev->mtu = new_mtu;
3038 return 0;
3039 }
3040
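/* Install or remove an XDP program. If the interface is running it is
 * briefly brought down around the swap so in-flight traffic does not
 * observe a half-switched configuration; MTUs too large for single-buffer
 * frames are rejected up front since XDP does not handle S/G frames.
 */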
3041 static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
3042 {
3043 struct dpaa_priv *priv = netdev_priv(net_dev);
3044 struct bpf_prog *old_prog;
3045 int err;
3046 bool up;
3047
3048 /* S/G fragments are not supported in XDP-mode */
3049 if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) {
3050 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
3051 return -EINVAL;
3052 }
3053
3054 up = netif_running(net_dev);
3055
3056 if (up)
3057 dpaa_eth_stop(net_dev);
3058
3059 old_prog = xchg(&priv->xdp_prog, bpf->prog);
3060 if (old_prog)
3061 bpf_prog_put(old_prog);
3062
3063 if (up) {
3064 err = dpaa_open(net_dev);
3065 if (err) {
3066 NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed");
3067 return err;
3068 }
3069 }
3070
3071 return 0;
3072 }
3073
3074 static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
3075 {
3076 switch (xdp->command) {
3077 case XDP_SETUP_PROG:
3078 return dpaa_setup_xdp(net_dev, xdp);
3079 default:
3080 return -EINVAL;
3081 }
3082 }
3083
3084 static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
3085 struct xdp_frame **frames, u32 flags)
3086 {
3087 struct xdp_frame *xdpf;
3088 int i, nxmit = 0;
3089
3090 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3091 return -EINVAL;
3092
3093 if (!netif_running(net_dev))
3094 return -ENETDOWN;
3095
3096 for (i = 0; i < n; i++) {
3097 xdpf = frames[i];
3098 if (dpaa_xdp_xmit_frame(net_dev, xdpf))
3099 break;
3100 nxmit++;
3101 }
3102
3103 return nxmit;
3104 }
3105
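/* SIOCSHWTSTAMP handler. Hardware timestamping can only be enabled for Rx
 * and Tx together at the MAC level, so disabling one direction merely
 * clears the corresponding software flag.
 */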
3106 static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3107 {
3108 struct dpaa_priv *priv = netdev_priv(dev);
3109 struct hwtstamp_config config;
3110
3111 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
3112 return -EFAULT;
3113
3114 switch (config.tx_type) {
3115 case HWTSTAMP_TX_OFF:
3116 /* Couldn't disable rx/tx timestamping separately.
3117 * Do nothing here.
3118 */
3119 priv->tx_tstamp = false;
3120 break;
3121 case HWTSTAMP_TX_ON:
3122 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
3123 priv->tx_tstamp = true;
3124 break;
3125 default:
3126 return -ERANGE;
3127 }
3128
3129 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
3130 /* Couldn't disable rx/tx timestamping separately.
3131 * Do nothing here.
3132 */
3133 priv->rx_tstamp = false;
3134 } else {
3135 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
3136 priv->rx_tstamp = true;
3137 /* TS is set for all frame types, not only those requested */
3138 config.rx_filter = HWTSTAMP_FILTER_ALL;
3139 }
3140
3141 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
3142 -EFAULT : 0;
3143 }
3144
3145 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
3146 {
3147 int ret = -EINVAL;
3148
3149 if (cmd == SIOCGMIIREG) {
3150 if (net_dev->phydev)
3151 return phy_mii_ioctl(net_dev->phydev, rq, cmd);
3152 }
3153
3154 if (cmd == SIOCSHWTSTAMP)
3155 return dpaa_ts_ioctl(net_dev, rq, cmd);
3156
3157 return ret;
3158 }
3159
3160 static const struct net_device_ops dpaa_ops = {
3161 .ndo_open = dpaa_open,
3162 .ndo_start_xmit = dpaa_start_xmit,
3163 .ndo_stop = dpaa_eth_stop,
3164 .ndo_tx_timeout = dpaa_tx_timeout,
3165 .ndo_get_stats64 = dpaa_get_stats64,
3166 .ndo_change_carrier = fixed_phy_change_carrier,
3167 .ndo_set_mac_address = dpaa_set_mac_address,
3168 .ndo_validate_addr = eth_validate_addr,
3169 .ndo_set_rx_mode = dpaa_set_rx_mode,
3170 .ndo_eth_ioctl = dpaa_ioctl,
3171 .ndo_setup_tc = dpaa_setup_tc,
3172 .ndo_change_mtu = dpaa_change_mtu,
3173 .ndo_bpf = dpaa_xdp,
3174 .ndo_xdp_xmit = dpaa_xdp_xmit,
3175 };
3176
3177 static int dpaa_napi_add(struct net_device *net_dev)
3178 {
3179 struct dpaa_priv *priv = netdev_priv(net_dev);
3180 struct dpaa_percpu_priv *percpu_priv;
3181 int cpu;
3182
3183 for_each_possible_cpu(cpu) {
3184 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
3185
3186 netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
3187 }
3188
3189 return 0;
3190 }
3191
3192 static void dpaa_napi_del(struct net_device *net_dev)
3193 {
3194 struct dpaa_priv *priv = netdev_priv(net_dev);
3195 struct dpaa_percpu_priv *percpu_priv;
3196 int cpu;
3197
3198 for_each_possible_cpu(cpu) {
3199 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
3200
3201 netif_napi_del(&percpu_priv->np.napi);
3202 }
3203 }
3204
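/* Free callback for buffers drained from a buffer pool: undo the Rx DMA
 * mapping and release the page fragment backing the buffer.
 */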
3205 static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
3206 struct bm_buffer *bmb)
3207 {
3208 dma_addr_t addr = bm_buf_addr(bmb);
3209
3210 dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
3211 DMA_FROM_DEVICE);
3212
3213 skb_free_frag(phys_to_virt(addr));
3214 }
3215
3216 /* Alloc the dpaa_bp struct and configure default values */
3217 static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
3218 {
3219 struct dpaa_bp *dpaa_bp;
3220
3221 dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
3222 if (!dpaa_bp)
3223 return ERR_PTR(-ENOMEM);
3224
3225 dpaa_bp->bpid = FSL_DPAA_BPID_INV;
3226 dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
3227 if (!dpaa_bp->percpu_count)
3228 return ERR_PTR(-ENOMEM);
3229
3230 dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
3231
3232 dpaa_bp->seed_cb = dpaa_bp_seed;
3233 dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
3234
3235 return dpaa_bp;
3236 }
3237
3238 /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
3239 * We won't be sending congestion notifications to FMan; for now, we just use
3240 * this CGR to generate enqueue rejections to FMan in order to drop the frames
3241 * before they reach our ingress queues and eat up memory.
3242 */
3243 static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
3244 {
3245 struct qm_mcc_initcgr initcgr;
3246 u32 cs_th;
3247 int err;
3248
3249 err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
3250 if (err < 0) {
3251 if (netif_msg_drv(priv))
3252 pr_err("Error %d allocating CGR ID\n", err);
3253 goto out_error;
3254 }
3255
3256 /* Enable CS TD, but disable Congestion State Change Notifications. */
3257 memset(&initcgr, 0, sizeof(initcgr));
3258 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
3259 initcgr.cgr.cscn_en = QM_CGR_EN;
3260 cs_th = DPAA_INGRESS_CS_THRESHOLD;
3261 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
3262
3263 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
3264 initcgr.cgr.cstd_en = QM_CGR_EN;
3265
3266 /* This CGR will be associated with the SWP affined to the current CPU.
3267 * However, we'll place all our ingress FQs in it.
3268 */
3269 err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
3270 &initcgr);
3271 if (err < 0) {
3272 if (netif_msg_drv(priv))
3273 pr_err("Error %d creating ingress CGR with ID %d\n",
3274 err, priv->ingress_cgr.cgrid);
3275 qman_release_cgrid(priv->ingress_cgr.cgrid);
3276 goto out_error;
3277 }
3278 if (netif_msg_drv(priv))
3279 pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
3280 priv->ingress_cgr.cgrid, priv->mac_dev->addr);
3281
3282 priv->use_ingress_cgr = true;
3283
3284 out_error:
3285 return err;
3286 }
3287
3288 static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
3289 enum port_type port)
3290 {
3291 u16 headroom;
3292
3293 /* The frame headroom must accommodate:
3294 * - the driver private data area
3295 * - parse results, hash results, timestamp if selected
3296 * If either the hash result or the timestamp is selected, both will
3297 * be copied to/from the frame headroom, as the TS is located between PR
3298 * and HR in the IC and the IC copy size has a granularity of 16 bytes
3299 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
3300 *
3301 * Also make sure the headroom is a multiple of data_align bytes
3302 */
3303 headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
3304
3305 if (port == RX) {
3306 #ifdef CONFIG_DPAA_ERRATUM_A050385
3307 if (unlikely(fman_has_errata_a050385()))
3308 headroom = XDP_PACKET_HEADROOM;
3309 #endif
3310
3311 return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
3312 } else {
3313 return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
3314 }
3315 }
3316
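/* Probe entry point. Defers until the QMan/BMan devices and their portals
 * have probed, then allocates the net_device, the Rx buffer pool, the
 * frame queues and congestion groups, initializes the FMan ports and
 * finally registers the interface.
 */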
3317 static int dpaa_eth_probe(struct platform_device *pdev)
3318 {
3319 struct net_device *net_dev = NULL;
3320 struct dpaa_bp *dpaa_bp = NULL;
3321 struct dpaa_fq *dpaa_fq, *tmp;
3322 struct dpaa_priv *priv = NULL;
3323 struct fm_port_fqs port_fqs;
3324 struct mac_device *mac_dev;
3325 int err = 0, channel;
3326 struct device *dev;
3327
3328 dev = &pdev->dev;
3329
3330 err = bman_is_probed();
3331 if (!err)
3332 return -EPROBE_DEFER;
3333 if (err < 0) {
3334 dev_err(dev, "failing probe due to bman probe error\n");
3335 return -ENODEV;
3336 }
3337 err = qman_is_probed();
3338 if (!err)
3339 return -EPROBE_DEFER;
3340 if (err < 0) {
3341 dev_err(dev, "failing probe due to qman probe error\n");
3342 return -ENODEV;
3343 }
3344 err = bman_portals_probed();
3345 if (!err)
3346 return -EPROBE_DEFER;
3347 if (err < 0) {
3348 dev_err(dev,
3349 "failing probe due to bman portals probe error\n");
3350 return -ENODEV;
3351 }
3352 err = qman_portals_probed();
3353 if (!err)
3354 return -EPROBE_DEFER;
3355 if (err < 0) {
3356 dev_err(dev,
3357 "failing probe due to qman portals probe error\n");
3358 return -ENODEV;
3359 }
3360
3361 /* Allocate this early, so we can store relevant information in
3362 * the private area
3363 */
3364 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
3365 if (!net_dev) {
3366 dev_err(dev, "alloc_etherdev_mq() failed\n");
3367 return -ENOMEM;
3368 }
3369
3370 /* Do this here, so we can be verbose early */
3371 SET_NETDEV_DEV(net_dev, dev->parent);
3372 dev_set_drvdata(dev, net_dev);
3373
3374 priv = netdev_priv(net_dev);
3375 priv->net_dev = net_dev;
3376
3377 priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
3378
3379 mac_dev = dpaa_mac_dev_get(pdev);
3380 if (IS_ERR(mac_dev)) {
3381 netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
3382 err = PTR_ERR(mac_dev);
3383 goto free_netdev;
3384 }
3385
3386 /* Devices used for DMA mapping */
3387 priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
3388 priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
3389 err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
3390 if (!err)
3391 err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
3392 DMA_BIT_MASK(40));
3393 if (err) {
3394 netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
3395 goto free_netdev;
3396 }
3397
3398 /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
3399 * we choose conservatively and let the user explicitly set a higher
3400 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
3401 * in the same LAN.
3402 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
3403 * start with the maximum allowed.
3404 */
3405 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
3406
3407 netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
3408 net_dev->mtu);
3409
3410 priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
3411 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
3412
3413 /* bp init */
3414 dpaa_bp = dpaa_bp_alloc(dev);
3415 if (IS_ERR(dpaa_bp)) {
3416 err = PTR_ERR(dpaa_bp);
3417 goto free_dpaa_bps;
3418 }
3419 /* the raw size of the buffers used for reception */
3420 dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
3421 /* avoid runtime computations by keeping the usable size here */
3422 dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
3423 dpaa_bp->priv = priv;
3424
3425 err = dpaa_bp_alloc_pool(dpaa_bp);
3426 if (err < 0)
3427 goto free_dpaa_bps;
3428 priv->dpaa_bp = dpaa_bp;
3429
3430 INIT_LIST_HEAD(&priv->dpaa_fq_list);
3431
3432 memset(&port_fqs, 0, sizeof(port_fqs));
3433
3434 err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
3435 if (err < 0) {
3436 dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
3437 goto free_dpaa_bps;
3438 }
3439
3440 priv->mac_dev = mac_dev;
3441
3442 channel = dpaa_get_channel();
3443 if (channel < 0) {
3444 dev_err(dev, "dpaa_get_channel() failed\n");
3445 err = channel;
3446 goto free_dpaa_bps;
3447 }
3448
3449 priv->channel = (u16)channel;
3450
3451 /* Walk the CPUs with affine portals
3452 * and add this pool channel to each's dequeue mask.
3453 */
3454 dpaa_eth_add_channel(priv->channel, &pdev->dev);
3455
3456 dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
3457
3458 /* Create a congestion group for this netdev, with
3459 * dynamically-allocated CGR ID.
3460 * Must be executed after probing the MAC, but before
3461 * assigning the egress FQs to the CGRs.
3462 */
3463 err = dpaa_eth_cgr_init(priv);
3464 if (err < 0) {
3465 dev_err(dev, "Error initializing CGR\n");
3466 goto free_dpaa_bps;
3467 }
3468
3469 err = dpaa_ingress_cgr_init(priv);
3470 if (err < 0) {
3471 dev_err(dev, "Error initializing ingress CGR\n");
3472 goto delete_egress_cgr;
3473 }
3474
3475 /* Add the FQs to the interface, and make them active */
3476 list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
3477 err = dpaa_fq_init(dpaa_fq, false);
3478 if (err < 0)
3479 goto free_dpaa_fqs;
3480 }
3481
3482 priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
3483 priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
3484
3485 /* All real interfaces need their ports initialized */
3486 err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
3487 &priv->buf_layout[0], dev);
3488 if (err)
3489 goto free_dpaa_fqs;
3490
3491 /* Rx traffic distribution based on keygen hashing defaults to on */
3492 priv->keygen_in_use = true;
3493
3494 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
3495 if (!priv->percpu_priv) {
3496 dev_err(dev, "devm_alloc_percpu() failed\n");
3497 err = -ENOMEM;
3498 goto free_dpaa_fqs;
3499 }
3500
3501 priv->num_tc = 1;
3502 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
3503
3504 /* Initialize NAPI */
3505 err = dpaa_napi_add(net_dev);
3506 if (err < 0)
3507 goto delete_dpaa_napi;
3508
3509 err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
3510 if (err < 0)
3511 goto delete_dpaa_napi;
3512
3513 dpaa_eth_sysfs_init(&net_dev->dev);
3514
3515 netif_info(priv, probe, net_dev, "Probed interface %s\n",
3516 net_dev->name);
3517
3518 return 0;
3519
3520 delete_dpaa_napi:
3521 dpaa_napi_del(net_dev);
3522 free_dpaa_fqs:
3523 dpaa_fq_free(dev, &priv->dpaa_fq_list);
3524 qman_delete_cgr_safe(&priv->ingress_cgr);
3525 qman_release_cgrid(priv->ingress_cgr.cgrid);
3526 delete_egress_cgr:
3527 qman_delete_cgr_safe(&priv->cgr_data.cgr);
3528 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3529 free_dpaa_bps:
3530 dpaa_bps_free(priv);
3531 free_netdev:
3532 dev_set_drvdata(dev, NULL);
3533 free_netdev(net_dev);
3534
3535 return err;
3536 }
3537
3538 static int dpaa_remove(struct platform_device *pdev)
3539 {
3540 struct net_device *net_dev;
3541 struct dpaa_priv *priv;
3542 struct device *dev;
3543 int err;
3544
3545 dev = &pdev->dev;
3546 net_dev = dev_get_drvdata(dev);
3547
3548 priv = netdev_priv(net_dev);
3549
3550 dpaa_eth_sysfs_remove(dev);
3551
3552 dev_set_drvdata(dev, NULL);
3553 unregister_netdev(net_dev);
3554
3555 err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
3556
3557 qman_delete_cgr_safe(&priv->ingress_cgr);
3558 qman_release_cgrid(priv->ingress_cgr.cgrid);
3559 qman_delete_cgr_safe(&priv->cgr_data.cgr);
3560 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3561
3562 dpaa_napi_del(net_dev);
3563
3564 dpaa_bps_free(priv);
3565
3566 free_netdev(net_dev);
3567
3568 return err;
3569 }
3570
3571 static const struct platform_device_id dpaa_devtype[] = {
3572 {
3573 .name = "dpaa-ethernet",
3574 .driver_data = 0,
3575 }, {
3576 }
3577 };
3578 MODULE_DEVICE_TABLE(platform, dpaa_devtype);
3579
3580 static struct platform_driver dpaa_driver = {
3581 .driver = {
3582 .name = KBUILD_MODNAME,
3583 },
3584 .id_table = dpaa_devtype,
3585 .probe = dpaa_eth_probe,
3586 .remove = dpaa_remove
3587 };
3588
3589 static int __init dpaa_load(void)
3590 {
3591 int err;
3592
3593 pr_debug("FSL DPAA Ethernet driver\n");
3594
3595 /* initialize dpaa_eth mirror values */
3596 dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
3597 dpaa_max_frm = fman_get_max_frm();
3598
3599 err = platform_driver_register(&dpaa_driver);
3600 if (err < 0)
3601 pr_err("Error, platform_driver_register() = %d\n", err);
3602
3603 return err;
3604 }
3605 module_init(dpaa_load);
3606
3607 static void __exit dpaa_unload(void)
3608 {
3609 platform_driver_unregister(&dpaa_driver);
3610
3611 /* Only one channel is used and needs to be released after all
3612 * interfaces are removed
3613 */
3614 dpaa_release_channel();
3615 }
3616 module_exit(dpaa_unload);
3617
3618 MODULE_LICENSE("Dual BSD/GPL");
3619 MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
3620