// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>
#include <linux/of_platform.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>

#include "dpaa_eth.h"
#include "mac.h"
15
/* ethtool -S names for the per-CPU statistics. dpaa_get_strings() emits one
 * column per online CPU plus a TOTAL column for each entry; the order here
 * must match the field order written by copy_stats().
 */
static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
	"rx dropped",
	"tx dropped",
};
27
28 static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
29 /* dpa rx errors */
30 "rx dma error",
31 "rx frame physical error",
32 "rx frame size error",
33 "rx header error",
34
35 /* demultiplexing errors */
36 "qman cg_tdrop",
37 "qman wred",
38 "qman error cond",
39 "qman early window",
40 "qman late window",
41 "qman fq tdrop",
42 "qman fq retired",
43 "qman orp disabled",
44
45 /* congestion related stats */
46 "congestion time (ms)",
47 "entered congestion",
48 "congested (0/1)"
49 };
50
#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
53
dpaa_get_link_ksettings(struct net_device * net_dev,struct ethtool_link_ksettings * cmd)54 static int dpaa_get_link_ksettings(struct net_device *net_dev,
55 struct ethtool_link_ksettings *cmd)
56 {
57 if (!net_dev->phydev)
58 return 0;
59
60 phy_ethtool_ksettings_get(net_dev->phydev, cmd);
61
62 return 0;
63 }
64
dpaa_set_link_ksettings(struct net_device * net_dev,const struct ethtool_link_ksettings * cmd)65 static int dpaa_set_link_ksettings(struct net_device *net_dev,
66 const struct ethtool_link_ksettings *cmd)
67 {
68 int err;
69
70 if (!net_dev->phydev)
71 return -ENODEV;
72
73 err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
74 if (err < 0)
75 netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
76
77 return err;
78 }
79
dpaa_get_drvinfo(struct net_device * net_dev,struct ethtool_drvinfo * drvinfo)80 static void dpaa_get_drvinfo(struct net_device *net_dev,
81 struct ethtool_drvinfo *drvinfo)
82 {
83 strscpy(drvinfo->driver, KBUILD_MODNAME,
84 sizeof(drvinfo->driver));
85 strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
86 sizeof(drvinfo->bus_info));
87 }
88
dpaa_get_msglevel(struct net_device * net_dev)89 static u32 dpaa_get_msglevel(struct net_device *net_dev)
90 {
91 return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
92 }
93
/* Set the driver's netif message-level bitmask. */
static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);

	priv->msg_enable = msg_enable;
}
99
dpaa_nway_reset(struct net_device * net_dev)100 static int dpaa_nway_reset(struct net_device *net_dev)
101 {
102 int err;
103
104 if (!net_dev->phydev)
105 return -ENODEV;
106
107 err = 0;
108 if (net_dev->phydev->autoneg) {
109 err = phy_start_aneg(net_dev->phydev);
110 if (err < 0)
111 netdev_err(net_dev, "phy_start_aneg() = %d\n",
112 err);
113 }
114
115 return err;
116 }
117
dpaa_get_pauseparam(struct net_device * net_dev,struct ethtool_pauseparam * epause)118 static void dpaa_get_pauseparam(struct net_device *net_dev,
119 struct ethtool_pauseparam *epause)
120 {
121 struct mac_device *mac_dev;
122 struct dpaa_priv *priv;
123
124 priv = netdev_priv(net_dev);
125 mac_dev = priv->mac_dev;
126
127 if (!net_dev->phydev)
128 return;
129
130 epause->autoneg = mac_dev->autoneg_pause;
131 epause->rx_pause = mac_dev->rx_pause_active;
132 epause->tx_pause = mac_dev->tx_pause_active;
133 }
134
/* Configure flow control (PAUSE frames).
 *
 * Records the requested autoneg/rx/tx pause state in the mac_device,
 * updates the PHY's advertised sym/asym PAUSE bits, then programs the MAC
 * with the resulting active pause configuration.
 *
 * Returns 0 on success, -ENODEV without a PHY, -EINVAL if the PHY cannot
 * advertise the requested combination, or the fman programming error.
 */
static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	/* reject combinations the PHY cannot advertise */
	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	/* The MAC should know how to handle PAUSE frame autonegotiation before
	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
	 * settings.
	 */
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

	/* Determine the sym/asym advertised PAUSE capabilities from the desired
	 * rx/tx pause settings.
	 */

	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);

	/* resolve and apply the active pause config on the MAC */
	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}
177
dpaa_get_sset_count(struct net_device * net_dev,int type)178 static int dpaa_get_sset_count(struct net_device *net_dev, int type)
179 {
180 unsigned int total_stats, num_stats;
181
182 num_stats = num_online_cpus() + 1;
183 total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
184 DPAA_STATS_GLOBAL_LEN;
185
186 switch (type) {
187 case ETH_SS_STATS:
188 return total_stats;
189 default:
190 return -EOPNOTSUPP;
191 }
192 }
193
/* Write one CPU's statistics into the ethtool data array.
 *
 * The array is laid out row-major: one row per statistic, each row holding
 * num_cpus per-CPU columns followed by a TOTAL column. For CPU @crr_cpu,
 * each value is stored in its own column and accumulated into the TOTAL
 * column (the caller zeroes @data before the first call).
 */
static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	const u64 vals[] = {
		percpu_priv->in_interrupt,
		percpu_priv->stats.rx_packets,
		percpu_priv->stats.tx_packets,
		percpu_priv->tx_confirm,
		percpu_priv->tx_frag_skbuffs,
		percpu_priv->stats.tx_errors,
		percpu_priv->stats.rx_errors,
		percpu_priv->stats.rx_dropped,
		percpu_priv->stats.tx_dropped,
		bp_count,
	};
	unsigned int row;

	for (row = 0; row < ARRAY_SIZE(vals); row++) {
		data[row * num_values + crr_cpu] = vals[row];
		data[row * num_values + num_cpus] += vals[row];
	}
}
231
/* Collect all ethtool -S values in the layout advertised by
 * dpaa_get_strings(): per-CPU stats and buffer-pool counts (per-CPU columns
 * plus TOTAL), aggregated rx error counters, aggregated ERN counters, then
 * the congestion group statistics.
 */
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	u64 bp_count, cg_time, cg_num;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	/* loop-invariant: the same buffer pool is consulted for every CPU */
	dpaa_bp = priv->dpaa_bp;
	num_cpus = num_online_cpus();

	bp_count = 0;
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		if (!dpaa_bp->percpu_count)
			continue;
		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));

		/* aggregate the rx error counters across all CPUs */
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		/* aggregate the enqueue rejection counters across all CPUs */
		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	/* global stats follow the per-CPU blocks */
	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}
300
/* Emit the ethtool -S string table: for each per-CPU stat, one "[CPU n]"
 * entry per online CPU followed by a "[TOTAL]" entry; then the bpool
 * entries in the same shape; then the global stat names. The layout must
 * mirror dpaa_get_ethtool_stats() and dpaa_get_sset_count().
 *
 * NOTE(review): @stringset is not checked here; presumably the core only
 * calls this for ETH_SS_STATS (the sole set advertised) — verify.
 * NOTE(review): string_cpu is zeroed only once, so bytes past the NUL of a
 * short name can be stale from a previous longer snprintf(); harmless since
 * readers stop at the NUL, but the padding is not deterministic.
 */
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings = data;
	num_cpus = num_online_cpus();
	size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	/* buffer pool counts: per-CPU columns plus TOTAL, like the stats */
	for (j = 0; j < num_cpus; j++) {
		snprintf(string_cpu, ETH_GSTRING_LEN,
			 "bpool [CPU %d]", j);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
	memcpy(strings, string_cpu, ETH_GSTRING_LEN);
	strings += ETH_GSTRING_LEN;

	/* the global stat names are copied verbatim */
	memcpy(strings, dpaa_stats_global, size);
}
337
dpaa_get_hash_opts(struct net_device * dev,struct ethtool_rxnfc * cmd)338 static int dpaa_get_hash_opts(struct net_device *dev,
339 struct ethtool_rxnfc *cmd)
340 {
341 struct dpaa_priv *priv = netdev_priv(dev);
342
343 cmd->data = 0;
344
345 switch (cmd->flow_type) {
346 case TCP_V4_FLOW:
347 case TCP_V6_FLOW:
348 case UDP_V4_FLOW:
349 case UDP_V6_FLOW:
350 if (priv->keygen_in_use)
351 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
352 fallthrough;
353 case IPV4_FLOW:
354 case IPV6_FLOW:
355 case SCTP_V4_FLOW:
356 case SCTP_V6_FLOW:
357 case AH_ESP_V4_FLOW:
358 case AH_ESP_V6_FLOW:
359 case AH_V4_FLOW:
360 case AH_V6_FLOW:
361 case ESP_V4_FLOW:
362 case ESP_V6_FLOW:
363 if (priv->keygen_in_use)
364 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
365 break;
366 default:
367 cmd->data = 0;
368 break;
369 }
370
371 return 0;
372 }
373
/* ethtool get_rxnfc entry point; only hash-field queries are supported. */
static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *unused)
{
	if (cmd->cmd == ETHTOOL_GRXFH)
		return dpaa_get_hash_opts(dev, cmd);

	return -EOPNOTSUPP;
}
389
/* Enable or disable keygen hashing on the RX port and remember the state. */
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct fman_port *rxport = priv->mac_dev->port[0];

	fman_port_use_kg_hash(rxport, enable);
	priv->keygen_in_use = enable;
}
403
dpaa_set_hash_opts(struct net_device * dev,struct ethtool_rxnfc * nfc)404 static int dpaa_set_hash_opts(struct net_device *dev,
405 struct ethtool_rxnfc *nfc)
406 {
407 int ret = -EINVAL;
408
409 /* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
410 if (nfc->data &
411 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
412 return -EINVAL;
413
414 switch (nfc->flow_type) {
415 case TCP_V4_FLOW:
416 case TCP_V6_FLOW:
417 case UDP_V4_FLOW:
418 case UDP_V6_FLOW:
419 case IPV4_FLOW:
420 case IPV6_FLOW:
421 case SCTP_V4_FLOW:
422 case SCTP_V6_FLOW:
423 case AH_ESP_V4_FLOW:
424 case AH_ESP_V6_FLOW:
425 case AH_V4_FLOW:
426 case AH_V6_FLOW:
427 case ESP_V4_FLOW:
428 case ESP_V6_FLOW:
429 dpaa_set_hash(dev, !!nfc->data);
430 ret = 0;
431 break;
432 default:
433 break;
434 }
435
436 return ret;
437 }
438
dpaa_set_rxnfc(struct net_device * dev,struct ethtool_rxnfc * cmd)439 static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
440 {
441 int ret = -EOPNOTSUPP;
442
443 switch (cmd->cmd) {
444 case ETHTOOL_SRXFH:
445 ret = dpaa_set_hash_opts(dev, cmd);
446 break;
447 default:
448 break;
449 }
450
451 return ret;
452 }
453
dpaa_get_ts_info(struct net_device * net_dev,struct ethtool_ts_info * info)454 static int dpaa_get_ts_info(struct net_device *net_dev,
455 struct ethtool_ts_info *info)
456 {
457 struct device *dev = net_dev->dev.parent;
458 struct device_node *mac_node = dev->of_node;
459 struct device_node *fman_node = NULL, *ptp_node = NULL;
460 struct platform_device *ptp_dev = NULL;
461 struct ptp_qoriq *ptp = NULL;
462
463 info->phc_index = -1;
464
465 fman_node = of_get_parent(mac_node);
466 if (fman_node) {
467 ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
468 of_node_put(fman_node);
469 }
470
471 if (ptp_node) {
472 ptp_dev = of_find_device_by_node(ptp_node);
473 of_node_put(ptp_node);
474 }
475
476 if (ptp_dev)
477 ptp = platform_get_drvdata(ptp_dev);
478
479 if (ptp)
480 info->phc_index = ptp->phc_index;
481
482 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
483 SOF_TIMESTAMPING_RX_HARDWARE |
484 SOF_TIMESTAMPING_RAW_HARDWARE;
485 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
486 (1 << HWTSTAMP_TX_ON);
487 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
488 (1 << HWTSTAMP_FILTER_ALL);
489
490 return 0;
491 }
492
dpaa_get_coalesce(struct net_device * dev,struct ethtool_coalesce * c,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)493 static int dpaa_get_coalesce(struct net_device *dev,
494 struct ethtool_coalesce *c,
495 struct kernel_ethtool_coalesce *kernel_coal,
496 struct netlink_ext_ack *extack)
497 {
498 struct qman_portal *portal;
499 u32 period;
500 u8 thresh;
501
502 portal = qman_get_affine_portal(smp_processor_id());
503 qman_portal_get_iperiod(portal, &period);
504 qman_dqrr_get_ithresh(portal, &thresh);
505
506 c->rx_coalesce_usecs = period;
507 c->rx_max_coalesced_frames = thresh;
508
509 return 0;
510 }
511
/* Apply interrupt coalescing (rx usecs -> portal interrupt period, rx max
 * frames -> DQRR interrupt threshold) to every affine QMan portal on an
 * online CPU. If programming any portal fails, every portal updated so far
 * is rolled back to the values read before the change, and the error is
 * returned.
 *
 * NOTE(review): needs_revert[] is an NR_CPUS-sized bool array on the
 * stack; with large NR_CPUS configurations this is a sizeable stack
 * footprint — consider a bitmap. TODO confirm acceptable here.
 */
static int dpaa_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	const cpumask_t *cpus = qman_affine_cpus();
	bool needs_revert[NR_CPUS] = {false};
	struct qman_portal *portal;
	u32 period, prev_period;
	u8 thresh, prev_thresh;
	int cpu, res;

	period = c->rx_coalesce_usecs;
	thresh = c->rx_max_coalesced_frames;

	/* save previous values */
	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &prev_period);
	qman_dqrr_get_ithresh(portal, &prev_thresh);

	/* set new values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		res = qman_portal_set_iperiod(portal, period);
		if (res)
			goto revert_values;
		res = qman_dqrr_set_ithresh(portal, thresh);
		if (res) {
			/* undo this portal's period before the full revert */
			qman_portal_set_iperiod(portal, prev_period);
			goto revert_values;
		}
		needs_revert[cpu] = true;
	}

	return 0;

revert_values:
	/* restore previous values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		if (!needs_revert[cpu])
			continue;
		portal = qman_get_affine_portal(cpu);
		/* previous values will not fail, ignore return value */
		qman_portal_set_iperiod(portal, prev_period);
		qman_dqrr_set_ithresh(portal, prev_thresh);
	}

	return res;
}
561
/* ethtool entry points exported by the DPAA Ethernet driver; only rx-usecs
 * and rx-frames coalescing parameters are accepted by set_coalesce.
 */
const struct ethtool_ops dpaa_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
	.get_ts_info = dpaa_get_ts_info,
	.get_coalesce = dpaa_get_coalesce,
	.set_coalesce = dpaa_set_coalesce,
};
583