/* Copyright 2008-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>
#include <linux/of_platform.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>

#include "dpaa_eth.h"
#include "mac.h"
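/* Names of the counters reported once per online CPU; dpaa_get_strings()
 * expands each of them into one column per CPU plus a TOTAL column, and
 * copy_stats() fills the data in the same layout.
 */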
static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
	/* dpa rx errors */
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

	/* demultiplexing errors */
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

	/* congestion related stats */
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};

#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	if (!net_dev->phydev) {
		netdev_dbg(net_dev, "phy device not initialized\n");
		return 0;
	}

	phy_ethtool_ksettings_get(net_dev->phydev, cmd);

	return 0;
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
	if (err < 0)
		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);

	return err;
}

static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	int len;

	strlcpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	len = snprintf(drvinfo->version, sizeof(drvinfo->version),
		       "%X", 0);
	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		       "%X", 0);

	if (len >= sizeof(drvinfo->fw_version)) {
		/* Truncated output */
		netdev_notice(net_dev, "snprintf() = %d\n", len);
	}
	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = 0;
	if (net_dev->phydev->autoneg) {
		err = phy_start_aneg(net_dev->phydev);
		if (err < 0)
			netdev_err(net_dev, "phy_start_aneg() = %d\n",
				   err);
	}

	return err;
}

static void dpaa_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return;
	}

	epause->autoneg = mac_dev->autoneg_pause;
	epause->rx_pause = mac_dev->rx_pause_active;
	epause->tx_pause = mac_dev->tx_pause_active;
}

static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	u32 newadv, oldadv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	    (epause->rx_pause != epause->tx_pause)))
		return -EINVAL;

	/* The MAC should know how to handle PAUSE frame autonegotiation before
	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
	 * settings.
	 */
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

	/* Determine the sym/asym advertised PAUSE capabilities from the desired
	 * rx/tx pause settings.
	 */
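	/* The bit manipulation below maps rx=1/tx=1 to Pause only,
	 * rx=1/tx=0 to Pause | Asym_Pause, rx=0/tx=1 to Asym_Pause only
	 * and rx=0/tx=0 to no PAUSE advertisement at all.
	 */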
	newadv = 0;
	if (epause->rx_pause)
		newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (epause->tx_pause)
		newadv ^= ADVERTISED_Asym_Pause;

	oldadv = phydev->advertising &
			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	/* If there are differences between the old and the new advertised
	 * values, restart PHY autonegotiation and advertise the new values.
	 */
	if (oldadv != newadv) {
		phydev->advertising &= ~(ADVERTISED_Pause
				| ADVERTISED_Asym_Pause);
		phydev->advertising |= newadv;
		if (phydev->autoneg) {
			err = phy_start_aneg(phydev);
			if (err < 0)
				netdev_err(net_dev, "phy_start_aneg() = %d\n",
					   err);
		}
	}

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}

static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
	unsigned int total_stats, num_stats;

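	/* One column per online CPU plus a TOTAL column for each per-CPU
	 * counter and for each buffer pool count, followed by the global
	 * stats.
	 */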
	num_stats = num_online_cpus() + 1;
	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
			DPAA_STATS_GLOBAL_LEN;

	switch (type) {
	case ETH_SS_STATS:
		return total_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 *bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	int crr = 0, j;

	/* update current CPU's stats and also add them to the total values */
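	/* The data array holds one row per counter: num_cpus per-CPU entries
	 * followed by a running TOTAL in the last column, matching the string
	 * order built by dpaa_get_strings().
	 */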
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

	for (j = 0; j < DPAA_BPS_NUM; j++) {
		data[crr * num_values + crr_cpu] = bp_count[j];
		data[crr++ * num_values + num_cpus] += bp_count[j];
	}
}

static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i, j;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		for (j = 0; j < DPAA_BPS_NUM; j++) {
			dpaa_bp = priv->dpaa_bps[j];
			if (!dpaa_bp->percpu_count)
				continue;
			bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		}
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

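	/* The global stats follow the per-CPU block: the rx error counters,
	 * then the ERN counters (both copied as contiguous u64 values), then
	 * the three congestion values, in the order of dpaa_stats_global.
	 */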
	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}

static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings = data;
	num_cpus = num_online_cpus();
	size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

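	/* Emit the names in the same order that dpaa_get_ethtool_stats()
	 * fills the data: per-CPU counters (one entry per CPU plus a TOTAL),
	 * per-CPU buffer pool counts, then the global stat names.
	 */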
	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	for (i = 0; i < DPAA_BPS_NUM; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN,
				 "bpool %c [CPU %d]", 'a' + i, j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
			 'a' + i);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	memcpy(strings, dpaa_stats_global, size);
}

static int dpaa_get_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *cmd)
{
	struct dpaa_priv *priv = netdev_priv(dev);

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		cmd->data = 0;
		break;
	}

	return 0;
}

static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *unused)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = dpaa_get_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
	struct mac_device *mac_dev;
	struct fman_port *rxport;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	rxport = mac_dev->port[0];

	fman_port_use_kg_hash(rxport, enable);
	priv->keygen_in_use = enable;
}

static int dpaa_set_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	int ret = -EINVAL;

	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		dpaa_set_hash(dev, !!nfc->data);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = dpaa_set_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_get_ts_info(struct net_device *net_dev,
			    struct ethtool_ts_info *info)
{
	struct device *dev = net_dev->dev.parent;
	struct device_node *mac_node = dev->of_node;
	struct device_node *fman_node = NULL, *ptp_node = NULL;
	struct platform_device *ptp_dev = NULL;
	struct qoriq_ptp *ptp = NULL;

	info->phc_index = -1;

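	/* Locate the PTP clock through the device tree: the MAC node's parent
	 * is the FMan node, whose "ptimer-handle" phandle points at the
	 * ptp_qoriq device that owns the PHC index.
	 */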
	fman_node = of_get_parent(mac_node);
	if (fman_node)
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);

	if (ptp_node)
		ptp_dev = of_find_device_by_node(ptp_node);

	if (ptp_dev)
		ptp = platform_get_drvdata(ptp_dev);

	if (ptp)
		info->phc_index = ptp->phc_index;

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

const struct ethtool_ops dpaa_ethtool_ops = {
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
	.get_ts_info = dpaa_get_ts_info,
};