1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
34 #include "en.h"
35 #include "en/port.h"
36 #include "en/port_buffer.h"
37
38 #define MLX5E_100MB (100000)
39 #define MLX5E_1GB (1000000)
40
41 #define MLX5E_CEE_STATE_UP 1
42 #define MLX5E_CEE_STATE_DOWN 0
43
44 /* Max supported cable length is 1000 meters */
45 #define MLX5E_MAX_CABLE_LENGTH 1000
46
47 enum {
48 MLX5E_VENDOR_TC_GROUP_NUM = 7,
49 MLX5E_LOWEST_PRIO_GROUP = 0,
50 };
51
52 #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
53 MLX5_CAP_QCAM_REG(mdev, qpts) && \
54 MLX5_CAP_QCAM_REG(mdev, qpdpm))
55
56 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
57 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
58
59 /* If dcbx mode is non-host set the dcbx mode to host.
60 */
mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv * priv,enum mlx5_dcbx_oper_mode mode)61 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
62 enum mlx5_dcbx_oper_mode mode)
63 {
64 struct mlx5_core_dev *mdev = priv->mdev;
65 u32 param[MLX5_ST_SZ_DW(dcbx_param)];
66 int err;
67
68 err = mlx5_query_port_dcbx_param(mdev, param);
69 if (err)
70 return err;
71
72 MLX5_SET(dcbx_param, param, version_admin, mode);
73 if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
74 MLX5_SET(dcbx_param, param, willing_admin, 1);
75
76 return mlx5_set_port_dcbx_param(mdev, param);
77 }
78
mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv * priv)79 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
80 {
81 struct mlx5e_dcbx *dcbx = &priv->dcbx;
82 int err;
83
84 if (!MLX5_CAP_GEN(priv->mdev, dcbx))
85 return 0;
86
87 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
88 return 0;
89
90 err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
91 if (err)
92 return err;
93
94 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
95 return 0;
96 }
97
mlx5e_dcbnl_ieee_getets(struct net_device * netdev,struct ieee_ets * ets)98 static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
99 struct ieee_ets *ets)
100 {
101 struct mlx5e_priv *priv = netdev_priv(netdev);
102 struct mlx5_core_dev *mdev = priv->mdev;
103 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
104 bool is_tc_group_6_exist = false;
105 bool is_zero_bw_ets_tc = false;
106 int err = 0;
107 int i;
108
109 if (!MLX5_CAP_GEN(priv->mdev, ets))
110 return -EOPNOTSUPP;
111
112 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
113 for (i = 0; i < ets->ets_cap; i++) {
114 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
115 if (err)
116 return err;
117
118 err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
119 if (err)
120 return err;
121
122 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
123 if (err)
124 return err;
125
126 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
127 tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
128 is_zero_bw_ets_tc = true;
129
130 if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
131 is_tc_group_6_exist = true;
132 }
133
134 /* Report 0% ets tc if exits*/
135 if (is_zero_bw_ets_tc) {
136 for (i = 0; i < ets->ets_cap; i++)
137 if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
138 ets->tc_tx_bw[i] = 0;
139 }
140
141 /* Update tc_tsa based on fw setting*/
142 for (i = 0; i < ets->ets_cap; i++) {
143 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
144 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
145 else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
146 !is_tc_group_6_exist)
147 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
148 }
149 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
150
151 return err;
152 }
153
mlx5e_build_tc_group(struct ieee_ets * ets,u8 * tc_group,int max_tc)154 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
155 {
156 bool any_tc_mapped_to_ets = false;
157 bool ets_zero_bw = false;
158 int strict_group;
159 int i;
160
161 for (i = 0; i <= max_tc; i++) {
162 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
163 any_tc_mapped_to_ets = true;
164 if (!ets->tc_tx_bw[i])
165 ets_zero_bw = true;
166 }
167 }
168
169 /* strict group has higher priority than ets group */
170 strict_group = MLX5E_LOWEST_PRIO_GROUP;
171 if (any_tc_mapped_to_ets)
172 strict_group++;
173 if (ets_zero_bw)
174 strict_group++;
175
176 for (i = 0; i <= max_tc; i++) {
177 switch (ets->tc_tsa[i]) {
178 case IEEE_8021QAZ_TSA_VENDOR:
179 tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
180 break;
181 case IEEE_8021QAZ_TSA_STRICT:
182 tc_group[i] = strict_group++;
183 break;
184 case IEEE_8021QAZ_TSA_ETS:
185 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
186 if (ets->tc_tx_bw[i] && ets_zero_bw)
187 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
188 break;
189 }
190 }
191 }
192
mlx5e_build_tc_tx_bw(struct ieee_ets * ets,u8 * tc_tx_bw,u8 * tc_group,int max_tc)193 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
194 u8 *tc_group, int max_tc)
195 {
196 int bw_for_ets_zero_bw_tc = 0;
197 int last_ets_zero_bw_tc = -1;
198 int num_ets_zero_bw = 0;
199 int i;
200
201 for (i = 0; i <= max_tc; i++) {
202 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
203 !ets->tc_tx_bw[i]) {
204 num_ets_zero_bw++;
205 last_ets_zero_bw_tc = i;
206 }
207 }
208
209 if (num_ets_zero_bw)
210 bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
211
212 for (i = 0; i <= max_tc; i++) {
213 switch (ets->tc_tsa[i]) {
214 case IEEE_8021QAZ_TSA_VENDOR:
215 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
216 break;
217 case IEEE_8021QAZ_TSA_STRICT:
218 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
219 break;
220 case IEEE_8021QAZ_TSA_ETS:
221 tc_tx_bw[i] = ets->tc_tx_bw[i] ?
222 ets->tc_tx_bw[i] :
223 bw_for_ets_zero_bw_tc;
224 break;
225 }
226 }
227
228 /* Make sure the total bw for ets zero bw group is 100% */
229 if (last_ets_zero_bw_tc != -1)
230 tc_tx_bw[last_ets_zero_bw_tc] +=
231 MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
232 }
233
234 /* If there are ETS BW 0,
235 * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
236 * Set group #0 to all the ETS BW 0 tcs and
237 * equally splits the 100% BW between them
238 * Report both group #0 and #1 as ETS type.
239 * All the tcs in group #0 will be reported with 0% BW.
240 */
mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv * priv,struct ieee_ets * ets)241 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
242 {
243 struct mlx5_core_dev *mdev = priv->mdev;
244 u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
245 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
246 int max_tc = mlx5_max_tc(mdev);
247 int err, i;
248
249 mlx5e_build_tc_group(ets, tc_group, max_tc);
250 mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
251
252 err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
253 if (err)
254 return err;
255
256 err = mlx5_set_port_tc_group(mdev, tc_group);
257 if (err)
258 return err;
259
260 err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
261
262 if (err)
263 return err;
264
265 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
266
267 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
268 mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
269 __func__, i, ets->prio_tc[i]);
270 mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
271 __func__, i, tc_tx_bw[i], tc_group[i]);
272 }
273
274 return err;
275 }
276
mlx5e_dbcnl_validate_ets(struct net_device * netdev,struct ieee_ets * ets,bool zero_sum_allowed)277 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
278 struct ieee_ets *ets,
279 bool zero_sum_allowed)
280 {
281 bool have_ets_tc = false;
282 int bw_sum = 0;
283 int i;
284
285 /* Validate Priority */
286 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
287 if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
288 netdev_err(netdev,
289 "Failed to validate ETS: priority value greater than max(%d)\n",
290 MLX5E_MAX_PRIORITY);
291 return -EINVAL;
292 }
293 }
294
295 /* Validate Bandwidth Sum */
296 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
297 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
298 have_ets_tc = true;
299 bw_sum += ets->tc_tx_bw[i];
300 }
301 }
302
303 if (have_ets_tc && bw_sum != 100) {
304 if (bw_sum || (!bw_sum && !zero_sum_allowed))
305 netdev_err(netdev,
306 "Failed to validate ETS: BW sum is illegal\n");
307 return -EINVAL;
308 }
309 return 0;
310 }
311
mlx5e_dcbnl_ieee_setets(struct net_device * netdev,struct ieee_ets * ets)312 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
313 struct ieee_ets *ets)
314 {
315 struct mlx5e_priv *priv = netdev_priv(netdev);
316 int err;
317
318 if (!MLX5_CAP_GEN(priv->mdev, ets))
319 return -EOPNOTSUPP;
320
321 err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
322 if (err)
323 return err;
324
325 err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
326 if (err)
327 return err;
328
329 return 0;
330 }
331
mlx5e_dcbnl_ieee_getpfc(struct net_device * dev,struct ieee_pfc * pfc)332 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
333 struct ieee_pfc *pfc)
334 {
335 struct mlx5e_priv *priv = netdev_priv(dev);
336 struct mlx5_core_dev *mdev = priv->mdev;
337 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
338 int i;
339
340 pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
341 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
342 pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
343 pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
344 }
345
346 if (MLX5_BUFFER_SUPPORTED(mdev))
347 pfc->delay = priv->dcbx.cable_len;
348
349 return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
350 }
351
mlx5e_dcbnl_ieee_setpfc(struct net_device * dev,struct ieee_pfc * pfc)352 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
353 struct ieee_pfc *pfc)
354 {
355 struct mlx5e_priv *priv = netdev_priv(dev);
356 struct mlx5_core_dev *mdev = priv->mdev;
357 u32 old_cable_len = priv->dcbx.cable_len;
358 struct ieee_pfc pfc_new;
359 u32 changed = 0;
360 u8 curr_pfc_en;
361 int ret = 0;
362
363 /* pfc_en */
364 mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
365 if (pfc->pfc_en != curr_pfc_en) {
366 ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
367 if (ret)
368 return ret;
369 mlx5_toggle_port_link(mdev);
370 changed |= MLX5E_PORT_BUFFER_PFC;
371 }
372
373 if (pfc->delay &&
374 pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
375 pfc->delay != priv->dcbx.cable_len) {
376 priv->dcbx.cable_len = pfc->delay;
377 changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
378 }
379
380 if (MLX5_BUFFER_SUPPORTED(mdev)) {
381 pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
382 if (priv->dcbx.manual_buffer)
383 ret = mlx5e_port_manual_buffer_config(priv, changed,
384 dev->mtu, &pfc_new,
385 NULL, NULL);
386
387 if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
388 priv->dcbx.cable_len = old_cable_len;
389 }
390
391 if (!ret) {
392 mlx5e_dbg(HW, priv,
393 "%s: PFC per priority bit mask: 0x%x\n",
394 __func__, pfc->pfc_en);
395 }
396 return ret;
397 }
398
mlx5e_dcbnl_getdcbx(struct net_device * dev)399 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
400 {
401 struct mlx5e_priv *priv = netdev_priv(dev);
402
403 return priv->dcbx.cap;
404 }
405
mlx5e_dcbnl_setdcbx(struct net_device * dev,u8 mode)406 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
407 {
408 struct mlx5e_priv *priv = netdev_priv(dev);
409 struct mlx5e_dcbx *dcbx = &priv->dcbx;
410
411 if (mode & DCB_CAP_DCBX_LLD_MANAGED)
412 return 1;
413
414 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
415 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
416 return 0;
417
418 /* set dcbx to fw controlled */
419 if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
420 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
421 dcbx->cap &= ~DCB_CAP_DCBX_HOST;
422 return 0;
423 }
424
425 return 1;
426 }
427
428 if (!(mode & DCB_CAP_DCBX_HOST))
429 return 1;
430
431 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
432 return 1;
433
434 dcbx->cap = mode;
435
436 return 0;
437 }
438
mlx5e_dcbnl_ieee_setapp(struct net_device * dev,struct dcb_app * app)439 static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
440 {
441 struct mlx5e_priv *priv = netdev_priv(dev);
442 struct dcb_app temp;
443 bool is_new;
444 int err;
445
446 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
447 !MLX5_DSCP_SUPPORTED(priv->mdev))
448 return -EOPNOTSUPP;
449
450 if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
451 (app->protocol >= MLX5E_MAX_DSCP))
452 return -EINVAL;
453
454 /* Save the old entry info */
455 temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
456 temp.protocol = app->protocol;
457 temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
458
459 /* Check if need to switch to dscp trust state */
460 if (!priv->dcbx.dscp_app_cnt) {
461 err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
462 if (err)
463 return err;
464 }
465
466 /* Skip the fw command if new and old mapping are the same */
467 if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
468 err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
469 if (err)
470 goto fw_err;
471 }
472
473 /* Delete the old entry if exists */
474 is_new = false;
475 err = dcb_ieee_delapp(dev, &temp);
476 if (err)
477 is_new = true;
478
479 /* Add new entry and update counter */
480 err = dcb_ieee_setapp(dev, app);
481 if (err)
482 return err;
483
484 if (is_new)
485 priv->dcbx.dscp_app_cnt++;
486
487 return err;
488
489 fw_err:
490 mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
491 return err;
492 }
493
mlx5e_dcbnl_ieee_delapp(struct net_device * dev,struct dcb_app * app)494 static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
495 {
496 struct mlx5e_priv *priv = netdev_priv(dev);
497 int err;
498
499 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
500 !MLX5_DSCP_SUPPORTED(priv->mdev))
501 return -EOPNOTSUPP;
502
503 if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
504 (app->protocol >= MLX5E_MAX_DSCP))
505 return -EINVAL;
506
507 /* Skip if no dscp app entry */
508 if (!priv->dcbx.dscp_app_cnt)
509 return -ENOENT;
510
511 /* Check if the entry matches fw setting */
512 if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
513 return -ENOENT;
514
515 /* Delete the app entry */
516 err = dcb_ieee_delapp(dev, app);
517 if (err)
518 return err;
519
520 /* Reset the priority mapping back to zero */
521 err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
522 if (err)
523 goto fw_err;
524
525 priv->dcbx.dscp_app_cnt--;
526
527 /* Check if need to switch to pcp trust state */
528 if (!priv->dcbx.dscp_app_cnt)
529 err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
530
531 return err;
532
533 fw_err:
534 mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
535 return err;
536 }
537
mlx5e_dcbnl_ieee_getmaxrate(struct net_device * netdev,struct ieee_maxrate * maxrate)538 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
539 struct ieee_maxrate *maxrate)
540 {
541 struct mlx5e_priv *priv = netdev_priv(netdev);
542 struct mlx5_core_dev *mdev = priv->mdev;
543 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
544 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
545 int err;
546 int i;
547
548 err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
549 if (err)
550 return err;
551
552 memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
553
554 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
555 switch (max_bw_unit[i]) {
556 case MLX5_100_MBPS_UNIT:
557 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
558 break;
559 case MLX5_GBPS_UNIT:
560 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
561 break;
562 case MLX5_BW_NO_LIMIT:
563 break;
564 default:
565 WARN(true, "non-supported BW unit");
566 break;
567 }
568 }
569
570 return 0;
571 }
572
mlx5e_dcbnl_ieee_setmaxrate(struct net_device * netdev,struct ieee_maxrate * maxrate)573 static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
574 struct ieee_maxrate *maxrate)
575 {
576 struct mlx5e_priv *priv = netdev_priv(netdev);
577 struct mlx5_core_dev *mdev = priv->mdev;
578 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
579 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
580 __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
581 int i;
582
583 memset(max_bw_value, 0, sizeof(max_bw_value));
584 memset(max_bw_unit, 0, sizeof(max_bw_unit));
585
586 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
587 if (!maxrate->tc_maxrate[i]) {
588 max_bw_unit[i] = MLX5_BW_NO_LIMIT;
589 continue;
590 }
591 if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
592 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
593 MLX5E_100MB);
594 max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
595 max_bw_unit[i] = MLX5_100_MBPS_UNIT;
596 } else {
597 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
598 MLX5E_1GB);
599 max_bw_unit[i] = MLX5_GBPS_UNIT;
600 }
601 }
602
603 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
604 mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
605 __func__, i, max_bw_value[i]);
606 }
607
608 return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
609 }
610
mlx5e_dcbnl_setall(struct net_device * netdev)611 static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
612 {
613 struct mlx5e_priv *priv = netdev_priv(netdev);
614 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
615 struct mlx5_core_dev *mdev = priv->mdev;
616 struct ieee_ets ets;
617 struct ieee_pfc pfc;
618 int err = -EOPNOTSUPP;
619 int i;
620
621 if (!MLX5_CAP_GEN(mdev, ets))
622 goto out;
623
624 memset(&ets, 0, sizeof(ets));
625 memset(&pfc, 0, sizeof(pfc));
626
627 ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
628 for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
629 ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
630 ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
631 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
632 ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
633 mlx5e_dbg(HW, priv,
634 "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
635 __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
636 ets.prio_tc[i]);
637 }
638
639 err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
640 if (err)
641 goto out;
642
643 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
644 if (err) {
645 netdev_err(netdev,
646 "%s, Failed to set ETS: %d\n", __func__, err);
647 goto out;
648 }
649
650 /* Set PFC */
651 pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
652 if (!cee_cfg->pfc_enable)
653 pfc.pfc_en = 0;
654 else
655 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
656 pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
657
658 err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
659 if (err) {
660 netdev_err(netdev,
661 "%s, Failed to set PFC: %d\n", __func__, err);
662 goto out;
663 }
664 out:
665 return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
666 }
667
mlx5e_dcbnl_getstate(struct net_device * netdev)668 static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
669 {
670 return MLX5E_CEE_STATE_UP;
671 }
672
mlx5e_dcbnl_getpermhwaddr(struct net_device * netdev,u8 * perm_addr)673 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
674 u8 *perm_addr)
675 {
676 struct mlx5e_priv *priv = netdev_priv(netdev);
677
678 if (!perm_addr)
679 return;
680
681 memset(perm_addr, 0xff, MAX_ADDR_LEN);
682
683 mlx5_query_mac_address(priv->mdev, perm_addr);
684 }
685
mlx5e_dcbnl_setpgtccfgtx(struct net_device * netdev,int priority,u8 prio_type,u8 pgid,u8 bw_pct,u8 up_map)686 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
687 int priority, u8 prio_type,
688 u8 pgid, u8 bw_pct, u8 up_map)
689 {
690 struct mlx5e_priv *priv = netdev_priv(netdev);
691 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
692
693 if (priority >= CEE_DCBX_MAX_PRIO) {
694 netdev_err(netdev,
695 "%s, priority is out of range\n", __func__);
696 return;
697 }
698
699 if (pgid >= CEE_DCBX_MAX_PGS) {
700 netdev_err(netdev,
701 "%s, priority group is out of range\n", __func__);
702 return;
703 }
704
705 cee_cfg->prio_to_pg_map[priority] = pgid;
706 }
707
mlx5e_dcbnl_setpgbwgcfgtx(struct net_device * netdev,int pgid,u8 bw_pct)708 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
709 int pgid, u8 bw_pct)
710 {
711 struct mlx5e_priv *priv = netdev_priv(netdev);
712 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
713
714 if (pgid >= CEE_DCBX_MAX_PGS) {
715 netdev_err(netdev,
716 "%s, priority group is out of range\n", __func__);
717 return;
718 }
719
720 cee_cfg->pg_bw_pct[pgid] = bw_pct;
721 }
722
mlx5e_dcbnl_getpgtccfgtx(struct net_device * netdev,int priority,u8 * prio_type,u8 * pgid,u8 * bw_pct,u8 * up_map)723 static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
724 int priority, u8 *prio_type,
725 u8 *pgid, u8 *bw_pct, u8 *up_map)
726 {
727 struct mlx5e_priv *priv = netdev_priv(netdev);
728 struct mlx5_core_dev *mdev = priv->mdev;
729
730 if (!MLX5_CAP_GEN(priv->mdev, ets)) {
731 netdev_err(netdev, "%s, ets is not supported\n", __func__);
732 return;
733 }
734
735 if (priority >= CEE_DCBX_MAX_PRIO) {
736 netdev_err(netdev,
737 "%s, priority is out of range\n", __func__);
738 return;
739 }
740
741 *prio_type = 0;
742 *bw_pct = 0;
743 *up_map = 0;
744
745 if (mlx5_query_port_prio_tc(mdev, priority, pgid))
746 *pgid = 0;
747 }
748
mlx5e_dcbnl_getpgbwgcfgtx(struct net_device * netdev,int pgid,u8 * bw_pct)749 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
750 int pgid, u8 *bw_pct)
751 {
752 struct ieee_ets ets;
753
754 if (pgid >= CEE_DCBX_MAX_PGS) {
755 netdev_err(netdev,
756 "%s, priority group is out of range\n", __func__);
757 return;
758 }
759
760 mlx5e_dcbnl_ieee_getets(netdev, &ets);
761 *bw_pct = ets.tc_tx_bw[pgid];
762 }
763
mlx5e_dcbnl_setpfccfg(struct net_device * netdev,int priority,u8 setting)764 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
765 int priority, u8 setting)
766 {
767 struct mlx5e_priv *priv = netdev_priv(netdev);
768 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
769
770 if (priority >= CEE_DCBX_MAX_PRIO) {
771 netdev_err(netdev,
772 "%s, priority is out of range\n", __func__);
773 return;
774 }
775
776 if (setting > 1)
777 return;
778
779 cee_cfg->pfc_setting[priority] = setting;
780 }
781
782 static int
mlx5e_dcbnl_get_priority_pfc(struct net_device * netdev,int priority,u8 * setting)783 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
784 int priority, u8 *setting)
785 {
786 struct ieee_pfc pfc;
787 int err;
788
789 err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
790
791 if (err)
792 *setting = 0;
793 else
794 *setting = (pfc.pfc_en >> priority) & 0x01;
795
796 return err;
797 }
798
mlx5e_dcbnl_getpfccfg(struct net_device * netdev,int priority,u8 * setting)799 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
800 int priority, u8 *setting)
801 {
802 if (priority >= CEE_DCBX_MAX_PRIO) {
803 netdev_err(netdev,
804 "%s, priority is out of range\n", __func__);
805 return;
806 }
807
808 if (!setting)
809 return;
810
811 mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
812 }
813
mlx5e_dcbnl_getcap(struct net_device * netdev,int capid,u8 * cap)814 static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
815 int capid, u8 *cap)
816 {
817 struct mlx5e_priv *priv = netdev_priv(netdev);
818 struct mlx5_core_dev *mdev = priv->mdev;
819 u8 rval = 0;
820
821 switch (capid) {
822 case DCB_CAP_ATTR_PG:
823 *cap = true;
824 break;
825 case DCB_CAP_ATTR_PFC:
826 *cap = true;
827 break;
828 case DCB_CAP_ATTR_UP2TC:
829 *cap = false;
830 break;
831 case DCB_CAP_ATTR_PG_TCS:
832 *cap = 1 << mlx5_max_tc(mdev);
833 break;
834 case DCB_CAP_ATTR_PFC_TCS:
835 *cap = 1 << mlx5_max_tc(mdev);
836 break;
837 case DCB_CAP_ATTR_GSP:
838 *cap = false;
839 break;
840 case DCB_CAP_ATTR_BCN:
841 *cap = false;
842 break;
843 case DCB_CAP_ATTR_DCBX:
844 *cap = priv->dcbx.cap |
845 DCB_CAP_DCBX_VER_CEE |
846 DCB_CAP_DCBX_VER_IEEE;
847 break;
848 default:
849 *cap = 0;
850 rval = 1;
851 break;
852 }
853
854 return rval;
855 }
856
mlx5e_dcbnl_getnumtcs(struct net_device * netdev,int tcs_id,u8 * num)857 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
858 int tcs_id, u8 *num)
859 {
860 struct mlx5e_priv *priv = netdev_priv(netdev);
861 struct mlx5_core_dev *mdev = priv->mdev;
862
863 switch (tcs_id) {
864 case DCB_NUMTCS_ATTR_PG:
865 case DCB_NUMTCS_ATTR_PFC:
866 *num = mlx5_max_tc(mdev) + 1;
867 break;
868 default:
869 return -EINVAL;
870 }
871
872 return 0;
873 }
874
mlx5e_dcbnl_getpfcstate(struct net_device * netdev)875 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
876 {
877 struct ieee_pfc pfc;
878
879 if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
880 return MLX5E_CEE_STATE_DOWN;
881
882 return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
883 }
884
mlx5e_dcbnl_setpfcstate(struct net_device * netdev,u8 state)885 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
886 {
887 struct mlx5e_priv *priv = netdev_priv(netdev);
888 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
889
890 if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
891 return;
892
893 cee_cfg->pfc_enable = state;
894 }
895
mlx5e_dcbnl_getbuffer(struct net_device * dev,struct dcbnl_buffer * dcb_buffer)896 static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
897 struct dcbnl_buffer *dcb_buffer)
898 {
899 struct mlx5e_priv *priv = netdev_priv(dev);
900 struct mlx5_core_dev *mdev = priv->mdev;
901 struct mlx5e_port_buffer port_buffer;
902 u8 buffer[MLX5E_MAX_PRIORITY];
903 int i, err;
904
905 if (!MLX5_BUFFER_SUPPORTED(mdev))
906 return -EOPNOTSUPP;
907
908 err = mlx5e_port_query_priority2buffer(mdev, buffer);
909 if (err)
910 return err;
911
912 for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
913 dcb_buffer->prio2buffer[i] = buffer[i];
914
915 err = mlx5e_port_query_buffer(priv, &port_buffer);
916 if (err)
917 return err;
918
919 for (i = 0; i < MLX5E_MAX_BUFFER; i++)
920 dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
921 dcb_buffer->total_size = port_buffer.port_buffer_size;
922
923 return 0;
924 }
925
mlx5e_dcbnl_setbuffer(struct net_device * dev,struct dcbnl_buffer * dcb_buffer)926 static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
927 struct dcbnl_buffer *dcb_buffer)
928 {
929 struct mlx5e_priv *priv = netdev_priv(dev);
930 struct mlx5_core_dev *mdev = priv->mdev;
931 struct mlx5e_port_buffer port_buffer;
932 u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
933 u32 *buffer_size = NULL;
934 u8 *prio2buffer = NULL;
935 u32 changed = 0;
936 int i, err;
937
938 if (!MLX5_BUFFER_SUPPORTED(mdev))
939 return -EOPNOTSUPP;
940
941 for (i = 0; i < DCBX_MAX_BUFFERS; i++)
942 mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);
943
944 for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
945 mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);
946
947 err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
948 if (err)
949 return err;
950
951 for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
952 if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
953 changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
954 prio2buffer = dcb_buffer->prio2buffer;
955 break;
956 }
957 }
958
959 err = mlx5e_port_query_buffer(priv, &port_buffer);
960 if (err)
961 return err;
962
963 for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
964 if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
965 changed |= MLX5E_PORT_BUFFER_SIZE;
966 buffer_size = dcb_buffer->buffer_size;
967 break;
968 }
969 }
970
971 if (!changed)
972 return 0;
973
974 priv->dcbx.manual_buffer = true;
975 err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
976 buffer_size, prio2buffer);
977 return err;
978 }
979
980 const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
981 .ieee_getets = mlx5e_dcbnl_ieee_getets,
982 .ieee_setets = mlx5e_dcbnl_ieee_setets,
983 .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
984 .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
985 .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
986 .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
987 .ieee_setapp = mlx5e_dcbnl_ieee_setapp,
988 .ieee_delapp = mlx5e_dcbnl_ieee_delapp,
989 .getdcbx = mlx5e_dcbnl_getdcbx,
990 .setdcbx = mlx5e_dcbnl_setdcbx,
991 .dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
992 .dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,
993
994 /* CEE interfaces */
995 .setall = mlx5e_dcbnl_setall,
996 .getstate = mlx5e_dcbnl_getstate,
997 .getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,
998
999 .setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
1000 .setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
1001 .getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
1002 .getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,
1003
1004 .setpfccfg = mlx5e_dcbnl_setpfccfg,
1005 .getpfccfg = mlx5e_dcbnl_getpfccfg,
1006 .getcap = mlx5e_dcbnl_getcap,
1007 .getnumtcs = mlx5e_dcbnl_getnumtcs,
1008 .getpfcstate = mlx5e_dcbnl_getpfcstate,
1009 .setpfcstate = mlx5e_dcbnl_setpfcstate,
1010 };
1011
mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv * priv,enum mlx5_dcbx_oper_mode * mode)1012 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
1013 enum mlx5_dcbx_oper_mode *mode)
1014 {
1015 u32 out[MLX5_ST_SZ_DW(dcbx_param)];
1016
1017 *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
1018
1019 if (!mlx5_query_port_dcbx_param(priv->mdev, out))
1020 *mode = MLX5_GET(dcbx_param, out, version_oper);
1021
1022 /* From driver's point of view, we only care if the mode
1023 * is host (HOST) or non-host (AUTO)
1024 */
1025 if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
1026 *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
1027 }
1028
mlx5e_ets_init(struct mlx5e_priv * priv)1029 static void mlx5e_ets_init(struct mlx5e_priv *priv)
1030 {
1031 struct ieee_ets ets;
1032 int err;
1033 int i;
1034
1035 if (!MLX5_CAP_GEN(priv->mdev, ets))
1036 return;
1037
1038 memset(&ets, 0, sizeof(ets));
1039 ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
1040 for (i = 0; i < ets.ets_cap; i++) {
1041 ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
1042 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
1043 ets.prio_tc[i] = i;
1044 }
1045
1046 if (ets.ets_cap > 1) {
1047 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
1048 ets.prio_tc[0] = 1;
1049 ets.prio_tc[1] = 0;
1050 }
1051
1052 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
1053 if (err)
1054 netdev_err(priv->netdev,
1055 "%s, Failed to init ETS: %d\n", __func__, err);
1056 }
1057
1058 enum {
1059 INIT,
1060 DELETE,
1061 };
1062
mlx5e_dcbnl_dscp_app(struct mlx5e_priv * priv,int action)1063 static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
1064 {
1065 struct dcb_app temp;
1066 int i;
1067
1068 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
1069 return;
1070
1071 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
1072 return;
1073
1074 /* No SEL_DSCP entry in non DSCP state */
1075 if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
1076 return;
1077
1078 temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
1079 for (i = 0; i < MLX5E_MAX_DSCP; i++) {
1080 temp.protocol = i;
1081 temp.priority = priv->dcbx_dp.dscp2prio[i];
1082 if (action == INIT)
1083 dcb_ieee_setapp(priv->netdev, &temp);
1084 else
1085 dcb_ieee_delapp(priv->netdev, &temp);
1086 }
1087
1088 priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
1089 }
1090
mlx5e_dcbnl_init_app(struct mlx5e_priv * priv)1091 void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
1092 {
1093 mlx5e_dcbnl_dscp_app(priv, INIT);
1094 }
1095
mlx5e_dcbnl_delete_app(struct mlx5e_priv * priv)1096 void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
1097 {
1098 mlx5e_dcbnl_dscp_app(priv, DELETE);
1099 }
1100
mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv * priv,struct mlx5e_params * params)1101 static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
1102 struct mlx5e_params *params)
1103 {
1104 mlx5_query_min_inline(priv->mdev, ¶ms->tx_min_inline_mode);
1105 if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
1106 params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
1107 params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
1108 }
1109
mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv * priv)1110 static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
1111 {
1112 struct mlx5e_channels new_channels = {};
1113
1114 mutex_lock(&priv->state_lock);
1115
1116 new_channels.params = priv->channels.params;
1117 mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
1118
1119 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1120 priv->channels.params = new_channels.params;
1121 goto out;
1122 }
1123
1124 /* Skip if tx_min_inline is the same */
1125 if (new_channels.params.tx_min_inline_mode ==
1126 priv->channels.params.tx_min_inline_mode)
1127 goto out;
1128
1129 mlx5e_safe_switch_channels(priv, &new_channels, NULL);
1130
1131 out:
1132 mutex_unlock(&priv->state_lock);
1133 }
1134
mlx5e_set_trust_state(struct mlx5e_priv * priv,u8 trust_state)1135 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
1136 {
1137 int err;
1138
1139 err = mlx5_set_trust_state(priv->mdev, trust_state);
1140 if (err)
1141 return err;
1142 priv->dcbx_dp.trust_state = trust_state;
1143 mlx5e_trust_update_sq_inline_mode(priv);
1144
1145 return err;
1146 }
1147
mlx5e_set_dscp2prio(struct mlx5e_priv * priv,u8 dscp,u8 prio)1148 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
1149 {
1150 int err;
1151
1152 err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
1153 if (err)
1154 return err;
1155
1156 priv->dcbx_dp.dscp2prio[dscp] = prio;
1157 return err;
1158 }
1159
mlx5e_trust_initialize(struct mlx5e_priv * priv)1160 static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1161 {
1162 struct mlx5_core_dev *mdev = priv->mdev;
1163 int err;
1164
1165 priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
1166
1167 if (!MLX5_DSCP_SUPPORTED(mdev))
1168 return 0;
1169
1170 err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
1171 if (err)
1172 return err;
1173
1174 mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
1175
1176 err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
1177 if (err)
1178 return err;
1179
1180 return 0;
1181 }
1182
mlx5e_dcbnl_initialize(struct mlx5e_priv * priv)1183 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
1184 {
1185 struct mlx5e_dcbx *dcbx = &priv->dcbx;
1186
1187 mlx5e_trust_initialize(priv);
1188
1189 if (!MLX5_CAP_GEN(priv->mdev, qos))
1190 return;
1191
1192 if (MLX5_CAP_GEN(priv->mdev, dcbx))
1193 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
1194
1195 priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
1196 DCB_CAP_DCBX_VER_IEEE;
1197 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
1198 priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
1199
1200 priv->dcbx.manual_buffer = false;
1201 priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
1202
1203 mlx5e_ets_init(priv);
1204 }
1205