// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT	100

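/* Convert a dcbnl ETS configuration into the driver's TM (traffic
 * manager) info: strict-priority TCs get SP scheduling with a zero DWRR
 * weight, ETS TCs get DWRR scheduling with the requested bandwidth.
 */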
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports SP (strict priority)
			 * and ETS (enhanced transmission selection)
			 * algorithms; if we receive any other value
			 * from dcbnl, return an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}

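/* Fill an ieee_ets structure from the current TM configuration; the
 * reverse of hclge_ieee_ets_to_tm_info().
 */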
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
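/* dcbnl ieee_getets hook: report the current ETS configuration */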
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

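/* Check a requested TC count and prio-to-TC map against the device's
 * TC limit and the TQPs allocated to the PF.
 */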
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

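/* Validate a requested ETS configuration, returning the implied TC
 * count in *tc and flagging in *changed whether the prio-to-TC map or
 * any TC's scheduling mode differs from the current setup. The
 * bandwidths of all ETS TCs must sum to exactly BW_PERCENT.
 */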
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	int ret;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];
	}

	ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
	if (ret)
		return ret;

	for (i = 0; i < hdev->tc_max; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

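/* Reprogram scheduling, pause, buffers and RSS after a TC map change */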
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}

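/* Propagate the new TC count to the NIC client on every vport */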
static int hclge_client_setup_tc(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_client *client;
	struct hnae3_handle *handle;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &vport[i].nic;
		client = handle->client;

		if (!client || !client->ops || !client->ops->setup_tc)
			continue;

		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
		if (ret)
			return ret;
	}

	return 0;
}

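/* Quiesce and uninitialize the client before a TC reconfiguration */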
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

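/* Reinitialize the client and bring it back up after reconfiguration */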
static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

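/* dcbnl ieee_setets hook: validate and apply a new ETS configuration.
 * When the prio-to-TC map changes, the client is torn down first and
 * brought back up once the hardware has been reprogrammed.
 */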
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		ret = hclge_client_setup_tc(hdev);
		if (ret)
			goto err_out;

		ret = hclge_notify_init_up(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}

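/* dcbnl ieee_getpfc hook: report the per-priority PFC enable bits,
 * derived from the per-TC hw_pfc_map, along with the PFC frame
 * statistics.
 */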
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = hdev->tm_info.hw_pfc_map;

	/* PFC setting is based on TC */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
				pfc->pfc_en |= BIT(j);
		}
	}

	ret = hclge_pfc_tx_stats_get(hdev, requests);
	if (ret)
		return ret;

	ret = hclge_pfc_rx_stats_get(hdev, indications);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
	}

	return 0;
}

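/* dcbnl ieee_setpfc hook: translate the per-priority PFC enable bits
 * into a per-TC map and reprogram pause and buffer settings.
 */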
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	/* Convert the per-priority enable bits into a per-TC map: a TC
	 * is PFC-enabled if any priority mapped to it has PFC enabled.
	 */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		return ret;
	}

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* DCBX configuration */
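/* dcbnl getdcbx hook: mqprio and DCB are mutually exclusive, so no
 * DCBX capability is reported while mqprio is enabled.
 */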
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

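/* dcbnl setdcbx hook: a nonzero return tells dcbnl the mode was
 * rejected; only host-managed IEEE DCBX is supported.
 */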
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
	if (ret)
		return -EINVAL;

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	hclge_tm_schd_info_update(hdev, tc);
	hclge_tm_prio_tc_info_update(hdev, prio_tc);

	ret = hclge_tm_init_hw(hdev, false);
	if (ret)
		goto err_out;

	ret = hclge_client_setup_tc(hdev);
	if (ret)
		goto err_out;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return hclge_notify_init_up(hdev);

err_out:
	hclge_notify_init_up(hdev);

	return ret;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};

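/* Install the dcbnl ops on the PF's NIC handle when DCB is supported */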
void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If hdev does not support DCB or the vport is not a PF,
	 * then dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}