// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT 100
9
hclge_ieee_ets_to_tm_info(struct hclge_dev * hdev,struct ieee_ets * ets)10 static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
11 struct ieee_ets *ets)
12 {
13 u8 i;
14
15 for (i = 0; i < HNAE3_MAX_TC; i++) {
16 switch (ets->tc_tsa[i]) {
17 case IEEE_8021QAZ_TSA_STRICT:
18 hdev->tm_info.tc_info[i].tc_sch_mode =
19 HCLGE_SCH_MODE_SP;
20 hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
21 break;
22 case IEEE_8021QAZ_TSA_ETS:
23 hdev->tm_info.tc_info[i].tc_sch_mode =
24 HCLGE_SCH_MODE_DWRR;
25 hdev->tm_info.pg_info[0].tc_dwrr[i] =
26 ets->tc_tx_bw[i];
27 break;
28 default:
29 /* Hardware only supports SP (strict priority)
30 * or ETS (enhanced transmission selection)
31 * algorithms, if we receive some other value
32 * from dcbnl, then throw an error.
33 */
34 return -EINVAL;
35 }
36 }
37
38 return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
39 }
40
hclge_tm_info_to_ieee_ets(struct hclge_dev * hdev,struct ieee_ets * ets)41 static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
42 struct ieee_ets *ets)
43 {
44 u32 i;
45
46 memset(ets, 0, sizeof(*ets));
47 ets->willing = 1;
48 ets->ets_cap = hdev->tc_max;
49
50 for (i = 0; i < HNAE3_MAX_TC; i++) {
51 ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
52 ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
53
54 if (hdev->tm_info.tc_info[i].tc_sch_mode ==
55 HCLGE_SCH_MODE_SP)
56 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
57 else
58 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
59 }
60 }
61
62 /* IEEE std */
hclge_ieee_getets(struct hnae3_handle * h,struct ieee_ets * ets)63 static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
64 {
65 struct hclge_vport *vport = hclge_get_vport(h);
66 struct hclge_dev *hdev = vport->back;
67
68 hclge_tm_info_to_ieee_ets(hdev, ets);
69
70 return 0;
71 }
72
/* Validate an ETS configuration from dcbnl before applying it.
 *
 * On success *tc is set to the number of TCs implied by the prio-to-TC
 * map and *changed reports whether the new config differs from the
 * currently-programmed one.  Returns -EINVAL for an out-of-range TC,
 * an unsupported TSA algorithm, or ETS bandwidths not summing to 100%.
 */
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (ets->prio_tc[i] >= hdev->tc_max ||
		    i >= hdev->tc_max)
			return -EINVAL;

		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Only require bandwidths to sum to 100% when at least one TC
	 * actually uses ETS; a pure strict-priority configuration has
	 * zero total bandwidth and must not be rejected.
	 */
	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}
118
/* Reprogram hardware after a queue/TC map change.  The steps run in a
 * fixed order: queue-to-TC mapping, scheduler mode, pause/PFC setup,
 * buffer allocation, then RSS re-initialization.  Returns 0 or the
 * first error from a hardware step (earlier steps are not rolled back).
 */
static int hclge_map_update(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_schd_mode_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	/* Queue count per TC may have changed; rebuild the RSS indirection
	 * table before programming RSS hardware.
	 */
	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}
145
hclge_client_setup_tc(struct hclge_dev * hdev)146 static int hclge_client_setup_tc(struct hclge_dev *hdev)
147 {
148 struct hclge_vport *vport = hdev->vport;
149 struct hnae3_client *client;
150 struct hnae3_handle *handle;
151 int ret;
152 u32 i;
153
154 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
155 handle = &vport[i].nic;
156 client = handle->client;
157
158 if (!client || !client->ops || !client->ops->setup_tc)
159 continue;
160
161 ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
162 if (ret)
163 return ret;
164 }
165
166 return 0;
167 }
168
/* Apply an IEEE ETS configuration: validate it, refresh the TM software
 * state, notify clients if the prio-to-TC map changed, and reprogram the
 * DWRR weights.  Returns 0 or a negative error.
 */
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	/* ETS changes are only accepted in IEEE host-managed DCBX mode,
	 * and are mutually exclusive with mqprio offload.
	 */
	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	hclge_tm_schd_info_update(hdev, num_tc);

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		return ret;

	if (map_changed) {
		/* NOTE(review): the hardware map itself is presumably
		 * reprogrammed via the client's setup_tc path (which can
		 * call map_update) — confirm against the hns3 client.
		 */
		ret = hclge_client_setup_tc(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);
}
199
/* Report the current PFC configuration and per-TC pause statistics.
 * pfc->pfc_en gets a bit per *priority* whose TC has PFC enabled;
 * requests/indications are filled from the hardware tx/rx pause counters.
 */
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = hdev->tm_info.hw_pfc_map;

	/* Pfc setting is based on TC */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			/* Priority j belongs to TC i and TC i has PFC on */
			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
				pfc->pfc_en |= BIT(j);
		}
	}

	ret = hclge_pfc_tx_stats_get(hdev, requests);
	if (ret)
		return ret;

	ret = hclge_pfc_rx_stats_get(hdev, indications);
	if (ret)
		return ret;

	/* NOTE(review): copy loop uses HCLGE_MAX_TC_NUM while the local
	 * arrays are sized HNAE3_MAX_TC — assumes HCLGE_MAX_TC_NUM <=
	 * HNAE3_MAX_TC; confirm in the headers.
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
	}
	return 0;
}
235
hclge_ieee_setpfc(struct hnae3_handle * h,struct ieee_pfc * pfc)236 static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
237 {
238 struct hclge_vport *vport = hclge_get_vport(h);
239 struct hclge_dev *hdev = vport->back;
240 u8 i, j, pfc_map, *prio_tc;
241
242 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
243 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
244 return -EINVAL;
245
246 prio_tc = hdev->tm_info.prio_tc;
247 pfc_map = 0;
248
249 for (i = 0; i < hdev->tm_info.num_tc; i++) {
250 for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
251 if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
252 pfc_map |= BIT(i);
253 break;
254 }
255 }
256 }
257
258 if (pfc_map == hdev->tm_info.hw_pfc_map)
259 return 0;
260
261 hdev->tm_info.hw_pfc_map = pfc_map;
262
263 return hclge_pause_setup_hw(hdev);
264 }
265
266 /* DCBX configuration */
hclge_getdcbx(struct hnae3_handle * h)267 static u8 hclge_getdcbx(struct hnae3_handle *h)
268 {
269 struct hclge_vport *vport = hclge_get_vport(h);
270 struct hclge_dev *hdev = vport->back;
271
272 if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
273 return 0;
274
275 return hdev->dcbx_cap;
276 }
277
/* Set the DCBX mode.  Returns 0 on success; a nonzero return tells
 * dcbnl the requested mode was rejected.
 */
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_dev *hdev = hclge_get_vport(h)->back;
	bool lld_managed = mode & DCB_CAP_DCBX_LLD_MANAGED;
	bool cee = mode & DCB_CAP_DCBX_VER_CEE;
	bool host = mode & DCB_CAP_DCBX_HOST;

	/* No support for LLD_MANAGED modes or CEE */
	if (lld_managed || cee || !host)
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}
293
/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mqprio offload is mutually exclusive with DCB. */
	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	if (tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"setup tc failed, tc(%u) > tc_max(%u)\n",
			tc, hdev->tc_max);
		return -EINVAL;
	}

	/* NOTE(review): from here on the TM software state is modified;
	 * the error returns below leave it updated while the hardware may
	 * not be — confirm whether callers recover from a partial setup.
	 */
	hclge_tm_schd_info_update(hdev, tc);

	ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
	if (ret)
		return ret;

	ret = hclge_tm_init_hw(hdev);
	if (ret)
		return ret;

	/* The guard above already ensured DCB_ENABLE is clear; clearing it
	 * again is harmless.
	 */
	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return 0;
}
330
/* DCB callbacks exported to the hns3 client layer via kinfo->dcb_ops
 * (installed by hclge_dcb_ops_set below).
 */
static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.map_update	= hclge_map_update,
	.setup_tc	= hclge_setup_tc,
};
341
hclge_dcb_ops_set(struct hclge_dev * hdev)342 void hclge_dcb_ops_set(struct hclge_dev *hdev)
343 {
344 struct hclge_vport *vport = hdev->vport;
345 struct hnae3_knic_private_info *kinfo;
346
347 /* Hdev does not support DCB or vport is
348 * not a pf, then dcb_ops is not set.
349 */
350 if (!hnae3_dev_dcb_supported(hdev) ||
351 vport->vport_id != 0)
352 return;
353
354 kinfo = &vport->nic.kinfo;
355 kinfo->dcb_ops = &hns3_dcb_ops;
356 hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
357 }
358