1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
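/* Together these two macros let a MAC counter be read generically by name:
 * HCLGE_STATS_READ(&hdev->mac_stats, HCLGE_MAC_STATS_FIELD_OFF(f)) evaluates
 * to hdev->mac_stats.f. The g_mac_stats_string table below pairs each ethtool
 * string with such an offset so hclge_comm_get_stats() can walk the stats
 * structure without naming every field.
 */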
29
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
33 #define NEED_RESERVE_TC_NUM 2
34 #define BUF_MAX_PERCENT 100
35 #define BUF_RESERVE_PERCENT 90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT 5
38 #define HCLGE_RESET_SYNC_TIME 100
39 #define HCLGE_PF_RESET_SYNC_TIME 20
40 #define HCLGE_PF_RESET_SYNC_CNT 1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET 1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
46 #define HCLGE_DFX_IGU_BD_OFFSET 4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
49 #define HCLGE_DFX_NCSI_BD_OFFSET 7
50 #define HCLGE_DFX_RTC_BD_OFFSET 8
51 #define HCLGE_DFX_PPP_BD_OFFSET 9
52 #define HCLGE_DFX_RCB_BD_OFFSET 10
53 #define HCLGE_DFX_TQP_BD_OFFSET 11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
56 #define HCLGE_LINK_STATUS_MS 10
57
58 #define HCLGE_VF_VPORT_START_NUM 1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74
75 static struct hnae3_ae_algo ae_algo;
76
77 static struct workqueue_struct *hclge_wq;
78
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 /* required last entry */
89 {0, }
90 };
91
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 HCLGE_CMDQ_TX_ADDR_H_REG,
96 HCLGE_CMDQ_TX_DEPTH_REG,
97 HCLGE_CMDQ_TX_TAIL_REG,
98 HCLGE_CMDQ_TX_HEAD_REG,
99 HCLGE_CMDQ_RX_ADDR_L_REG,
100 HCLGE_CMDQ_RX_ADDR_H_REG,
101 HCLGE_CMDQ_RX_DEPTH_REG,
102 HCLGE_CMDQ_RX_TAIL_REG,
103 HCLGE_CMDQ_RX_HEAD_REG,
104 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 HCLGE_CMDQ_INTR_STS_REG,
106 HCLGE_CMDQ_INTR_EN_REG,
107 HCLGE_CMDQ_INTR_GEN_REG};
108
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 HCLGE_VECTOR0_OTER_EN_REG,
111 HCLGE_MISC_RESET_STS_REG,
112 HCLGE_MISC_VECTOR_INT_STS,
113 HCLGE_GLOBAL_RESET_REG,
114 HCLGE_FUN_RST_ING,
115 HCLGE_GRO_EN_REG};
116
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 HCLGE_RING_RX_ADDR_H_REG,
119 HCLGE_RING_RX_BD_NUM_REG,
120 HCLGE_RING_RX_BD_LENGTH_REG,
121 HCLGE_RING_RX_MERGE_EN_REG,
122 HCLGE_RING_RX_TAIL_REG,
123 HCLGE_RING_RX_HEAD_REG,
124 HCLGE_RING_RX_FBD_NUM_REG,
125 HCLGE_RING_RX_OFFSET_REG,
126 HCLGE_RING_RX_FBD_OFFSET_REG,
127 HCLGE_RING_RX_STASH_REG,
128 HCLGE_RING_RX_BD_ERR_REG,
129 HCLGE_RING_TX_ADDR_L_REG,
130 HCLGE_RING_TX_ADDR_H_REG,
131 HCLGE_RING_TX_BD_NUM_REG,
132 HCLGE_RING_TX_PRIORITY_REG,
133 HCLGE_RING_TX_TC_REG,
134 HCLGE_RING_TX_MERGE_EN_REG,
135 HCLGE_RING_TX_TAIL_REG,
136 HCLGE_RING_TX_HEAD_REG,
137 HCLGE_RING_TX_FBD_NUM_REG,
138 HCLGE_RING_TX_OFFSET_REG,
139 HCLGE_RING_TX_EBD_NUM_REG,
140 HCLGE_RING_TX_EBD_OFFSET_REG,
141 HCLGE_RING_TX_BD_ERR_REG,
142 HCLGE_RING_EN_REG};
143
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 HCLGE_TQP_INTR_GL0_REG,
146 HCLGE_TQP_INTR_GL1_REG,
147 HCLGE_TQP_INTR_GL2_REG,
148 HCLGE_TQP_INTR_RL_REG};
149
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 "App Loopback test",
152 "Serdes serial Loopback test",
153 "Serdes parallel Loopback test",
154 "Phy Loopback test"
155 };
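/* Indexed by the HNAE3_LOOP_* enum (APP, SERIAL_SERDES, PARALLEL_SERDES,
 * PHY), as used by hclge_get_strings() below.
 */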
156
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 {"mac_tx_mac_pause_num",
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 {"mac_rx_mac_pause_num",
161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 {"mac_tx_control_pkt_num",
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 {"mac_rx_control_pkt_num",
165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 {"mac_tx_pfc_pkt_num",
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 {"mac_tx_pfc_pri0_pkt_num",
169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 {"mac_tx_pfc_pri1_pkt_num",
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 {"mac_tx_pfc_pri2_pkt_num",
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 {"mac_tx_pfc_pri3_pkt_num",
175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 {"mac_tx_pfc_pri4_pkt_num",
177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 {"mac_tx_pfc_pri5_pkt_num",
179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 {"mac_tx_pfc_pri6_pkt_num",
181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 {"mac_tx_pfc_pri7_pkt_num",
183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 {"mac_rx_pfc_pkt_num",
185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 {"mac_rx_pfc_pri0_pkt_num",
187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 {"mac_rx_pfc_pri1_pkt_num",
189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 {"mac_rx_pfc_pri2_pkt_num",
191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 {"mac_rx_pfc_pri3_pkt_num",
193 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 {"mac_rx_pfc_pri4_pkt_num",
195 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 {"mac_rx_pfc_pri5_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 {"mac_rx_pfc_pri6_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 {"mac_rx_pfc_pri7_pkt_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 {"mac_tx_total_pkt_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 {"mac_tx_total_oct_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 {"mac_tx_good_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 {"mac_tx_bad_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 {"mac_tx_good_oct_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 {"mac_tx_bad_oct_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 {"mac_tx_uni_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 {"mac_tx_multi_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 {"mac_tx_broad_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 {"mac_tx_undersize_pkt_num",
221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 {"mac_tx_oversize_pkt_num",
223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 {"mac_tx_64_oct_pkt_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 {"mac_tx_65_127_oct_pkt_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 {"mac_tx_128_255_oct_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 {"mac_tx_256_511_oct_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 {"mac_tx_512_1023_oct_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 {"mac_tx_1024_1518_oct_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 {"mac_tx_1519_2047_oct_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 {"mac_tx_2048_4095_oct_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 {"mac_tx_4096_8191_oct_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 {"mac_tx_8192_9216_oct_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 {"mac_tx_9217_12287_oct_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 {"mac_tx_12288_16383_oct_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 {"mac_tx_1519_max_good_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 {"mac_tx_1519_max_bad_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 {"mac_rx_total_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 {"mac_rx_total_oct_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 {"mac_rx_good_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 {"mac_rx_bad_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 {"mac_rx_good_oct_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 {"mac_rx_bad_oct_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 {"mac_rx_uni_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 {"mac_rx_multi_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 {"mac_rx_broad_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 {"mac_rx_undersize_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 {"mac_rx_oversize_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 {"mac_rx_64_oct_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 {"mac_rx_65_127_oct_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 {"mac_rx_128_255_oct_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 {"mac_rx_256_511_oct_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 {"mac_rx_512_1023_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 {"mac_rx_1024_1518_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 {"mac_rx_1519_2047_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 {"mac_rx_2048_4095_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 {"mac_rx_4096_8191_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 {"mac_rx_8192_9216_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 {"mac_rx_9217_12287_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 {"mac_rx_12288_16383_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 {"mac_rx_1519_max_good_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 {"mac_rx_1519_max_bad_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302
303 {"mac_tx_fragment_pkt_num",
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 {"mac_tx_undermin_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 {"mac_tx_jabber_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 {"mac_tx_err_all_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 {"mac_tx_from_app_good_pkt_num",
312 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 {"mac_tx_from_app_bad_pkt_num",
314 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 {"mac_rx_fragment_pkt_num",
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 {"mac_rx_undermin_pkt_num",
318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 {"mac_rx_jabber_pkt_num",
320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 {"mac_rx_fcs_err_pkt_num",
322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 {"mac_rx_send_app_good_pkt_num",
324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 {"mac_rx_send_app_bad_pkt_num",
326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 {
331 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 .i_port_bitmap = 0x1,
335 },
336 };
337
338 static const u8 hclge_hash_key[] = {
339 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
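/* Default 40-byte RSS hash key, presumably used as the Toeplitz key when
 * RSS is initialised; the same byte pattern appears in several other NIC
 * drivers.
 */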
345
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 HCLGE_DFX_BIOS_BD_OFFSET,
348 HCLGE_DFX_SSU_0_BD_OFFSET,
349 HCLGE_DFX_SSU_1_BD_OFFSET,
350 HCLGE_DFX_IGU_BD_OFFSET,
351 HCLGE_DFX_RPU_0_BD_OFFSET,
352 HCLGE_DFX_RPU_1_BD_OFFSET,
353 HCLGE_DFX_NCSI_BD_OFFSET,
354 HCLGE_DFX_RTC_BD_OFFSET,
355 HCLGE_DFX_PPP_BD_OFFSET,
356 HCLGE_DFX_RCB_BD_OFFSET,
357 HCLGE_DFX_TQP_BD_OFFSET,
358 HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 HCLGE_OPC_DFX_SSU_REG_0,
364 HCLGE_OPC_DFX_SSU_REG_1,
365 HCLGE_OPC_DFX_IGU_EGU_REG,
366 HCLGE_OPC_DFX_RPU_REG_0,
367 HCLGE_OPC_DFX_RPU_REG_1,
368 HCLGE_OPC_DFX_NCSI_REG,
369 HCLGE_OPC_DFX_RTC_REG,
370 HCLGE_OPC_DFX_PPP_REG,
371 HCLGE_OPC_DFX_RCB_REG,
372 HCLGE_OPC_DFX_TQP_REG,
373 HCLGE_OPC_DFX_SSU_REG_2
374 };
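/* The two lists above are index-aligned: entry i of hclge_dfx_bd_offset_list
 * and entry i of hclge_dfx_reg_opcode_list refer to the same DFX register
 * block (BIOS, SSU_0, SSU_1, ...).
 */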
375
376 static const struct key_info meta_data_key_info[] = {
377 { PACKET_TYPE_ID, 6},
378 { IP_FRAGEMENT, 1},
379 { ROCE_TYPE, 1},
380 { NEXT_KEY, 5},
381 { VLAN_NUMBER, 2},
382 { SRC_VPORT, 12},
383 { DST_VPORT, 12},
384 { TUNNEL_PACKET, 1},
385 };
386
387 static const struct key_info tuple_key_info[] = {
388 { OUTER_DST_MAC, 48},
389 { OUTER_SRC_MAC, 48},
390 { OUTER_VLAN_TAG_FST, 16},
391 { OUTER_VLAN_TAG_SEC, 16},
392 { OUTER_ETH_TYPE, 16},
393 { OUTER_L2_RSV, 16},
394 { OUTER_IP_TOS, 8},
395 { OUTER_IP_PROTO, 8},
396 { OUTER_SRC_IP, 32},
397 { OUTER_DST_IP, 32},
398 { OUTER_L3_RSV, 16},
399 { OUTER_SRC_PORT, 16},
400 { OUTER_DST_PORT, 16},
401 { OUTER_L4_RSV, 32},
402 { OUTER_TUN_VNI, 24},
403 { OUTER_TUN_FLOW_ID, 8},
404 { INNER_DST_MAC, 48},
405 { INNER_SRC_MAC, 48},
406 { INNER_VLAN_TAG_FST, 16},
407 { INNER_VLAN_TAG_SEC, 16},
408 { INNER_ETH_TYPE, 16},
409 { INNER_L2_RSV, 16},
410 { INNER_IP_TOS, 8},
411 { INNER_IP_PROTO, 8},
412 { INNER_SRC_IP, 32},
413 { INNER_DST_IP, 32},
414 { INNER_L3_RSV, 16},
415 { INNER_SRC_PORT, 16},
416 { INNER_DST_PORT, 16},
417 { INNER_L4_RSV, 32},
418 };
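/* In both key_info tables above, the second member is the field width in
 * bits (48 for a MAC address, 32 for an IPv4 address, 16 for a port, and so
 * on); these widths are used when building flow director lookup keys.
 */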
419
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
421 {
422 #define HCLGE_MAC_CMD_NUM 21
423
424 u64 *data = (u64 *)(&hdev->mac_stats);
425 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
426 __le64 *desc_data;
427 int i, k, n;
428 int ret;
429
430 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432 if (ret) {
433 dev_err(&hdev->pdev->dev,
434 "Get MAC pkt stats fail, status = %d.\n", ret);
435
436 return ret;
437 }
438
439 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440 /* for special opcode 0032, only the first desc has the head */
441 if (unlikely(i == 0)) {
442 desc_data = (__le64 *)(&desc[i].data[0]);
443 n = HCLGE_RD_FIRST_STATS_NUM;
444 } else {
445 desc_data = (__le64 *)(&desc[i]);
446 n = HCLGE_RD_OTHER_STATS_NUM;
447 }
448
449 for (k = 0; k < n; k++) {
450 *data += le64_to_cpu(*desc_data);
451 data++;
452 desc_data++;
453 }
454 }
455
456 return 0;
457 }
458
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460 {
461 u64 *data = (u64 *)(&hdev->mac_stats);
462 struct hclge_desc *desc;
463 __le64 *desc_data;
464 u16 i, k, n;
465 int ret;
466
467 /* This may be called inside atomic sections,
468 * so GFP_ATOMIC is more suitable here
469 */
470 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471 if (!desc)
472 return -ENOMEM;
473
474 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476 if (ret) {
477 kfree(desc);
478 return ret;
479 }
480
481 for (i = 0; i < desc_num; i++) {
482 /* for special opcode 0034, only the first desc has the head */
483 if (i == 0) {
484 desc_data = (__le64 *)(&desc[i].data[0]);
485 n = HCLGE_RD_FIRST_STATS_NUM;
486 } else {
487 desc_data = (__le64 *)(&desc[i]);
488 n = HCLGE_RD_OTHER_STATS_NUM;
489 }
490
491 for (k = 0; k < n; k++) {
492 *data += le64_to_cpu(*desc_data);
493 data++;
494 desc_data++;
495 }
496 }
497
498 kfree(desc);
499
500 return 0;
501 }
502
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504 {
505 struct hclge_desc desc;
506 __le32 *desc_data;
507 u32 reg_num;
508 int ret;
509
510 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512 if (ret)
513 return ret;
514
515 desc_data = (__le32 *)(&desc.data[0]);
516 reg_num = le32_to_cpu(*desc_data);
517
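/* One BD carries the head together with the first few stats; the
 * arithmetic below works out to 1 + DIV_ROUND_UP(reg_num - 3, 4).
 * For example, a (hypothetical) reg_num of 35 yields 1 + 8 = 9 descriptors.
 */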
518 *desc_num = 1 + ((reg_num - 3) >> 2) +
519 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520
521 return 0;
522 }
523
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
525 {
526 u32 desc_num;
527 int ret;
528
529 ret = hclge_mac_query_reg_num(hdev, &desc_num);
530
531 /* The firmware supports the new statistics acquisition method */
532 if (!ret)
533 ret = hclge_mac_update_stats_complete(hdev, desc_num);
534 else if (ret == -EOPNOTSUPP)
535 ret = hclge_mac_update_stats_defective(hdev);
536 else
537 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538
539 return ret;
540 }
541
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543 {
544 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545 struct hclge_vport *vport = hclge_get_vport(handle);
546 struct hclge_dev *hdev = vport->back;
547 struct hnae3_queue *queue;
548 struct hclge_desc desc[1];
549 struct hclge_tqp *tqp;
550 int ret, i;
551
552 for (i = 0; i < kinfo->num_tqps; i++) {
553 queue = handle->kinfo.tqp[i];
554 tqp = container_of(queue, struct hclge_tqp, q);
555 /* command : HCLGE_OPC_QUERY_RX_STATS */
556 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557 true);
558
559 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
560 ret = hclge_cmd_send(&hdev->hw, desc, 1);
561 if (ret) {
562 dev_err(&hdev->pdev->dev,
563 "Query tqp stat fail, status = %d,queue = %d\n",
564 ret, i);
565 return ret;
566 }
567 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568 le32_to_cpu(desc[0].data[1]);
569 }
570
571 for (i = 0; i < kinfo->num_tqps; i++) {
572 queue = handle->kinfo.tqp[i];
573 tqp = container_of(queue, struct hclge_tqp, q);
574 /* command : HCLGE_OPC_QUERY_TX_STATS */
575 hclge_cmd_setup_basic_desc(&desc[0],
576 HCLGE_OPC_QUERY_TX_STATS,
577 true);
578
579 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
580 ret = hclge_cmd_send(&hdev->hw, desc, 1);
581 if (ret) {
582 dev_err(&hdev->pdev->dev,
583 "Query tqp stat fail, status = %d,queue = %d\n",
584 ret, i);
585 return ret;
586 }
587 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588 le32_to_cpu(desc[0].data[1]);
589 }
590
591 return 0;
592 }
593
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595 {
596 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597 struct hclge_tqp *tqp;
598 u64 *buff = data;
599 int i;
600
601 for (i = 0; i < kinfo->num_tqps; i++) {
602 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
604 }
605
606 for (i = 0; i < kinfo->num_tqps; i++) {
607 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
609 }
610
611 return buff;
612 }
613
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 {
616 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617
618 /* each tqp has both a TX and an RX queue */
619 return kinfo->num_tqps * (2);
620 }
621
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623 {
624 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625 u8 *buff = data;
626 int i;
627
628 for (i = 0; i < kinfo->num_tqps; i++) {
629 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630 struct hclge_tqp, q);
631 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
632 tqp->index);
633 buff = buff + ETH_GSTRING_LEN;
634 }
635
636 for (i = 0; i < kinfo->num_tqps; i++) {
637 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638 struct hclge_tqp, q);
639 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
640 tqp->index);
641 buff = buff + ETH_GSTRING_LEN;
642 }
643
644 return buff;
645 }
646
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648 const struct hclge_comm_stats_str strs[],
649 int size, u64 *data)
650 {
651 u64 *buf = data;
652 u32 i;
653
654 for (i = 0; i < size; i++)
655 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656
657 return buf + size;
658 }
659
660 static u8 *hclge_comm_get_strings(u32 stringset,
661 const struct hclge_comm_stats_str strs[],
662 int size, u8 *data)
663 {
664 char *buff = (char *)data;
665 u32 i;
666
667 if (stringset != ETH_SS_STATS)
668 return buff;
669
670 for (i = 0; i < size; i++) {
671 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672 buff = buff + ETH_GSTRING_LEN;
673 }
674
675 return (u8 *)buff;
676 }
677
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679 {
680 struct hnae3_handle *handle;
681 int status;
682
683 handle = &hdev->vport[0].nic;
684 if (handle->client) {
685 status = hclge_tqps_update_stats(handle);
686 if (status) {
687 dev_err(&hdev->pdev->dev,
688 "Update TQPS stats fail, status = %d.\n",
689 status);
690 }
691 }
692
693 status = hclge_mac_update_stats(hdev);
694 if (status)
695 dev_err(&hdev->pdev->dev,
696 "Update MAC stats fail, status = %d.\n", status);
697 }
698
699 static void hclge_update_stats(struct hnae3_handle *handle,
700 struct net_device_stats *net_stats)
701 {
702 struct hclge_vport *vport = hclge_get_vport(handle);
703 struct hclge_dev *hdev = vport->back;
704 int status;
705
706 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707 return;
708
709 status = hclge_mac_update_stats(hdev);
710 if (status)
711 dev_err(&hdev->pdev->dev,
712 "Update MAC stats fail, status = %d.\n",
713 status);
714
715 status = hclge_tqps_update_stats(handle);
716 if (status)
717 dev_err(&hdev->pdev->dev,
718 "Update TQPS stats fail, status = %d.\n",
719 status);
720
721 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
722 }
723
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725 {
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727 HNAE3_SUPPORT_PHY_LOOPBACK |\
728 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
730
731 struct hclge_vport *vport = hclge_get_vport(handle);
732 struct hclge_dev *hdev = vport->back;
733 int count = 0;
734
735 /* Loopback test support rules:
736 * mac: only supported in GE mode
737 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
738 * phy: only supported when a PHY device exists on the board
739 */
740 if (stringset == ETH_SS_TEST) {
741 /* clear loopback bit flags at first */
742 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747 count += 1;
748 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749 }
750
751 count += 2;
752 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754
755 if (hdev->hw.mac.phydev) {
756 count += 1;
757 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
758 }
759
760 } else if (stringset == ETH_SS_STATS) {
761 count = ARRAY_SIZE(g_mac_stats_string) +
762 hclge_tqps_get_sset_count(handle, stringset);
763 }
764
765 return count;
766 }
767
768 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
769 u8 *data)
770 {
771 u8 *p = (char *)data;
772 int size;
773
774 if (stringset == ETH_SS_STATS) {
775 size = ARRAY_SIZE(g_mac_stats_string);
776 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
777 size, p);
778 p = hclge_tqps_get_strings(handle, p);
779 } else if (stringset == ETH_SS_TEST) {
780 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
781 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
782 ETH_GSTRING_LEN);
783 p += ETH_GSTRING_LEN;
784 }
785 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
786 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
787 ETH_GSTRING_LEN);
788 p += ETH_GSTRING_LEN;
789 }
790 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
791 memcpy(p,
792 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
793 ETH_GSTRING_LEN);
794 p += ETH_GSTRING_LEN;
795 }
796 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
797 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
798 ETH_GSTRING_LEN);
799 p += ETH_GSTRING_LEN;
800 }
801 }
802 }
803
804 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
805 {
806 struct hclge_vport *vport = hclge_get_vport(handle);
807 struct hclge_dev *hdev = vport->back;
808 u64 *p;
809
810 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
811 ARRAY_SIZE(g_mac_stats_string), data);
812 p = hclge_tqps_get_stats(handle, p);
813 }
814
815 static void hclge_get_mac_stat(struct hnae3_handle *handle,
816 struct hns3_mac_stats *mac_stats)
817 {
818 struct hclge_vport *vport = hclge_get_vport(handle);
819 struct hclge_dev *hdev = vport->back;
820
821 hclge_update_stats(handle, NULL);
822
823 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
825 }
826
827 static int hclge_parse_func_status(struct hclge_dev *hdev,
828 struct hclge_func_status_cmd *status)
829 {
830 #define HCLGE_MAC_ID_MASK 0xF
831
832 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833 return -EINVAL;
834
835 /* Record whether this PF is the main PF */
836 if (status->pf_state & HCLGE_PF_STATE_MAIN)
837 hdev->flag |= HCLGE_FLAG_MAIN;
838 else
839 hdev->flag &= ~HCLGE_FLAG_MAIN;
840
841 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
842 return 0;
843 }
844
845 static int hclge_query_function_status(struct hclge_dev *hdev)
846 {
847 #define HCLGE_QUERY_MAX_CNT 5
848
849 struct hclge_func_status_cmd *req;
850 struct hclge_desc desc;
851 int timeout = 0;
852 int ret;
853
854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
855 req = (struct hclge_func_status_cmd *)desc.data;
856
857 do {
858 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859 if (ret) {
860 dev_err(&hdev->pdev->dev,
861 "query function status failed %d.\n", ret);
862 return ret;
863 }
864
865 /* Check whether PF reset is done */
866 if (req->pf_state)
867 break;
868 usleep_range(1000, 2000);
869 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
870
871 return hclge_parse_func_status(hdev, req);
872 }
873
874 static int hclge_query_pf_resource(struct hclge_dev *hdev)
875 {
876 struct hclge_pf_res_cmd *req;
877 struct hclge_desc desc;
878 int ret;
879
880 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
882 if (ret) {
883 dev_err(&hdev->pdev->dev,
884 "query pf resource failed %d.\n", ret);
885 return ret;
886 }
887
888 req = (struct hclge_pf_res_cmd *)desc.data;
889 hdev->num_tqps = le16_to_cpu(req->tqp_num);
890 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
891
892 if (req->tx_buf_size)
893 hdev->tx_buf_size =
894 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
895 else
896 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
897
898 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
899
900 if (req->dv_buf_size)
901 hdev->dv_buf_size =
902 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
903 else
904 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
905
906 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
907
908 if (hnae3_dev_roce_supported(hdev)) {
909 hdev->roce_base_msix_offset =
910 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
911 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
912 hdev->num_roce_msi =
913 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
914 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
915
916 /* The NIC's MSI-X vector count always equals the RoCE's. */
917 hdev->num_nic_msi = hdev->num_roce_msi;
918
919 /* The PF should have both NIC and RoCE vectors;
920 * NIC vectors are allocated before RoCE vectors.
921 */
922 hdev->num_msi = hdev->num_roce_msi +
923 hdev->roce_base_msix_offset;
924 } else {
925 hdev->num_msi =
926 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
927 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
928
929 hdev->num_nic_msi = hdev->num_msi;
930 }
931
932 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
933 dev_err(&hdev->pdev->dev,
934 "Just %u msi resources, not enough for pf(min:2).\n",
935 hdev->num_nic_msi);
936 return -EINVAL;
937 }
938
939 return 0;
940 }
941
942 static int hclge_parse_speed(int speed_cmd, int *speed)
943 {
944 switch (speed_cmd) {
945 case 6:
946 *speed = HCLGE_MAC_SPEED_10M;
947 break;
948 case 7:
949 *speed = HCLGE_MAC_SPEED_100M;
950 break;
951 case 0:
952 *speed = HCLGE_MAC_SPEED_1G;
953 break;
954 case 1:
955 *speed = HCLGE_MAC_SPEED_10G;
956 break;
957 case 2:
958 *speed = HCLGE_MAC_SPEED_25G;
959 break;
960 case 3:
961 *speed = HCLGE_MAC_SPEED_40G;
962 break;
963 case 4:
964 *speed = HCLGE_MAC_SPEED_50G;
965 break;
966 case 5:
967 *speed = HCLGE_MAC_SPEED_100G;
968 break;
969 case 8:
970 *speed = HCLGE_MAC_SPEED_200G;
971 break;
972 default:
973 return -EINVAL;
974 }
975
976 return 0;
977 }
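/* Note that the firmware speed codes are not ordered by speed: codes 0-5
 * map to 1G/10G/25G/40G/50G/100G, 6 and 7 are the copper 10M/100M speeds,
 * and 8 is 200G.
 */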
978
979 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
980 {
981 struct hclge_vport *vport = hclge_get_vport(handle);
982 struct hclge_dev *hdev = vport->back;
983 u32 speed_ability = hdev->hw.mac.speed_ability;
984 u32 speed_bit = 0;
985
986 switch (speed) {
987 case HCLGE_MAC_SPEED_10M:
988 speed_bit = HCLGE_SUPPORT_10M_BIT;
989 break;
990 case HCLGE_MAC_SPEED_100M:
991 speed_bit = HCLGE_SUPPORT_100M_BIT;
992 break;
993 case HCLGE_MAC_SPEED_1G:
994 speed_bit = HCLGE_SUPPORT_1G_BIT;
995 break;
996 case HCLGE_MAC_SPEED_10G:
997 speed_bit = HCLGE_SUPPORT_10G_BIT;
998 break;
999 case HCLGE_MAC_SPEED_25G:
1000 speed_bit = HCLGE_SUPPORT_25G_BIT;
1001 break;
1002 case HCLGE_MAC_SPEED_40G:
1003 speed_bit = HCLGE_SUPPORT_40G_BIT;
1004 break;
1005 case HCLGE_MAC_SPEED_50G:
1006 speed_bit = HCLGE_SUPPORT_50G_BIT;
1007 break;
1008 case HCLGE_MAC_SPEED_100G:
1009 speed_bit = HCLGE_SUPPORT_100G_BIT;
1010 break;
1011 case HCLGE_MAC_SPEED_200G:
1012 speed_bit = HCLGE_SUPPORT_200G_BIT;
1013 break;
1014 default:
1015 return -EINVAL;
1016 }
1017
1018 if (speed_bit & speed_ability)
1019 return 0;
1020
1021 return -EINVAL;
1022 }
1023
1024 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1025 {
1026 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1027 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1028 mac->supported);
1029 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1030 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1031 mac->supported);
1032 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1033 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1034 mac->supported);
1035 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1036 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1037 mac->supported);
1038 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1039 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1040 mac->supported);
1041 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1042 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1043 mac->supported);
1044 }
1045
1046 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1047 {
1048 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1049 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1050 mac->supported);
1051 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1053 mac->supported);
1054 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1056 mac->supported);
1057 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1059 mac->supported);
1060 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1061 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1062 mac->supported);
1063 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1064 linkmode_set_bit(
1065 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1066 mac->supported);
1067 }
1068
1069 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1070 {
1071 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1072 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1073 mac->supported);
1074 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1075 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1076 mac->supported);
1077 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1078 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1079 mac->supported);
1080 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1081 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1082 mac->supported);
1083 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1084 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1085 mac->supported);
1086 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1087 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1088 mac->supported);
1089 }
1090
1091 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1092 {
1093 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1094 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1095 mac->supported);
1096 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1097 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1098 mac->supported);
1099 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1100 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1101 mac->supported);
1102 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1103 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1104 mac->supported);
1105 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1106 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1107 mac->supported);
1108 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1109 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1110 mac->supported);
1111 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1112 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1113 mac->supported);
1114 }
1115
1116 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1117 {
1118 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1119 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1120
1121 switch (mac->speed) {
1122 case HCLGE_MAC_SPEED_10G:
1123 case HCLGE_MAC_SPEED_40G:
1124 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1125 mac->supported);
1126 mac->fec_ability =
1127 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1128 break;
1129 case HCLGE_MAC_SPEED_25G:
1130 case HCLGE_MAC_SPEED_50G:
1131 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1132 mac->supported);
1133 mac->fec_ability =
1134 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1135 BIT(HNAE3_FEC_AUTO);
1136 break;
1137 case HCLGE_MAC_SPEED_100G:
1138 case HCLGE_MAC_SPEED_200G:
1139 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1140 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1141 break;
1142 default:
1143 mac->fec_ability = 0;
1144 break;
1145 }
1146 }
1147
1148 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1149 u16 speed_ability)
1150 {
1151 struct hclge_mac *mac = &hdev->hw.mac;
1152
1153 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1154 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1155 mac->supported);
1156
1157 hclge_convert_setting_sr(mac, speed_ability);
1158 hclge_convert_setting_lr(mac, speed_ability);
1159 hclge_convert_setting_cr(mac, speed_ability);
1160 if (hnae3_dev_fec_supported(hdev))
1161 hclge_convert_setting_fec(mac);
1162
1163 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1164 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1165 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1166 }
1167
1168 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1169 u16 speed_ability)
1170 {
1171 struct hclge_mac *mac = &hdev->hw.mac;
1172
1173 hclge_convert_setting_kr(mac, speed_ability);
1174 if (hnae3_dev_fec_supported(hdev))
1175 hclge_convert_setting_fec(mac);
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1177 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1178 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1179 }
1180
1181 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1182 u16 speed_ability)
1183 {
1184 unsigned long *supported = hdev->hw.mac.supported;
1185
1186 /* default to supporting all speeds for a GE port */
1187 if (!speed_ability)
1188 speed_ability = HCLGE_SUPPORT_GE;
1189
1190 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1191 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1192 supported);
1193
1194 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1195 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1196 supported);
1197 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1198 supported);
1199 }
1200
1201 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1202 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1203 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1204 }
1205
1206 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1207 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1208 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1209 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1210 }
1211
1212 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1213 {
1214 u8 media_type = hdev->hw.mac.media_type;
1215
1216 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1217 hclge_parse_fiber_link_mode(hdev, speed_ability);
1218 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1219 hclge_parse_copper_link_mode(hdev, speed_ability);
1220 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1221 hclge_parse_backplane_link_mode(hdev, speed_ability);
1222 }
1223
1224 static u32 hclge_get_max_speed(u16 speed_ability)
1225 {
1226 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1227 return HCLGE_MAC_SPEED_200G;
1228
1229 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1230 return HCLGE_MAC_SPEED_100G;
1231
1232 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1233 return HCLGE_MAC_SPEED_50G;
1234
1235 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1236 return HCLGE_MAC_SPEED_40G;
1237
1238 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1239 return HCLGE_MAC_SPEED_25G;
1240
1241 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1242 return HCLGE_MAC_SPEED_10G;
1243
1244 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1245 return HCLGE_MAC_SPEED_1G;
1246
1247 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1248 return HCLGE_MAC_SPEED_100M;
1249
1250 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1251 return HCLGE_MAC_SPEED_10M;
1252
1253 return HCLGE_MAC_SPEED_1G;
1254 }
1255
1256 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1257 {
1258 #define SPEED_ABILITY_EXT_SHIFT 8
1259
1260 struct hclge_cfg_param_cmd *req;
1261 u64 mac_addr_tmp_high;
1262 u16 speed_ability_ext;
1263 u64 mac_addr_tmp;
1264 unsigned int i;
1265
1266 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1267
1268 /* get the configuration */
1269 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1270 HCLGE_CFG_VMDQ_M,
1271 HCLGE_CFG_VMDQ_S);
1272 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1273 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1274 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1275 HCLGE_CFG_TQP_DESC_N_M,
1276 HCLGE_CFG_TQP_DESC_N_S);
1277
1278 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1279 HCLGE_CFG_PHY_ADDR_M,
1280 HCLGE_CFG_PHY_ADDR_S);
1281 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 HCLGE_CFG_MEDIA_TP_M,
1283 HCLGE_CFG_MEDIA_TP_S);
1284 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285 HCLGE_CFG_RX_BUF_LEN_M,
1286 HCLGE_CFG_RX_BUF_LEN_S);
1287 /* get mac_address */
1288 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1289 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1290 HCLGE_CFG_MAC_ADDR_H_M,
1291 HCLGE_CFG_MAC_ADDR_H_S);
1292
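/* The split shift below is equivalent to mac_addr_tmp_high << 32; writing
 * it as (x << 31) << 1 presumably sidesteps compiler warnings about
 * shifting by the full width of a 32-bit intermediate.
 */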
1293 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1294
1295 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1296 HCLGE_CFG_DEFAULT_SPEED_M,
1297 HCLGE_CFG_DEFAULT_SPEED_S);
1298 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1299 HCLGE_CFG_RSS_SIZE_M,
1300 HCLGE_CFG_RSS_SIZE_S);
1301
1302 for (i = 0; i < ETH_ALEN; i++)
1303 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1304
1305 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1306 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1307
1308 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1309 HCLGE_CFG_SPEED_ABILITY_M,
1310 HCLGE_CFG_SPEED_ABILITY_S);
1311 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1312 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1313 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1314 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1315
1316 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1317 HCLGE_CFG_UMV_TBL_SPACE_M,
1318 HCLGE_CFG_UMV_TBL_SPACE_S);
1319 if (!cfg->umv_space)
1320 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1321 }
1322
1323 /* hclge_get_cfg: query the static parameters from flash
1324 * @hdev: pointer to struct hclge_dev
1325 * @hcfg: the config structure to be filled in
1326 */
1327 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1328 {
1329 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1330 struct hclge_cfg_param_cmd *req;
1331 unsigned int i;
1332 int ret;
1333
1334 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1335 u32 offset = 0;
1336
1337 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1338 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1339 true);
1340 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1341 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1342 /* Length must be given in units of 4 bytes when sent to hardware */
1343 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1344 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1345 req->offset = cpu_to_le32(offset);
1346 }
1347
1348 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1349 if (ret) {
1350 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1351 return ret;
1352 }
1353
1354 hclge_parse_cfg(hcfg, desc);
1355
1356 return 0;
1357 }
1358
1359 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1360 {
1361 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1362
1363 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1364
1365 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1366 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1367 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1368 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1369 }
1370
1371 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1372 struct hclge_desc *desc)
1373 {
1374 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1375 struct hclge_dev_specs_0_cmd *req0;
1376
1377 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1378
1379 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1380 ae_dev->dev_specs.rss_ind_tbl_size =
1381 le16_to_cpu(req0->rss_ind_tbl_size);
1382 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1383 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1384 }
1385
1386 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1387 {
1388 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1389
1390 if (!dev_specs->max_non_tso_bd_num)
1391 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1392 if (!dev_specs->rss_ind_tbl_size)
1393 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1394 if (!dev_specs->rss_key_size)
1395 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1396 if (!dev_specs->max_tm_rate)
1397 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1398 }
1399
1400 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1401 {
1402 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1403 int ret;
1404 int i;
1405
1406 /* set default specifications as devices lower than version V3 do not
1407 * support querying specifications from firmware.
1408 */
1409 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1410 hclge_set_default_dev_specs(hdev);
1411 return 0;
1412 }
1413
1414 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1415 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1416 true);
1417 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1418 }
1419 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1420
1421 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1422 if (ret)
1423 return ret;
1424
1425 hclge_parse_dev_specs(hdev, desc);
1426 hclge_check_dev_specs(hdev);
1427
1428 return 0;
1429 }
1430
1431 static int hclge_get_cap(struct hclge_dev *hdev)
1432 {
1433 int ret;
1434
1435 ret = hclge_query_function_status(hdev);
1436 if (ret) {
1437 dev_err(&hdev->pdev->dev,
1438 "query function status error %d.\n", ret);
1439 return ret;
1440 }
1441
1442 /* get pf resource */
1443 return hclge_query_pf_resource(hdev);
1444 }
1445
1446 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1447 {
1448 #define HCLGE_MIN_TX_DESC 64
1449 #define HCLGE_MIN_RX_DESC 64
1450
1451 if (!is_kdump_kernel())
1452 return;
1453
1454 dev_info(&hdev->pdev->dev,
1455 "Running kdump kernel. Using minimal resources\n");
1456
1457 /* minimal queue pairs equal the number of vports */
1458 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1459 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1460 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1461 }
1462
1463 static int hclge_configure(struct hclge_dev *hdev)
1464 {
1465 struct hclge_cfg cfg;
1466 unsigned int i;
1467 int ret;
1468
1469 ret = hclge_get_cfg(hdev, &cfg);
1470 if (ret)
1471 return ret;
1472
1473 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1474 hdev->base_tqp_pid = 0;
1475 hdev->rss_size_max = cfg.rss_size_max;
1476 hdev->rx_buf_len = cfg.rx_buf_len;
1477 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1478 hdev->hw.mac.media_type = cfg.media_type;
1479 hdev->hw.mac.phy_addr = cfg.phy_addr;
1480 hdev->num_tx_desc = cfg.tqp_desc_num;
1481 hdev->num_rx_desc = cfg.tqp_desc_num;
1482 hdev->tm_info.num_pg = 1;
1483 hdev->tc_max = cfg.tc_num;
1484 hdev->tm_info.hw_pfc_map = 0;
1485 hdev->wanted_umv_size = cfg.umv_space;
1486
1487 if (hnae3_dev_fd_supported(hdev)) {
1488 hdev->fd_en = true;
1489 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1490 }
1491
1492 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1493 if (ret) {
1494 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1495 cfg.default_speed, ret);
1496 return ret;
1497 }
1498
1499 hclge_parse_link_mode(hdev, cfg.speed_ability);
1500
1501 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1502
1503 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1504 (hdev->tc_max < 1)) {
1505 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1506 hdev->tc_max);
1507 hdev->tc_max = 1;
1508 }
1509
1510 /* Dev does not support DCB */
1511 if (!hnae3_dev_dcb_supported(hdev)) {
1512 hdev->tc_max = 1;
1513 hdev->pfc_max = 0;
1514 } else {
1515 hdev->pfc_max = hdev->tc_max;
1516 }
1517
1518 hdev->tm_info.num_tc = 1;
1519
1520 /* Non-contiguous TC configuration is currently not supported */
1521 for (i = 0; i < hdev->tm_info.num_tc; i++)
1522 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1523
1524 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1525
1526 hclge_init_kdump_kernel_config(hdev);
1527
1528 /* Set the init affinity based on pci func number */
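/* (The function number modulo the number of CPUs in the device's NUMA
 * node selects one local CPU, spreading multiple PFs across that node's
 * CPUs.)
 */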
1529 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1530 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1531 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1532 &hdev->affinity_mask);
1533
1534 return ret;
1535 }
1536
1537 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1538 u16 tso_mss_max)
1539 {
1540 struct hclge_cfg_tso_status_cmd *req;
1541 struct hclge_desc desc;
1542
1543 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1544
1545 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1546 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1547 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1548
1549 return hclge_cmd_send(&hdev->hw, &desc, 1);
1550 }
1551
1552 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1553 {
1554 struct hclge_cfg_gro_status_cmd *req;
1555 struct hclge_desc desc;
1556 int ret;
1557
1558 if (!hnae3_dev_gro_supported(hdev))
1559 return 0;
1560
1561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1562 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1563
1564 req->gro_en = en ? 1 : 0;
1565
1566 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1567 if (ret)
1568 dev_err(&hdev->pdev->dev,
1569 "GRO hardware config cmd failed, ret = %d\n", ret);
1570
1571 return ret;
1572 }
1573
1574 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1575 {
1576 struct hclge_tqp *tqp;
1577 int i;
1578
1579 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1580 sizeof(struct hclge_tqp), GFP_KERNEL);
1581 if (!hdev->htqp)
1582 return -ENOMEM;
1583
1584 tqp = hdev->htqp;
1585
1586 for (i = 0; i < hdev->num_tqps; i++) {
1587 tqp->dev = &hdev->pdev->dev;
1588 tqp->index = i;
1589
1590 tqp->q.ae_algo = &ae_algo;
1591 tqp->q.buf_size = hdev->rx_buf_len;
1592 tqp->q.tx_desc_num = hdev->num_tx_desc;
1593 tqp->q.rx_desc_num = hdev->num_rx_desc;
1594 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1595 i * HCLGE_TQP_REG_SIZE;
1596
1597 tqp++;
1598 }
1599
1600 return 0;
1601 }
1602
1603 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1604 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1605 {
1606 struct hclge_tqp_map_cmd *req;
1607 struct hclge_desc desc;
1608 int ret;
1609
1610 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1611
1612 req = (struct hclge_tqp_map_cmd *)desc.data;
1613 req->tqp_id = cpu_to_le16(tqp_pid);
1614 req->tqp_vf = func_id;
1615 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1616 if (!is_pf)
1617 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1618 req->tqp_vid = cpu_to_le16(tqp_vid);
1619
1620 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1621 if (ret)
1622 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1623
1624 return ret;
1625 }
1626
1627 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1628 {
1629 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1630 struct hclge_dev *hdev = vport->back;
1631 int i, alloced;
1632
1633 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1634 alloced < num_tqps; i++) {
1635 if (!hdev->htqp[i].alloced) {
1636 hdev->htqp[i].q.handle = &vport->nic;
1637 hdev->htqp[i].q.tqp_index = alloced;
1638 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1639 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1640 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1641 hdev->htqp[i].alloced = true;
1642 alloced++;
1643 }
1644 }
1645 vport->alloc_tqps = alloced;
1646 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1647 vport->alloc_tqps / hdev->tm_info.num_tc);
1648
1649 /* ensure a one-to-one mapping between irq and queue by default */
1650 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1651 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1652
1653 return 0;
1654 }
1655
1656 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1657 u16 num_tx_desc, u16 num_rx_desc)
1658
1659 {
1660 struct hnae3_handle *nic = &vport->nic;
1661 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1662 struct hclge_dev *hdev = vport->back;
1663 int ret;
1664
1665 kinfo->num_tx_desc = num_tx_desc;
1666 kinfo->num_rx_desc = num_rx_desc;
1667
1668 kinfo->rx_buf_len = hdev->rx_buf_len;
1669
1670 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1671 sizeof(struct hnae3_queue *), GFP_KERNEL);
1672 if (!kinfo->tqp)
1673 return -ENOMEM;
1674
1675 ret = hclge_assign_tqp(vport, num_tqps);
1676 if (ret)
1677 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1678
1679 return ret;
1680 }
1681
1682 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1683 struct hclge_vport *vport)
1684 {
1685 struct hnae3_handle *nic = &vport->nic;
1686 struct hnae3_knic_private_info *kinfo;
1687 u16 i;
1688
1689 kinfo = &nic->kinfo;
1690 for (i = 0; i < vport->alloc_tqps; i++) {
1691 struct hclge_tqp *q =
1692 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1693 bool is_pf;
1694 int ret;
1695
1696 is_pf = !(vport->vport_id);
1697 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1698 i, is_pf);
1699 if (ret)
1700 return ret;
1701 }
1702
1703 return 0;
1704 }
1705
1706 static int hclge_map_tqp(struct hclge_dev *hdev)
1707 {
1708 struct hclge_vport *vport = hdev->vport;
1709 u16 i, num_vport;
1710
1711 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1712 for (i = 0; i < num_vport; i++) {
1713 int ret;
1714
1715 ret = hclge_map_tqp_to_vport(hdev, vport);
1716 if (ret)
1717 return ret;
1718
1719 vport++;
1720 }
1721
1722 return 0;
1723 }
1724
1725 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1726 {
1727 struct hnae3_handle *nic = &vport->nic;
1728 struct hclge_dev *hdev = vport->back;
1729 int ret;
1730
1731 nic->pdev = hdev->pdev;
1732 nic->ae_algo = &ae_algo;
1733 nic->numa_node_mask = hdev->numa_node_mask;
1734
1735 ret = hclge_knic_setup(vport, num_tqps,
1736 hdev->num_tx_desc, hdev->num_rx_desc);
1737 if (ret)
1738 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1739
1740 return ret;
1741 }
1742
1743 static int hclge_alloc_vport(struct hclge_dev *hdev)
1744 {
1745 struct pci_dev *pdev = hdev->pdev;
1746 struct hclge_vport *vport;
1747 u32 tqp_main_vport;
1748 u32 tqp_per_vport;
1749 int num_vport, i;
1750 int ret;
1751
1752 /* We need to alloc a vport for main NIC of PF */
1753 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1754
1755 if (hdev->num_tqps < num_vport) {
1756 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1757 hdev->num_tqps, num_vport);
1758 return -EINVAL;
1759 }
1760
1761 /* Alloc the same number of TQPs for every vport */
1762 tqp_per_vport = hdev->num_tqps / num_vport;
1763 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1764
1765 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1766 GFP_KERNEL);
1767 if (!vport)
1768 return -ENOMEM;
1769
1770 hdev->vport = vport;
1771 hdev->num_alloc_vport = num_vport;
1772
1773 if (IS_ENABLED(CONFIG_PCI_IOV))
1774 hdev->num_alloc_vfs = hdev->num_req_vfs;
1775
1776 for (i = 0; i < num_vport; i++) {
1777 vport->back = hdev;
1778 vport->vport_id = i;
1779 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1780 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1781 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1782 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1783 INIT_LIST_HEAD(&vport->vlan_list);
1784 INIT_LIST_HEAD(&vport->uc_mac_list);
1785 INIT_LIST_HEAD(&vport->mc_mac_list);
1786 spin_lock_init(&vport->mac_list_lock);
1787
1788 if (i == 0)
1789 ret = hclge_vport_setup(vport, tqp_main_vport);
1790 else
1791 ret = hclge_vport_setup(vport, tqp_per_vport);
1792 if (ret) {
1793 dev_err(&pdev->dev,
1794 "vport setup failed for vport %d, %d\n",
1795 i, ret);
1796 return ret;
1797 }
1798
1799 vport++;
1800 }
1801
1802 return 0;
1803 }
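
/* Worked example for the TQP split above (illustrative values only): with
 * num_tqps = 64 and num_vport = 5 (the PF plus four requested VFs,
 * assuming no VMDq vports),
 *   tqp_per_vport  = 64 / 5      = 12
 *   tqp_main_vport = 12 + 64 % 5 = 16
 * i.e. every vport gets 12 TQPs and vport 0 (the PF's main NIC)
 * additionally absorbs the four left-over queues.
 */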
1804
1805 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1806 struct hclge_pkt_buf_alloc *buf_alloc)
1807 {
1808 /* TX buffer size is configured in units of 128 bytes */
1809 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1810 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1811 struct hclge_tx_buff_alloc_cmd *req;
1812 struct hclge_desc desc;
1813 int ret;
1814 u8 i;
1815
1816 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1817
1818 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1819 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1820 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1821
1822 req->tx_pkt_buff[i] =
1823 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1824 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1825 }
1826
1827 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1828 if (ret)
1829 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1830 ret);
1831
1832 return ret;
1833 }
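
/* The per-TC TX buffer size is passed to firmware in 128-byte units
 * (HCLGE_BUF_SIZE_UNIT_SHIFT == 7) with bit 15 used as the update-enable
 * mask. As an illustrative example, a tx_buf_size of 0x8000 bytes is
 * encoded as (0x8000 >> 7) | BIT(15) = 0x100 | 0x8000 = 0x8100.
 */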
1834
1835 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1836 struct hclge_pkt_buf_alloc *buf_alloc)
1837 {
1838 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1839
1840 if (ret)
1841 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1842
1843 return ret;
1844 }
1845
1846 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1847 {
1848 unsigned int i;
1849 u32 cnt = 0;
1850
1851 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1852 if (hdev->hw_tc_map & BIT(i))
1853 cnt++;
1854 return cnt;
1855 }
1856
1857 /* Get the number of pfc enabled TCs, which have private buffer */
1858 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1859 struct hclge_pkt_buf_alloc *buf_alloc)
1860 {
1861 struct hclge_priv_buf *priv;
1862 unsigned int i;
1863 int cnt = 0;
1864
1865 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1866 priv = &buf_alloc->priv_buf[i];
1867 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1868 priv->enable)
1869 cnt++;
1870 }
1871
1872 return cnt;
1873 }
1874
1875 /* Get the number of pfc disabled TCs, which have private buffer */
1876 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1877 struct hclge_pkt_buf_alloc *buf_alloc)
1878 {
1879 struct hclge_priv_buf *priv;
1880 unsigned int i;
1881 int cnt = 0;
1882
1883 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1884 priv = &buf_alloc->priv_buf[i];
1885 if (hdev->hw_tc_map & BIT(i) &&
1886 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1887 priv->enable)
1888 cnt++;
1889 }
1890
1891 return cnt;
1892 }
1893
1894 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1895 {
1896 struct hclge_priv_buf *priv;
1897 u32 rx_priv = 0;
1898 int i;
1899
1900 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1901 priv = &buf_alloc->priv_buf[i];
1902 if (priv->enable)
1903 rx_priv += priv->buf_size;
1904 }
1905 return rx_priv;
1906 }
1907
1908 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1909 {
1910 u32 i, total_tx_size = 0;
1911
1912 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1913 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1914
1915 return total_tx_size;
1916 }
1917
1918 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1919 struct hclge_pkt_buf_alloc *buf_alloc,
1920 u32 rx_all)
1921 {
1922 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1923 u32 tc_num = hclge_get_tc_num(hdev);
1924 u32 shared_buf, aligned_mps;
1925 u32 rx_priv;
1926 int i;
1927
1928 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1929
1930 if (hnae3_dev_dcb_supported(hdev))
1931 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1932 hdev->dv_buf_size;
1933 else
1934 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1935 + hdev->dv_buf_size;
1936
1937 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1938 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1939 HCLGE_BUF_SIZE_UNIT);
1940
1941 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1942 if (rx_all < rx_priv + shared_std)
1943 return false;
1944
1945 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1946 buf_alloc->s_buf.buf_size = shared_buf;
1947 if (hnae3_dev_dcb_supported(hdev)) {
1948 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1949 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1950 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1951 HCLGE_BUF_SIZE_UNIT);
1952 } else {
1953 buf_alloc->s_buf.self.high = aligned_mps +
1954 HCLGE_NON_DCB_ADDITIONAL_BUF;
1955 buf_alloc->s_buf.self.low = aligned_mps;
1956 }
1957
1958 if (hnae3_dev_dcb_supported(hdev)) {
1959 hi_thrd = shared_buf - hdev->dv_buf_size;
1960
1961 if (tc_num <= NEED_RESERVE_TC_NUM)
1962 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1963 / BUF_MAX_PERCENT;
1964
1965 if (tc_num)
1966 hi_thrd = hi_thrd / tc_num;
1967
1968 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1969 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1970 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1971 } else {
1972 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1973 lo_thrd = aligned_mps;
1974 }
1975
1976 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1977 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1978 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1979 }
1980
1981 return true;
1982 }
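
/* Worked example for the DCB branch above (illustrative values only,
 * assuming HCLGE_BUF_MUL_BY is 2 and dv_buf_size is 0x2000): with
 * mps = 1500 and four TCs enabled,
 *   aligned_mps    = roundup(1500, 256)              = 1536
 *   shared_buf_min = 2 * 1536 + 0x2000               = 11264
 *   shared_buf_tc  = 4 * 1536 + 1536                 = 7680
 *   shared_std     = roundup(max(11264, 7680), 256)  = 11264
 * so the layout is only accepted if rx_all covers rx_priv + 11264 bytes.
 */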
1983
1984 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1985 struct hclge_pkt_buf_alloc *buf_alloc)
1986 {
1987 u32 i, total_size;
1988
1989 total_size = hdev->pkt_buf_size;
1990
1991 /* alloc tx buffer for all enabled tc */
1992 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1993 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1994
1995 if (hdev->hw_tc_map & BIT(i)) {
1996 if (total_size < hdev->tx_buf_size)
1997 return -ENOMEM;
1998
1999 priv->tx_buf_size = hdev->tx_buf_size;
2000 } else {
2001 priv->tx_buf_size = 0;
2002 }
2003
2004 total_size -= priv->tx_buf_size;
2005 }
2006
2007 return 0;
2008 }
2009
2010 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2011 struct hclge_pkt_buf_alloc *buf_alloc)
2012 {
2013 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2014 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2015 unsigned int i;
2016
2017 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2018 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2019
2020 priv->enable = 0;
2021 priv->wl.low = 0;
2022 priv->wl.high = 0;
2023 priv->buf_size = 0;
2024
2025 if (!(hdev->hw_tc_map & BIT(i)))
2026 continue;
2027
2028 priv->enable = 1;
2029
2030 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2031 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2032 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2033 HCLGE_BUF_SIZE_UNIT);
2034 } else {
2035 priv->wl.low = 0;
2036 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2037 aligned_mps;
2038 }
2039
2040 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2041 }
2042
2043 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2044 }
2045
2046 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2047 struct hclge_pkt_buf_alloc *buf_alloc)
2048 {
2049 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2050 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2051 int i;
2052
2053 /* clear private buffers starting from the last TC */
2054 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2055 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2056 unsigned int mask = BIT((unsigned int)i);
2057
2058 if (hdev->hw_tc_map & mask &&
2059 !(hdev->tm_info.hw_pfc_map & mask)) {
2060 /* Clear the no pfc TC private buffer */
2061 priv->wl.low = 0;
2062 priv->wl.high = 0;
2063 priv->buf_size = 0;
2064 priv->enable = 0;
2065 no_pfc_priv_num--;
2066 }
2067
2068 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2069 no_pfc_priv_num == 0)
2070 break;
2071 }
2072
2073 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2074 }
2075
2076 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2077 struct hclge_pkt_buf_alloc *buf_alloc)
2078 {
2079 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2080 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2081 int i;
2082
2083 /* clear private buffers starting from the last TC */
2084 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2085 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2086 unsigned int mask = BIT((unsigned int)i);
2087
2088 if (hdev->hw_tc_map & mask &&
2089 hdev->tm_info.hw_pfc_map & mask) {
2090 /* Reduce the number of pfc TC with private buffer */
2091 priv->wl.low = 0;
2092 priv->enable = 0;
2093 priv->wl.high = 0;
2094 priv->buf_size = 0;
2095 pfc_priv_num--;
2096 }
2097
2098 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2099 pfc_priv_num == 0)
2100 break;
2101 }
2102
2103 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2104 }
2105
2106 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2107 struct hclge_pkt_buf_alloc *buf_alloc)
2108 {
2109 #define COMPENSATE_BUFFER 0x3C00
2110 #define COMPENSATE_HALF_MPS_NUM 5
2111 #define PRIV_WL_GAP 0x1800
2112
2113 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2114 u32 tc_num = hclge_get_tc_num(hdev);
2115 u32 half_mps = hdev->mps >> 1;
2116 u32 min_rx_priv;
2117 unsigned int i;
2118
2119 if (tc_num)
2120 rx_priv = rx_priv / tc_num;
2121
2122 if (tc_num <= NEED_RESERVE_TC_NUM)
2123 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2124
2125 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2126 COMPENSATE_HALF_MPS_NUM * half_mps;
2127 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2128 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2129
2130 if (rx_priv < min_rx_priv)
2131 return false;
2132
2133 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2134 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2135
2136 priv->enable = 0;
2137 priv->wl.low = 0;
2138 priv->wl.high = 0;
2139 priv->buf_size = 0;
2140
2141 if (!(hdev->hw_tc_map & BIT(i)))
2142 continue;
2143
2144 priv->enable = 1;
2145 priv->buf_size = rx_priv;
2146 priv->wl.high = rx_priv - hdev->dv_buf_size;
2147 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2148 }
2149
2150 buf_alloc->s_buf.buf_size = 0;
2151
2152 return true;
2153 }
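
/* Worked example for min_rx_priv above (illustrative values only,
 * assuming dv_buf_size is 0x2000 and mps is 1500, so half_mps = 750):
 *   min_rx_priv = 0x2000 + COMPENSATE_BUFFER + 5 * 750
 *               = 8192 + 15360 + 3750 = 27302
 *   round_up(27302, 256) = 27392
 * The private-only layout is used only when the rounded-down per-TC share
 * still reaches this minimum.
 */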
2154
2155 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2156 * @hdev: pointer to struct hclge_dev
2157 * @buf_alloc: pointer to buffer calculation data
2158 * @return: 0: calculation successful, negative: fail
2159 */
2160 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2161 struct hclge_pkt_buf_alloc *buf_alloc)
2162 {
2163 /* When DCB is not supported, rx private buffer is not allocated. */
2164 if (!hnae3_dev_dcb_supported(hdev)) {
2165 u32 rx_all = hdev->pkt_buf_size;
2166
2167 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2168 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2169 return -ENOMEM;
2170
2171 return 0;
2172 }
2173
2174 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2175 return 0;
2176
2177 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2178 return 0;
2179
2180 /* try to decrease the buffer size */
2181 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2182 return 0;
2183
2184 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2185 return 0;
2186
2187 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2188 return 0;
2189
2190 return -ENOMEM;
2191 }
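
/* The calculation above tries progressively less generous layouts:
 * private buffers only, then private plus shared buffers with the larger
 * and then the smaller watermarks, and finally dropping the private
 * buffers of non-PFC and then PFC TCs until everything fits; -ENOMEM
 * means even the most conservative layout exceeds pkt_buf_size.
 */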
2192
2193 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2194 struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196 struct hclge_rx_priv_buff_cmd *req;
2197 struct hclge_desc desc;
2198 int ret;
2199 int i;
2200
2201 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2202 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2203
2204 /* Alloc private buffer TCs */
2205 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2206 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2207
2208 req->buf_num[i] =
2209 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2210 req->buf_num[i] |=
2211 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2212 }
2213
2214 req->shared_buf =
2215 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2216 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2217
2218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2219 if (ret)
2220 dev_err(&hdev->pdev->dev,
2221 "rx private buffer alloc cmd failed %d\n", ret);
2222
2223 return ret;
2224 }
2225
2226 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2227 struct hclge_pkt_buf_alloc *buf_alloc)
2228 {
2229 struct hclge_rx_priv_wl_buf *req;
2230 struct hclge_priv_buf *priv;
2231 struct hclge_desc desc[2];
2232 int i, j;
2233 int ret;
2234
2235 for (i = 0; i < 2; i++) {
2236 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2237 false);
2238 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2239
2240 /* The first descriptor sets the NEXT bit to 1 */
2241 if (i == 0)
2242 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2243 else
2244 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2245
2246 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2247 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2248
2249 priv = &buf_alloc->priv_buf[idx];
2250 req->tc_wl[j].high =
2251 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2252 req->tc_wl[j].high |=
2253 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2254 req->tc_wl[j].low =
2255 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2256 req->tc_wl[j].low |=
2257 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2258 }
2259 }
2260
2261 /* Send 2 descriptors at one time */
2262 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2263 if (ret)
2264 dev_err(&hdev->pdev->dev,
2265 "rx private waterline config cmd failed %d\n",
2266 ret);
2267 return ret;
2268 }
2269
2270 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2271 struct hclge_pkt_buf_alloc *buf_alloc)
2272 {
2273 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2274 struct hclge_rx_com_thrd *req;
2275 struct hclge_desc desc[2];
2276 struct hclge_tc_thrd *tc;
2277 int i, j;
2278 int ret;
2279
2280 for (i = 0; i < 2; i++) {
2281 hclge_cmd_setup_basic_desc(&desc[i],
2282 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2283 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2284
2285 /* The first descriptor sets the NEXT bit to 1 */
2286 if (i == 0)
2287 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2288 else
2289 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2290
2291 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2292 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2293
2294 req->com_thrd[j].high =
2295 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2296 req->com_thrd[j].high |=
2297 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2298 req->com_thrd[j].low =
2299 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2300 req->com_thrd[j].low |=
2301 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2302 }
2303 }
2304
2305 /* Send 2 descriptors at one time */
2306 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2307 if (ret)
2308 dev_err(&hdev->pdev->dev,
2309 "common threshold config cmd failed %d\n", ret);
2310 return ret;
2311 }
2312
2313 static int hclge_common_wl_config(struct hclge_dev *hdev,
2314 struct hclge_pkt_buf_alloc *buf_alloc)
2315 {
2316 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2317 struct hclge_rx_com_wl *req;
2318 struct hclge_desc desc;
2319 int ret;
2320
2321 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2322
2323 req = (struct hclge_rx_com_wl *)desc.data;
2324 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2325 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2326
2327 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2328 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2329
2330 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2331 if (ret)
2332 dev_err(&hdev->pdev->dev,
2333 "common waterline config cmd failed %d\n", ret);
2334
2335 return ret;
2336 }
2337
2338 int hclge_buffer_alloc(struct hclge_dev *hdev)
2339 {
2340 struct hclge_pkt_buf_alloc *pkt_buf;
2341 int ret;
2342
2343 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2344 if (!pkt_buf)
2345 return -ENOMEM;
2346
2347 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2348 if (ret) {
2349 dev_err(&hdev->pdev->dev,
2350 "could not calc tx buffer size for all TCs %d\n", ret);
2351 goto out;
2352 }
2353
2354 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2355 if (ret) {
2356 dev_err(&hdev->pdev->dev,
2357 "could not alloc tx buffers %d\n", ret);
2358 goto out;
2359 }
2360
2361 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2362 if (ret) {
2363 dev_err(&hdev->pdev->dev,
2364 "could not calc rx priv buffer size for all TCs %d\n",
2365 ret);
2366 goto out;
2367 }
2368
2369 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2370 if (ret) {
2371 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2372 ret);
2373 goto out;
2374 }
2375
2376 if (hnae3_dev_dcb_supported(hdev)) {
2377 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2378 if (ret) {
2379 dev_err(&hdev->pdev->dev,
2380 "could not configure rx private waterline %d\n",
2381 ret);
2382 goto out;
2383 }
2384
2385 ret = hclge_common_thrd_config(hdev, pkt_buf);
2386 if (ret) {
2387 dev_err(&hdev->pdev->dev,
2388 "could not configure common threshold %d\n",
2389 ret);
2390 goto out;
2391 }
2392 }
2393
2394 ret = hclge_common_wl_config(hdev, pkt_buf);
2395 if (ret)
2396 dev_err(&hdev->pdev->dev,
2397 "could not configure common waterline %d\n", ret);
2398
2399 out:
2400 kfree(pkt_buf);
2401 return ret;
2402 }
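
/* hclge_buffer_alloc() recomputes the whole TX/RX packet-buffer split and
 * pushes it to hardware in one go: TX buffers, per-TC RX private buffers
 * and, on DCB-capable hardware, the private/shared waterlines and common
 * thresholds, all derived from a single temporary hclge_pkt_buf_alloc
 * scratch structure (see also its use from hclge_mac_init() below).
 */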
2403
2404 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2405 {
2406 struct hnae3_handle *roce = &vport->roce;
2407 struct hnae3_handle *nic = &vport->nic;
2408
2409 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2410
2411 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2412 vport->back->num_msi_left == 0)
2413 return -EINVAL;
2414
2415 roce->rinfo.base_vector = vport->back->roce_base_vector;
2416
2417 roce->rinfo.netdev = nic->kinfo.netdev;
2418 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2419
2420 roce->pdev = nic->pdev;
2421 roce->ae_algo = nic->ae_algo;
2422 roce->numa_node_mask = nic->numa_node_mask;
2423
2424 return 0;
2425 }
2426
2427 static int hclge_init_msi(struct hclge_dev *hdev)
2428 {
2429 struct pci_dev *pdev = hdev->pdev;
2430 int vectors;
2431 int i;
2432
2433 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2434 hdev->num_msi,
2435 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2436 if (vectors < 0) {
2437 dev_err(&pdev->dev,
2438 "failed(%d) to allocate MSI/MSI-X vectors\n",
2439 vectors);
2440 return vectors;
2441 }
2442 if (vectors < hdev->num_msi)
2443 dev_warn(&hdev->pdev->dev,
2444 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2445 hdev->num_msi, vectors);
2446
2447 hdev->num_msi = vectors;
2448 hdev->num_msi_left = vectors;
2449
2450 hdev->base_msi_vector = pdev->irq;
2451 hdev->roce_base_vector = hdev->base_msi_vector +
2452 hdev->roce_base_msix_offset;
2453
2454 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2455 sizeof(u16), GFP_KERNEL);
2456 if (!hdev->vector_status) {
2457 pci_free_irq_vectors(pdev);
2458 return -ENOMEM;
2459 }
2460
2461 for (i = 0; i < hdev->num_msi; i++)
2462 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2463
2464 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2465 sizeof(int), GFP_KERNEL);
2466 if (!hdev->vector_irq) {
2467 pci_free_irq_vectors(pdev);
2468 return -ENOMEM;
2469 }
2470
2471 return 0;
2472 }
2473
2474 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2475 {
2476 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2477 duplex = HCLGE_MAC_FULL;
2478
2479 return duplex;
2480 }
2481
2482 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2483 u8 duplex)
2484 {
2485 struct hclge_config_mac_speed_dup_cmd *req;
2486 struct hclge_desc desc;
2487 int ret;
2488
2489 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2490
2491 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2492
2493 if (duplex)
2494 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2495
2496 switch (speed) {
2497 case HCLGE_MAC_SPEED_10M:
2498 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2499 HCLGE_CFG_SPEED_S, 6);
2500 break;
2501 case HCLGE_MAC_SPEED_100M:
2502 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2503 HCLGE_CFG_SPEED_S, 7);
2504 break;
2505 case HCLGE_MAC_SPEED_1G:
2506 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2507 HCLGE_CFG_SPEED_S, 0);
2508 break;
2509 case HCLGE_MAC_SPEED_10G:
2510 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2511 HCLGE_CFG_SPEED_S, 1);
2512 break;
2513 case HCLGE_MAC_SPEED_25G:
2514 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2515 HCLGE_CFG_SPEED_S, 2);
2516 break;
2517 case HCLGE_MAC_SPEED_40G:
2518 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2519 HCLGE_CFG_SPEED_S, 3);
2520 break;
2521 case HCLGE_MAC_SPEED_50G:
2522 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2523 HCLGE_CFG_SPEED_S, 4);
2524 break;
2525 case HCLGE_MAC_SPEED_100G:
2526 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2527 HCLGE_CFG_SPEED_S, 5);
2528 break;
2529 case HCLGE_MAC_SPEED_200G:
2530 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2531 HCLGE_CFG_SPEED_S, 8);
2532 break;
2533 default:
2534 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2535 return -EINVAL;
2536 }
2537
2538 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2539 1);
2540
2541 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2542 if (ret) {
2543 dev_err(&hdev->pdev->dev,
2544 "mac speed/duplex config cmd failed %d.\n", ret);
2545 return ret;
2546 }
2547
2548 return 0;
2549 }
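
/* Speed-to-field encoding used by HCLGE_OPC_CONFIG_SPEED_DUP above, as
 * read directly from the switch cases:
 *   1G:0  10G:1  25G:2  40G:3  50G:4  100G:5  10M:6  100M:7  200G:8
 * Duplex is carried separately in HCLGE_CFG_DUPLEX_B.
 */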
2550
2551 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2552 {
2553 struct hclge_mac *mac = &hdev->hw.mac;
2554 int ret;
2555
2556 duplex = hclge_check_speed_dup(duplex, speed);
2557 if (!mac->support_autoneg && mac->speed == speed &&
2558 mac->duplex == duplex)
2559 return 0;
2560
2561 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2562 if (ret)
2563 return ret;
2564
2565 hdev->hw.mac.speed = speed;
2566 hdev->hw.mac.duplex = duplex;
2567
2568 return 0;
2569 }
2570
2571 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2572 u8 duplex)
2573 {
2574 struct hclge_vport *vport = hclge_get_vport(handle);
2575 struct hclge_dev *hdev = vport->back;
2576
2577 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2578 }
2579
2580 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2581 {
2582 struct hclge_config_auto_neg_cmd *req;
2583 struct hclge_desc desc;
2584 u32 flag = 0;
2585 int ret;
2586
2587 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2588
2589 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2590 if (enable)
2591 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2592 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2593
2594 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2595 if (ret)
2596 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2597 ret);
2598
2599 return ret;
2600 }
2601
2602 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2603 {
2604 struct hclge_vport *vport = hclge_get_vport(handle);
2605 struct hclge_dev *hdev = vport->back;
2606
2607 if (!hdev->hw.mac.support_autoneg) {
2608 if (enable) {
2609 dev_err(&hdev->pdev->dev,
2610 "autoneg is not supported by current port\n");
2611 return -EOPNOTSUPP;
2612 } else {
2613 return 0;
2614 }
2615 }
2616
2617 return hclge_set_autoneg_en(hdev, enable);
2618 }
2619
2620 static int hclge_get_autoneg(struct hnae3_handle *handle)
2621 {
2622 struct hclge_vport *vport = hclge_get_vport(handle);
2623 struct hclge_dev *hdev = vport->back;
2624 struct phy_device *phydev = hdev->hw.mac.phydev;
2625
2626 if (phydev)
2627 return phydev->autoneg;
2628
2629 return hdev->hw.mac.autoneg;
2630 }
2631
2632 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2633 {
2634 struct hclge_vport *vport = hclge_get_vport(handle);
2635 struct hclge_dev *hdev = vport->back;
2636 int ret;
2637
2638 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2639
2640 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2641 if (ret)
2642 return ret;
2643 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2644 }
2645
2646 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2647 {
2648 struct hclge_vport *vport = hclge_get_vport(handle);
2649 struct hclge_dev *hdev = vport->back;
2650
2651 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2652 return hclge_set_autoneg_en(hdev, !halt);
2653
2654 return 0;
2655 }
2656
2657 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2658 {
2659 struct hclge_config_fec_cmd *req;
2660 struct hclge_desc desc;
2661 int ret;
2662
2663 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2664
2665 req = (struct hclge_config_fec_cmd *)desc.data;
2666 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2667 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2668 if (fec_mode & BIT(HNAE3_FEC_RS))
2669 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2670 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2671 if (fec_mode & BIT(HNAE3_FEC_BASER))
2672 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2673 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2674
2675 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2676 if (ret)
2677 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2678
2679 return ret;
2680 }
2681
2682 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2683 {
2684 struct hclge_vport *vport = hclge_get_vport(handle);
2685 struct hclge_dev *hdev = vport->back;
2686 struct hclge_mac *mac = &hdev->hw.mac;
2687 int ret;
2688
2689 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2690 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2691 return -EINVAL;
2692 }
2693
2694 ret = hclge_set_fec_hw(hdev, fec_mode);
2695 if (ret)
2696 return ret;
2697
2698 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2699 return 0;
2700 }
2701
2702 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2703 u8 *fec_mode)
2704 {
2705 struct hclge_vport *vport = hclge_get_vport(handle);
2706 struct hclge_dev *hdev = vport->back;
2707 struct hclge_mac *mac = &hdev->hw.mac;
2708
2709 if (fec_ability)
2710 *fec_ability = mac->fec_ability;
2711 if (fec_mode)
2712 *fec_mode = mac->fec_mode;
2713 }
2714
2715 static int hclge_mac_init(struct hclge_dev *hdev)
2716 {
2717 struct hclge_mac *mac = &hdev->hw.mac;
2718 int ret;
2719
2720 hdev->support_sfp_query = true;
2721 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2722 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2723 hdev->hw.mac.duplex);
2724 if (ret)
2725 return ret;
2726
2727 if (hdev->hw.mac.support_autoneg) {
2728 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2729 if (ret)
2730 return ret;
2731 }
2732
2733 mac->link = 0;
2734
2735 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2736 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2737 if (ret)
2738 return ret;
2739 }
2740
2741 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2742 if (ret) {
2743 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2744 return ret;
2745 }
2746
2747 ret = hclge_set_default_loopback(hdev);
2748 if (ret)
2749 return ret;
2750
2751 ret = hclge_buffer_alloc(hdev);
2752 if (ret)
2753 dev_err(&hdev->pdev->dev,
2754 "allocate buffer fail, ret=%d\n", ret);
2755
2756 return ret;
2757 }
2758
2759 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2760 {
2761 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2762 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2763 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2764 hclge_wq, &hdev->service_task, 0);
2765 }
2766
2767 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2768 {
2769 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2770 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2771 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2772 hclge_wq, &hdev->service_task, 0);
2773 }
2774
2775 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2776 {
2777 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2778 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2779 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2780 hclge_wq, &hdev->service_task,
2781 delay_time);
2782 }
2783
2784 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2785 {
2786 struct hclge_link_status_cmd *req;
2787 struct hclge_desc desc;
2788 int ret;
2789
2790 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2791 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2792 if (ret) {
2793 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2794 ret);
2795 return ret;
2796 }
2797
2798 req = (struct hclge_link_status_cmd *)desc.data;
2799 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2800 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2801
2802 return 0;
2803 }
2804
2805 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2806 {
2807 struct phy_device *phydev = hdev->hw.mac.phydev;
2808
2809 *link_status = HCLGE_LINK_STATUS_DOWN;
2810
2811 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2812 return 0;
2813
2814 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2815 return 0;
2816
2817 return hclge_get_mac_link_status(hdev, link_status);
2818 }
2819
2820 static void hclge_update_link_status(struct hclge_dev *hdev)
2821 {
2822 struct hnae3_client *rclient = hdev->roce_client;
2823 struct hnae3_client *client = hdev->nic_client;
2824 struct hnae3_handle *rhandle;
2825 struct hnae3_handle *handle;
2826 int state;
2827 int ret;
2828 int i;
2829
2830 if (!client)
2831 return;
2832
2833 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2834 return;
2835
2836 ret = hclge_get_mac_phy_link(hdev, &state);
2837 if (ret) {
2838 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2839 return;
2840 }
2841
2842 if (state != hdev->hw.mac.link) {
2843 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2844 handle = &hdev->vport[i].nic;
2845 client->ops->link_status_change(handle, state);
2846 hclge_config_mac_tnl_int(hdev, state);
2847 rhandle = &hdev->vport[i].roce;
2848 if (rclient && rclient->ops->link_status_change)
2849 rclient->ops->link_status_change(rhandle,
2850 state);
2851 }
2852 hdev->hw.mac.link = state;
2853 }
2854
2855 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2856 }
2857
2858 static void hclge_update_port_capability(struct hclge_mac *mac)
2859 {
2860 /* update fec ability by speed */
2861 hclge_convert_setting_fec(mac);
2862
2863 /* firmware can not identify the backplane type, so the media type
2864 * read from the configuration helps to deal with it
2865 */
2866 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2867 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2868 mac->module_type = HNAE3_MODULE_TYPE_KR;
2869 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2870 mac->module_type = HNAE3_MODULE_TYPE_TP;
2871
2872 if (mac->support_autoneg) {
2873 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2874 linkmode_copy(mac->advertising, mac->supported);
2875 } else {
2876 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2877 mac->supported);
2878 linkmode_zero(mac->advertising);
2879 }
2880 }
2881
2882 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2883 {
2884 struct hclge_sfp_info_cmd *resp;
2885 struct hclge_desc desc;
2886 int ret;
2887
2888 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2889 resp = (struct hclge_sfp_info_cmd *)desc.data;
2890 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2891 if (ret == -EOPNOTSUPP) {
2892 dev_warn(&hdev->pdev->dev,
2893 "IMP do not support get SFP speed %d\n", ret);
2894 return ret;
2895 } else if (ret) {
2896 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2897 return ret;
2898 }
2899
2900 *speed = le32_to_cpu(resp->speed);
2901
2902 return 0;
2903 }
2904
2905 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2906 {
2907 struct hclge_sfp_info_cmd *resp;
2908 struct hclge_desc desc;
2909 int ret;
2910
2911 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2912 resp = (struct hclge_sfp_info_cmd *)desc.data;
2913
2914 resp->query_type = QUERY_ACTIVE_SPEED;
2915
2916 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2917 if (ret == -EOPNOTSUPP) {
2918 dev_warn(&hdev->pdev->dev,
2919 "IMP does not support get SFP info %d\n", ret);
2920 return ret;
2921 } else if (ret) {
2922 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2923 return ret;
2924 }
2925
2926 /* In some cases, the mac speed got from IMP may be 0; it shouldn't be
2927 * set to mac->speed.
2928 */
2929 if (!le32_to_cpu(resp->speed))
2930 return 0;
2931
2932 mac->speed = le32_to_cpu(resp->speed);
2933 /* if resp->speed_ability is 0, it means the firmware is an old
2934 * version, so do not update these params
2935 */
2936 if (resp->speed_ability) {
2937 mac->module_type = le32_to_cpu(resp->module_type);
2938 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2939 mac->autoneg = resp->autoneg;
2940 mac->support_autoneg = resp->autoneg_ability;
2941 mac->speed_type = QUERY_ACTIVE_SPEED;
2942 if (!resp->active_fec)
2943 mac->fec_mode = 0;
2944 else
2945 mac->fec_mode = BIT(resp->active_fec);
2946 } else {
2947 mac->speed_type = QUERY_SFP_SPEED;
2948 }
2949
2950 return 0;
2951 }
2952
2953 static int hclge_update_port_info(struct hclge_dev *hdev)
2954 {
2955 struct hclge_mac *mac = &hdev->hw.mac;
2956 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2957 int ret;
2958
2959 /* get the port info from SFP cmd if not copper port */
2960 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2961 return 0;
2962
2963 /* if IMP does not support get SFP/qSFP info, return directly */
2964 if (!hdev->support_sfp_query)
2965 return 0;
2966
2967 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2968 ret = hclge_get_sfp_info(hdev, mac);
2969 else
2970 ret = hclge_get_sfp_speed(hdev, &speed);
2971
2972 if (ret == -EOPNOTSUPP) {
2973 hdev->support_sfp_query = false;
2974 return ret;
2975 } else if (ret) {
2976 return ret;
2977 }
2978
2979 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2980 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2981 hclge_update_port_capability(mac);
2982 return 0;
2983 }
2984 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2985 HCLGE_MAC_FULL);
2986 } else {
2987 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2988 return 0; /* do nothing if no SFP */
2989
2990 /* must config full duplex for SFP */
2991 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2992 }
2993 }
2994
2995 static int hclge_get_status(struct hnae3_handle *handle)
2996 {
2997 struct hclge_vport *vport = hclge_get_vport(handle);
2998 struct hclge_dev *hdev = vport->back;
2999
3000 hclge_update_link_status(hdev);
3001
3002 return hdev->hw.mac.link;
3003 }
3004
3005 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3006 {
3007 if (!pci_num_vf(hdev->pdev)) {
3008 dev_err(&hdev->pdev->dev,
3009 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3010 return NULL;
3011 }
3012
3013 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3014 dev_err(&hdev->pdev->dev,
3015 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3016 vf, pci_num_vf(hdev->pdev));
3017 return NULL;
3018 }
3019
3020 /* VFs start from 1 in vport */
3021 vf += HCLGE_VF_VPORT_START_NUM;
3022 return &hdev->vport[vf];
3023 }
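
/* Note on indexing: the vf argument is the zero-based VF index as seen by
 * userspace (for example "ip link set <dev> vf 0 ..."), while vport 0 is
 * the PF itself, hence the HCLGE_VF_VPORT_START_NUM offset above.
 */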
3024
3025 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3026 struct ifla_vf_info *ivf)
3027 {
3028 struct hclge_vport *vport = hclge_get_vport(handle);
3029 struct hclge_dev *hdev = vport->back;
3030
3031 vport = hclge_get_vf_vport(hdev, vf);
3032 if (!vport)
3033 return -EINVAL;
3034
3035 ivf->vf = vf;
3036 ivf->linkstate = vport->vf_info.link_state;
3037 ivf->spoofchk = vport->vf_info.spoofchk;
3038 ivf->trusted = vport->vf_info.trusted;
3039 ivf->min_tx_rate = 0;
3040 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3041 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3042 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3043 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3044 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3045
3046 return 0;
3047 }
3048
3049 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3050 int link_state)
3051 {
3052 struct hclge_vport *vport = hclge_get_vport(handle);
3053 struct hclge_dev *hdev = vport->back;
3054
3055 vport = hclge_get_vf_vport(hdev, vf);
3056 if (!vport)
3057 return -EINVAL;
3058
3059 vport->vf_info.link_state = link_state;
3060
3061 return 0;
3062 }
3063
3064 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3065 {
3066 u32 cmdq_src_reg, msix_src_reg;
3067
3068 /* fetch the events from their corresponding regs */
3069 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3070 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3071
3072 /* Assumption: if reset and mailbox events happen to be reported
3073 * together, we will only process the reset event in this pass and will
3074 * defer the processing of the mailbox events. Since we would not have
3075 * cleared the RX CMDQ event this time, we would receive another
3076 * interrupt from H/W just for the mailbox.
3077 *
3078 * check for vector0 reset event sources
3079 */
3080 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3081 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3082 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3083 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3084 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3085 hdev->rst_stats.imp_rst_cnt++;
3086 return HCLGE_VECTOR0_EVENT_RST;
3087 }
3088
3089 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3090 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3091 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3092 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3093 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3094 hdev->rst_stats.global_rst_cnt++;
3095 return HCLGE_VECTOR0_EVENT_RST;
3096 }
3097
3098 /* check for vector0 msix event source */
3099 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3100 *clearval = msix_src_reg;
3101 return HCLGE_VECTOR0_EVENT_ERR;
3102 }
3103
3104 /* check for vector0 mailbox(=CMDQ RX) event source */
3105 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3106 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3107 *clearval = cmdq_src_reg;
3108 return HCLGE_VECTOR0_EVENT_MBX;
3109 }
3110
3111 /* print other vector0 event source */
3112 dev_info(&hdev->pdev->dev,
3113 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3114 cmdq_src_reg, msix_src_reg);
3115 *clearval = msix_src_reg;
3116
3117 return HCLGE_VECTOR0_EVENT_OTHER;
3118 }
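
/* Priority of the checks above: IMP reset first, then global reset, then
 * MSI-X hardware errors, then mailbox (CMDQ RX); anything left is
 * reported as HCLGE_VECTOR0_EVENT_OTHER. The reset sources also set
 * HCLGE_STATE_CMD_DISABLE so no new commands are issued while the reset
 * is pending.
 */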
3119
3120 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3121 u32 regclr)
3122 {
3123 switch (event_type) {
3124 case HCLGE_VECTOR0_EVENT_RST:
3125 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3126 break;
3127 case HCLGE_VECTOR0_EVENT_MBX:
3128 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3129 break;
3130 default:
3131 break;
3132 }
3133 }
3134
3135 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3136 {
3137 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3138 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3139 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3140 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3141 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3142 }
3143
3144 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3145 {
3146 writel(enable ? 1 : 0, vector->addr);
3147 }
3148
3149 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3150 {
3151 struct hclge_dev *hdev = data;
3152 u32 clearval = 0;
3153 u32 event_cause;
3154
3155 hclge_enable_vector(&hdev->misc_vector, false);
3156 event_cause = hclge_check_event_cause(hdev, &clearval);
3157
3158 /* vector 0 interrupt is shared with reset and mailbox source events. */
3159 switch (event_cause) {
3160 case HCLGE_VECTOR0_EVENT_ERR:
3161 /* we do not know what type of reset is required now. This could
3162 * only be decided after we fetch the type of errors which
3163 * caused this event. Therefore, we will do the following for now:
3164 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3165 * have deferred the type of reset to be used.
3166 * 2. Schedule the reset service task.
3167 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3168 * will fetch the correct type of reset. This would be done
3169 * by first decoding the types of errors.
3170 */
3171 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3172 fallthrough;
3173 case HCLGE_VECTOR0_EVENT_RST:
3174 hclge_reset_task_schedule(hdev);
3175 break;
3176 case HCLGE_VECTOR0_EVENT_MBX:
3177 /* If we are here then either:
3178 * 1. we are not handling any mbx task and none has been
3179 * scheduled yet,
3180 * OR
3181 * 2. we could be handling a mbx task but nothing more has been
3182 * scheduled.
3183 * In both cases, we should schedule the mbx task as there are more
3184 * mbx messages reported by this interrupt.
3185 */
3186 hclge_mbx_task_schedule(hdev);
3187 break;
3188 default:
3189 dev_warn(&hdev->pdev->dev,
3190 "received unknown or unhandled event of vector0\n");
3191 break;
3192 }
3193
3194 hclge_clear_event_cause(hdev, event_cause, clearval);
3195
3196 /* Enable the interrupt if it is not caused by reset. And when
3197 * clearval equals 0, it means the interrupt status may have been
3198 * cleared by hardware before the driver read the status register.
3199 * For this case, the vector0 interrupt should also be enabled.
3200 */
3201 if (!clearval ||
3202 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3203 hclge_enable_vector(&hdev->misc_vector, true);
3204 }
3205
3206 return IRQ_HANDLED;
3207 }
3208
3209 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3210 {
3211 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3212 dev_warn(&hdev->pdev->dev,
3213 "vector(vector_id %d) has been freed.\n", vector_id);
3214 return;
3215 }
3216
3217 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3218 hdev->num_msi_left += 1;
3219 hdev->num_msi_used -= 1;
3220 }
3221
3222 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3223 {
3224 struct hclge_misc_vector *vector = &hdev->misc_vector;
3225
3226 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3227
3228 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3229 hdev->vector_status[0] = 0;
3230
3231 hdev->num_msi_left -= 1;
3232 hdev->num_msi_used += 1;
3233 }
3234
3235 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3236 const cpumask_t *mask)
3237 {
3238 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3239 affinity_notify);
3240
3241 cpumask_copy(&hdev->affinity_mask, mask);
3242 }
3243
3244 static void hclge_irq_affinity_release(struct kref *ref)
3245 {
3246 }
3247
3248 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3249 {
3250 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3251 &hdev->affinity_mask);
3252
3253 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3254 hdev->affinity_notify.release = hclge_irq_affinity_release;
3255 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3256 &hdev->affinity_notify);
3257 }
3258
3259 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3260 {
3261 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3262 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3263 }
3264
3265 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3266 {
3267 int ret;
3268
3269 hclge_get_misc_vector(hdev);
3270
3271 /* this would be explicitly freed in the end */
3272 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3273 HCLGE_NAME, pci_name(hdev->pdev));
3274 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3275 0, hdev->misc_vector.name, hdev);
3276 if (ret) {
3277 hclge_free_vector(hdev, 0);
3278 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3279 hdev->misc_vector.vector_irq);
3280 }
3281
3282 return ret;
3283 }
3284
3285 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3286 {
3287 free_irq(hdev->misc_vector.vector_irq, hdev);
3288 hclge_free_vector(hdev, 0);
3289 }
3290
3291 int hclge_notify_client(struct hclge_dev *hdev,
3292 enum hnae3_reset_notify_type type)
3293 {
3294 struct hnae3_client *client = hdev->nic_client;
3295 u16 i;
3296
3297 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3298 return 0;
3299
3300 if (!client->ops->reset_notify)
3301 return -EOPNOTSUPP;
3302
3303 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3304 struct hnae3_handle *handle = &hdev->vport[i].nic;
3305 int ret;
3306
3307 ret = client->ops->reset_notify(handle, type);
3308 if (ret) {
3309 dev_err(&hdev->pdev->dev,
3310 "notify nic client failed %d(%d)\n", type, ret);
3311 return ret;
3312 }
3313 }
3314
3315 return 0;
3316 }
3317
3318 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3319 enum hnae3_reset_notify_type type)
3320 {
3321 struct hnae3_client *client = hdev->roce_client;
3322 int ret;
3323 u16 i;
3324
3325 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3326 return 0;
3327
3328 if (!client->ops->reset_notify)
3329 return -EOPNOTSUPP;
3330
3331 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3332 struct hnae3_handle *handle = &hdev->vport[i].roce;
3333
3334 ret = client->ops->reset_notify(handle, type);
3335 if (ret) {
3336 dev_err(&hdev->pdev->dev,
3337 "notify roce client failed %d(%d)",
3338 type, ret);
3339 return ret;
3340 }
3341 }
3342
3343 return ret;
3344 }
3345
3346 static int hclge_reset_wait(struct hclge_dev *hdev)
3347 {
3348 #define HCLGE_RESET_WATI_MS 100
3349 #define HCLGE_RESET_WAIT_CNT 350
3350
3351 u32 val, reg, reg_bit;
3352 u32 cnt = 0;
3353
3354 switch (hdev->reset_type) {
3355 case HNAE3_IMP_RESET:
3356 reg = HCLGE_GLOBAL_RESET_REG;
3357 reg_bit = HCLGE_IMP_RESET_BIT;
3358 break;
3359 case HNAE3_GLOBAL_RESET:
3360 reg = HCLGE_GLOBAL_RESET_REG;
3361 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3362 break;
3363 case HNAE3_FUNC_RESET:
3364 reg = HCLGE_FUN_RST_ING;
3365 reg_bit = HCLGE_FUN_RST_ING_B;
3366 break;
3367 default:
3368 dev_err(&hdev->pdev->dev,
3369 "Wait for unsupported reset type: %d\n",
3370 hdev->reset_type);
3371 return -EINVAL;
3372 }
3373
3374 val = hclge_read_dev(&hdev->hw, reg);
3375 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3376 msleep(HCLGE_RESET_WATI_MS);
3377 val = hclge_read_dev(&hdev->hw, reg);
3378 cnt++;
3379 }
3380
3381 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3382 dev_warn(&hdev->pdev->dev,
3383 "Wait for reset timeout: %d\n", hdev->reset_type);
3384 return -EBUSY;
3385 }
3386
3387 return 0;
3388 }
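
/* With the constants above, hclge_reset_wait() polls the selected reset
 * status register every HCLGE_RESET_WATI_MS (100 ms) for at most
 * HCLGE_RESET_WAIT_CNT (350) iterations, i.e. roughly 35 seconds, before
 * giving up with -EBUSY.
 */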
3389
3390 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3391 {
3392 struct hclge_vf_rst_cmd *req;
3393 struct hclge_desc desc;
3394
3395 req = (struct hclge_vf_rst_cmd *)desc.data;
3396 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3397 req->dest_vfid = func_id;
3398
3399 if (reset)
3400 req->vf_rst = 0x1;
3401
3402 return hclge_cmd_send(&hdev->hw, &desc, 1);
3403 }
3404
3405 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3406 {
3407 int i;
3408
3409 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3410 struct hclge_vport *vport = &hdev->vport[i];
3411 int ret;
3412
3413 /* Send cmd to set/clear VF's FUNC_RST_ING */
3414 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3415 if (ret) {
3416 dev_err(&hdev->pdev->dev,
3417 "set vf(%u) rst failed %d!\n",
3418 vport->vport_id, ret);
3419 return ret;
3420 }
3421
3422 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3423 continue;
3424
3425 /* Inform VF to process the reset.
3426 * hclge_inform_reset_assert_to_vf may fail if VF
3427 * driver is not loaded.
3428 */
3429 ret = hclge_inform_reset_assert_to_vf(vport);
3430 if (ret)
3431 dev_warn(&hdev->pdev->dev,
3432 "inform reset to vf(%u) failed %d!\n",
3433 vport->vport_id, ret);
3434 }
3435
3436 return 0;
3437 }
3438
3439 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3440 {
3441 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3442 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3443 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3444 return;
3445
3446 hclge_mbx_handler(hdev);
3447
3448 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3449 }
3450
3451 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3452 {
3453 struct hclge_pf_rst_sync_cmd *req;
3454 struct hclge_desc desc;
3455 int cnt = 0;
3456 int ret;
3457
3458 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3459 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3460
3461 do {
3462 /* the VF needs to bring its netdev down via mbx during a PF or FLR reset */
3463 hclge_mailbox_service_task(hdev);
3464
3465 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3466 /* for compatibility with old firmware, wait
3467 * 100 ms for the VF to stop IO
3468 */
3469 if (ret == -EOPNOTSUPP) {
3470 msleep(HCLGE_RESET_SYNC_TIME);
3471 return;
3472 } else if (ret) {
3473 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3474 ret);
3475 return;
3476 } else if (req->all_vf_ready) {
3477 return;
3478 }
3479 msleep(HCLGE_PF_RESET_SYNC_TIME);
3480 hclge_cmd_reuse_desc(&desc, true);
3481 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3482
3483 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3484 }
3485
3486 void hclge_report_hw_error(struct hclge_dev *hdev,
3487 enum hnae3_hw_error_type type)
3488 {
3489 struct hnae3_client *client = hdev->nic_client;
3490 u16 i;
3491
3492 if (!client || !client->ops->process_hw_error ||
3493 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3494 return;
3495
3496 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3497 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3498 }
3499
3500 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3501 {
3502 u32 reg_val;
3503
3504 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3505 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3506 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3507 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3508 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3509 }
3510
3511 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3512 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3513 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3514 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3515 }
3516 }
3517
3518 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3519 {
3520 struct hclge_desc desc;
3521 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3522 int ret;
3523
3524 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3525 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3526 req->fun_reset_vfid = func_id;
3527
3528 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3529 if (ret)
3530 dev_err(&hdev->pdev->dev,
3531 "send function reset cmd fail, status =%d\n", ret);
3532
3533 return ret;
3534 }
3535
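/* Trigger the reset selected in hdev->reset_type: write the global
 * reset bit for a global reset, or mark a PF reset as pending and
 * re-schedule the reset task. Skipped if a hardware reset is still in
 * progress.
 */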
3536 static void hclge_do_reset(struct hclge_dev *hdev)
3537 {
3538 struct hnae3_handle *handle = &hdev->vport[0].nic;
3539 struct pci_dev *pdev = hdev->pdev;
3540 u32 val;
3541
3542 if (hclge_get_hw_reset_stat(handle)) {
3543 dev_info(&pdev->dev, "hardware reset not finish\n");
3544 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3545 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3546 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3547 return;
3548 }
3549
3550 switch (hdev->reset_type) {
3551 case HNAE3_GLOBAL_RESET:
3552 dev_info(&pdev->dev, "global reset requested\n");
3553 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3554 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3555 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3556 break;
3557 case HNAE3_FUNC_RESET:
3558 dev_info(&pdev->dev, "PF reset requested\n");
3559 /* schedule again to check later */
3560 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3561 hclge_reset_task_schedule(hdev);
3562 break;
3563 default:
3564 dev_warn(&pdev->dev,
3565 "unsupported reset type: %d\n", hdev->reset_type);
3566 break;
3567 }
3568 }
3569
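/* Convert the pending reset bitmap into a single reset level: resolve
 * any UNKNOWN reset first, then return the highest priority level
 * (IMP > global > func > FLR) and clear the levels it supersedes.
 */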
3570 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3571 unsigned long *addr)
3572 {
3573 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3574 struct hclge_dev *hdev = ae_dev->priv;
3575
3576 /* first, resolve any unknown reset type to the known type(s) */
3577 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3578 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3579 HCLGE_MISC_VECTOR_INT_STS);
3580 /* we will intentionally ignore any errors from this function
3581 * as we will end up in *some* reset request in any case
3582 */
3583 if (hclge_handle_hw_msix_error(hdev, addr))
3584 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3585 msix_sts_reg);
3586
3587 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3588 /* We deferred the clearing of the error event which caused the
3589 * interrupt since it was not possible to do that in
3590 * interrupt context (and this is the reason we introduced the
3591 * new UNKNOWN reset type). Now that the errors have been
3592 * handled and cleared in hardware, we can safely re-enable
3593 * interrupts. This is an exception to the norm.
3594 */
3595 hclge_enable_vector(&hdev->misc_vector, true);
3596 }
3597
3598 /* return the highest priority reset level amongst all */
3599 if (test_bit(HNAE3_IMP_RESET, addr)) {
3600 rst_level = HNAE3_IMP_RESET;
3601 clear_bit(HNAE3_IMP_RESET, addr);
3602 clear_bit(HNAE3_GLOBAL_RESET, addr);
3603 clear_bit(HNAE3_FUNC_RESET, addr);
3604 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3605 rst_level = HNAE3_GLOBAL_RESET;
3606 clear_bit(HNAE3_GLOBAL_RESET, addr);
3607 clear_bit(HNAE3_FUNC_RESET, addr);
3608 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3609 rst_level = HNAE3_FUNC_RESET;
3610 clear_bit(HNAE3_FUNC_RESET, addr);
3611 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3612 rst_level = HNAE3_FLR_RESET;
3613 clear_bit(HNAE3_FLR_RESET, addr);
3614 }
3615
3616 if (hdev->reset_type != HNAE3_NONE_RESET &&
3617 rst_level < hdev->reset_type)
3618 return HNAE3_NONE_RESET;
3619
3620 return rst_level;
3621 }
3622
3623 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3624 {
3625 u32 clearval = 0;
3626
3627 switch (hdev->reset_type) {
3628 case HNAE3_IMP_RESET:
3629 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3630 break;
3631 case HNAE3_GLOBAL_RESET:
3632 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3633 break;
3634 default:
3635 break;
3636 }
3637
3638 if (!clearval)
3639 return;
3640
3641 /* For revision 0x20, the reset interrupt source
3642 * can only be cleared after the hardware reset is done
3643 */
3644 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3645 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3646 clearval);
3647
3648 hclge_enable_vector(&hdev->misc_vector, true);
3649 }
3650
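/* Set or clear the software reset-ready bit that tells the hardware
 * whether the driver side of the reset handshake is complete.
 */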
3651 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3652 {
3653 u32 reg_val;
3654
3655 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3656 if (enable)
3657 reg_val |= HCLGE_NIC_SW_RST_RDY;
3658 else
3659 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3660
3661 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3662 }
3663
3664 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3665 {
3666 int ret;
3667
3668 ret = hclge_set_all_vf_rst(hdev, true);
3669 if (ret)
3670 return ret;
3671
3672 hclge_func_reset_sync_vf(hdev);
3673
3674 return 0;
3675 }
3676
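/* Per reset-type preparation: for PF reset, notify the VFs and assert
 * the function reset in firmware; for FLR, only notify the VFs; for IMP
 * reset, handle pending IMP errors and set the IMP reset bit in
 * HCLGE_PF_OTHER_INT_REG. Finally signal the handshake bit so the
 * hardware may proceed.
 */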
3677 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3678 {
3679 u32 reg_val;
3680 int ret = 0;
3681
3682 switch (hdev->reset_type) {
3683 case HNAE3_FUNC_RESET:
3684 ret = hclge_func_reset_notify_vf(hdev);
3685 if (ret)
3686 return ret;
3687
3688 ret = hclge_func_reset_cmd(hdev, 0);
3689 if (ret) {
3690 dev_err(&hdev->pdev->dev,
3691 "asserting function reset fail %d!\n", ret);
3692 return ret;
3693 }
3694
3695 /* After performing PF reset, it is not necessary to do the
3696 * mailbox handling or send any command to firmware, because
3697 * any mailbox handling or command to firmware is only valid
3698 * after hclge_cmd_init is called.
3699 */
3700 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3701 hdev->rst_stats.pf_rst_cnt++;
3702 break;
3703 case HNAE3_FLR_RESET:
3704 ret = hclge_func_reset_notify_vf(hdev);
3705 if (ret)
3706 return ret;
3707 break;
3708 case HNAE3_IMP_RESET:
3709 hclge_handle_imp_error(hdev);
3710 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3711 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3712 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3713 break;
3714 default:
3715 break;
3716 }
3717
3718 /* inform hardware that preparatory work is done */
3719 msleep(HCLGE_RESET_SYNC_TIME);
3720 hclge_reset_handshake(hdev, true);
3721 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3722
3723 return ret;
3724 }
3725
3726 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3727 {
3728 #define MAX_RESET_FAIL_CNT 5
3729
3730 if (hdev->reset_pending) {
3731 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3732 hdev->reset_pending);
3733 return true;
3734 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3735 HCLGE_RESET_INT_M) {
3736 dev_info(&hdev->pdev->dev,
3737 "reset failed because new reset interrupt\n");
3738 hclge_clear_reset_cause(hdev);
3739 return false;
3740 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3741 hdev->rst_stats.reset_fail_cnt++;
3742 set_bit(hdev->reset_type, &hdev->reset_pending);
3743 dev_info(&hdev->pdev->dev,
3744 "re-schedule reset task(%u)\n",
3745 hdev->rst_stats.reset_fail_cnt);
3746 return true;
3747 }
3748
3749 hclge_clear_reset_cause(hdev);
3750
3751 /* recover the handshake status when reset fails */
3752 hclge_reset_handshake(hdev, true);
3753
3754 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3755
3756 hclge_dbg_dump_rst_info(hdev);
3757
3758 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3759
3760 return false;
3761 }
3762
3763 static int hclge_set_rst_done(struct hclge_dev *hdev)
3764 {
3765 struct hclge_pf_rst_done_cmd *req;
3766 struct hclge_desc desc;
3767 int ret;
3768
3769 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3770 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3771 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3772
3773 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3774 /* To be compatible with the old firmware, which does not support
3775 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3776 * return success
3777 */
3778 if (ret == -EOPNOTSUPP) {
3779 dev_warn(&hdev->pdev->dev,
3780 "current firmware does not support command(0x%x)!\n",
3781 HCLGE_OPC_PF_RST_DONE);
3782 return 0;
3783 } else if (ret) {
3784 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3785 ret);
3786 }
3787
3788 return ret;
3789 }
3790
3791 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3792 {
3793 int ret = 0;
3794
3795 switch (hdev->reset_type) {
3796 case HNAE3_FUNC_RESET:
3797 case HNAE3_FLR_RESET:
3798 ret = hclge_set_all_vf_rst(hdev, false);
3799 break;
3800 case HNAE3_GLOBAL_RESET:
3801 case HNAE3_IMP_RESET:
3802 ret = hclge_set_rst_done(hdev);
3803 break;
3804 default:
3805 break;
3806 }
3807
3808 /* clear up the handshake status after re-initialize done */
3809 hclge_reset_handshake(hdev, false);
3810
3811 return ret;
3812 }
3813
3814 static int hclge_reset_stack(struct hclge_dev *hdev)
3815 {
3816 int ret;
3817
3818 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3819 if (ret)
3820 return ret;
3821
3822 ret = hclge_reset_ae_dev(hdev->ae_dev);
3823 if (ret)
3824 return ret;
3825
3826 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3827 }
3828
3829 static int hclge_reset_prepare(struct hclge_dev *hdev)
3830 {
3831 int ret;
3832
3833 hdev->rst_stats.reset_cnt++;
3834 /* perform reset of the stack & ae device for a client */
3835 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3836 if (ret)
3837 return ret;
3838
3839 rtnl_lock();
3840 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3841 rtnl_unlock();
3842 if (ret)
3843 return ret;
3844
3845 return hclge_reset_prepare_wait(hdev);
3846 }
3847
3848 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3849 {
3850 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3851 enum hnae3_reset_type reset_level;
3852 int ret;
3853
3854 hdev->rst_stats.hw_reset_done_cnt++;
3855
3856 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3857 if (ret)
3858 return ret;
3859
3860 rtnl_lock();
3861 ret = hclge_reset_stack(hdev);
3862 rtnl_unlock();
3863 if (ret)
3864 return ret;
3865
3866 hclge_clear_reset_cause(hdev);
3867
3868 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3869 /* ignore the RoCE notify error if reset has already failed
3870 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3871 */
3872 if (ret &&
3873 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3874 return ret;
3875
3876 ret = hclge_reset_prepare_up(hdev);
3877 if (ret)
3878 return ret;
3879
3880 rtnl_lock();
3881 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3882 rtnl_unlock();
3883 if (ret)
3884 return ret;
3885
3886 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3887 if (ret)
3888 return ret;
3889
3890 hdev->last_reset_time = jiffies;
3891 hdev->rst_stats.reset_fail_cnt = 0;
3892 hdev->rst_stats.reset_done_cnt++;
3893 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3894
3895 /* if default_reset_request has a higher level reset request,
3896 * it should be handled as soon as possible, since some errors
3897 * need this kind of reset to be fixed.
3898 */
3899 reset_level = hclge_get_reset_level(ae_dev,
3900 &hdev->default_reset_request);
3901 if (reset_level != HNAE3_NONE_RESET)
3902 set_bit(reset_level, &hdev->reset_request);
3903
3904 return 0;
3905 }
3906
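/* Top-level reset flow: prepare the stack and hardware, wait for the
 * hardware reset to complete, then rebuild the device. If any stage
 * fails, hclge_reset_err_handle() decides whether to re-schedule the
 * reset task.
 */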
3907 static void hclge_reset(struct hclge_dev *hdev)
3908 {
3909 if (hclge_reset_prepare(hdev))
3910 goto err_reset;
3911
3912 if (hclge_reset_wait(hdev))
3913 goto err_reset;
3914
3915 if (hclge_reset_rebuild(hdev))
3916 goto err_reset;
3917
3918 return;
3919
3920 err_reset:
3921 if (hclge_reset_err_handle(hdev))
3922 hclge_reset_task_schedule(hdev);
3923 }
3924
3925 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3926 {
3927 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3928 struct hclge_dev *hdev = ae_dev->priv;
3929
3930 /* We might end up getting called broadly because of two cases below:
3931 * 1. A recoverable error was conveyed through APEI and the only way to
3932 * bring back normalcy is to reset.
3933 * 2. A new reset request from the stack due to timeout
3934 *
3935 * For the first case, the error event might not have an ae handle
3936 * available. Check if this is a new reset request and we are not here
3937 * just because the last reset attempt did not succeed and the watchdog
3938 * hit us again. We will know this if the last reset request did not
3939 * occur very recently (watchdog timer = 5 * HZ, so check after a
3940 * sufficiently large time, say 4 * 5 * HZ). In case of a new request
3941 * we reset the "reset level" to PF reset. If it is a repeat of the
3942 * most recent request, we want to make sure we throttle it, so we
3943 * will not allow it again before 3 * HZ has elapsed.
3944 */
3945 if (!handle)
3946 handle = &hdev->vport[0].nic;
3947
3948 if (time_before(jiffies, (hdev->last_reset_time +
3949 HCLGE_RESET_INTERVAL))) {
3950 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3951 return;
3952 } else if (hdev->default_reset_request) {
3953 hdev->reset_level =
3954 hclge_get_reset_level(ae_dev,
3955 &hdev->default_reset_request);
3956 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3957 hdev->reset_level = HNAE3_FUNC_RESET;
3958 }
3959
3960 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3961 hdev->reset_level);
3962
3963 /* request reset & schedule reset task */
3964 set_bit(hdev->reset_level, &hdev->reset_request);
3965 hclge_reset_task_schedule(hdev);
3966
3967 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3968 hdev->reset_level++;
3969 }
3970
3971 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3972 enum hnae3_reset_type rst_type)
3973 {
3974 struct hclge_dev *hdev = ae_dev->priv;
3975
3976 set_bit(rst_type, &hdev->default_reset_request);
3977 }
3978
3979 static void hclge_reset_timer(struct timer_list *t)
3980 {
3981 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3982
3983 /* if default_reset_request has no value, it means that this reset
3984 * request has already been handled, so just return here
3985 */
3986 if (!hdev->default_reset_request)
3987 return;
3988
3989 dev_info(&hdev->pdev->dev,
3990 "triggering reset in reset timer\n");
3991 hclge_reset_event(hdev->pdev, NULL);
3992 }
3993
3994 static void hclge_reset_subtask(struct hclge_dev *hdev)
3995 {
3996 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3997
3998 /* check if there is any ongoing reset in the hardware. This status can
3999 * be checked from reset_pending. If there is, we need to wait for the
4000 * hardware to complete the reset.
4001 * a. If we are able to figure out in reasonable time that the hardware
4002 * has fully reset, we can proceed with the driver and client
4003 * reset.
4004 * b. Otherwise, we can come back later to check this status, so
4005 * reschedule now.
4006 */
4007 hdev->last_reset_time = jiffies;
4008 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4009 if (hdev->reset_type != HNAE3_NONE_RESET)
4010 hclge_reset(hdev);
4011
4012 /* check if we got any *new* reset requests to be honored */
4013 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4014 if (hdev->reset_type != HNAE3_NONE_RESET)
4015 hclge_do_reset(hdev);
4016
4017 hdev->reset_type = HNAE3_NONE_RESET;
4018 }
4019
4020 static void hclge_reset_service_task(struct hclge_dev *hdev)
4021 {
4022 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4023 return;
4024
4025 down(&hdev->reset_sem);
4026 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4027
4028 hclge_reset_subtask(hdev);
4029
4030 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4031 up(&hdev->reset_sem);
4032 }
4033
4034 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4035 {
4036 int i;
4037
4038 /* start from vport 1 because the PF is always alive */
4039 for (i = 1; i < hdev->num_alloc_vport; i++) {
4040 struct hclge_vport *vport = &hdev->vport[i];
4041
4042 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4043 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4044
4045 /* If vf is not alive, set to default value */
4046 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4047 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4048 }
4049 }
4050
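/* Periodic service work: link, MAC table and promisc mode are synced on
 * every pass, while the remaining work (stats, vport alive state, VLAN
 * and ARFS housekeeping) runs at most once per second.
 */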
4051 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4052 {
4053 unsigned long delta = round_jiffies_relative(HZ);
4054
4055 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4056 return;
4057
4058 /* Always handle the link updating to make sure link state is
4059 * updated when it is triggered by mbx.
4060 */
4061 hclge_update_link_status(hdev);
4062 hclge_sync_mac_table(hdev);
4063 hclge_sync_promisc_mode(hdev);
4064
4065 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4066 delta = jiffies - hdev->last_serv_processed;
4067
4068 if (delta < round_jiffies_relative(HZ)) {
4069 delta = round_jiffies_relative(HZ) - delta;
4070 goto out;
4071 }
4072 }
4073
4074 hdev->serv_processed_cnt++;
4075 hclge_update_vport_alive(hdev);
4076
4077 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4078 hdev->last_serv_processed = jiffies;
4079 goto out;
4080 }
4081
4082 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4083 hclge_update_stats_for_all(hdev);
4084
4085 hclge_update_port_info(hdev);
4086 hclge_sync_vlan_filter(hdev);
4087
4088 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4089 hclge_rfs_filter_expire(hdev);
4090
4091 hdev->last_serv_processed = jiffies;
4092
4093 out:
4094 hclge_task_schedule(hdev, delta);
4095 }
4096
4097 static void hclge_service_task(struct work_struct *work)
4098 {
4099 struct hclge_dev *hdev =
4100 container_of(work, struct hclge_dev, service_task.work);
4101
4102 hclge_reset_service_task(hdev);
4103 hclge_mailbox_service_task(hdev);
4104 hclge_periodic_service_task(hdev);
4105
4106 /* Handle reset and mbx again in case periodical task delays the
4107 * handling by calling hclge_task_schedule() in
4108 * hclge_periodic_service_task().
4109 */
4110 hclge_reset_service_task(hdev);
4111 hclge_mailbox_service_task(hdev);
4112 }
4113
4114 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4115 {
4116 /* VF handle has no client */
4117 if (!handle->client)
4118 return container_of(handle, struct hclge_vport, nic);
4119 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4120 return container_of(handle, struct hclge_vport, roce);
4121 else
4122 return container_of(handle, struct hclge_vport, nic);
4123 }
4124
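/* Allocate up to vector_num unused MSI vectors for this vport, filling
 * in the irq number and per-vector register address for each one.
 * Returns the number of vectors actually allocated.
 */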
4125 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4126 struct hnae3_vector_info *vector_info)
4127 {
4128 struct hclge_vport *vport = hclge_get_vport(handle);
4129 struct hnae3_vector_info *vector = vector_info;
4130 struct hclge_dev *hdev = vport->back;
4131 int alloc = 0;
4132 int i, j;
4133
4134 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4135 vector_num = min(hdev->num_msi_left, vector_num);
4136
4137 for (j = 0; j < vector_num; j++) {
4138 for (i = 1; i < hdev->num_msi; i++) {
4139 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4140 vector->vector = pci_irq_vector(hdev->pdev, i);
4141 vector->io_addr = hdev->hw.io_base +
4142 HCLGE_VECTOR_REG_BASE +
4143 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4144 vport->vport_id *
4145 HCLGE_VECTOR_VF_OFFSET;
4146 hdev->vector_status[i] = vport->vport_id;
4147 hdev->vector_irq[i] = vector->vector;
4148
4149 vector++;
4150 alloc++;
4151
4152 break;
4153 }
4154 }
4155 }
4156 hdev->num_msi_left -= alloc;
4157 hdev->num_msi_used += alloc;
4158
4159 return alloc;
4160 }
4161
4162 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4163 {
4164 int i;
4165
4166 for (i = 0; i < hdev->num_msi; i++)
4167 if (vector == hdev->vector_irq[i])
4168 return i;
4169
4170 return -EINVAL;
4171 }
4172
4173 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4174 {
4175 struct hclge_vport *vport = hclge_get_vport(handle);
4176 struct hclge_dev *hdev = vport->back;
4177 int vector_id;
4178
4179 vector_id = hclge_get_vector_index(hdev, vector);
4180 if (vector_id < 0) {
4181 dev_err(&hdev->pdev->dev,
4182 "Get vector index fail. vector = %d\n", vector);
4183 return vector_id;
4184 }
4185
4186 hclge_free_vector(hdev, vector_id);
4187
4188 return 0;
4189 }
4190
4191 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4192 {
4193 return HCLGE_RSS_KEY_SIZE;
4194 }
4195
4196 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4197 {
4198 return HCLGE_RSS_IND_TBL_SIZE;
4199 }
4200
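/* The RSS hash key is longer than a single descriptor can hold, so it
 * is written in HCLGE_RSS_HASH_KEY_NUM byte chunks, with the chunk
 * offset encoded in hash_config alongside the hash algorithm.
 */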
4201 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4202 const u8 hfunc, const u8 *key)
4203 {
4204 struct hclge_rss_config_cmd *req;
4205 unsigned int key_offset = 0;
4206 struct hclge_desc desc;
4207 int key_counts;
4208 int key_size;
4209 int ret;
4210
4211 key_counts = HCLGE_RSS_KEY_SIZE;
4212 req = (struct hclge_rss_config_cmd *)desc.data;
4213
4214 while (key_counts) {
4215 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4216 false);
4217
4218 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4219 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4220
4221 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4222 memcpy(req->hash_key,
4223 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4224
4225 key_counts -= key_size;
4226 key_offset++;
4227 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4228 if (ret) {
4229 dev_err(&hdev->pdev->dev,
4230 "Configure RSS config fail, status = %d\n",
4231 ret);
4232 return ret;
4233 }
4234 }
4235 return 0;
4236 }
4237
4238 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4239 {
4240 struct hclge_rss_indirection_table_cmd *req;
4241 struct hclge_desc desc;
4242 int i, j;
4243 int ret;
4244
4245 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4246
4247 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4248 hclge_cmd_setup_basic_desc
4249 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4250
4251 req->start_table_index =
4252 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4253 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4254
4255 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4256 req->rss_result[j] =
4257 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4258
4259 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4260 if (ret) {
4261 dev_err(&hdev->pdev->dev,
4262 "Configure rss indir table fail,status = %d\n",
4263 ret);
4264 return ret;
4265 }
4266 }
4267 return 0;
4268 }
4269
4270 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4271 u16 *tc_size, u16 *tc_offset)
4272 {
4273 struct hclge_rss_tc_mode_cmd *req;
4274 struct hclge_desc desc;
4275 int ret;
4276 int i;
4277
4278 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4279 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4280
4281 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4282 u16 mode = 0;
4283
4284 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4285 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4286 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4287 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4288 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4289
4290 req->rss_tc_mode[i] = cpu_to_le16(mode);
4291 }
4292
4293 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4294 if (ret)
4295 dev_err(&hdev->pdev->dev,
4296 "Configure rss tc mode fail, status = %d\n", ret);
4297
4298 return ret;
4299 }
4300
4301 static void hclge_get_rss_type(struct hclge_vport *vport)
4302 {
4303 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4304 vport->rss_tuple_sets.ipv4_udp_en ||
4305 vport->rss_tuple_sets.ipv4_sctp_en ||
4306 vport->rss_tuple_sets.ipv6_tcp_en ||
4307 vport->rss_tuple_sets.ipv6_udp_en ||
4308 vport->rss_tuple_sets.ipv6_sctp_en)
4309 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4310 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4311 vport->rss_tuple_sets.ipv6_fragment_en)
4312 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4313 else
4314 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4315 }
4316
4317 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4318 {
4319 struct hclge_rss_input_tuple_cmd *req;
4320 struct hclge_desc desc;
4321 int ret;
4322
4323 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4324
4325 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4326
4327 /* Get the tuple cfg from pf */
4328 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4329 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4330 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4331 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4332 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4333 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4334 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4335 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4336 hclge_get_rss_type(&hdev->vport[0]);
4337 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4338 if (ret)
4339 dev_err(&hdev->pdev->dev,
4340 "Configure rss input fail, status = %d\n", ret);
4341 return ret;
4342 }
4343
4344 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4345 u8 *key, u8 *hfunc)
4346 {
4347 struct hclge_vport *vport = hclge_get_vport(handle);
4348 int i;
4349
4350 /* Get hash algorithm */
4351 if (hfunc) {
4352 switch (vport->rss_algo) {
4353 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4354 *hfunc = ETH_RSS_HASH_TOP;
4355 break;
4356 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4357 *hfunc = ETH_RSS_HASH_XOR;
4358 break;
4359 default:
4360 *hfunc = ETH_RSS_HASH_UNKNOWN;
4361 break;
4362 }
4363 }
4364
4365 /* Get the RSS Key required by the user */
4366 if (key)
4367 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4368
4369 /* Get indirect table */
4370 if (indir)
4371 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4372 indir[i] = vport->rss_indirection_tbl[i];
4373
4374 return 0;
4375 }
4376
4377 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4378 const u8 *key, const u8 hfunc)
4379 {
4380 struct hclge_vport *vport = hclge_get_vport(handle);
4381 struct hclge_dev *hdev = vport->back;
4382 u8 hash_algo;
4383 int ret, i;
4384
4385 /* Set the RSS Hash Key if specified by the user */
4386 if (key) {
4387 switch (hfunc) {
4388 case ETH_RSS_HASH_TOP:
4389 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4390 break;
4391 case ETH_RSS_HASH_XOR:
4392 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4393 break;
4394 case ETH_RSS_HASH_NO_CHANGE:
4395 hash_algo = vport->rss_algo;
4396 break;
4397 default:
4398 return -EINVAL;
4399 }
4400
4401 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4402 if (ret)
4403 return ret;
4404
4405 /* Update the shadow RSS key with the user specified key */
4406 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4407 vport->rss_algo = hash_algo;
4408 }
4409
4410 /* Update the shadow RSS table with user specified qids */
4411 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4412 vport->rss_indirection_tbl[i] = indir[i];
4413
4414 /* Update the hardware */
4415 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4416 }
4417
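/* Translate the ethtool RXH_* flags into the hardware RSS tuple bits;
 * SCTP flows additionally set HCLGE_V_TAG_BIT.
 */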
4418 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4419 {
4420 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4421
4422 if (nfc->data & RXH_L4_B_2_3)
4423 hash_sets |= HCLGE_D_PORT_BIT;
4424 else
4425 hash_sets &= ~HCLGE_D_PORT_BIT;
4426
4427 if (nfc->data & RXH_IP_SRC)
4428 hash_sets |= HCLGE_S_IP_BIT;
4429 else
4430 hash_sets &= ~HCLGE_S_IP_BIT;
4431
4432 if (nfc->data & RXH_IP_DST)
4433 hash_sets |= HCLGE_D_IP_BIT;
4434 else
4435 hash_sets &= ~HCLGE_D_IP_BIT;
4436
4437 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4438 hash_sets |= HCLGE_V_TAG_BIT;
4439
4440 return hash_sets;
4441 }
4442
4443 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4444 struct ethtool_rxnfc *nfc)
4445 {
4446 struct hclge_vport *vport = hclge_get_vport(handle);
4447 struct hclge_dev *hdev = vport->back;
4448 struct hclge_rss_input_tuple_cmd *req;
4449 struct hclge_desc desc;
4450 u8 tuple_sets;
4451 int ret;
4452
4453 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4454 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4455 return -EINVAL;
4456
4457 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4458 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4459
4460 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4461 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4462 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4463 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4464 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4465 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4466 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4467 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4468
4469 tuple_sets = hclge_get_rss_hash_bits(nfc);
4470 switch (nfc->flow_type) {
4471 case TCP_V4_FLOW:
4472 req->ipv4_tcp_en = tuple_sets;
4473 break;
4474 case TCP_V6_FLOW:
4475 req->ipv6_tcp_en = tuple_sets;
4476 break;
4477 case UDP_V4_FLOW:
4478 req->ipv4_udp_en = tuple_sets;
4479 break;
4480 case UDP_V6_FLOW:
4481 req->ipv6_udp_en = tuple_sets;
4482 break;
4483 case SCTP_V4_FLOW:
4484 req->ipv4_sctp_en = tuple_sets;
4485 break;
4486 case SCTP_V6_FLOW:
4487 if ((nfc->data & RXH_L4_B_0_1) ||
4488 (nfc->data & RXH_L4_B_2_3))
4489 return -EINVAL;
4490
4491 req->ipv6_sctp_en = tuple_sets;
4492 break;
4493 case IPV4_FLOW:
4494 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4495 break;
4496 case IPV6_FLOW:
4497 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4498 break;
4499 default:
4500 return -EINVAL;
4501 }
4502
4503 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4504 if (ret) {
4505 dev_err(&hdev->pdev->dev,
4506 "Set rss tuple fail, status = %d\n", ret);
4507 return ret;
4508 }
4509
4510 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4511 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4512 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4513 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4514 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4515 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4516 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4517 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4518 hclge_get_rss_type(vport);
4519 return 0;
4520 }
4521
4522 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4523 struct ethtool_rxnfc *nfc)
4524 {
4525 struct hclge_vport *vport = hclge_get_vport(handle);
4526 u8 tuple_sets;
4527
4528 nfc->data = 0;
4529
4530 switch (nfc->flow_type) {
4531 case TCP_V4_FLOW:
4532 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4533 break;
4534 case UDP_V4_FLOW:
4535 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4536 break;
4537 case TCP_V6_FLOW:
4538 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4539 break;
4540 case UDP_V6_FLOW:
4541 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4542 break;
4543 case SCTP_V4_FLOW:
4544 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4545 break;
4546 case SCTP_V6_FLOW:
4547 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4548 break;
4549 case IPV4_FLOW:
4550 case IPV6_FLOW:
4551 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4552 break;
4553 default:
4554 return -EINVAL;
4555 }
4556
4557 if (!tuple_sets)
4558 return 0;
4559
4560 if (tuple_sets & HCLGE_D_PORT_BIT)
4561 nfc->data |= RXH_L4_B_2_3;
4562 if (tuple_sets & HCLGE_S_PORT_BIT)
4563 nfc->data |= RXH_L4_B_0_1;
4564 if (tuple_sets & HCLGE_D_IP_BIT)
4565 nfc->data |= RXH_IP_DST;
4566 if (tuple_sets & HCLGE_S_IP_BIT)
4567 nfc->data |= RXH_IP_SRC;
4568
4569 return 0;
4570 }
4571
4572 static int hclge_get_tc_size(struct hnae3_handle *handle)
4573 {
4574 struct hclge_vport *vport = hclge_get_vport(handle);
4575 struct hclge_dev *hdev = vport->back;
4576
4577 return hdev->rss_size_max;
4578 }
4579
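/* Program the hardware RSS state (indirection table, hash key and
 * algorithm, input tuples and per-TC mode) from the configuration
 * cached in vport 0.
 */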
4580 int hclge_rss_init_hw(struct hclge_dev *hdev)
4581 {
4582 struct hclge_vport *vport = hdev->vport;
4583 u8 *rss_indir = vport[0].rss_indirection_tbl;
4584 u16 rss_size = vport[0].alloc_rss_size;
4585 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4586 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4587 u8 *key = vport[0].rss_hash_key;
4588 u8 hfunc = vport[0].rss_algo;
4589 u16 tc_valid[HCLGE_MAX_TC_NUM];
4590 u16 roundup_size;
4591 unsigned int i;
4592 int ret;
4593
4594 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4595 if (ret)
4596 return ret;
4597
4598 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4599 if (ret)
4600 return ret;
4601
4602 ret = hclge_set_rss_input_tuple(hdev);
4603 if (ret)
4604 return ret;
4605
4606 /* Each TC has the same queue size, and the tc_size set to hardware is
4607 * the log2 of the roundup power of two of rss_size; the actual queue
4608 * size is limited by the indirection table.
4609 */
4610 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4611 dev_err(&hdev->pdev->dev,
4612 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4613 rss_size);
4614 return -EINVAL;
4615 }
4616
4617 roundup_size = roundup_pow_of_two(rss_size);
4618 roundup_size = ilog2(roundup_size);
4619
4620 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4621 tc_valid[i] = 0;
4622
4623 if (!(hdev->hw_tc_map & BIT(i)))
4624 continue;
4625
4626 tc_valid[i] = 1;
4627 tc_size[i] = roundup_size;
4628 tc_offset[i] = rss_size * i;
4629 }
4630
4631 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4632 }
4633
4634 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4635 {
4636 struct hclge_vport *vport = hdev->vport;
4637 int i, j;
4638
4639 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4640 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4641 vport[j].rss_indirection_tbl[i] =
4642 i % vport[j].alloc_rss_size;
4643 }
4644 }
4645
4646 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4647 {
4648 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4649 struct hclge_vport *vport = hdev->vport;
4650
4651 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4652 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4653
4654 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4655 vport[i].rss_tuple_sets.ipv4_tcp_en =
4656 HCLGE_RSS_INPUT_TUPLE_OTHER;
4657 vport[i].rss_tuple_sets.ipv4_udp_en =
4658 HCLGE_RSS_INPUT_TUPLE_OTHER;
4659 vport[i].rss_tuple_sets.ipv4_sctp_en =
4660 HCLGE_RSS_INPUT_TUPLE_SCTP;
4661 vport[i].rss_tuple_sets.ipv4_fragment_en =
4662 HCLGE_RSS_INPUT_TUPLE_OTHER;
4663 vport[i].rss_tuple_sets.ipv6_tcp_en =
4664 HCLGE_RSS_INPUT_TUPLE_OTHER;
4665 vport[i].rss_tuple_sets.ipv6_udp_en =
4666 HCLGE_RSS_INPUT_TUPLE_OTHER;
4667 vport[i].rss_tuple_sets.ipv6_sctp_en =
4668 HCLGE_RSS_INPUT_TUPLE_SCTP;
4669 vport[i].rss_tuple_sets.ipv6_fragment_en =
4670 HCLGE_RSS_INPUT_TUPLE_OTHER;
4671
4672 vport[i].rss_algo = rss_algo;
4673
4674 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4675 HCLGE_RSS_KEY_SIZE);
4676 }
4677
4678 hclge_rss_indir_init_cfg(hdev);
4679 }
4680
4681 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4682 int vector_id, bool en,
4683 struct hnae3_ring_chain_node *ring_chain)
4684 {
4685 struct hclge_dev *hdev = vport->back;
4686 struct hnae3_ring_chain_node *node;
4687 struct hclge_desc desc;
4688 struct hclge_ctrl_vector_chain_cmd *req =
4689 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4690 enum hclge_cmd_status status;
4691 enum hclge_opcode_type op;
4692 u16 tqp_type_and_id;
4693 int i;
4694
4695 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4696 hclge_cmd_setup_basic_desc(&desc, op, false);
4697 req->int_vector_id = vector_id;
4698
4699 i = 0;
4700 for (node = ring_chain; node; node = node->next) {
4701 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4702 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4703 HCLGE_INT_TYPE_S,
4704 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4705 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4706 HCLGE_TQP_ID_S, node->tqp_index);
4707 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4708 HCLGE_INT_GL_IDX_S,
4709 hnae3_get_field(node->int_gl_idx,
4710 HNAE3_RING_GL_IDX_M,
4711 HNAE3_RING_GL_IDX_S));
4712 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4713 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4714 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4715 req->vfid = vport->vport_id;
4716
4717 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4718 if (status) {
4719 dev_err(&hdev->pdev->dev,
4720 "Map TQP fail, status is %d.\n",
4721 status);
4722 return -EIO;
4723 }
4724 i = 0;
4725
4726 hclge_cmd_setup_basic_desc(&desc,
4727 op,
4728 false);
4729 req->int_vector_id = vector_id;
4730 }
4731 }
4732
4733 if (i > 0) {
4734 req->int_cause_num = i;
4735 req->vfid = vport->vport_id;
4736 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4737 if (status) {
4738 dev_err(&hdev->pdev->dev,
4739 "Map TQP fail, status is %d.\n", status);
4740 return -EIO;
4741 }
4742 }
4743
4744 return 0;
4745 }
4746
4747 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4748 struct hnae3_ring_chain_node *ring_chain)
4749 {
4750 struct hclge_vport *vport = hclge_get_vport(handle);
4751 struct hclge_dev *hdev = vport->back;
4752 int vector_id;
4753
4754 vector_id = hclge_get_vector_index(hdev, vector);
4755 if (vector_id < 0) {
4756 dev_err(&hdev->pdev->dev,
4757 "failed to get vector index. vector=%d\n", vector);
4758 return vector_id;
4759 }
4760
4761 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4762 }
4763
4764 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4765 struct hnae3_ring_chain_node *ring_chain)
4766 {
4767 struct hclge_vport *vport = hclge_get_vport(handle);
4768 struct hclge_dev *hdev = vport->back;
4769 int vector_id, ret;
4770
4771 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4772 return 0;
4773
4774 vector_id = hclge_get_vector_index(hdev, vector);
4775 if (vector_id < 0) {
4776 dev_err(&handle->pdev->dev,
4777 "Get vector index fail. ret =%d\n", vector_id);
4778 return vector_id;
4779 }
4780
4781 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4782 if (ret)
4783 dev_err(&handle->pdev->dev,
4784 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4785 vector_id, ret);
4786
4787 return ret;
4788 }
4789
4790 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4791 struct hclge_promisc_param *param)
4792 {
4793 struct hclge_promisc_cfg_cmd *req;
4794 struct hclge_desc desc;
4795 int ret;
4796
4797 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4798
4799 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4800 req->vf_id = param->vf_id;
4801
4802 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4803 * pdev revision(0x20); newer revisions support them. The
4804 * value of these two fields will not cause an error when the driver
4805 * sends the command to firmware on revision(0x20).
4806 */
4807 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4808 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4809
4810 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4811 if (ret)
4812 dev_err(&hdev->pdev->dev,
4813 "failed to set vport %d promisc mode, ret = %d.\n",
4814 param->vf_id, ret);
4815
4816 return ret;
4817 }
4818
4819 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4820 bool en_uc, bool en_mc, bool en_bc,
4821 int vport_id)
4822 {
4823 if (!param)
4824 return;
4825
4826 memset(param, 0, sizeof(struct hclge_promisc_param));
4827 if (en_uc)
4828 param->enable = HCLGE_PROMISC_EN_UC;
4829 if (en_mc)
4830 param->enable |= HCLGE_PROMISC_EN_MC;
4831 if (en_bc)
4832 param->enable |= HCLGE_PROMISC_EN_BC;
4833 param->vf_id = vport_id;
4834 }
4835
4836 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4837 bool en_mc_pmc, bool en_bc_pmc)
4838 {
4839 struct hclge_dev *hdev = vport->back;
4840 struct hclge_promisc_param param;
4841
4842 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4843 vport->vport_id);
4844 return hclge_cmd_set_promisc_mode(hdev, &param);
4845 }
4846
4847 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4848 bool en_mc_pmc)
4849 {
4850 struct hclge_vport *vport = hclge_get_vport(handle);
4851 struct hclge_dev *hdev = vport->back;
4852 bool en_bc_pmc = true;
4853
4854 /* For devices whose version is below V2, if broadcast promisc is
4855 * enabled, the vlan filter is always bypassed. So broadcast promisc
4856 * should be disabled until the user enables promisc mode
4857 */
4858 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4859 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4860
4861 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4862 en_bc_pmc);
4863 }
4864
4865 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4866 {
4867 struct hclge_vport *vport = hclge_get_vport(handle);
4868 struct hclge_dev *hdev = vport->back;
4869
4870 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4871 }
4872
4873 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4874 {
4875 struct hclge_get_fd_mode_cmd *req;
4876 struct hclge_desc desc;
4877 int ret;
4878
4879 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4880
4881 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4882
4883 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4884 if (ret) {
4885 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4886 return ret;
4887 }
4888
4889 *fd_mode = req->mode;
4890
4891 return ret;
4892 }
4893
4894 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4895 u32 *stage1_entry_num,
4896 u32 *stage2_entry_num,
4897 u16 *stage1_counter_num,
4898 u16 *stage2_counter_num)
4899 {
4900 struct hclge_get_fd_allocation_cmd *req;
4901 struct hclge_desc desc;
4902 int ret;
4903
4904 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4905
4906 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4907
4908 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4909 if (ret) {
4910 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4911 ret);
4912 return ret;
4913 }
4914
4915 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4916 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4917 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4918 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4919
4920 return ret;
4921 }
4922
4923 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4924 enum HCLGE_FD_STAGE stage_num)
4925 {
4926 struct hclge_set_fd_key_config_cmd *req;
4927 struct hclge_fd_key_cfg *stage;
4928 struct hclge_desc desc;
4929 int ret;
4930
4931 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4932
4933 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4934 stage = &hdev->fd_cfg.key_cfg[stage_num];
4935 req->stage = stage_num;
4936 req->key_select = stage->key_sel;
4937 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4938 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4939 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4940 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4941 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4942 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4943
4944 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4945 if (ret)
4946 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4947
4948 return ret;
4949 }
4950
4951 static int hclge_init_fd_config(struct hclge_dev *hdev)
4952 {
4953 #define LOW_2_WORDS 0x03
4954 struct hclge_fd_key_cfg *key_cfg;
4955 int ret;
4956
4957 if (!hnae3_dev_fd_supported(hdev))
4958 return 0;
4959
4960 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4961 if (ret)
4962 return ret;
4963
4964 switch (hdev->fd_cfg.fd_mode) {
4965 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4966 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4967 break;
4968 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4969 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4970 break;
4971 default:
4972 dev_err(&hdev->pdev->dev,
4973 "Unsupported flow director mode %u\n",
4974 hdev->fd_cfg.fd_mode);
4975 return -EOPNOTSUPP;
4976 }
4977
4978 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4979 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4980 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4981 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4982 key_cfg->outer_sipv6_word_en = 0;
4983 key_cfg->outer_dipv6_word_en = 0;
4984
4985 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4986 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4987 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4988 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4989
4990 /* If the max 400-bit key is used, we can also support MAC address tuples */
4991 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4992 key_cfg->tuple_active |=
4993 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4994
4995 /* roce_type is used to filter roce frames
4996 * dst_vport is used to specify the rule
4997 */
4998 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4999
5000 ret = hclge_get_fd_allocation(hdev,
5001 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5002 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5003 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5004 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5005 if (ret)
5006 return ret;
5007
5008 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5009 }
5010
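/* Write one flow director TCAM entry. The key is spread across three
 * chained command descriptors; sel_x selects whether the x or y half of
 * the key is programmed, and is_add controls the entry valid bit.
 */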
5011 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5012 int loc, u8 *key, bool is_add)
5013 {
5014 struct hclge_fd_tcam_config_1_cmd *req1;
5015 struct hclge_fd_tcam_config_2_cmd *req2;
5016 struct hclge_fd_tcam_config_3_cmd *req3;
5017 struct hclge_desc desc[3];
5018 int ret;
5019
5020 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5021 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5022 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5023 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5024 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5025
5026 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5027 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5028 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5029
5030 req1->stage = stage;
5031 req1->xy_sel = sel_x ? 1 : 0;
5032 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5033 req1->index = cpu_to_le32(loc);
5034 req1->entry_vld = sel_x ? is_add : 0;
5035
5036 if (key) {
5037 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5038 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5039 sizeof(req2->tcam_data));
5040 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5041 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5042 }
5043
5044 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5045 if (ret)
5046 dev_err(&hdev->pdev->dev,
5047 "config tcam key fail, ret=%d\n",
5048 ret);
5049
5050 return ret;
5051 }
5052
5053 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5054 struct hclge_fd_ad_data *action)
5055 {
5056 struct hclge_fd_ad_config_cmd *req;
5057 struct hclge_desc desc;
5058 u64 ad_data = 0;
5059 int ret;
5060
5061 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5062
5063 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5064 req->index = cpu_to_le32(loc);
5065 req->stage = stage;
5066
5067 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5068 action->write_rule_id_to_bd);
5069 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5070 action->rule_id);
5071 ad_data <<= 32;
5072 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5073 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5074 action->forward_to_direct_queue);
5075 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5076 action->queue_id);
5077 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5078 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5079 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5080 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5081 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5082 action->counter_id);
5083
5084 req->ad_data = cpu_to_le64(ad_data);
5085 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5086 if (ret)
5087 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5088
5089 return ret;
5090 }
5091
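/* Convert one tuple of the rule into the TCAM x/y key format using
 * calc_x/calc_y with the tuple mask. Returns true if the tuple occupies
 * space in the key (unused tuples are simply left as zeroes), false if
 * the tuple bit is not handled here.
 */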
5092 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5093 struct hclge_fd_rule *rule)
5094 {
5095 u16 tmp_x_s, tmp_y_s;
5096 u32 tmp_x_l, tmp_y_l;
5097 int i;
5098
5099 if (rule->unused_tuple & tuple_bit)
5100 return true;
5101
5102 switch (tuple_bit) {
5103 case BIT(INNER_DST_MAC):
5104 for (i = 0; i < ETH_ALEN; i++) {
5105 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5106 rule->tuples_mask.dst_mac[i]);
5107 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5108 rule->tuples_mask.dst_mac[i]);
5109 }
5110
5111 return true;
5112 case BIT(INNER_SRC_MAC):
5113 for (i = 0; i < ETH_ALEN; i++) {
5114 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5115 rule->tuples.src_mac[i]);
5116 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5117 rule->tuples.src_mac[i]);
5118 }
5119
5120 return true;
5121 case BIT(INNER_VLAN_TAG_FST):
5122 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5123 rule->tuples_mask.vlan_tag1);
5124 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5125 rule->tuples_mask.vlan_tag1);
5126 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5127 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5128
5129 return true;
5130 case BIT(INNER_ETH_TYPE):
5131 calc_x(tmp_x_s, rule->tuples.ether_proto,
5132 rule->tuples_mask.ether_proto);
5133 calc_y(tmp_y_s, rule->tuples.ether_proto,
5134 rule->tuples_mask.ether_proto);
5135 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5136 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5137
5138 return true;
5139 case BIT(INNER_IP_TOS):
5140 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5141 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5142
5143 return true;
5144 case BIT(INNER_IP_PROTO):
5145 calc_x(*key_x, rule->tuples.ip_proto,
5146 rule->tuples_mask.ip_proto);
5147 calc_y(*key_y, rule->tuples.ip_proto,
5148 rule->tuples_mask.ip_proto);
5149
5150 return true;
5151 case BIT(INNER_SRC_IP):
5152 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5153 rule->tuples_mask.src_ip[IPV4_INDEX]);
5154 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5155 rule->tuples_mask.src_ip[IPV4_INDEX]);
5156 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5157 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5158
5159 return true;
5160 case BIT(INNER_DST_IP):
5161 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5162 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5163 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5164 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5165 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5166 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5167
5168 return true;
5169 case BIT(INNER_SRC_PORT):
5170 calc_x(tmp_x_s, rule->tuples.src_port,
5171 rule->tuples_mask.src_port);
5172 calc_y(tmp_y_s, rule->tuples.src_port,
5173 rule->tuples_mask.src_port);
5174 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5175 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5176
5177 return true;
5178 case BIT(INNER_DST_PORT):
5179 calc_x(tmp_x_s, rule->tuples.dst_port,
5180 rule->tuples_mask.dst_port);
5181 calc_y(tmp_y_s, rule->tuples.dst_port,
5182 rule->tuples_mask.dst_port);
5183 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5184 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5185
5186 return true;
5187 default:
5188 return false;
5189 }
5190 }
5191
5192 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5193 u8 vf_id, u8 network_port_id)
5194 {
5195 u32 port_number = 0;
5196
5197 if (port_type == HOST_PORT) {
5198 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5199 pf_id);
5200 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5201 vf_id);
5202 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5203 } else {
5204 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5205 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5206 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5207 }
5208
5209 return port_number;
5210 }
5211
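/* Convert the active meta data fields (packet type and destination vport)
 * into a TCAM x/y key pair. The fields are packed starting from bit 0 and
 * then shifted so that the used bits occupy the most significant end of the
 * 32-bit meta data word.
 */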
5212 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5213 __le32 *key_x, __le32 *key_y,
5214 struct hclge_fd_rule *rule)
5215 {
5216 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5217 u8 cur_pos = 0, tuple_size, shift_bits;
5218 unsigned int i;
5219
5220 for (i = 0; i < MAX_META_DATA; i++) {
5221 tuple_size = meta_data_key_info[i].key_length;
5222 tuple_bit = key_cfg->meta_data_active & BIT(i);
5223
5224 switch (tuple_bit) {
5225 case BIT(ROCE_TYPE):
5226 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5227 cur_pos += tuple_size;
5228 break;
5229 case BIT(DST_VPORT):
5230 port_number = hclge_get_port_number(HOST_PORT, 0,
5231 rule->vf_id, 0);
5232 hnae3_set_field(meta_data,
5233 GENMASK(cur_pos + tuple_size - 1, cur_pos),
5234 cur_pos, port_number);
5235 cur_pos += tuple_size;
5236 break;
5237 default:
5238 break;
5239 }
5240 }
5241
5242 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5243 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5244 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5245
5246 *key_x = cpu_to_le32(tmp_x << shift_bits);
5247 *key_y = cpu_to_le32(tmp_y << shift_bits);
5248 }
5249
5250 /* A complete key is combined with meta data key and tuple key.
5251 * Meta data key is stored at the MSB region, and tuple key is stored at
5252 * the LSB region, unused bits will be filled 0.
5253 */
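/* For a key of max_key_length bits with a meta data region of
 * MAX_META_DATA_LENGTH bits, the meta data key is written at byte offset
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8, while the tuple keys are
 * packed upwards from byte 0.
 */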
5254 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5255 struct hclge_fd_rule *rule)
5256 {
5257 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5258 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5259 u8 *cur_key_x, *cur_key_y;
5260 u8 meta_data_region;
5261 u8 tuple_size;
5262 int ret;
5263 u32 i;
5264
5265 memset(key_x, 0, sizeof(key_x));
5266 memset(key_y, 0, sizeof(key_y));
5267 cur_key_x = key_x;
5268 cur_key_y = key_y;
5269
5270 for (i = 0; i < MAX_TUPLE; i++) {
5271 bool tuple_valid;
5272 u32 check_tuple;
5273
5274 tuple_size = tuple_key_info[i].key_length / 8;
5275 check_tuple = key_cfg->tuple_active & BIT(i);
5276
5277 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5278 cur_key_y, rule);
5279 if (tuple_valid) {
5280 cur_key_x += tuple_size;
5281 cur_key_y += tuple_size;
5282 }
5283 }
5284
5285 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5286 MAX_META_DATA_LENGTH / 8;
5287
5288 hclge_fd_convert_meta_data(key_cfg,
5289 (__le32 *)(key_x + meta_data_region),
5290 (__le32 *)(key_y + meta_data_region),
5291 rule);
5292
5293 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5294 true);
5295 if (ret) {
5296 dev_err(&hdev->pdev->dev,
5297 "fd key_y config fail, loc=%u, ret=%d\n",
5298 rule->location, ret);
5299 return ret;
5300 }
5301
5302 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5303 true);
5304 if (ret)
5305 dev_err(&hdev->pdev->dev,
5306 "fd key_x config fail, loc=%u, ret=%d\n",
5307 rule->location, ret);
5308 return ret;
5309 }
5310
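/* Build the action data for a flow director rule: either drop the packet or
 * forward it to the requested queue, and record the rule location as the
 * rule id written back to the BD.
 */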
5311 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5312 struct hclge_fd_rule *rule)
5313 {
5314 struct hclge_fd_ad_data ad_data;
5315
5316 ad_data.ad_id = rule->location;
5317
5318 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5319 ad_data.drop_packet = true;
5320 ad_data.forward_to_direct_queue = false;
5321 ad_data.queue_id = 0;
5322 } else {
5323 ad_data.drop_packet = false;
5324 ad_data.forward_to_direct_queue = true;
5325 ad_data.queue_id = rule->queue_id;
5326 }
5327
5328 ad_data.use_counter = false;
5329 ad_data.counter_id = 0;
5330
5331 ad_data.use_next_stage = false;
5332 ad_data.next_input_key = 0;
5333
5334 ad_data.write_rule_id_to_bd = true;
5335 ad_data.rule_id = rule->location;
5336
5337 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5338 }
5339
5340 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5341 u32 *unused_tuple)
5342 {
5343 if (!spec || !unused_tuple)
5344 return -EINVAL;
5345
5346 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5347
5348 if (!spec->ip4src)
5349 *unused_tuple |= BIT(INNER_SRC_IP);
5350
5351 if (!spec->ip4dst)
5352 *unused_tuple |= BIT(INNER_DST_IP);
5353
5354 if (!spec->psrc)
5355 *unused_tuple |= BIT(INNER_SRC_PORT);
5356
5357 if (!spec->pdst)
5358 *unused_tuple |= BIT(INNER_DST_PORT);
5359
5360 if (!spec->tos)
5361 *unused_tuple |= BIT(INNER_IP_TOS);
5362
5363 return 0;
5364 }
5365
5366 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5367 u32 *unused_tuple)
5368 {
5369 if (!spec || !unused_tuple)
5370 return -EINVAL;
5371
5372 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5373 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5374
5375 if (!spec->ip4src)
5376 *unused_tuple |= BIT(INNER_SRC_IP);
5377
5378 if (!spec->ip4dst)
5379 *unused_tuple |= BIT(INNER_DST_IP);
5380
5381 if (!spec->tos)
5382 *unused_tuple |= BIT(INNER_IP_TOS);
5383
5384 if (!spec->proto)
5385 *unused_tuple |= BIT(INNER_IP_PROTO);
5386
5387 if (spec->l4_4_bytes)
5388 return -EOPNOTSUPP;
5389
5390 if (spec->ip_ver != ETH_RX_NFC_IP4)
5391 return -EOPNOTSUPP;
5392
5393 return 0;
5394 }
5395
5396 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5397 u32 *unused_tuple)
5398 {
5399 if (!spec || !unused_tuple)
5400 return -EINVAL;
5401
5402 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5403 BIT(INNER_IP_TOS);
5404
5405 /* check whether the src/dst ip address is used */
5406 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5407 !spec->ip6src[2] && !spec->ip6src[3])
5408 *unused_tuple |= BIT(INNER_SRC_IP);
5409
5410 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5411 !spec->ip6dst[2] && !spec->ip6dst[3])
5412 *unused_tuple |= BIT(INNER_DST_IP);
5413
5414 if (!spec->psrc)
5415 *unused_tuple |= BIT(INNER_SRC_PORT);
5416
5417 if (!spec->pdst)
5418 *unused_tuple |= BIT(INNER_DST_PORT);
5419
5420 if (spec->tclass)
5421 return -EOPNOTSUPP;
5422
5423 return 0;
5424 }
5425
5426 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5427 u32 *unused_tuple)
5428 {
5429 if (!spec || !unused_tuple)
5430 return -EINVAL;
5431
5432 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5433 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5434
5435 /* check whether the src/dst ip address is used */
5436 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5437 !spec->ip6src[2] && !spec->ip6src[3])
5438 *unused_tuple |= BIT(INNER_SRC_IP);
5439
5440 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5441 !spec->ip6dst[2] && !spec->ip6dst[3])
5442 *unused_tuple |= BIT(INNER_DST_IP);
5443
5444 if (!spec->l4_proto)
5445 *unused_tuple |= BIT(INNER_IP_PROTO);
5446
5447 if (spec->tclass)
5448 return -EOPNOTSUPP;
5449
5450 if (spec->l4_4_bytes)
5451 return -EOPNOTSUPP;
5452
5453 return 0;
5454 }
5455
5456 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5457 {
5458 if (!spec || !unused_tuple)
5459 return -EINVAL;
5460
5461 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5462 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5463 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5464
5465 if (is_zero_ether_addr(spec->h_source))
5466 *unused_tuple |= BIT(INNER_SRC_MAC);
5467
5468 if (is_zero_ether_addr(spec->h_dest))
5469 *unused_tuple |= BIT(INNER_DST_MAC);
5470
5471 if (!spec->h_proto)
5472 *unused_tuple |= BIT(INNER_ETH_TYPE);
5473
5474 return 0;
5475 }
5476
5477 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5478 struct ethtool_rx_flow_spec *fs,
5479 u32 *unused_tuple)
5480 {
5481 if (fs->flow_type & FLOW_EXT) {
5482 if (fs->h_ext.vlan_etype) {
5483 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5484 return -EOPNOTSUPP;
5485 }
5486
5487 if (!fs->h_ext.vlan_tci)
5488 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5489
5490 if (fs->m_ext.vlan_tci &&
5491 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5492 dev_err(&hdev->pdev->dev,
5493 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5494 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5495 return -EINVAL;
5496 }
5497 } else {
5498 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5499 }
5500
5501 if (fs->flow_type & FLOW_MAC_EXT) {
5502 if (hdev->fd_cfg.fd_mode !=
5503 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5504 dev_err(&hdev->pdev->dev,
5505 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5506 return -EOPNOTSUPP;
5507 }
5508
5509 if (is_zero_ether_addr(fs->h_ext.h_dest))
5510 *unused_tuple |= BIT(INNER_DST_MAC);
5511 else
5512 *unused_tuple &= ~BIT(INNER_DST_MAC);
5513 }
5514
5515 return 0;
5516 }
5517
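/* Validate an ethtool flow spec before it is converted into an fd rule:
 * check the rule location, reject unsupported user-def bytes and flow types,
 * and collect the tuples that are not used in *unused_tuple.
 */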
5518 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5519 struct ethtool_rx_flow_spec *fs,
5520 u32 *unused_tuple)
5521 {
5522 u32 flow_type;
5523 int ret;
5524
5525 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5526 dev_err(&hdev->pdev->dev,
5527 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5528 fs->location,
5529 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5530 return -EINVAL;
5531 }
5532
5533 if ((fs->flow_type & FLOW_EXT) &&
5534 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5535 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5536 return -EOPNOTSUPP;
5537 }
5538
5539 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5540 switch (flow_type) {
5541 case SCTP_V4_FLOW:
5542 case TCP_V4_FLOW:
5543 case UDP_V4_FLOW:
5544 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5545 unused_tuple);
5546 break;
5547 case IP_USER_FLOW:
5548 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5549 unused_tuple);
5550 break;
5551 case SCTP_V6_FLOW:
5552 case TCP_V6_FLOW:
5553 case UDP_V6_FLOW:
5554 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5555 unused_tuple);
5556 break;
5557 case IPV6_USER_FLOW:
5558 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5559 unused_tuple);
5560 break;
5561 case ETHER_FLOW:
5562 if (hdev->fd_cfg.fd_mode !=
5563 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5564 dev_err(&hdev->pdev->dev,
5565 "ETHER_FLOW is not supported in current fd mode!\n");
5566 return -EOPNOTSUPP;
5567 }
5568
5569 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5570 unused_tuple);
5571 break;
5572 default:
5573 dev_err(&hdev->pdev->dev,
5574 "unsupported protocol type, protocol type = %#x\n",
5575 flow_type);
5576 return -EOPNOTSUPP;
5577 }
5578
5579 if (ret) {
5580 dev_err(&hdev->pdev->dev,
5581 "failed to check flow union tuple, ret = %d\n",
5582 ret);
5583 return ret;
5584 }
5585
5586 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5587 }
5588
5589 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5590 {
5591 struct hclge_fd_rule *rule = NULL;
5592 struct hlist_node *node2;
5593
5594 spin_lock_bh(&hdev->fd_rule_lock);
5595 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5596 if (rule->location >= location)
5597 break;
5598 }
5599
5600 spin_unlock_bh(&hdev->fd_rule_lock);
5601
5602 return rule && rule->location == location;
5603 }
5604
5605 /* this function must be called while holding fd_rule_lock */
5606 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5607 struct hclge_fd_rule *new_rule,
5608 u16 location,
5609 bool is_add)
5610 {
5611 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5612 struct hlist_node *node2;
5613
5614 if (is_add && !new_rule)
5615 return -EINVAL;
5616
5617 hlist_for_each_entry_safe(rule, node2,
5618 &hdev->fd_rule_list, rule_node) {
5619 if (rule->location >= location)
5620 break;
5621 parent = rule;
5622 }
5623
5624 if (rule && rule->location == location) {
5625 hlist_del(&rule->rule_node);
5626 kfree(rule);
5627 hdev->hclge_fd_rule_num--;
5628
5629 if (!is_add) {
5630 if (!hdev->hclge_fd_rule_num)
5631 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5632 clear_bit(location, hdev->fd_bmap);
5633
5634 return 0;
5635 }
5636 } else if (!is_add) {
5637 dev_err(&hdev->pdev->dev,
5638 "delete fail, rule %u is inexistent\n",
5639 location);
5640 return -EINVAL;
5641 }
5642
5643 INIT_HLIST_NODE(&new_rule->rule_node);
5644
5645 if (parent)
5646 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5647 else
5648 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5649
5650 set_bit(location, hdev->fd_bmap);
5651 hdev->hclge_fd_rule_num++;
5652 hdev->fd_active_type = new_rule->rule_type;
5653
5654 return 0;
5655 }
5656
5657 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5658 struct ethtool_rx_flow_spec *fs,
5659 struct hclge_fd_rule *rule)
5660 {
5661 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5662
5663 switch (flow_type) {
5664 case SCTP_V4_FLOW:
5665 case TCP_V4_FLOW:
5666 case UDP_V4_FLOW:
5667 rule->tuples.src_ip[IPV4_INDEX] =
5668 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5669 rule->tuples_mask.src_ip[IPV4_INDEX] =
5670 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5671
5672 rule->tuples.dst_ip[IPV4_INDEX] =
5673 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5674 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5675 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5676
5677 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5678 rule->tuples_mask.src_port =
5679 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5680
5681 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5682 rule->tuples_mask.dst_port =
5683 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5684
5685 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5686 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5687
5688 rule->tuples.ether_proto = ETH_P_IP;
5689 rule->tuples_mask.ether_proto = 0xFFFF;
5690
5691 break;
5692 case IP_USER_FLOW:
5693 rule->tuples.src_ip[IPV4_INDEX] =
5694 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5695 rule->tuples_mask.src_ip[IPV4_INDEX] =
5696 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5697
5698 rule->tuples.dst_ip[IPV4_INDEX] =
5699 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5700 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5701 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5702
5703 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5704 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5705
5706 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5707 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5708
5709 rule->tuples.ether_proto = ETH_P_IP;
5710 rule->tuples_mask.ether_proto = 0xFFFF;
5711
5712 break;
5713 case SCTP_V6_FLOW:
5714 case TCP_V6_FLOW:
5715 case UDP_V6_FLOW:
5716 be32_to_cpu_array(rule->tuples.src_ip,
5717 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5718 be32_to_cpu_array(rule->tuples_mask.src_ip,
5719 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5720
5721 be32_to_cpu_array(rule->tuples.dst_ip,
5722 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5723 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5724 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5725
5726 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5727 rule->tuples_mask.src_port =
5728 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5729
5730 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5731 rule->tuples_mask.dst_port =
5732 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5733
5734 rule->tuples.ether_proto = ETH_P_IPV6;
5735 rule->tuples_mask.ether_proto = 0xFFFF;
5736
5737 break;
5738 case IPV6_USER_FLOW:
5739 be32_to_cpu_array(rule->tuples.src_ip,
5740 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5741 be32_to_cpu_array(rule->tuples_mask.src_ip,
5742 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5743
5744 be32_to_cpu_array(rule->tuples.dst_ip,
5745 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5746 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5747 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5748
5749 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5750 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5751
5752 rule->tuples.ether_proto = ETH_P_IPV6;
5753 rule->tuples_mask.ether_proto = 0xFFFF;
5754
5755 break;
5756 case ETHER_FLOW:
5757 ether_addr_copy(rule->tuples.src_mac,
5758 fs->h_u.ether_spec.h_source);
5759 ether_addr_copy(rule->tuples_mask.src_mac,
5760 fs->m_u.ether_spec.h_source);
5761
5762 ether_addr_copy(rule->tuples.dst_mac,
5763 fs->h_u.ether_spec.h_dest);
5764 ether_addr_copy(rule->tuples_mask.dst_mac,
5765 fs->m_u.ether_spec.h_dest);
5766
5767 rule->tuples.ether_proto =
5768 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5769 rule->tuples_mask.ether_proto =
5770 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5771
5772 break;
5773 default:
5774 return -EOPNOTSUPP;
5775 }
5776
5777 switch (flow_type) {
5778 case SCTP_V4_FLOW:
5779 case SCTP_V6_FLOW:
5780 rule->tuples.ip_proto = IPPROTO_SCTP;
5781 rule->tuples_mask.ip_proto = 0xFF;
5782 break;
5783 case TCP_V4_FLOW:
5784 case TCP_V6_FLOW:
5785 rule->tuples.ip_proto = IPPROTO_TCP;
5786 rule->tuples_mask.ip_proto = 0xFF;
5787 break;
5788 case UDP_V4_FLOW:
5789 case UDP_V6_FLOW:
5790 rule->tuples.ip_proto = IPPROTO_UDP;
5791 rule->tuples_mask.ip_proto = 0xFF;
5792 break;
5793 default:
5794 break;
5795 }
5796
5797 if (fs->flow_type & FLOW_EXT) {
5798 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5799 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5800 }
5801
5802 if (fs->flow_type & FLOW_MAC_EXT) {
5803 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5804 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5805 }
5806
5807 return 0;
5808 }
5809
5810 /* this function must be called while holding fd_rule_lock */
5811 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5812 struct hclge_fd_rule *rule)
5813 {
5814 int ret;
5815
5816 if (!rule) {
5817 dev_err(&hdev->pdev->dev,
5818 "The flow director rule is NULL\n");
5819 return -EINVAL;
5820 }
5821
5822 /* it never fails here, so there is no need to check the return value */
5823 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5824
5825 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5826 if (ret)
5827 goto clear_rule;
5828
5829 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5830 if (ret)
5831 goto clear_rule;
5832
5833 return 0;
5834
5835 clear_rule:
5836 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5837 return ret;
5838 }
5839
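/* Add a flow director rule requested through ethtool: validate the spec,
 * resolve the destination vport and queue, convert the spec into rule
 * tuples, then program both the action and the TCAM key for the rule.
 */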
5840 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5841 struct ethtool_rxnfc *cmd)
5842 {
5843 struct hclge_vport *vport = hclge_get_vport(handle);
5844 struct hclge_dev *hdev = vport->back;
5845 u16 dst_vport_id = 0, q_index = 0;
5846 struct ethtool_rx_flow_spec *fs;
5847 struct hclge_fd_rule *rule;
5848 u32 unused = 0;
5849 u8 action;
5850 int ret;
5851
5852 if (!hnae3_dev_fd_supported(hdev)) {
5853 dev_err(&hdev->pdev->dev,
5854 "flow table director is not supported\n");
5855 return -EOPNOTSUPP;
5856 }
5857
5858 if (!hdev->fd_en) {
5859 dev_err(&hdev->pdev->dev,
5860 "please enable flow director first\n");
5861 return -EOPNOTSUPP;
5862 }
5863
5864 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5865
5866 ret = hclge_fd_check_spec(hdev, fs, &unused);
5867 if (ret)
5868 return ret;
5869
5870 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5871 action = HCLGE_FD_ACTION_DROP_PACKET;
5872 } else {
5873 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5874 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5875 u16 tqps;
5876
5877 if (vf > hdev->num_req_vfs) {
5878 dev_err(&hdev->pdev->dev,
5879 "Error: vf id (%u) > max vf num (%u)\n",
5880 vf, hdev->num_req_vfs);
5881 return -EINVAL;
5882 }
5883
5884 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5885 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5886
5887 if (ring >= tqps) {
5888 dev_err(&hdev->pdev->dev,
5889 "Error: queue id (%u) > max tqp num (%u)\n",
5890 ring, tqps - 1);
5891 return -EINVAL;
5892 }
5893
5894 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5895 q_index = ring;
5896 }
5897
5898 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5899 if (!rule)
5900 return -ENOMEM;
5901
5902 ret = hclge_fd_get_tuple(hdev, fs, rule);
5903 if (ret) {
5904 kfree(rule);
5905 return ret;
5906 }
5907
5908 rule->flow_type = fs->flow_type;
5909 rule->location = fs->location;
5910 rule->unused_tuple = unused;
5911 rule->vf_id = dst_vport_id;
5912 rule->queue_id = q_index;
5913 rule->action = action;
5914 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5915
5916 /* to avoid rule conflicts, when the user configures a rule via ethtool,
5917 * we need to clear all arfs rules
5918 */
5919 spin_lock_bh(&hdev->fd_rule_lock);
5920 hclge_clear_arfs_rules(handle);
5921
5922 ret = hclge_fd_config_rule(hdev, rule);
5923
5924 spin_unlock_bh(&hdev->fd_rule_lock);
5925
5926 return ret;
5927 }
5928
5929 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5930 struct ethtool_rxnfc *cmd)
5931 {
5932 struct hclge_vport *vport = hclge_get_vport(handle);
5933 struct hclge_dev *hdev = vport->back;
5934 struct ethtool_rx_flow_spec *fs;
5935 int ret;
5936
5937 if (!hnae3_dev_fd_supported(hdev))
5938 return -EOPNOTSUPP;
5939
5940 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5941
5942 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5943 return -EINVAL;
5944
5945 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5946 dev_err(&hdev->pdev->dev,
5947 "Delete fail, rule %u is inexistent\n", fs->location);
5948 return -ENOENT;
5949 }
5950
5951 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5952 NULL, false);
5953 if (ret)
5954 return ret;
5955
5956 spin_lock_bh(&hdev->fd_rule_lock);
5957 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5958
5959 spin_unlock_bh(&hdev->fd_rule_lock);
5960
5961 return ret;
5962 }
5963
5964 /* this function must be called while holding fd_rule_lock */
5965 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5966 bool clear_list)
5967 {
5968 struct hclge_vport *vport = hclge_get_vport(handle);
5969 struct hclge_dev *hdev = vport->back;
5970 struct hclge_fd_rule *rule;
5971 struct hlist_node *node;
5972 u16 location;
5973
5974 if (!hnae3_dev_fd_supported(hdev))
5975 return;
5976
5977 for_each_set_bit(location, hdev->fd_bmap,
5978 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5979 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5980 NULL, false);
5981
5982 if (clear_list) {
5983 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5984 rule_node) {
5985 hlist_del(&rule->rule_node);
5986 kfree(rule);
5987 }
5988 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5989 hdev->hclge_fd_rule_num = 0;
5990 bitmap_zero(hdev->fd_bmap,
5991 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5992 }
5993 }
5994
5995 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5996 {
5997 struct hclge_vport *vport = hclge_get_vport(handle);
5998 struct hclge_dev *hdev = vport->back;
5999 struct hclge_fd_rule *rule;
6000 struct hlist_node *node;
6001 int ret;
6002
6003 /* Return ok here, because reset error handling will check this
6004 * return value. If error is returned here, the reset process will
6005 * fail.
6006 */
6007 if (!hnae3_dev_fd_supported(hdev))
6008 return 0;
6009
6010 /* if fd is disabled, the rules should not be restored during reset */
6011 if (!hdev->fd_en)
6012 return 0;
6013
6014 spin_lock_bh(&hdev->fd_rule_lock);
6015 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6016 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6017 if (!ret)
6018 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6019
6020 if (ret) {
6021 dev_warn(&hdev->pdev->dev,
6022 "Restore rule %u failed, remove it\n",
6023 rule->location);
6024 clear_bit(rule->location, hdev->fd_bmap);
6025 hlist_del(&rule->rule_node);
6026 kfree(rule);
6027 hdev->hclge_fd_rule_num--;
6028 }
6029 }
6030
6031 if (hdev->hclge_fd_rule_num)
6032 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6033
6034 spin_unlock_bh(&hdev->fd_rule_lock);
6035
6036 return 0;
6037 }
6038
6039 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6040 struct ethtool_rxnfc *cmd)
6041 {
6042 struct hclge_vport *vport = hclge_get_vport(handle);
6043 struct hclge_dev *hdev = vport->back;
6044
6045 if (!hnae3_dev_fd_supported(hdev))
6046 return -EOPNOTSUPP;
6047
6048 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6049 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6050
6051 return 0;
6052 }
6053
6054 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6055 struct ethtool_tcpip4_spec *spec,
6056 struct ethtool_tcpip4_spec *spec_mask)
6057 {
6058 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6059 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6060 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6061
6062 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6063 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6064 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6065
6066 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6067 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6068 0 : cpu_to_be16(rule->tuples_mask.src_port);
6069
6070 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6071 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6072 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6073
6074 spec->tos = rule->tuples.ip_tos;
6075 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6076 0 : rule->tuples_mask.ip_tos;
6077 }
6078
6079 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6080 struct ethtool_usrip4_spec *spec,
6081 struct ethtool_usrip4_spec *spec_mask)
6082 {
6083 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6084 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6085 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6086
6087 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6088 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6089 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6090
6091 spec->tos = rule->tuples.ip_tos;
6092 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6093 0 : rule->tuples_mask.ip_tos;
6094
6095 spec->proto = rule->tuples.ip_proto;
6096 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6097 0 : rule->tuples_mask.ip_proto;
6098
6099 spec->ip_ver = ETH_RX_NFC_IP4;
6100 }
6101
6102 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6103 struct ethtool_tcpip6_spec *spec,
6104 struct ethtool_tcpip6_spec *spec_mask)
6105 {
6106 cpu_to_be32_array(spec->ip6src,
6107 rule->tuples.src_ip, IPV6_SIZE);
6108 cpu_to_be32_array(spec->ip6dst,
6109 rule->tuples.dst_ip, IPV6_SIZE);
6110 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6111 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6112 else
6113 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6114 IPV6_SIZE);
6115
6116 if (rule->unused_tuple & BIT(INNER_DST_IP))
6117 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6118 else
6119 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6120 IPV6_SIZE);
6121
6122 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6123 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6124 0 : cpu_to_be16(rule->tuples_mask.src_port);
6125
6126 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6127 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6128 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6129 }
6130
6131 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6132 struct ethtool_usrip6_spec *spec,
6133 struct ethtool_usrip6_spec *spec_mask)
6134 {
6135 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6136 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6137 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6138 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6139 else
6140 cpu_to_be32_array(spec_mask->ip6src,
6141 rule->tuples_mask.src_ip, IPV6_SIZE);
6142
6143 if (rule->unused_tuple & BIT(INNER_DST_IP))
6144 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6145 else
6146 cpu_to_be32_array(spec_mask->ip6dst,
6147 rule->tuples_mask.dst_ip, IPV6_SIZE);
6148
6149 spec->l4_proto = rule->tuples.ip_proto;
6150 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6151 0 : rule->tuples_mask.ip_proto;
6152 }
6153
6154 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6155 struct ethhdr *spec,
6156 struct ethhdr *spec_mask)
6157 {
6158 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6159 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6160
6161 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6162 eth_zero_addr(spec_mask->h_source);
6163 else
6164 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6165
6166 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6167 eth_zero_addr(spec_mask->h_dest);
6168 else
6169 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6170
6171 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6172 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6173 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6174 }
6175
6176 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6177 struct hclge_fd_rule *rule)
6178 {
6179 if (fs->flow_type & FLOW_EXT) {
6180 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6181 fs->m_ext.vlan_tci =
6182 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6183 cpu_to_be16(VLAN_VID_MASK) :
6184 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6185 }
6186
6187 if (fs->flow_type & FLOW_MAC_EXT) {
6188 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6189 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6190 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6191 else
6192 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6193 rule->tuples_mask.dst_mac);
6194 }
6195 }
6196
6197 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6198 struct ethtool_rxnfc *cmd)
6199 {
6200 struct hclge_vport *vport = hclge_get_vport(handle);
6201 struct hclge_fd_rule *rule = NULL;
6202 struct hclge_dev *hdev = vport->back;
6203 struct ethtool_rx_flow_spec *fs;
6204 struct hlist_node *node2;
6205
6206 if (!hnae3_dev_fd_supported(hdev))
6207 return -EOPNOTSUPP;
6208
6209 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6210
6211 spin_lock_bh(&hdev->fd_rule_lock);
6212
6213 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6214 if (rule->location >= fs->location)
6215 break;
6216 }
6217
6218 if (!rule || fs->location != rule->location) {
6219 spin_unlock_bh(&hdev->fd_rule_lock);
6220
6221 return -ENOENT;
6222 }
6223
6224 fs->flow_type = rule->flow_type;
6225 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6226 case SCTP_V4_FLOW:
6227 case TCP_V4_FLOW:
6228 case UDP_V4_FLOW:
6229 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6230 &fs->m_u.tcp_ip4_spec);
6231 break;
6232 case IP_USER_FLOW:
6233 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6234 &fs->m_u.usr_ip4_spec);
6235 break;
6236 case SCTP_V6_FLOW:
6237 case TCP_V6_FLOW:
6238 case UDP_V6_FLOW:
6239 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6240 &fs->m_u.tcp_ip6_spec);
6241 break;
6242 case IPV6_USER_FLOW:
6243 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6244 &fs->m_u.usr_ip6_spec);
6245 break;
6246 /* The flow type of the fd rule has been checked before adding it to the
6247 * rule list. As other flow types have been handled, it must be ETHER_FLOW
6248 * for the default case
6249 */
6250 default:
6251 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6252 &fs->m_u.ether_spec);
6253 break;
6254 }
6255
6256 hclge_fd_get_ext_info(fs, rule);
6257
6258 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6259 fs->ring_cookie = RX_CLS_FLOW_DISC;
6260 } else {
6261 u64 vf_id;
6262
6263 fs->ring_cookie = rule->queue_id;
6264 vf_id = rule->vf_id;
6265 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6266 fs->ring_cookie |= vf_id;
6267 }
6268
6269 spin_unlock_bh(&hdev->fd_rule_lock);
6270
6271 return 0;
6272 }
6273
6274 static int hclge_get_all_rules(struct hnae3_handle *handle,
6275 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6276 {
6277 struct hclge_vport *vport = hclge_get_vport(handle);
6278 struct hclge_dev *hdev = vport->back;
6279 struct hclge_fd_rule *rule;
6280 struct hlist_node *node2;
6281 int cnt = 0;
6282
6283 if (!hnae3_dev_fd_supported(hdev))
6284 return -EOPNOTSUPP;
6285
6286 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6287
6288 spin_lock_bh(&hdev->fd_rule_lock);
6289 hlist_for_each_entry_safe(rule, node2,
6290 &hdev->fd_rule_list, rule_node) {
6291 if (cnt == cmd->rule_cnt) {
6292 spin_unlock_bh(&hdev->fd_rule_lock);
6293 return -EMSGSIZE;
6294 }
6295
6296 rule_locs[cnt] = rule->location;
6297 cnt++;
6298 }
6299
6300 spin_unlock_bh(&hdev->fd_rule_lock);
6301
6302 cmd->rule_cnt = cnt;
6303
6304 return 0;
6305 }
6306
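/* Extract the fd rule tuples (ether proto, L4 proto, dst port and IP
 * addresses) from the flow keys dissected from an aRFS packet.
 */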
6307 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6308 struct hclge_fd_rule_tuples *tuples)
6309 {
6310 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6311 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6312
6313 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6314 tuples->ip_proto = fkeys->basic.ip_proto;
6315 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6316
6317 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6318 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6319 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6320 } else {
6321 int i;
6322
6323 for (i = 0; i < IPV6_SIZE; i++) {
6324 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6325 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6326 }
6327 }
6328 }
6329
6330 /* traverse all rules, check whether an existing rule has the same tuples */
6331 static struct hclge_fd_rule *
6332 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6333 const struct hclge_fd_rule_tuples *tuples)
6334 {
6335 struct hclge_fd_rule *rule = NULL;
6336 struct hlist_node *node;
6337
6338 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6339 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6340 return rule;
6341 }
6342
6343 return NULL;
6344 }
6345
6346 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6347 struct hclge_fd_rule *rule)
6348 {
6349 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6350 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6351 BIT(INNER_SRC_PORT);
6352 rule->action = 0;
6353 rule->vf_id = 0;
6354 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6355 if (tuples->ether_proto == ETH_P_IP) {
6356 if (tuples->ip_proto == IPPROTO_TCP)
6357 rule->flow_type = TCP_V4_FLOW;
6358 else
6359 rule->flow_type = UDP_V4_FLOW;
6360 } else {
6361 if (tuples->ip_proto == IPPROTO_TCP)
6362 rule->flow_type = TCP_V6_FLOW;
6363 else
6364 rule->flow_type = UDP_V6_FLOW;
6365 }
6366 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6367 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6368 }
6369
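/* aRFS entry point: steer a dissected flow to the requested queue. If a rule
 * with the same tuples already exists, only its queue id is updated;
 * otherwise a new rule is allocated from the first free location in fd_bmap.
 * aRFS rules are not added while ethtool rules are active.
 */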
6370 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6371 u16 flow_id, struct flow_keys *fkeys)
6372 {
6373 struct hclge_vport *vport = hclge_get_vport(handle);
6374 struct hclge_fd_rule_tuples new_tuples = {};
6375 struct hclge_dev *hdev = vport->back;
6376 struct hclge_fd_rule *rule;
6377 u16 tmp_queue_id;
6378 u16 bit_id;
6379 int ret;
6380
6381 if (!hnae3_dev_fd_supported(hdev))
6382 return -EOPNOTSUPP;
6383
6384 /* when there is already an fd rule added by the user,
6385 * arfs should not work
6386 */
6387 spin_lock_bh(&hdev->fd_rule_lock);
6388 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6389 spin_unlock_bh(&hdev->fd_rule_lock);
6390 return -EOPNOTSUPP;
6391 }
6392
6393 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6394
6395 /* check whether a flow director filter already exists for this flow:
6396 * if not, create a new filter for it;
6397 * if a filter exists with a different queue id, modify the filter;
6398 * if a filter exists with the same queue id, do nothing
6399 */
6400 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6401 if (!rule) {
6402 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6403 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6404 spin_unlock_bh(&hdev->fd_rule_lock);
6405 return -ENOSPC;
6406 }
6407
6408 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6409 if (!rule) {
6410 spin_unlock_bh(&hdev->fd_rule_lock);
6411 return -ENOMEM;
6412 }
6413
6414 set_bit(bit_id, hdev->fd_bmap);
6415 rule->location = bit_id;
6416 rule->flow_id = flow_id;
6417 rule->queue_id = queue_id;
6418 hclge_fd_build_arfs_rule(&new_tuples, rule);
6419 ret = hclge_fd_config_rule(hdev, rule);
6420
6421 spin_unlock_bh(&hdev->fd_rule_lock);
6422
6423 if (ret)
6424 return ret;
6425
6426 return rule->location;
6427 }
6428
6429 spin_unlock_bh(&hdev->fd_rule_lock);
6430
6431 if (rule->queue_id == queue_id)
6432 return rule->location;
6433
6434 tmp_queue_id = rule->queue_id;
6435 rule->queue_id = queue_id;
6436 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6437 if (ret) {
6438 rule->queue_id = tmp_queue_id;
6439 return ret;
6440 }
6441
6442 return rule->location;
6443 }
6444
6445 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6446 {
6447 #ifdef CONFIG_RFS_ACCEL
6448 struct hnae3_handle *handle = &hdev->vport[0].nic;
6449 struct hclge_fd_rule *rule;
6450 struct hlist_node *node;
6451 HLIST_HEAD(del_list);
6452
6453 spin_lock_bh(&hdev->fd_rule_lock);
6454 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6455 spin_unlock_bh(&hdev->fd_rule_lock);
6456 return;
6457 }
6458 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6459 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6460 rule->flow_id, rule->location)) {
6461 hlist_del_init(&rule->rule_node);
6462 hlist_add_head(&rule->rule_node, &del_list);
6463 hdev->hclge_fd_rule_num--;
6464 clear_bit(rule->location, hdev->fd_bmap);
6465 }
6466 }
6467 spin_unlock_bh(&hdev->fd_rule_lock);
6468
6469 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6470 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6471 rule->location, NULL, false);
6472 kfree(rule);
6473 }
6474 #endif
6475 }
6476
6477 /* this function must be called while holding fd_rule_lock */
6478 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6479 {
6480 #ifdef CONFIG_RFS_ACCEL
6481 struct hclge_vport *vport = hclge_get_vport(handle);
6482 struct hclge_dev *hdev = vport->back;
6483
6484 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6485 hclge_del_all_fd_entries(handle, true);
6486 #endif
6487 }
6488
6489 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6490 {
6491 struct hclge_vport *vport = hclge_get_vport(handle);
6492 struct hclge_dev *hdev = vport->back;
6493
6494 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6495 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6496 }
6497
6498 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6499 {
6500 struct hclge_vport *vport = hclge_get_vport(handle);
6501 struct hclge_dev *hdev = vport->back;
6502
6503 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6504 }
6505
6506 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6507 {
6508 struct hclge_vport *vport = hclge_get_vport(handle);
6509 struct hclge_dev *hdev = vport->back;
6510
6511 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6512 }
6513
6514 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6515 {
6516 struct hclge_vport *vport = hclge_get_vport(handle);
6517 struct hclge_dev *hdev = vport->back;
6518
6519 return hdev->rst_stats.hw_reset_done_cnt;
6520 }
6521
6522 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6523 {
6524 struct hclge_vport *vport = hclge_get_vport(handle);
6525 struct hclge_dev *hdev = vport->back;
6526 bool clear;
6527
6528 hdev->fd_en = enable;
6529 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6530
6531 if (!enable) {
6532 spin_lock_bh(&hdev->fd_rule_lock);
6533 hclge_del_all_fd_entries(handle, clear);
6534 spin_unlock_bh(&hdev->fd_rule_lock);
6535 } else {
6536 hclge_restore_fd_entries(handle);
6537 }
6538 }
6539
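/* Enable or disable the MAC. When enabling, TX/RX, padding, FCS
 * insertion/stripping and oversize/undersize handling are all turned on in a
 * single HCLGE_OPC_CONFIG_MAC_MODE command.
 */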
6540 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6541 {
6542 struct hclge_desc desc;
6543 struct hclge_config_mac_mode_cmd *req =
6544 (struct hclge_config_mac_mode_cmd *)desc.data;
6545 u32 loop_en = 0;
6546 int ret;
6547
6548 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6549
6550 if (enable) {
6551 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6552 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6553 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6554 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6555 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6556 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6557 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6558 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6559 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6560 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6561 }
6562
6563 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6564
6565 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6566 if (ret)
6567 dev_err(&hdev->pdev->dev,
6568 "mac enable fail, ret =%d.\n", ret);
6569 }
6570
6571 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6572 u8 switch_param, u8 param_mask)
6573 {
6574 struct hclge_mac_vlan_switch_cmd *req;
6575 struct hclge_desc desc;
6576 u32 func_id;
6577 int ret;
6578
6579 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6580 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6581
6582 /* read current config parameter */
6583 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6584 true);
6585 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6586 req->func_id = cpu_to_le32(func_id);
6587
6588 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6589 if (ret) {
6590 dev_err(&hdev->pdev->dev,
6591 "read mac vlan switch parameter fail, ret = %d\n", ret);
6592 return ret;
6593 }
6594
6595 /* modify and write new config parameter */
6596 hclge_cmd_reuse_desc(&desc, false);
6597 req->switch_param = (req->switch_param & param_mask) | switch_param;
6598 req->param_mask = param_mask;
6599
6600 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6601 if (ret)
6602 dev_err(&hdev->pdev->dev,
6603 "set mac vlan switch parameter fail, ret = %d\n", ret);
6604 return ret;
6605 }
6606
6607 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6608 int link_ret)
6609 {
6610 #define HCLGE_PHY_LINK_STATUS_NUM 200
6611
6612 struct phy_device *phydev = hdev->hw.mac.phydev;
6613 int i = 0;
6614 int ret;
6615
6616 do {
6617 ret = phy_read_status(phydev);
6618 if (ret) {
6619 dev_err(&hdev->pdev->dev,
6620 "phy update link status fail, ret = %d\n", ret);
6621 return;
6622 }
6623
6624 if (phydev->link == link_ret)
6625 break;
6626
6627 msleep(HCLGE_LINK_STATUS_MS);
6628 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6629 }
6630
6631 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6632 {
6633 #define HCLGE_MAC_LINK_STATUS_NUM 100
6634
6635 int link_status;
6636 int i = 0;
6637 int ret;
6638
6639 do {
6640 ret = hclge_get_mac_link_status(hdev, &link_status);
6641 if (ret)
6642 return ret;
6643 if (link_status == link_ret)
6644 return 0;
6645
6646 msleep(HCLGE_LINK_STATUS_MS);
6647 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6648 return -EBUSY;
6649 }
6650
6651 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6652 bool is_phy)
6653 {
6654 int link_ret;
6655
6656 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6657
6658 if (is_phy)
6659 hclge_phy_link_status_wait(hdev, link_ret);
6660
6661 return hclge_mac_link_status_wait(hdev, link_ret);
6662 }
6663
6664 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6665 {
6666 struct hclge_config_mac_mode_cmd *req;
6667 struct hclge_desc desc;
6668 u32 loop_en;
6669 int ret;
6670
6671 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6672 /* 1 Read out the MAC mode config at first */
6673 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6674 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6675 if (ret) {
6676 dev_err(&hdev->pdev->dev,
6677 "mac loopback get fail, ret =%d.\n", ret);
6678 return ret;
6679 }
6680
6681 /* 2 Then setup the loopback flag */
6682 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6683 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6684
6685 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6686
6687 /* 3 Config mac work mode with loopback flag
6688 * and its original configuration parameters
6689 */
6690 hclge_cmd_reuse_desc(&desc, false);
6691 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6692 if (ret)
6693 dev_err(&hdev->pdev->dev,
6694 "mac loopback set fail, ret =%d.\n", ret);
6695 return ret;
6696 }
6697
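/* Configure serial or parallel serdes inner loopback through a firmware
 * command, then poll the command result until the firmware reports
 * completion or the retry limit is reached.
 */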
6698 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6699 enum hnae3_loop loop_mode)
6700 {
6701 #define HCLGE_SERDES_RETRY_MS 10
6702 #define HCLGE_SERDES_RETRY_NUM 100
6703
6704 struct hclge_serdes_lb_cmd *req;
6705 struct hclge_desc desc;
6706 int ret, i = 0;
6707 u8 loop_mode_b;
6708
6709 req = (struct hclge_serdes_lb_cmd *)desc.data;
6710 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6711
6712 switch (loop_mode) {
6713 case HNAE3_LOOP_SERIAL_SERDES:
6714 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6715 break;
6716 case HNAE3_LOOP_PARALLEL_SERDES:
6717 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6718 break;
6719 default:
6720 dev_err(&hdev->pdev->dev,
6721 "unsupported serdes loopback mode %d\n", loop_mode);
6722 return -ENOTSUPP;
6723 }
6724
6725 if (en) {
6726 req->enable = loop_mode_b;
6727 req->mask = loop_mode_b;
6728 } else {
6729 req->mask = loop_mode_b;
6730 }
6731
6732 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6733 if (ret) {
6734 dev_err(&hdev->pdev->dev,
6735 "serdes loopback set fail, ret = %d\n", ret);
6736 return ret;
6737 }
6738
6739 do {
6740 msleep(HCLGE_SERDES_RETRY_MS);
6741 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6742 true);
6743 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6744 if (ret) {
6745 dev_err(&hdev->pdev->dev,
6746 "serdes loopback get, ret = %d\n", ret);
6747 return ret;
6748 }
6749 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6750 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6751
6752 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6753 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6754 return -EBUSY;
6755 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6756 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6757 return -EIO;
6758 }
6759 return ret;
6760 }
6761
6762 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6763 enum hnae3_loop loop_mode)
6764 {
6765 int ret;
6766
6767 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6768 if (ret)
6769 return ret;
6770
6771 hclge_cfg_mac_mode(hdev, en);
6772
6773 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6774 if (ret)
6775 dev_err(&hdev->pdev->dev,
6776 "serdes loopback config mac mode timeout\n");
6777
6778 return ret;
6779 }
6780
6781 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6782 struct phy_device *phydev)
6783 {
6784 int ret;
6785
6786 if (!phydev->suspended) {
6787 ret = phy_suspend(phydev);
6788 if (ret)
6789 return ret;
6790 }
6791
6792 ret = phy_resume(phydev);
6793 if (ret)
6794 return ret;
6795
6796 return phy_loopback(phydev, true);
6797 }
6798
6799 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6800 struct phy_device *phydev)
6801 {
6802 int ret;
6803
6804 ret = phy_loopback(phydev, false);
6805 if (ret)
6806 return ret;
6807
6808 return phy_suspend(phydev);
6809 }
6810
6811 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6812 {
6813 struct phy_device *phydev = hdev->hw.mac.phydev;
6814 int ret;
6815
6816 if (!phydev)
6817 return -ENOTSUPP;
6818
6819 if (en)
6820 ret = hclge_enable_phy_loopback(hdev, phydev);
6821 else
6822 ret = hclge_disable_phy_loopback(hdev, phydev);
6823 if (ret) {
6824 dev_err(&hdev->pdev->dev,
6825 "set phy loopback fail, ret = %d\n", ret);
6826 return ret;
6827 }
6828
6829 hclge_cfg_mac_mode(hdev, en);
6830
6831 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6832 if (ret)
6833 dev_err(&hdev->pdev->dev,
6834 "phy loopback config mac mode timeout\n");
6835
6836 return ret;
6837 }
6838
6839 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6840 int stream_id, bool enable)
6841 {
6842 struct hclge_desc desc;
6843 struct hclge_cfg_com_tqp_queue_cmd *req =
6844 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6845 int ret;
6846
6847 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6848 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6849 req->stream_id = cpu_to_le16(stream_id);
6850 if (enable)
6851 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6852
6853 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6854 if (ret)
6855 dev_err(&hdev->pdev->dev,
6856 "Tqp enable fail, status =%d.\n", ret);
6857 return ret;
6858 }
6859
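/* ethtool loopback entry: optionally bypass SSU loopback on
 * HNAE3_DEVICE_VERSION_V2 and later, configure the requested loopback mode
 * (app/MAC, serdes or PHY), then enable or disable all TQPs of the vport.
 */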
6860 static int hclge_set_loopback(struct hnae3_handle *handle,
6861 enum hnae3_loop loop_mode, bool en)
6862 {
6863 struct hclge_vport *vport = hclge_get_vport(handle);
6864 struct hnae3_knic_private_info *kinfo;
6865 struct hclge_dev *hdev = vport->back;
6866 int i, ret;
6867
6868 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6869 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6870 * the same, the packets are looped back in the SSU. If SSU loopback
6871 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6872 */
6873 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6874 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6875
6876 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6877 HCLGE_SWITCH_ALW_LPBK_MASK);
6878 if (ret)
6879 return ret;
6880 }
6881
6882 switch (loop_mode) {
6883 case HNAE3_LOOP_APP:
6884 ret = hclge_set_app_loopback(hdev, en);
6885 break;
6886 case HNAE3_LOOP_SERIAL_SERDES:
6887 case HNAE3_LOOP_PARALLEL_SERDES:
6888 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6889 break;
6890 case HNAE3_LOOP_PHY:
6891 ret = hclge_set_phy_loopback(hdev, en);
6892 break;
6893 default:
6894 ret = -ENOTSUPP;
6895 dev_err(&hdev->pdev->dev,
6896 "loop_mode %d is not supported\n", loop_mode);
6897 break;
6898 }
6899
6900 if (ret)
6901 return ret;
6902
6903 kinfo = &vport->nic.kinfo;
6904 for (i = 0; i < kinfo->num_tqps; i++) {
6905 ret = hclge_tqp_enable(hdev, i, 0, en);
6906 if (ret)
6907 return ret;
6908 }
6909
6910 return 0;
6911 }
6912
6913 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6914 {
6915 int ret;
6916
6917 ret = hclge_set_app_loopback(hdev, false);
6918 if (ret)
6919 return ret;
6920
6921 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6922 if (ret)
6923 return ret;
6924
6925 return hclge_cfg_serdes_loopback(hdev, false,
6926 HNAE3_LOOP_PARALLEL_SERDES);
6927 }
6928
6929 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6930 {
6931 struct hclge_vport *vport = hclge_get_vport(handle);
6932 struct hnae3_knic_private_info *kinfo;
6933 struct hnae3_queue *queue;
6934 struct hclge_tqp *tqp;
6935 int i;
6936
6937 kinfo = &vport->nic.kinfo;
6938 for (i = 0; i < kinfo->num_tqps; i++) {
6939 queue = handle->kinfo.tqp[i];
6940 tqp = container_of(queue, struct hclge_tqp, q);
6941 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6942 }
6943 }
6944
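/* Busy-wait (up to HCLGE_FLUSH_LINK_TIMEOUT iterations) while
 * HCLGE_STATE_LINK_UPDATING is set and the service task has not completed
 * another pass, so that an in-flight link update is flushed out before the
 * caller proceeds.
 */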
6945 static void hclge_flush_link_update(struct hclge_dev *hdev)
6946 {
6947 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6948
6949 unsigned long last = hdev->serv_processed_cnt;
6950 int i = 0;
6951
6952 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6953 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6954 last == hdev->serv_processed_cnt)
6955 usleep_range(1, 1);
6956 }
6957
6958 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6959 {
6960 struct hclge_vport *vport = hclge_get_vport(handle);
6961 struct hclge_dev *hdev = vport->back;
6962
6963 if (enable) {
6964 hclge_task_schedule(hdev, 0);
6965 } else {
6966 /* Set the DOWN flag here to disable link updating */
6967 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6968
6969 /* flush memory to make sure DOWN is seen by service task */
6970 smp_mb__before_atomic();
6971 hclge_flush_link_update(hdev);
6972 }
6973 }
6974
6975 static int hclge_ae_start(struct hnae3_handle *handle)
6976 {
6977 struct hclge_vport *vport = hclge_get_vport(handle);
6978 struct hclge_dev *hdev = vport->back;
6979
6980 /* mac enable */
6981 hclge_cfg_mac_mode(hdev, true);
6982 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6983 hdev->hw.mac.link = 0;
6984
6985 /* reset tqp stats */
6986 hclge_reset_tqp_stats(handle);
6987
6988 hclge_mac_start_phy(hdev);
6989
6990 return 0;
6991 }
6992
6993 static void hclge_ae_stop(struct hnae3_handle *handle)
6994 {
6995 struct hclge_vport *vport = hclge_get_vport(handle);
6996 struct hclge_dev *hdev = vport->back;
6997 int i;
6998
6999 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7000 spin_lock_bh(&hdev->fd_rule_lock);
7001 hclge_clear_arfs_rules(handle);
7002 spin_unlock_bh(&hdev->fd_rule_lock);
7003
7004 /* If it is not a PF reset, the firmware will disable the MAC,
7005 * so we only need to stop the PHY here.
7006 */
7007 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7008 hdev->reset_type != HNAE3_FUNC_RESET) {
7009 hclge_mac_stop_phy(hdev);
7010 hclge_update_link_status(hdev);
7011 return;
7012 }
7013
7014 for (i = 0; i < handle->kinfo.num_tqps; i++)
7015 hclge_reset_tqp(handle, i);
7016
7017 hclge_config_mac_tnl_int(hdev, false);
7018
7019 /* Mac disable */
7020 hclge_cfg_mac_mode(hdev, false);
7021
7022 hclge_mac_stop_phy(hdev);
7023
7024 /* reset tqp stats */
7025 hclge_reset_tqp_stats(handle);
7026 hclge_update_link_status(hdev);
7027 }
7028
7029 int hclge_vport_start(struct hclge_vport *vport)
7030 {
7031 struct hclge_dev *hdev = vport->back;
7032
7033 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7034 vport->last_active_jiffies = jiffies;
7035
7036 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7037 if (vport->vport_id) {
7038 hclge_restore_mac_table_common(vport);
7039 hclge_restore_vport_vlan_table(vport);
7040 } else {
7041 hclge_restore_hw_table(hdev);
7042 }
7043 }
7044
7045 clear_bit(vport->vport_id, hdev->vport_config_block);
7046
7047 return 0;
7048 }
7049
7050 void hclge_vport_stop(struct hclge_vport *vport)
7051 {
7052 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7053 }
7054
7055 static int hclge_client_start(struct hnae3_handle *handle)
7056 {
7057 struct hclge_vport *vport = hclge_get_vport(handle);
7058
7059 return hclge_vport_start(vport);
7060 }
7061
7062 static void hclge_client_stop(struct hnae3_handle *handle)
7063 {
7064 struct hclge_vport *vport = hclge_get_vport(handle);
7065
7066 hclge_vport_stop(vport);
7067 }
7068
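/* Translate the firmware's mac_vlan table response into an errno: a
 * resp_code of 0 (and, for the add opcode, 1) means success, an entry
 * miss on remove/lookup maps to -ENOENT, a table overflow on add maps
 * to -ENOSPC, and anything else is reported as -EIO.
 */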
7069 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7070 u16 cmdq_resp, u8 resp_code,
7071 enum hclge_mac_vlan_tbl_opcode op)
7072 {
7073 struct hclge_dev *hdev = vport->back;
7074
7075 if (cmdq_resp) {
7076 dev_err(&hdev->pdev->dev,
7077 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7078 cmdq_resp);
7079 return -EIO;
7080 }
7081
7082 if (op == HCLGE_MAC_VLAN_ADD) {
7083 if (!resp_code || resp_code == 1)
7084 return 0;
7085 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7086 resp_code == HCLGE_ADD_MC_OVERFLOW)
7087 return -ENOSPC;
7088
7089 dev_err(&hdev->pdev->dev,
7090 "add mac addr failed for undefined, code=%u.\n",
7091 resp_code);
7092 return -EIO;
7093 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7094 if (!resp_code) {
7095 return 0;
7096 } else if (resp_code == 1) {
7097 dev_dbg(&hdev->pdev->dev,
7098 "remove mac addr failed for miss.\n");
7099 return -ENOENT;
7100 }
7101
7102 dev_err(&hdev->pdev->dev,
7103 "remove mac addr failed for undefined, code=%u.\n",
7104 resp_code);
7105 return -EIO;
7106 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7107 if (!resp_code) {
7108 return 0;
7109 } else if (resp_code == 1) {
7110 dev_dbg(&hdev->pdev->dev,
7111 "lookup mac addr failed for miss.\n");
7112 return -ENOENT;
7113 }
7114
7115 dev_err(&hdev->pdev->dev,
7116 "lookup mac addr failed for undefined, code=%u.\n",
7117 resp_code);
7118 return -EIO;
7119 }
7120
7121 dev_err(&hdev->pdev->dev,
7122 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7123
7124 return -EINVAL;
7125 }
7126
7127 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7128 {
7129 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7130
7131 unsigned int word_num;
7132 unsigned int bit_num;
7133
7134 if (vfid > 255 || vfid < 0)
7135 return -EIO;
7136
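/* The function-id bitmap spans desc[1] and desc[2]: the first 192 ids
 * map onto the 32-bit words of desc[1].data, the rest onto desc[2].data.
 * For example, vfid 200 lands in desc[2].data[0], bit 8.
 */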
7137 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7138 word_num = vfid / 32;
7139 bit_num = vfid % 32;
7140 if (clr)
7141 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7142 else
7143 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7144 } else {
7145 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7146 bit_num = vfid % 32;
7147 if (clr)
7148 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7149 else
7150 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7151 }
7152
7153 return 0;
7154 }
7155
7156 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7157 {
7158 #define HCLGE_DESC_NUMBER 3
7159 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7160 int i, j;
7161
7162 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7163 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7164 if (desc[i].data[j])
7165 return false;
7166
7167 return true;
7168 }
7169
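/* Pack a 6-byte MAC address into the mac_vlan table entry layout: bytes
 * 0-3 form mac_addr_hi32 (byte 0 in the least significant bits) and
 * bytes 4-5 form mac_addr_lo16, both stored as little-endian fields.
 */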
7170 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7171 const u8 *addr, bool is_mc)
7172 {
7173 const unsigned char *mac_addr = addr;
7174 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7175 (mac_addr[0]) | (mac_addr[1] << 8);
7176 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7177
7178 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7179 if (is_mc) {
7180 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7181 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7182 }
7183
7184 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7185 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7186 }
7187
7188 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7189 struct hclge_mac_vlan_tbl_entry_cmd *req)
7190 {
7191 struct hclge_dev *hdev = vport->back;
7192 struct hclge_desc desc;
7193 u8 resp_code;
7194 u16 retval;
7195 int ret;
7196
7197 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7198
7199 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7200
7201 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7202 if (ret) {
7203 dev_err(&hdev->pdev->dev,
7204 "del mac addr failed for cmd_send, ret =%d.\n",
7205 ret);
7206 return ret;
7207 }
7208 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7209 retval = le16_to_cpu(desc.retval);
7210
7211 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7212 HCLGE_MAC_VLAN_REMOVE);
7213 }
7214
7215 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7216 struct hclge_mac_vlan_tbl_entry_cmd *req,
7217 struct hclge_desc *desc,
7218 bool is_mc)
7219 {
7220 struct hclge_dev *hdev = vport->back;
7221 u8 resp_code;
7222 u16 retval;
7223 int ret;
7224
7225 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7226 if (is_mc) {
7227 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7228 memcpy(desc[0].data,
7229 req,
7230 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7231 hclge_cmd_setup_basic_desc(&desc[1],
7232 HCLGE_OPC_MAC_VLAN_ADD,
7233 true);
7234 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7235 hclge_cmd_setup_basic_desc(&desc[2],
7236 HCLGE_OPC_MAC_VLAN_ADD,
7237 true);
7238 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7239 } else {
7240 memcpy(desc[0].data,
7241 req,
7242 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7243 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7244 }
7245 if (ret) {
7246 dev_err(&hdev->pdev->dev,
7247 "lookup mac addr failed for cmd_send, ret =%d.\n",
7248 ret);
7249 return ret;
7250 }
7251 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7252 retval = le16_to_cpu(desc[0].retval);
7253
7254 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7255 HCLGE_MAC_VLAN_LKUP);
7256 }
7257
7258 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7259 struct hclge_mac_vlan_tbl_entry_cmd *req,
7260 struct hclge_desc *mc_desc)
7261 {
7262 struct hclge_dev *hdev = vport->back;
7263 int cfg_status;
7264 u8 resp_code;
7265 u16 retval;
7266 int ret;
7267
7268 if (!mc_desc) {
7269 struct hclge_desc desc;
7270
7271 hclge_cmd_setup_basic_desc(&desc,
7272 HCLGE_OPC_MAC_VLAN_ADD,
7273 false);
7274 memcpy(desc.data, req,
7275 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7276 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7277 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7278 retval = le16_to_cpu(desc.retval);
7279
7280 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7281 resp_code,
7282 HCLGE_MAC_VLAN_ADD);
7283 } else {
7284 hclge_cmd_reuse_desc(&mc_desc[0], false);
7285 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7286 hclge_cmd_reuse_desc(&mc_desc[1], false);
7287 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7288 hclge_cmd_reuse_desc(&mc_desc[2], false);
7289 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7290 memcpy(mc_desc[0].data, req,
7291 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7292 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7293 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7294 retval = le16_to_cpu(mc_desc[0].retval);
7295
7296 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7297 resp_code,
7298 HCLGE_MAC_VLAN_ADD);
7299 }
7300
7301 if (ret) {
7302 dev_err(&hdev->pdev->dev,
7303 "add mac addr failed for cmd_send, ret =%d.\n",
7304 ret);
7305 return ret;
7306 }
7307
7308 return cfg_status;
7309 }
7310
7311 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7312 u16 *allocated_size)
7313 {
7314 struct hclge_umv_spc_alc_cmd *req;
7315 struct hclge_desc desc;
7316 int ret;
7317
7318 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7319 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7320
7321 req->space_size = cpu_to_le32(space_size);
7322
7323 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7324 if (ret) {
7325 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7326 ret);
7327 return ret;
7328 }
7329
7330 *allocated_size = le32_to_cpu(desc.data[1]);
7331
7332 return 0;
7333 }
7334
7335 static int hclge_init_umv_space(struct hclge_dev *hdev)
7336 {
7337 u16 allocated_size = 0;
7338 int ret;
7339
7340 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7341 if (ret)
7342 return ret;
7343
7344 if (allocated_size < hdev->wanted_umv_size)
7345 dev_warn(&hdev->pdev->dev,
7346 "failed to alloc umv space, want %u, get %u\n",
7347 hdev->wanted_umv_size, allocated_size);
7348
7349 hdev->max_umv_size = allocated_size;
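/* The allocated UMV space is split into (num_alloc_vport + 1) equal
 * parts: each vport keeps one part as its private quota, and the extra
 * part plus the division remainder forms the shared pool. As a purely
 * illustrative example, 256 entries with 7 allocated vports would give
 * a private quota of 32 and a shared pool of 32.
 */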
7350 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7351 hdev->share_umv_size = hdev->priv_umv_size +
7352 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7353
7354 return 0;
7355 }
7356
7357 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7358 {
7359 struct hclge_vport *vport;
7360 int i;
7361
7362 for (i = 0; i < hdev->num_alloc_vport; i++) {
7363 vport = &hdev->vport[i];
7364 vport->used_umv_num = 0;
7365 }
7366
7367 mutex_lock(&hdev->vport_lock);
7368 hdev->share_umv_size = hdev->priv_umv_size +
7369 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7370 mutex_unlock(&hdev->vport_lock);
7371 }
7372
7373 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7374 {
7375 struct hclge_dev *hdev = vport->back;
7376 bool is_full;
7377
7378 if (need_lock)
7379 mutex_lock(&hdev->vport_lock);
7380
7381 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7382 hdev->share_umv_size == 0);
7383
7384 if (need_lock)
7385 mutex_unlock(&hdev->vport_lock);
7386
7387 return is_full;
7388 }
7389
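/* Account one unicast MAC entry against the vport's private UMV quota
 * first; only once the private quota is exhausted is an entry taken
 * from (or, on free, returned to) the shared pool.
 */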
7390 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7391 {
7392 struct hclge_dev *hdev = vport->back;
7393
7394 if (is_free) {
7395 if (vport->used_umv_num > hdev->priv_umv_size)
7396 hdev->share_umv_size++;
7397
7398 if (vport->used_umv_num > 0)
7399 vport->used_umv_num--;
7400 } else {
7401 if (vport->used_umv_num >= hdev->priv_umv_size &&
7402 hdev->share_umv_size > 0)
7403 hdev->share_umv_size--;
7404 vport->used_umv_num++;
7405 }
7406 }
7407
7408 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7409 const u8 *mac_addr)
7410 {
7411 struct hclge_mac_node *mac_node, *tmp;
7412
7413 list_for_each_entry_safe(mac_node, tmp, list, node)
7414 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7415 return mac_node;
7416
7417 return NULL;
7418 }
7419
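/* Reconcile an existing mac node with a newly requested state. The
 * resulting transitions, as implemented below, are:
 *   TO_ADD + TO_DEL request -> node freed (never written to hardware)
 *   TO_DEL + TO_ADD request -> ACTIVE (still present in hardware)
 *   ACTIVE + TO_DEL request -> TO_DEL
 *   TO_ADD + ACTIVE (from tmp_add_list) -> ACTIVE
 * all other combinations leave the node unchanged.
 */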
7420 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7421 enum HCLGE_MAC_NODE_STATE state)
7422 {
7423 switch (state) {
7424 /* from set_rx_mode or tmp_add_list */
7425 case HCLGE_MAC_TO_ADD:
7426 if (mac_node->state == HCLGE_MAC_TO_DEL)
7427 mac_node->state = HCLGE_MAC_ACTIVE;
7428 break;
7429 /* only from set_rx_mode */
7430 case HCLGE_MAC_TO_DEL:
7431 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7432 list_del(&mac_node->node);
7433 kfree(mac_node);
7434 } else {
7435 mac_node->state = HCLGE_MAC_TO_DEL;
7436 }
7437 break;
7438 /* only from tmp_add_list; in this case the mac_node->state
7439 * won't be ACTIVE.
7440 */
7441 case HCLGE_MAC_ACTIVE:
7442 if (mac_node->state == HCLGE_MAC_TO_ADD)
7443 mac_node->state = HCLGE_MAC_ACTIVE;
7444
7445 break;
7446 }
7447 }
7448
7449 int hclge_update_mac_list(struct hclge_vport *vport,
7450 enum HCLGE_MAC_NODE_STATE state,
7451 enum HCLGE_MAC_ADDR_TYPE mac_type,
7452 const unsigned char *addr)
7453 {
7454 struct hclge_dev *hdev = vport->back;
7455 struct hclge_mac_node *mac_node;
7456 struct list_head *list;
7457
7458 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7459 &vport->uc_mac_list : &vport->mc_mac_list;
7460
7461 spin_lock_bh(&vport->mac_list_lock);
7462
7463 /* if the mac addr is already in the mac list, no need to add a new
7464 * one into it; just update the mac addr state: convert it to a new
7465 * state, remove it, or do nothing.
7466 */
7467 mac_node = hclge_find_mac_node(list, addr);
7468 if (mac_node) {
7469 hclge_update_mac_node(mac_node, state);
7470 spin_unlock_bh(&vport->mac_list_lock);
7471 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7472 return 0;
7473 }
7474
7475 /* if this address was never added, there is no need to delete it */
7476 if (state == HCLGE_MAC_TO_DEL) {
7477 spin_unlock_bh(&vport->mac_list_lock);
7478 dev_err(&hdev->pdev->dev,
7479 "failed to delete address %pM from mac list\n",
7480 addr);
7481 return -ENOENT;
7482 }
7483
7484 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7485 if (!mac_node) {
7486 spin_unlock_bh(&vport->mac_list_lock);
7487 return -ENOMEM;
7488 }
7489
7490 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7491
7492 mac_node->state = state;
7493 ether_addr_copy(mac_node->mac_addr, addr);
7494 list_add_tail(&mac_node->node, list);
7495
7496 spin_unlock_bh(&vport->mac_list_lock);
7497
7498 return 0;
7499 }
7500
7501 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7502 const unsigned char *addr)
7503 {
7504 struct hclge_vport *vport = hclge_get_vport(handle);
7505
7506 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7507 addr);
7508 }
7509
7510 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7511 const unsigned char *addr)
7512 {
7513 struct hclge_dev *hdev = vport->back;
7514 struct hclge_mac_vlan_tbl_entry_cmd req;
7515 struct hclge_desc desc;
7516 u16 egress_port = 0;
7517 int ret;
7518
7519 /* mac addr check */
7520 if (is_zero_ether_addr(addr) ||
7521 is_broadcast_ether_addr(addr) ||
7522 is_multicast_ether_addr(addr)) {
7523 dev_err(&hdev->pdev->dev,
7524 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7525 addr, is_zero_ether_addr(addr),
7526 is_broadcast_ether_addr(addr),
7527 is_multicast_ether_addr(addr));
7528 return -EINVAL;
7529 }
7530
7531 memset(&req, 0, sizeof(req));
7532
7533 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7534 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7535
7536 req.egress_port = cpu_to_le16(egress_port);
7537
7538 hclge_prepare_mac_addr(&req, addr, false);
7539
7540 /* Lookup the mac address in the mac_vlan table, and add
7541 * it if the entry does not exist. Duplicate unicast entries
7542 * are not allowed in the mac vlan table.
7543 */
7544 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7545 if (ret == -ENOENT) {
7546 mutex_lock(&hdev->vport_lock);
7547 if (!hclge_is_umv_space_full(vport, false)) {
7548 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7549 if (!ret)
7550 hclge_update_umv_space(vport, false);
7551 mutex_unlock(&hdev->vport_lock);
7552 return ret;
7553 }
7554 mutex_unlock(&hdev->vport_lock);
7555
7556 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7557 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7558 hdev->priv_umv_size);
7559
7560 return -ENOSPC;
7561 }
7562
7563 /* check if we just hit a duplicate entry */
7564 if (!ret) {
7565 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7566 vport->vport_id, addr);
7567 return 0;
7568 }
7569
7570 dev_err(&hdev->pdev->dev,
7571 "PF failed to add unicast entry(%pM) in the MAC table\n",
7572 addr);
7573
7574 return ret;
7575 }
7576
7577 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7578 const unsigned char *addr)
7579 {
7580 struct hclge_vport *vport = hclge_get_vport(handle);
7581
7582 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7583 addr);
7584 }
7585
7586 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7587 const unsigned char *addr)
7588 {
7589 struct hclge_dev *hdev = vport->back;
7590 struct hclge_mac_vlan_tbl_entry_cmd req;
7591 int ret;
7592
7593 /* mac addr check */
7594 if (is_zero_ether_addr(addr) ||
7595 is_broadcast_ether_addr(addr) ||
7596 is_multicast_ether_addr(addr)) {
7597 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7598 addr);
7599 return -EINVAL;
7600 }
7601
7602 memset(&req, 0, sizeof(req));
7603 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7604 hclge_prepare_mac_addr(&req, addr, false);
7605 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7606 if (!ret) {
7607 mutex_lock(&hdev->vport_lock);
7608 hclge_update_umv_space(vport, true);
7609 mutex_unlock(&hdev->vport_lock);
7610 } else if (ret == -ENOENT) {
7611 ret = 0;
7612 }
7613
7614 return ret;
7615 }
7616
7617 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7618 const unsigned char *addr)
7619 {
7620 struct hclge_vport *vport = hclge_get_vport(handle);
7621
7622 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7623 addr);
7624 }
7625
7626 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7627 const unsigned char *addr)
7628 {
7629 struct hclge_dev *hdev = vport->back;
7630 struct hclge_mac_vlan_tbl_entry_cmd req;
7631 struct hclge_desc desc[3];
7632 int status;
7633
7634 /* mac addr check */
7635 if (!is_multicast_ether_addr(addr)) {
7636 dev_err(&hdev->pdev->dev,
7637 "Add mc mac err! invalid mac:%pM.\n",
7638 addr);
7639 return -EINVAL;
7640 }
7641 memset(&req, 0, sizeof(req));
7642 hclge_prepare_mac_addr(&req, addr, true);
7643 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7644 if (status) {
7645 /* This mac addr does not exist, add a new entry for it */
7646 memset(desc[0].data, 0, sizeof(desc[0].data));
7647 memset(desc[1].data, 0, sizeof(desc[0].data));
7648 memset(desc[2].data, 0, sizeof(desc[0].data));
7649 }
7650 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7651 if (status)
7652 return status;
7653 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7654
7655 /* if the table has already overflowed, do not print the error each time */
7656 if (status == -ENOSPC &&
7657 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7658 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7659
7660 return status;
7661 }
7662
7663 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7664 const unsigned char *addr)
7665 {
7666 struct hclge_vport *vport = hclge_get_vport(handle);
7667
7668 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7669 addr);
7670 }
7671
7672 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7673 const unsigned char *addr)
7674 {
7675 struct hclge_dev *hdev = vport->back;
7676 struct hclge_mac_vlan_tbl_entry_cmd req;
7677 enum hclge_cmd_status status;
7678 struct hclge_desc desc[3];
7679
7680 /* mac addr check */
7681 if (!is_multicast_ether_addr(addr)) {
7682 dev_dbg(&hdev->pdev->dev,
7683 "Remove mc mac err! invalid mac:%pM.\n",
7684 addr);
7685 return -EINVAL;
7686 }
7687
7688 memset(&req, 0, sizeof(req));
7689 hclge_prepare_mac_addr(&req, addr, true);
7690 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7691 if (!status) {
7692 /* This mac addr exists, remove this handle's VFID from it */
7693 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7694 if (status)
7695 return status;
7696
7697 if (hclge_is_all_function_id_zero(desc))
7698 /* All the vfids are zero, so this entry needs to be deleted */
7699 status = hclge_remove_mac_vlan_tbl(vport, &req);
7700 else
7701 /* Not all the vfids are zero, just update the vfid bitmap */
7702 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7703
7704 } else if (status == -ENOENT) {
7705 status = 0;
7706 }
7707
7708 return status;
7709 }
7710
7711 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7712 struct list_head *list,
7713 int (*sync)(struct hclge_vport *,
7714 const unsigned char *))
7715 {
7716 struct hclge_mac_node *mac_node, *tmp;
7717 int ret;
7718
7719 list_for_each_entry_safe(mac_node, tmp, list, node) {
7720 ret = sync(vport, mac_node->mac_addr);
7721 if (!ret) {
7722 mac_node->state = HCLGE_MAC_ACTIVE;
7723 } else {
7724 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7725 &vport->state);
7726 break;
7727 }
7728 }
7729 }
7730
7731 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7732 struct list_head *list,
7733 int (*unsync)(struct hclge_vport *,
7734 const unsigned char *))
7735 {
7736 struct hclge_mac_node *mac_node, *tmp;
7737 int ret;
7738
7739 list_for_each_entry_safe(mac_node, tmp, list, node) {
7740 ret = unsync(vport, mac_node->mac_addr);
7741 if (!ret || ret == -ENOENT) {
7742 list_del(&mac_node->node);
7743 kfree(mac_node);
7744 } else {
7745 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7746 &vport->state);
7747 break;
7748 }
7749 }
7750 }
7751
7752 static bool hclge_sync_from_add_list(struct list_head *add_list,
7753 struct list_head *mac_list)
7754 {
7755 struct hclge_mac_node *mac_node, *tmp, *new_node;
7756 bool all_added = true;
7757
7758 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7759 if (mac_node->state == HCLGE_MAC_TO_ADD)
7760 all_added = false;
7761
7762 /* if the mac address from tmp_add_list is not in the
7763 * uc/mc_mac_list, it means a TO_DEL request was received
7764 * during the time window of adding the mac address into the
7765 * mac table. If the mac_node state is ACTIVE, change it to
7766 * TO_DEL so it will be removed next time. Otherwise it must be
7767 * TO_ADD, meaning this address hasn't been added into the mac
7768 * table yet, so just remove the mac node.
7769 */
7770 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7771 if (new_node) {
7772 hclge_update_mac_node(new_node, mac_node->state);
7773 list_del(&mac_node->node);
7774 kfree(mac_node);
7775 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7776 mac_node->state = HCLGE_MAC_TO_DEL;
7777 list_del(&mac_node->node);
7778 list_add_tail(&mac_node->node, mac_list);
7779 } else {
7780 list_del(&mac_node->node);
7781 kfree(mac_node);
7782 }
7783 }
7784
7785 return all_added;
7786 }
7787
7788 static void hclge_sync_from_del_list(struct list_head *del_list,
7789 struct list_head *mac_list)
7790 {
7791 struct hclge_mac_node *mac_node, *tmp, *new_node;
7792
7793 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7794 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7795 if (new_node) {
7796 /* If the mac addr exists in the mac list, it means a
7797 * new TO_ADD request was received during the time window
7798 * of configuring the mac address. The mac node state is
7799 * TO_ADD and the address is already in the hardware
7800 * (because the delete failed), so we just need to change
7801 * the mac node state to ACTIVE.
7802 */
7803 new_node->state = HCLGE_MAC_ACTIVE;
7804 list_del(&mac_node->node);
7805 kfree(mac_node);
7806 } else {
7807 list_del(&mac_node->node);
7808 list_add_tail(&mac_node->node, mac_list);
7809 }
7810 }
7811 }
7812
7813 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7814 enum HCLGE_MAC_ADDR_TYPE mac_type,
7815 bool is_all_added)
7816 {
7817 if (mac_type == HCLGE_MAC_ADDR_UC) {
7818 if (is_all_added)
7819 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7820 else
7821 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7822 } else {
7823 if (is_all_added)
7824 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7825 else
7826 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7827 }
7828 }
7829
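/* Sync one vport's unicast or multicast mac list to hardware:
 *   1) under mac_list_lock, move TO_DEL nodes to a temporary delete
 *      list and copy TO_ADD nodes to a temporary add list;
 *   2) outside the lock, remove the delete list from hardware first
 *      (freeing table space), then program the add list;
 *   3) re-acquire the lock and merge both temporary lists back, so any
 *      entries that failed are retried on the next service-task run.
 */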
7830 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7831 enum HCLGE_MAC_ADDR_TYPE mac_type)
7832 {
7833 struct hclge_mac_node *mac_node, *tmp, *new_node;
7834 struct list_head tmp_add_list, tmp_del_list;
7835 struct list_head *list;
7836 bool all_added;
7837
7838 INIT_LIST_HEAD(&tmp_add_list);
7839 INIT_LIST_HEAD(&tmp_del_list);
7840
7841 /* move the mac addrs to the tmp_add_list and tmp_del_list, then
7842 * we can add/delete these mac addrs outside the spin lock
7843 */
7844 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7845 &vport->uc_mac_list : &vport->mc_mac_list;
7846
7847 spin_lock_bh(&vport->mac_list_lock);
7848
7849 list_for_each_entry_safe(mac_node, tmp, list, node) {
7850 switch (mac_node->state) {
7851 case HCLGE_MAC_TO_DEL:
7852 list_del(&mac_node->node);
7853 list_add_tail(&mac_node->node, &tmp_del_list);
7854 break;
7855 case HCLGE_MAC_TO_ADD:
7856 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7857 if (!new_node)
7858 goto stop_traverse;
7859 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7860 new_node->state = mac_node->state;
7861 list_add_tail(&new_node->node, &tmp_add_list);
7862 break;
7863 default:
7864 break;
7865 }
7866 }
7867
7868 stop_traverse:
7869 spin_unlock_bh(&vport->mac_list_lock);
7870
7871 /* delete first, in order to get max mac table space for adding */
7872 if (mac_type == HCLGE_MAC_ADDR_UC) {
7873 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7874 hclge_rm_uc_addr_common);
7875 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7876 hclge_add_uc_addr_common);
7877 } else {
7878 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7879 hclge_rm_mc_addr_common);
7880 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7881 hclge_add_mc_addr_common);
7882 }
7883
7884 /* if adding/deleting some mac addresses failed, move them back to
7885 * the mac_list and retry on the next run.
7886 */
7887 spin_lock_bh(&vport->mac_list_lock);
7888
7889 hclge_sync_from_del_list(&tmp_del_list, list);
7890 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7891
7892 spin_unlock_bh(&vport->mac_list_lock);
7893
7894 hclge_update_overflow_flags(vport, mac_type, all_added);
7895 }
7896
7897 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7898 {
7899 struct hclge_dev *hdev = vport->back;
7900
7901 if (test_bit(vport->vport_id, hdev->vport_config_block))
7902 return false;
7903
7904 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7905 return true;
7906
7907 return false;
7908 }
7909
7910 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7911 {
7912 int i;
7913
7914 for (i = 0; i < hdev->num_alloc_vport; i++) {
7915 struct hclge_vport *vport = &hdev->vport[i];
7916
7917 if (!hclge_need_sync_mac_table(vport))
7918 continue;
7919
7920 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7921 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7922 }
7923 }
7924
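/* Remove a vport's mac addresses from hardware. When is_del_list is
 * set the list entries are dropped as well; otherwise (e.g. across a
 * VF reset) ACTIVE entries are kept in the list as TO_ADD and the
 * vport is marked in vport_config_block so they can be restored later.
 */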
7925 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7926 enum HCLGE_MAC_ADDR_TYPE mac_type)
7927 {
7928 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7929 struct hclge_mac_node *mac_cfg, *tmp;
7930 struct hclge_dev *hdev = vport->back;
7931 struct list_head tmp_del_list, *list;
7932 int ret;
7933
7934 if (mac_type == HCLGE_MAC_ADDR_UC) {
7935 list = &vport->uc_mac_list;
7936 unsync = hclge_rm_uc_addr_common;
7937 } else {
7938 list = &vport->mc_mac_list;
7939 unsync = hclge_rm_mc_addr_common;
7940 }
7941
7942 INIT_LIST_HEAD(&tmp_del_list);
7943
7944 if (!is_del_list)
7945 set_bit(vport->vport_id, hdev->vport_config_block);
7946
7947 spin_lock_bh(&vport->mac_list_lock);
7948
7949 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7950 switch (mac_cfg->state) {
7951 case HCLGE_MAC_TO_DEL:
7952 case HCLGE_MAC_ACTIVE:
7953 list_del(&mac_cfg->node);
7954 list_add_tail(&mac_cfg->node, &tmp_del_list);
7955 break;
7956 case HCLGE_MAC_TO_ADD:
7957 if (is_del_list) {
7958 list_del(&mac_cfg->node);
7959 kfree(mac_cfg);
7960 }
7961 break;
7962 }
7963 }
7964
7965 spin_unlock_bh(&vport->mac_list_lock);
7966
7967 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7968 ret = unsync(vport, mac_cfg->mac_addr);
7969 if (!ret || ret == -ENOENT) {
7970 /* clear all mac addrs from the hardware, but keep them in
7971 * the mac list, and restore them after the vf reset has
7972 * finished.
7973 */
7974 if (!is_del_list &&
7975 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7976 mac_cfg->state = HCLGE_MAC_TO_ADD;
7977 } else {
7978 list_del(&mac_cfg->node);
7979 kfree(mac_cfg);
7980 }
7981 } else if (is_del_list) {
7982 mac_cfg->state = HCLGE_MAC_TO_DEL;
7983 }
7984 }
7985
7986 spin_lock_bh(&vport->mac_list_lock);
7987
7988 hclge_sync_from_del_list(&tmp_del_list, list);
7989
7990 spin_unlock_bh(&vport->mac_list_lock);
7991 }
7992
7993 /* remove all mac addresses when uninitializing */
7994 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7995 enum HCLGE_MAC_ADDR_TYPE mac_type)
7996 {
7997 struct hclge_mac_node *mac_node, *tmp;
7998 struct hclge_dev *hdev = vport->back;
7999 struct list_head tmp_del_list, *list;
8000
8001 INIT_LIST_HEAD(&tmp_del_list);
8002
8003 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8004 &vport->uc_mac_list : &vport->mc_mac_list;
8005
8006 spin_lock_bh(&vport->mac_list_lock);
8007
8008 list_for_each_entry_safe(mac_node, tmp, list, node) {
8009 switch (mac_node->state) {
8010 case HCLGE_MAC_TO_DEL:
8011 case HCLGE_MAC_ACTIVE:
8012 list_del(&mac_node->node);
8013 list_add_tail(&mac_node->node, &tmp_del_list);
8014 break;
8015 case HCLGE_MAC_TO_ADD:
8016 list_del(&mac_node->node);
8017 kfree(mac_node);
8018 break;
8019 }
8020 }
8021
8022 spin_unlock_bh(&vport->mac_list_lock);
8023
8024 if (mac_type == HCLGE_MAC_ADDR_UC)
8025 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8026 hclge_rm_uc_addr_common);
8027 else
8028 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8029 hclge_rm_mc_addr_common);
8030
8031 if (!list_empty(&tmp_del_list))
8032 dev_warn(&hdev->pdev->dev,
8033 "uninit %s mac list for vport %u not completely.\n",
8034 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8035 vport->vport_id);
8036
8037 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8038 list_del(&mac_node->node);
8039 kfree(mac_node);
8040 }
8041 }
8042
8043 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8044 {
8045 struct hclge_vport *vport;
8046 int i;
8047
8048 for (i = 0; i < hdev->num_alloc_vport; i++) {
8049 vport = &hdev->vport[i];
8050 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8051 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8052 }
8053 }
8054
8055 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8056 u16 cmdq_resp, u8 resp_code)
8057 {
8058 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8059 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
8060 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8061 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8062
8063 int return_status;
8064
8065 if (cmdq_resp) {
8066 dev_err(&hdev->pdev->dev,
8067 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8068 cmdq_resp);
8069 return -EIO;
8070 }
8071
8072 switch (resp_code) {
8073 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8074 case HCLGE_ETHERTYPE_ALREADY_ADD:
8075 return_status = 0;
8076 break;
8077 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8078 dev_err(&hdev->pdev->dev,
8079 "add mac ethertype failed for manager table overflow.\n");
8080 return_status = -EIO;
8081 break;
8082 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8083 dev_err(&hdev->pdev->dev,
8084 "add mac ethertype failed for key conflict.\n");
8085 return_status = -EIO;
8086 break;
8087 default:
8088 dev_err(&hdev->pdev->dev,
8089 "add mac ethertype failed for undefined, code=%u.\n",
8090 resp_code);
8091 return_status = -EIO;
8092 }
8093
8094 return return_status;
8095 }
8096
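/* A candidate VF MAC is treated as already existing if it is present in
 * the hardware mac_vlan table or has been assigned to another VF in
 * vf_info.
 */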
8097 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8098 u8 *mac_addr)
8099 {
8100 struct hclge_mac_vlan_tbl_entry_cmd req;
8101 struct hclge_dev *hdev = vport->back;
8102 struct hclge_desc desc;
8103 u16 egress_port = 0;
8104 int i;
8105
8106 if (is_zero_ether_addr(mac_addr))
8107 return false;
8108
8109 memset(&req, 0, sizeof(req));
8110 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8111 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8112 req.egress_port = cpu_to_le16(egress_port);
8113 hclge_prepare_mac_addr(&req, mac_addr, false);
8114
8115 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8116 return true;
8117
8118 vf_idx += HCLGE_VF_VPORT_START_NUM;
8119 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8120 if (i != vf_idx &&
8121 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8122 return true;
8123
8124 return false;
8125 }
8126
8127 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8128 u8 *mac_addr)
8129 {
8130 struct hclge_vport *vport = hclge_get_vport(handle);
8131 struct hclge_dev *hdev = vport->back;
8132
8133 vport = hclge_get_vf_vport(hdev, vf);
8134 if (!vport)
8135 return -EINVAL;
8136
8137 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8138 dev_info(&hdev->pdev->dev,
8139 "Specified MAC(=%pM) is same as before, no change committed!\n",
8140 mac_addr);
8141 return 0;
8142 }
8143
8144 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8145 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8146 mac_addr);
8147 return -EEXIST;
8148 }
8149
8150 ether_addr_copy(vport->vf_info.mac, mac_addr);
8151
8152 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8153 dev_info(&hdev->pdev->dev,
8154 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8155 vf, mac_addr);
8156 return hclge_inform_reset_assert_to_vf(vport);
8157 }
8158
8159 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8160 vf, mac_addr);
8161 return 0;
8162 }
8163
8164 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8165 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8166 {
8167 struct hclge_desc desc;
8168 u8 resp_code;
8169 u16 retval;
8170 int ret;
8171
8172 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8173 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8174
8175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8176 if (ret) {
8177 dev_err(&hdev->pdev->dev,
8178 "add mac ethertype failed for cmd_send, ret =%d.\n",
8179 ret);
8180 return ret;
8181 }
8182
8183 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8184 retval = le16_to_cpu(desc.retval);
8185
8186 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8187 }
8188
8189 static int init_mgr_tbl(struct hclge_dev *hdev)
8190 {
8191 int ret;
8192 int i;
8193
8194 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8195 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8196 if (ret) {
8197 dev_err(&hdev->pdev->dev,
8198 "add mac ethertype failed, ret =%d.\n",
8199 ret);
8200 return ret;
8201 }
8202 }
8203
8204 return 0;
8205 }
8206
8207 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8208 {
8209 struct hclge_vport *vport = hclge_get_vport(handle);
8210 struct hclge_dev *hdev = vport->back;
8211
8212 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8213 }
8214
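/* Update the unicast mac list when the device address changes: queue the
 * new address for adding (kept at the list head so it cannot be starved
 * out of the UMV space) and, if the old address differs, queue the old
 * one for removal.
 */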
8215 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8216 const u8 *old_addr, const u8 *new_addr)
8217 {
8218 struct list_head *list = &vport->uc_mac_list;
8219 struct hclge_mac_node *old_node, *new_node;
8220
8221 new_node = hclge_find_mac_node(list, new_addr);
8222 if (!new_node) {
8223 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8224 if (!new_node)
8225 return -ENOMEM;
8226
8227 new_node->state = HCLGE_MAC_TO_ADD;
8228 ether_addr_copy(new_node->mac_addr, new_addr);
8229 list_add(&new_node->node, list);
8230 } else {
8231 if (new_node->state == HCLGE_MAC_TO_DEL)
8232 new_node->state = HCLGE_MAC_ACTIVE;
8233
8234 /* make sure the new addr is at the list head, so the dev addr is
8235 * not left out of the mac table due to the umv space limitation
8236 * after a global/IMP reset, which clears the mac table in
8237 * hardware.
8238 */
8239 list_move(&new_node->node, list);
8240 }
8241
8242 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8243 old_node = hclge_find_mac_node(list, old_addr);
8244 if (old_node) {
8245 if (old_node->state == HCLGE_MAC_TO_ADD) {
8246 list_del(&old_node->node);
8247 kfree(old_node);
8248 } else {
8249 old_node->state = HCLGE_MAC_TO_DEL;
8250 }
8251 }
8252 }
8253
8254 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8255
8256 return 0;
8257 }
8258
8259 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8260 bool is_first)
8261 {
8262 const unsigned char *new_addr = (const unsigned char *)p;
8263 struct hclge_vport *vport = hclge_get_vport(handle);
8264 struct hclge_dev *hdev = vport->back;
8265 unsigned char *old_addr = NULL;
8266 int ret;
8267
8268 /* mac addr check */
8269 if (is_zero_ether_addr(new_addr) ||
8270 is_broadcast_ether_addr(new_addr) ||
8271 is_multicast_ether_addr(new_addr)) {
8272 dev_err(&hdev->pdev->dev,
8273 "change uc mac err! invalid mac: %pM.\n",
8274 new_addr);
8275 return -EINVAL;
8276 }
8277
8278 ret = hclge_pause_addr_cfg(hdev, new_addr);
8279 if (ret) {
8280 dev_err(&hdev->pdev->dev,
8281 "failed to configure mac pause address, ret = %d\n",
8282 ret);
8283 return ret;
8284 }
8285
8286 if (!is_first)
8287 old_addr = hdev->hw.mac.mac_addr;
8288
8289 spin_lock_bh(&vport->mac_list_lock);
8290 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8291 if (ret) {
8292 dev_err(&hdev->pdev->dev,
8293 "failed to change the mac addr:%pM, ret = %d\n",
8294 new_addr, ret);
8295 spin_unlock_bh(&vport->mac_list_lock);
8296
8297 if (!is_first)
8298 hclge_pause_addr_cfg(hdev, old_addr);
8299
8300 return ret;
8301 }
8302 /* we must update the dev addr under the spin lock, to prevent the dev
8303 * addr from being removed by the set_rx_mode path.
8304 */
8305 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8306 spin_unlock_bh(&vport->mac_list_lock);
8307
8308 hclge_task_schedule(hdev, 0);
8309
8310 return 0;
8311 }
8312
8313 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8314 int cmd)
8315 {
8316 struct hclge_vport *vport = hclge_get_vport(handle);
8317 struct hclge_dev *hdev = vport->back;
8318
8319 if (!hdev->hw.mac.phydev)
8320 return -EOPNOTSUPP;
8321
8322 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8323 }
8324
8325 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8326 u8 fe_type, bool filter_en, u8 vf_id)
8327 {
8328 struct hclge_vlan_filter_ctrl_cmd *req;
8329 struct hclge_desc desc;
8330 int ret;
8331
8332 /* read current vlan filter parameter */
8333 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8334 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8335 req->vlan_type = vlan_type;
8336 req->vf_id = vf_id;
8337
8338 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8339 if (ret) {
8340 dev_err(&hdev->pdev->dev,
8341 "failed to get vlan filter config, ret = %d.\n", ret);
8342 return ret;
8343 }
8344
8345 /* modify and write new config parameter */
8346 hclge_cmd_reuse_desc(&desc, false);
8347 req->vlan_fe = filter_en ?
8348 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8349
8350 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8351 if (ret)
8352 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8353 ret);
8354
8355 return ret;
8356 }
8357
8358 #define HCLGE_FILTER_TYPE_VF 0
8359 #define HCLGE_FILTER_TYPE_PORT 1
8360 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8361 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8362 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8363 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8364 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8365 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8366 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8367 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8368 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8369
8370 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8371 {
8372 struct hclge_vport *vport = hclge_get_vport(handle);
8373 struct hclge_dev *hdev = vport->back;
8374
8375 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8376 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8377 HCLGE_FILTER_FE_EGRESS, enable, 0);
8378 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8379 HCLGE_FILTER_FE_INGRESS, enable, 0);
8380 } else {
8381 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8382 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8383 0);
8384 }
8385 if (enable)
8386 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8387 else
8388 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8389 }
8390
8391 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8392 bool is_kill, u16 vlan,
8393 __be16 proto)
8394 {
8395 struct hclge_vport *vport = &hdev->vport[vfid];
8396 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8397 struct hclge_vlan_filter_vf_cfg_cmd *req1;
8398 struct hclge_desc desc[2];
8399 u8 vf_byte_val;
8400 u8 vf_byte_off;
8401 int ret;
8402
8403 /* if the vf vlan table is full, the firmware will disable the vf vlan
8404 * filter, so it is neither possible nor necessary to add a new vlan id.
8405 * If spoof check is enabled and the vf vlan table is full, don't add a
8406 * new vlan, because tx packets with this vlan id would be dropped.
8407 */
8408 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8409 if (vport->vf_info.spoofchk && vlan) {
8410 dev_err(&hdev->pdev->dev,
8411 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8412 return -EPERM;
8413 }
8414 return 0;
8415 }
8416
8417 hclge_cmd_setup_basic_desc(&desc[0],
8418 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8419 hclge_cmd_setup_basic_desc(&desc[1],
8420 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8421
8422 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8423
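/* Each function id occupies one bit of the vf bitmap, eight functions
 * per byte; byte offsets beyond HCLGE_MAX_VF_BYTES spill into the
 * second descriptor. For example, vfid 10 sets bit 2 of byte 1 in
 * desc[0].
 */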
8424 vf_byte_off = vfid / 8;
8425 vf_byte_val = 1 << (vfid % 8);
8426
8427 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8428 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8429
8430 req0->vlan_id = cpu_to_le16(vlan);
8431 req0->vlan_cfg = is_kill;
8432
8433 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8434 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8435 else
8436 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8437
8438 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8439 if (ret) {
8440 dev_err(&hdev->pdev->dev,
8441 "Send vf vlan command fail, ret =%d.\n",
8442 ret);
8443 return ret;
8444 }
8445
8446 if (!is_kill) {
8447 #define HCLGE_VF_VLAN_NO_ENTRY 2
8448 if (!req0->resp_code || req0->resp_code == 1)
8449 return 0;
8450
8451 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8452 set_bit(vfid, hdev->vf_vlan_full);
8453 dev_warn(&hdev->pdev->dev,
8454 "vf vlan table is full, vf vlan filter is disabled\n");
8455 return 0;
8456 }
8457
8458 dev_err(&hdev->pdev->dev,
8459 "Add vf vlan filter fail, ret =%u.\n",
8460 req0->resp_code);
8461 } else {
8462 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
8463 if (!req0->resp_code)
8464 return 0;
8465
8466 /* the vf vlan filter is disabled when the vf vlan table is
8467 * full, so new vlan ids are not added into the vf vlan table.
8468 * Just return 0 without a warning to avoid flooding the logs
8469 * on unload.
8470 */
8471 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8472 return 0;
8473
8474 dev_err(&hdev->pdev->dev,
8475 "Kill vf vlan filter fail, ret =%u.\n",
8476 req0->resp_code);
8477 }
8478
8479 return -EIO;
8480 }
8481
8482 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8483 u16 vlan_id, bool is_kill)
8484 {
8485 struct hclge_vlan_filter_pf_cfg_cmd *req;
8486 struct hclge_desc desc;
8487 u8 vlan_offset_byte_val;
8488 u8 vlan_offset_byte;
8489 u8 vlan_offset_160;
8490 int ret;
8491
8492 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8493
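/* The port vlan table is addressed as a bitmap: vlan_offset selects a
 * block of HCLGE_VLAN_ID_OFFSET_STEP vlan ids and the vlan's bit is set
 * within that block's byte array. Assuming a step of 160 and 8 bits per
 * byte, vlan 200 would map to block 1, byte 5, bit 0.
 */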
8494 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8495 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8496 HCLGE_VLAN_BYTE_SIZE;
8497 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8498
8499 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8500 req->vlan_offset = vlan_offset_160;
8501 req->vlan_cfg = is_kill;
8502 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8503
8504 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8505 if (ret)
8506 dev_err(&hdev->pdev->dev,
8507 "port vlan command, send fail, ret =%d.\n", ret);
8508 return ret;
8509 }
8510
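/* Program a vlan id for one vport. The per-vport vf vlan filter is
 * always updated, while the shared port vlan filter is only touched
 * when this is the first vport to join the vlan (add) or the last one
 * to leave it (kill), as tracked by the hdev->vlan_table bitmaps.
 */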
8511 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8512 u16 vport_id, u16 vlan_id,
8513 bool is_kill)
8514 {
8515 u16 vport_idx, vport_num = 0;
8516 int ret;
8517
8518 if (is_kill && !vlan_id)
8519 return 0;
8520
8521 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8522 proto);
8523 if (ret) {
8524 dev_err(&hdev->pdev->dev,
8525 "Set %u vport vlan filter config fail, ret =%d.\n",
8526 vport_id, ret);
8527 return ret;
8528 }
8529
8530 /* vlan 0 may be added twice when 8021q module is enabled */
8531 if (!is_kill && !vlan_id &&
8532 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8533 return 0;
8534
8535 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8536 dev_err(&hdev->pdev->dev,
8537 "Add port vlan failed, vport %u is already in vlan %u\n",
8538 vport_id, vlan_id);
8539 return -EINVAL;
8540 }
8541
8542 if (is_kill &&
8543 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8544 dev_err(&hdev->pdev->dev,
8545 "Delete port vlan failed, vport %u is not in vlan %u\n",
8546 vport_id, vlan_id);
8547 return -EINVAL;
8548 }
8549
8550 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8551 vport_num++;
8552
8553 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8554 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8555 is_kill);
8556
8557 return ret;
8558 }
8559
8560 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8561 {
8562 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8563 struct hclge_vport_vtag_tx_cfg_cmd *req;
8564 struct hclge_dev *hdev = vport->back;
8565 struct hclge_desc desc;
8566 u16 bmap_index;
8567 int status;
8568
8569 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8570
8571 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8572 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8573 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8574 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8575 vcfg->accept_tag1 ? 1 : 0);
8576 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8577 vcfg->accept_untag1 ? 1 : 0);
8578 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8579 vcfg->accept_tag2 ? 1 : 0);
8580 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8581 vcfg->accept_untag2 ? 1 : 0);
8582 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8583 vcfg->insert_tag1_en ? 1 : 0);
8584 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8585 vcfg->insert_tag2_en ? 1 : 0);
8586 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8587
8588 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8589 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8590 HCLGE_VF_NUM_PER_BYTE;
8591 req->vf_bitmap[bmap_index] =
8592 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8593
8594 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8595 if (status)
8596 dev_err(&hdev->pdev->dev,
8597 "Send port txvlan cfg command fail, ret =%d\n",
8598 status);
8599
8600 return status;
8601 }
8602
8603 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8604 {
8605 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8606 struct hclge_vport_vtag_rx_cfg_cmd *req;
8607 struct hclge_dev *hdev = vport->back;
8608 struct hclge_desc desc;
8609 u16 bmap_index;
8610 int status;
8611
8612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8613
8614 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8615 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8616 vcfg->strip_tag1_en ? 1 : 0);
8617 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8618 vcfg->strip_tag2_en ? 1 : 0);
8619 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8620 vcfg->vlan1_vlan_prionly ? 1 : 0);
8621 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8622 vcfg->vlan2_vlan_prionly ? 1 : 0);
8623
8624 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8625 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8626 HCLGE_VF_NUM_PER_BYTE;
8627 req->vf_bitmap[bmap_index] =
8628 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8629
8630 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8631 if (status)
8632 dev_err(&hdev->pdev->dev,
8633 "Send port rxvlan cfg command fail, ret =%d\n",
8634 status);
8635
8636 return status;
8637 }
8638
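/* Configure TX/RX vlan tag offload for a vport based on its port-based
 * vlan state. With port-based vlan enabled, tag1 carries the port vlan:
 * it is inserted on TX (tagged frames are not accepted), its RX
 * stripping follows rx_vlan_offload_en, and tag2 is always stripped.
 * With it disabled, TX accepts tag1 as-is and tag2 stripping follows
 * rx_vlan_offload_en.
 */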
8639 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8640 u16 port_base_vlan_state,
8641 u16 vlan_tag)
8642 {
8643 int ret;
8644
8645 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8646 vport->txvlan_cfg.accept_tag1 = true;
8647 vport->txvlan_cfg.insert_tag1_en = false;
8648 vport->txvlan_cfg.default_tag1 = 0;
8649 } else {
8650 vport->txvlan_cfg.accept_tag1 = false;
8651 vport->txvlan_cfg.insert_tag1_en = true;
8652 vport->txvlan_cfg.default_tag1 = vlan_tag;
8653 }
8654
8655 vport->txvlan_cfg.accept_untag1 = true;
8656
8657 /* accept_tag2 and accept_untag2 are not supported on
8658 * pdev revision 0x20; newer revisions support them, but
8659 * these two fields cannot be configured by the user.
8660 */
8661 vport->txvlan_cfg.accept_tag2 = true;
8662 vport->txvlan_cfg.accept_untag2 = true;
8663 vport->txvlan_cfg.insert_tag2_en = false;
8664 vport->txvlan_cfg.default_tag2 = 0;
8665
8666 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8667 vport->rxvlan_cfg.strip_tag1_en = false;
8668 vport->rxvlan_cfg.strip_tag2_en =
8669 vport->rxvlan_cfg.rx_vlan_offload_en;
8670 } else {
8671 vport->rxvlan_cfg.strip_tag1_en =
8672 vport->rxvlan_cfg.rx_vlan_offload_en;
8673 vport->rxvlan_cfg.strip_tag2_en = true;
8674 }
8675 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8676 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8677
8678 ret = hclge_set_vlan_tx_offload_cfg(vport);
8679 if (ret)
8680 return ret;
8681
8682 return hclge_set_vlan_rx_offload_cfg(vport);
8683 }
8684
8685 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8686 {
8687 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8688 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8689 struct hclge_desc desc;
8690 int status;
8691
8692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8693 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8694 rx_req->ot_fst_vlan_type =
8695 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8696 rx_req->ot_sec_vlan_type =
8697 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8698 rx_req->in_fst_vlan_type =
8699 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8700 rx_req->in_sec_vlan_type =
8701 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8702
8703 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8704 if (status) {
8705 dev_err(&hdev->pdev->dev,
8706 "Send rxvlan protocol type command fail, ret =%d\n",
8707 status);
8708 return status;
8709 }
8710
8711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8712
8713 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8714 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8715 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8716
8717 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8718 if (status)
8719 dev_err(&hdev->pdev->dev,
8720 "Send txvlan protocol type command fail, ret =%d\n",
8721 status);
8722
8723 return status;
8724 }
8725
8726 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8727 {
8728 #define HCLGE_DEF_VLAN_TYPE 0x8100
8729
8730 struct hnae3_handle *handle = &hdev->vport[0].nic;
8731 struct hclge_vport *vport;
8732 int ret;
8733 int i;
8734
8735 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8736 /* for revision 0x21, vf vlan filter is per function */
8737 for (i = 0; i < hdev->num_alloc_vport; i++) {
8738 vport = &hdev->vport[i];
8739 ret = hclge_set_vlan_filter_ctrl(hdev,
8740 HCLGE_FILTER_TYPE_VF,
8741 HCLGE_FILTER_FE_EGRESS,
8742 true,
8743 vport->vport_id);
8744 if (ret)
8745 return ret;
8746 }
8747
8748 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8749 HCLGE_FILTER_FE_INGRESS, true,
8750 0);
8751 if (ret)
8752 return ret;
8753 } else {
8754 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8755 HCLGE_FILTER_FE_EGRESS_V1_B,
8756 true, 0);
8757 if (ret)
8758 return ret;
8759 }
8760
8761 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8762
8763 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8764 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8765 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8766 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8767 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8768 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8769
8770 ret = hclge_set_vlan_protocol_type(hdev);
8771 if (ret)
8772 return ret;
8773
8774 for (i = 0; i < hdev->num_alloc_vport; i++) {
8775 u16 vlan_tag;
8776
8777 vport = &hdev->vport[i];
8778 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8779
8780 ret = hclge_vlan_offload_cfg(vport,
8781 vport->port_base_vlan_cfg.state,
8782 vlan_tag);
8783 if (ret)
8784 return ret;
8785 }
8786
8787 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8788 }
8789
8790 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8791 bool writen_to_tbl)
8792 {
8793 struct hclge_vport_vlan_cfg *vlan;
8794
8795 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8796 if (!vlan)
8797 return;
8798
8799 vlan->hd_tbl_status = writen_to_tbl;
8800 vlan->vlan_id = vlan_id;
8801
8802 list_add_tail(&vlan->node, &vport->vlan_list);
8803 }
8804
8805 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8806 {
8807 struct hclge_vport_vlan_cfg *vlan, *tmp;
8808 struct hclge_dev *hdev = vport->back;
8809 int ret;
8810
8811 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8812 if (!vlan->hd_tbl_status) {
8813 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8814 vport->vport_id,
8815 vlan->vlan_id, false);
8816 if (ret) {
8817 dev_err(&hdev->pdev->dev,
8818 "restore vport vlan list failed, ret=%d\n",
8819 ret);
8820 return ret;
8821 }
8822 }
8823 vlan->hd_tbl_status = true;
8824 }
8825
8826 return 0;
8827 }
8828
8829 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8830 bool is_write_tbl)
8831 {
8832 struct hclge_vport_vlan_cfg *vlan, *tmp;
8833 struct hclge_dev *hdev = vport->back;
8834
8835 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8836 if (vlan->vlan_id == vlan_id) {
8837 if (is_write_tbl && vlan->hd_tbl_status)
8838 hclge_set_vlan_filter_hw(hdev,
8839 htons(ETH_P_8021Q),
8840 vport->vport_id,
8841 vlan_id,
8842 true);
8843
8844 list_del(&vlan->node);
8845 kfree(vlan);
8846 break;
8847 }
8848 }
8849 }
8850
8851 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8852 {
8853 struct hclge_vport_vlan_cfg *vlan, *tmp;
8854 struct hclge_dev *hdev = vport->back;
8855
8856 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8857 if (vlan->hd_tbl_status)
8858 hclge_set_vlan_filter_hw(hdev,
8859 htons(ETH_P_8021Q),
8860 vport->vport_id,
8861 vlan->vlan_id,
8862 true);
8863
8864 vlan->hd_tbl_status = false;
8865 if (is_del_list) {
8866 list_del(&vlan->node);
8867 kfree(vlan);
8868 }
8869 }
8870 clear_bit(vport->vport_id, hdev->vf_vlan_full);
8871 }
8872
8873 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8874 {
8875 struct hclge_vport_vlan_cfg *vlan, *tmp;
8876 struct hclge_vport *vport;
8877 int i;
8878
8879 for (i = 0; i < hdev->num_alloc_vport; i++) {
8880 vport = &hdev->vport[i];
8881 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8882 list_del(&vlan->node);
8883 kfree(vlan);
8884 }
8885 }
8886 }
8887
8888 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8889 {
8890 struct hclge_vport_vlan_cfg *vlan, *tmp;
8891 struct hclge_dev *hdev = vport->back;
8892 u16 vlan_proto;
8893 u16 vlan_id;
8894 u16 state;
8895 int ret;
8896
8897 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8898 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8899 state = vport->port_base_vlan_cfg.state;
8900
8901 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8902 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8903 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8904 vport->vport_id, vlan_id,
8905 false);
8906 return;
8907 }
8908
8909 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8910 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8911 vport->vport_id,
8912 vlan->vlan_id, false);
8913 if (ret)
8914 break;
8915 vlan->hd_tbl_status = true;
8916 }
8917 }
8918
8919 /* For global reset and imp reset, hardware will clear the mac table,
8920 * so we change the mac address state from ACTIVE to TO_ADD, then they
8921  * can be restored in the service task after the reset completes. Furthermore,
8922  * mac addresses in the TO_DEL or DEL_FAIL state do not need to be
8923  * restored after reset, so just remove these mac nodes from mac_list.
8924 */
8925 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8926 {
8927 struct hclge_mac_node *mac_node, *tmp;
8928
8929 list_for_each_entry_safe(mac_node, tmp, list, node) {
8930 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8931 mac_node->state = HCLGE_MAC_TO_ADD;
8932 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8933 list_del(&mac_node->node);
8934 kfree(mac_node);
8935 }
8936 }
8937 }
8938
8939 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8940 {
8941 spin_lock_bh(&vport->mac_list_lock);
8942
8943 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8944 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8945 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8946
8947 spin_unlock_bh(&vport->mac_list_lock);
8948 }
8949
8950 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8951 {
8952 struct hclge_vport *vport = &hdev->vport[0];
8953 struct hnae3_handle *handle = &vport->nic;
8954
8955 hclge_restore_mac_table_common(vport);
8956 hclge_restore_vport_vlan_table(vport);
8957 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8958
8959 hclge_restore_fd_entries(handle);
8960 }
8961
8962 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8963 {
8964 struct hclge_vport *vport = hclge_get_vport(handle);
8965
8966 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8967 vport->rxvlan_cfg.strip_tag1_en = false;
8968 vport->rxvlan_cfg.strip_tag2_en = enable;
8969 } else {
8970 vport->rxvlan_cfg.strip_tag1_en = enable;
8971 vport->rxvlan_cfg.strip_tag2_en = true;
8972 }
8973 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8974 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8975 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8976
8977 return hclge_set_vlan_rx_offload_cfg(vport);
8978 }
8979
8980 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8981 u16 port_base_vlan_state,
8982 struct hclge_vlan_info *new_info,
8983 struct hclge_vlan_info *old_info)
8984 {
8985 struct hclge_dev *hdev = vport->back;
8986 int ret;
8987
8988 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8989 hclge_rm_vport_all_vlan_table(vport, false);
8990 return hclge_set_vlan_filter_hw(hdev,
8991 htons(new_info->vlan_proto),
8992 vport->vport_id,
8993 new_info->vlan_tag,
8994 false);
8995 }
8996
8997 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8998 vport->vport_id, old_info->vlan_tag,
8999 true);
9000 if (ret)
9001 return ret;
9002
9003 return hclge_add_vport_all_vlan_table(vport);
9004 }
9005
9006 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9007 struct hclge_vlan_info *vlan_info)
9008 {
9009 struct hnae3_handle *nic = &vport->nic;
9010 struct hclge_vlan_info *old_vlan_info;
9011 struct hclge_dev *hdev = vport->back;
9012 int ret;
9013
9014 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9015
9016 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9017 if (ret)
9018 return ret;
9019
9020 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9021 /* add new VLAN tag */
9022 ret = hclge_set_vlan_filter_hw(hdev,
9023 htons(vlan_info->vlan_proto),
9024 vport->vport_id,
9025 vlan_info->vlan_tag,
9026 false);
9027 if (ret)
9028 return ret;
9029
9030 /* remove old VLAN tag */
9031 ret = hclge_set_vlan_filter_hw(hdev,
9032 htons(old_vlan_info->vlan_proto),
9033 vport->vport_id,
9034 old_vlan_info->vlan_tag,
9035 true);
9036 if (ret)
9037 return ret;
9038
9039 goto update;
9040 }
9041
9042 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9043 old_vlan_info);
9044 if (ret)
9045 return ret;
9046
9047 	/* update state only when disabling/enabling port based VLAN */
9048 vport->port_base_vlan_cfg.state = state;
9049 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9050 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9051 else
9052 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9053
9054 update:
9055 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9056 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9057 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9058
9059 return 0;
9060 }
9061
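/* Map the current port based VLAN state and the requested vlan id to the
 * required transition:
 *   state DISABLE, vlan == 0           -> NOCHANGE
 *   state DISABLE, vlan != 0           -> ENABLE
 *   state ENABLE,  vlan == 0           -> DISABLE
 *   state ENABLE,  vlan == current tag -> NOCHANGE
 *   state ENABLE,  vlan != current tag -> MODIFY
 */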
9062 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9063 enum hnae3_port_base_vlan_state state,
9064 u16 vlan)
9065 {
9066 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9067 if (!vlan)
9068 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9069 else
9070 return HNAE3_PORT_BASE_VLAN_ENABLE;
9071 } else {
9072 if (!vlan)
9073 return HNAE3_PORT_BASE_VLAN_DISABLE;
9074 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9075 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9076 else
9077 return HNAE3_PORT_BASE_VLAN_MODIFY;
9078 }
9079 }
9080
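/* PF-side handler for configuring a VF's port based VLAN (typically reached
 * via "ip link set <pf> vf <n> vlan <id> qos <qos>"; the exact caller is an
 * assumption, not visible in this function). It validates the vlan id, qos
 * and protocol, works out whether the request enables, disables, modifies or
 * leaves the port based VLAN unchanged, and then either applies the change
 * directly (VF not alive) or pushes it to the VF through the mailbox.
 */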
9081 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9082 u16 vlan, u8 qos, __be16 proto)
9083 {
9084 struct hclge_vport *vport = hclge_get_vport(handle);
9085 struct hclge_dev *hdev = vport->back;
9086 struct hclge_vlan_info vlan_info;
9087 u16 state;
9088 int ret;
9089
9090 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9091 return -EOPNOTSUPP;
9092
9093 vport = hclge_get_vf_vport(hdev, vfid);
9094 if (!vport)
9095 return -EINVAL;
9096
9097 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9098 if (vlan > VLAN_N_VID - 1 || qos > 7)
9099 return -EINVAL;
9100 if (proto != htons(ETH_P_8021Q))
9101 return -EPROTONOSUPPORT;
9102
9103 state = hclge_get_port_base_vlan_state(vport,
9104 vport->port_base_vlan_cfg.state,
9105 vlan);
9106 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9107 return 0;
9108
9109 vlan_info.vlan_tag = vlan;
9110 vlan_info.qos = qos;
9111 vlan_info.vlan_proto = ntohs(proto);
9112
9113 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9114 return hclge_update_port_base_vlan_cfg(vport, state,
9115 &vlan_info);
9116 } else {
9117 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9118 vport->vport_id, state,
9119 vlan, qos,
9120 ntohs(proto));
9121 return ret;
9122 }
9123 }
9124
9125 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9126 {
9127 struct hclge_vlan_info *vlan_info;
9128 struct hclge_vport *vport;
9129 int ret;
9130 int vf;
9131
9132 /* clear port base vlan for all vf */
9133 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9134 vport = &hdev->vport[vf];
9135 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9136
9137 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9138 vport->vport_id,
9139 vlan_info->vlan_tag, true);
9140 if (ret)
9141 dev_err(&hdev->pdev->dev,
9142 "failed to clear vf vlan for vf%d, ret = %d\n",
9143 vf - HCLGE_VF_VPORT_START_NUM, ret);
9144 }
9145 }
9146
9147 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9148 u16 vlan_id, bool is_kill)
9149 {
9150 struct hclge_vport *vport = hclge_get_vport(handle);
9151 struct hclge_dev *hdev = vport->back;
9152 bool writen_to_tbl = false;
9153 int ret = 0;
9154
9155 	/* When the device is resetting or the reset has failed, the firmware
9156 	 * is unable to handle the mailbox. Just record the vlan id, and remove
9157 	 * it after the reset finishes.
9158 */
9159 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9160 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9161 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9162 return -EBUSY;
9163 }
9164
9165 	/* When port based vlan is enabled, we use the port based vlan as the
9166 	 * vlan filter entry. In this case, we don't update the vlan filter table
9167 	 * when the user adds a new vlan or removes an existing vlan; we just
9168 	 * update the vport vlan list. The vlan ids in the vlan list are not
9169 	 * written to the vlan filter table until port based vlan is disabled.
9170 */
9171 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9172 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9173 vlan_id, is_kill);
9174 writen_to_tbl = true;
9175 }
9176
9177 if (!ret) {
9178 if (is_kill)
9179 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9180 else
9181 hclge_add_vport_vlan_table(vport, vlan_id,
9182 writen_to_tbl);
9183 } else if (is_kill) {
9184 		/* When removing the hw vlan filter failed, record the vlan id,
9185 		 * and try to remove it from hw later, to stay consistent
9186 		 * with the stack.
9187 */
9188 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9189 }
9190 return ret;
9191 }
9192
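/* Retry VLAN deletions that could not be applied earlier: walk each vport's
 * vlan_del_fail_bmap (filled by hclge_set_vlan_filter() while a reset was in
 * progress or a hardware delete failed) and remove those vlan ids from
 * hardware and from the vport vlan list, bounded by HCLGE_MAX_SYNC_COUNT
 * entries per invocation.
 */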
9193 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9194 {
9195 #define HCLGE_MAX_SYNC_COUNT 60
9196
9197 int i, ret, sync_cnt = 0;
9198 u16 vlan_id;
9199
9200 /* start from vport 1 for PF is always alive */
9201 for (i = 0; i < hdev->num_alloc_vport; i++) {
9202 struct hclge_vport *vport = &hdev->vport[i];
9203
9204 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9205 VLAN_N_VID);
9206 while (vlan_id != VLAN_N_VID) {
9207 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9208 vport->vport_id, vlan_id,
9209 true);
9210 if (ret && ret != -EINVAL)
9211 return;
9212
9213 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9214 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9215
9216 sync_cnt++;
9217 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9218 return;
9219
9220 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9221 VLAN_N_VID);
9222 }
9223 }
9224 }
9225
9226 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9227 {
9228 struct hclge_config_max_frm_size_cmd *req;
9229 struct hclge_desc desc;
9230
9231 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9232
9233 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9234 req->max_frm_size = cpu_to_le16(new_mps);
9235 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9236
9237 return hclge_cmd_send(&hdev->hw, &desc, 1);
9238 }
9239
9240 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9241 {
9242 struct hclge_vport *vport = hclge_get_vport(handle);
9243
9244 return hclge_set_vport_mtu(vport, new_mtu);
9245 }
9246
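/* Convert the requested MTU to a max frame size that includes the Ethernet
 * header, FCS and two VLAN tags, e.g. a 1500 byte MTU becomes a 1526 byte
 * frame (1500 + 14 + 4 + 2 * 4). A VF vport only records the value and must
 * stay within the PF's mps; for the PF itself the MAC is reprogrammed and the
 * packet buffers are reallocated while the client is brought down.
 */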
9247 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9248 {
9249 struct hclge_dev *hdev = vport->back;
9250 int i, max_frm_size, ret;
9251
9252 	/* HW supports 2 layers of vlan */
9253 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9254 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9255 max_frm_size > HCLGE_MAC_MAX_FRAME)
9256 return -EINVAL;
9257
9258 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9259 mutex_lock(&hdev->vport_lock);
9260 /* VF's mps must fit within hdev->mps */
9261 if (vport->vport_id && max_frm_size > hdev->mps) {
9262 mutex_unlock(&hdev->vport_lock);
9263 return -EINVAL;
9264 } else if (vport->vport_id) {
9265 vport->mps = max_frm_size;
9266 mutex_unlock(&hdev->vport_lock);
9267 return 0;
9268 }
9269
9270 	/* PF's mps must be greater than VF's mps */
9271 for (i = 1; i < hdev->num_alloc_vport; i++)
9272 if (max_frm_size < hdev->vport[i].mps) {
9273 mutex_unlock(&hdev->vport_lock);
9274 return -EINVAL;
9275 }
9276
9277 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9278
9279 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9280 if (ret) {
9281 dev_err(&hdev->pdev->dev,
9282 "Change mtu fail, ret =%d\n", ret);
9283 goto out;
9284 }
9285
9286 hdev->mps = max_frm_size;
9287 vport->mps = max_frm_size;
9288
9289 ret = hclge_buffer_alloc(hdev);
9290 if (ret)
9291 dev_err(&hdev->pdev->dev,
9292 "Allocate buffer fail, ret =%d\n", ret);
9293
9294 out:
9295 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9296 mutex_unlock(&hdev->vport_lock);
9297 return ret;
9298 }
9299
9300 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9301 bool enable)
9302 {
9303 struct hclge_reset_tqp_queue_cmd *req;
9304 struct hclge_desc desc;
9305 int ret;
9306
9307 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9308
9309 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9310 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9311 if (enable)
9312 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9313
9314 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9315 if (ret) {
9316 dev_err(&hdev->pdev->dev,
9317 "Send tqp reset cmd error, status =%d\n", ret);
9318 return ret;
9319 }
9320
9321 return 0;
9322 }
9323
9324 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9325 {
9326 struct hclge_reset_tqp_queue_cmd *req;
9327 struct hclge_desc desc;
9328 int ret;
9329
9330 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9331
9332 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9333 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9334
9335 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9336 if (ret) {
9337 dev_err(&hdev->pdev->dev,
9338 "Get reset status error, status =%d\n", ret);
9339 return ret;
9340 }
9341
9342 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9343 }
9344
9345 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9346 {
9347 struct hnae3_queue *queue;
9348 struct hclge_tqp *tqp;
9349
9350 queue = handle->kinfo.tqp[queue_id];
9351 tqp = container_of(queue, struct hclge_tqp, q);
9352
9353 return tqp->index;
9354 }
9355
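/* Reset a single TQP: disable the queue, assert the TQP reset over the
 * command queue, poll the reset status roughly every millisecond for up to
 * HCLGE_TQP_RESET_TRY_TIMES attempts, then deassert the reset again.
 */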
9356 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9357 {
9358 struct hclge_vport *vport = hclge_get_vport(handle);
9359 struct hclge_dev *hdev = vport->back;
9360 int reset_try_times = 0;
9361 int reset_status;
9362 u16 queue_gid;
9363 int ret;
9364
9365 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9366
9367 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9368 if (ret) {
9369 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9370 return ret;
9371 }
9372
9373 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9374 if (ret) {
9375 dev_err(&hdev->pdev->dev,
9376 "Send reset tqp cmd fail, ret = %d\n", ret);
9377 return ret;
9378 }
9379
9380 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9381 reset_status = hclge_get_reset_status(hdev, queue_gid);
9382 if (reset_status)
9383 break;
9384
9385 /* Wait for tqp hw reset */
9386 usleep_range(1000, 1200);
9387 }
9388
9389 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9390 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9391 return ret;
9392 }
9393
9394 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9395 if (ret)
9396 dev_err(&hdev->pdev->dev,
9397 "Deassert the soft reset fail, ret = %d\n", ret);
9398
9399 return ret;
9400 }
9401
9402 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9403 {
9404 struct hclge_dev *hdev = vport->back;
9405 int reset_try_times = 0;
9406 int reset_status;
9407 u16 queue_gid;
9408 int ret;
9409
9410 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9411
9412 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9413 if (ret) {
9414 dev_warn(&hdev->pdev->dev,
9415 "Send reset tqp cmd fail, ret = %d\n", ret);
9416 return;
9417 }
9418
9419 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9420 reset_status = hclge_get_reset_status(hdev, queue_gid);
9421 if (reset_status)
9422 break;
9423
9424 /* Wait for tqp hw reset */
9425 usleep_range(1000, 1200);
9426 }
9427
9428 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9429 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9430 return;
9431 }
9432
9433 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9434 if (ret)
9435 dev_warn(&hdev->pdev->dev,
9436 "Deassert the soft reset fail, ret = %d\n", ret);
9437 }
9438
9439 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9440 {
9441 struct hclge_vport *vport = hclge_get_vport(handle);
9442 struct hclge_dev *hdev = vport->back;
9443
9444 return hdev->fw_version;
9445 }
9446
9447 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9448 {
9449 struct phy_device *phydev = hdev->hw.mac.phydev;
9450
9451 if (!phydev)
9452 return;
9453
9454 phy_set_asym_pause(phydev, rx_en, tx_en);
9455 }
9456
9457 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9458 {
9459 int ret;
9460
9461 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9462 return 0;
9463
9464 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9465 if (ret)
9466 dev_err(&hdev->pdev->dev,
9467 "configure pauseparam error, ret = %d.\n", ret);
9468
9469 return ret;
9470 }
9471
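/* Resolve the pause configuration negotiated on the PHY: build the local and
 * link partner advertisement words, let mii_resolve_flowctrl_fdx() pick the
 * TX/RX pause combination, force both off for half duplex, and apply the
 * result (hclge_cfg_pauseparam() does nothing while PFC is active).
 */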
9472 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9473 {
9474 struct phy_device *phydev = hdev->hw.mac.phydev;
9475 u16 remote_advertising = 0;
9476 u16 local_advertising;
9477 u32 rx_pause, tx_pause;
9478 u8 flowctl;
9479
9480 if (!phydev->link || !phydev->autoneg)
9481 return 0;
9482
9483 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9484
9485 if (phydev->pause)
9486 remote_advertising = LPA_PAUSE_CAP;
9487
9488 if (phydev->asym_pause)
9489 remote_advertising |= LPA_PAUSE_ASYM;
9490
9491 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9492 remote_advertising);
9493 tx_pause = flowctl & FLOW_CTRL_TX;
9494 rx_pause = flowctl & FLOW_CTRL_RX;
9495
9496 if (phydev->duplex == HCLGE_MAC_HALF) {
9497 tx_pause = 0;
9498 rx_pause = 0;
9499 }
9500
9501 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9502 }
9503
9504 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9505 u32 *rx_en, u32 *tx_en)
9506 {
9507 struct hclge_vport *vport = hclge_get_vport(handle);
9508 struct hclge_dev *hdev = vport->back;
9509 struct phy_device *phydev = hdev->hw.mac.phydev;
9510
9511 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9512
9513 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9514 *rx_en = 0;
9515 *tx_en = 0;
9516 return;
9517 }
9518
9519 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9520 *rx_en = 1;
9521 *tx_en = 0;
9522 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9523 *tx_en = 1;
9524 *rx_en = 0;
9525 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9526 *rx_en = 1;
9527 *tx_en = 1;
9528 } else {
9529 *rx_en = 0;
9530 *tx_en = 0;
9531 }
9532 }
9533
9534 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9535 u32 rx_en, u32 tx_en)
9536 {
9537 if (rx_en && tx_en)
9538 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9539 else if (rx_en && !tx_en)
9540 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9541 else if (!rx_en && tx_en)
9542 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9543 else
9544 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9545
9546 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9547 }
9548
9549 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9550 u32 rx_en, u32 tx_en)
9551 {
9552 struct hclge_vport *vport = hclge_get_vport(handle);
9553 struct hclge_dev *hdev = vport->back;
9554 struct phy_device *phydev = hdev->hw.mac.phydev;
9555 u32 fc_autoneg;
9556
9557 if (phydev) {
9558 fc_autoneg = hclge_get_autoneg(handle);
9559 if (auto_neg != fc_autoneg) {
9560 dev_info(&hdev->pdev->dev,
9561 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9562 return -EOPNOTSUPP;
9563 }
9564 }
9565
9566 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9567 dev_info(&hdev->pdev->dev,
9568 "Priority flow control enabled. Cannot set link flow control.\n");
9569 return -EOPNOTSUPP;
9570 }
9571
9572 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9573
9574 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9575
9576 if (!auto_neg)
9577 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9578
9579 if (phydev)
9580 return phy_start_aneg(phydev);
9581
9582 return -EOPNOTSUPP;
9583 }
9584
9585 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9586 u8 *auto_neg, u32 *speed, u8 *duplex)
9587 {
9588 struct hclge_vport *vport = hclge_get_vport(handle);
9589 struct hclge_dev *hdev = vport->back;
9590
9591 if (speed)
9592 *speed = hdev->hw.mac.speed;
9593 if (duplex)
9594 *duplex = hdev->hw.mac.duplex;
9595 if (auto_neg)
9596 *auto_neg = hdev->hw.mac.autoneg;
9597 }
9598
9599 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9600 u8 *module_type)
9601 {
9602 struct hclge_vport *vport = hclge_get_vport(handle);
9603 struct hclge_dev *hdev = vport->back;
9604
9605 	/* When the nic is down, the service task is not running and does not
9606 	 * update the port information every second. Query the port information
9607 	 * before returning the media type to ensure it is correct.
9608 */
9609 hclge_update_port_info(hdev);
9610
9611 if (media_type)
9612 *media_type = hdev->hw.mac.media_type;
9613
9614 if (module_type)
9615 *module_type = hdev->hw.mac.module_type;
9616 }
9617
9618 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9619 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9620 {
9621 struct hclge_vport *vport = hclge_get_vport(handle);
9622 struct hclge_dev *hdev = vport->back;
9623 struct phy_device *phydev = hdev->hw.mac.phydev;
9624 int mdix_ctrl, mdix, is_resolved;
9625 unsigned int retval;
9626
9627 if (!phydev) {
9628 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9629 *tp_mdix = ETH_TP_MDI_INVALID;
9630 return;
9631 }
9632
9633 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9634
9635 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9636 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9637 HCLGE_PHY_MDIX_CTRL_S);
9638
9639 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9640 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9641 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9642
9643 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9644
9645 switch (mdix_ctrl) {
9646 case 0x0:
9647 *tp_mdix_ctrl = ETH_TP_MDI;
9648 break;
9649 case 0x1:
9650 *tp_mdix_ctrl = ETH_TP_MDI_X;
9651 break;
9652 case 0x3:
9653 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9654 break;
9655 default:
9656 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9657 break;
9658 }
9659
9660 if (!is_resolved)
9661 *tp_mdix = ETH_TP_MDI_INVALID;
9662 else if (mdix)
9663 *tp_mdix = ETH_TP_MDI_X;
9664 else
9665 *tp_mdix = ETH_TP_MDI;
9666 }
9667
9668 static void hclge_info_show(struct hclge_dev *hdev)
9669 {
9670 struct device *dev = &hdev->pdev->dev;
9671
9672 dev_info(dev, "PF info begin:\n");
9673
9674 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9675 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9676 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9677 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9678 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9679 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9680 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9681 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9682 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9683 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9684 dev_info(dev, "This is %s PF\n",
9685 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9686 dev_info(dev, "DCB %s\n",
9687 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9688 dev_info(dev, "MQPRIO %s\n",
9689 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9690
9691 dev_info(dev, "PF info end.\n");
9692 }
9693
9694 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9695 struct hclge_vport *vport)
9696 {
9697 struct hnae3_client *client = vport->nic.client;
9698 struct hclge_dev *hdev = ae_dev->priv;
9699 int rst_cnt = hdev->rst_stats.reset_cnt;
9700 int ret;
9701
9702 ret = client->ops->init_instance(&vport->nic);
9703 if (ret)
9704 return ret;
9705
9706 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9707 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9708 rst_cnt != hdev->rst_stats.reset_cnt) {
9709 ret = -EBUSY;
9710 goto init_nic_err;
9711 }
9712
9713 /* Enable nic hw error interrupts */
9714 ret = hclge_config_nic_hw_error(hdev, true);
9715 if (ret) {
9716 dev_err(&ae_dev->pdev->dev,
9717 "fail(%d) to enable hw error interrupts\n", ret);
9718 goto init_nic_err;
9719 }
9720
9721 hnae3_set_client_init_flag(client, ae_dev, 1);
9722
9723 if (netif_msg_drv(&hdev->vport->nic))
9724 hclge_info_show(hdev);
9725
9726 return ret;
9727
9728 init_nic_err:
9729 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9730 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9731 msleep(HCLGE_WAIT_RESET_DONE);
9732
9733 client->ops->uninit_instance(&vport->nic, 0);
9734
9735 return ret;
9736 }
9737
9738 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9739 struct hclge_vport *vport)
9740 {
9741 struct hclge_dev *hdev = ae_dev->priv;
9742 struct hnae3_client *client;
9743 int rst_cnt;
9744 int ret;
9745
9746 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9747 !hdev->nic_client)
9748 return 0;
9749
9750 client = hdev->roce_client;
9751 ret = hclge_init_roce_base_info(vport);
9752 if (ret)
9753 return ret;
9754
9755 rst_cnt = hdev->rst_stats.reset_cnt;
9756 ret = client->ops->init_instance(&vport->roce);
9757 if (ret)
9758 return ret;
9759
9760 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9761 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9762 rst_cnt != hdev->rst_stats.reset_cnt) {
9763 ret = -EBUSY;
9764 goto init_roce_err;
9765 }
9766
9767 /* Enable roce ras interrupts */
9768 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9769 if (ret) {
9770 dev_err(&ae_dev->pdev->dev,
9771 "fail(%d) to enable roce ras interrupts\n", ret);
9772 goto init_roce_err;
9773 }
9774
9775 hnae3_set_client_init_flag(client, ae_dev, 1);
9776
9777 return 0;
9778
9779 init_roce_err:
9780 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9781 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9782 msleep(HCLGE_WAIT_RESET_DONE);
9783
9784 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9785
9786 return ret;
9787 }
9788
9789 static int hclge_init_client_instance(struct hnae3_client *client,
9790 struct hnae3_ae_dev *ae_dev)
9791 {
9792 struct hclge_dev *hdev = ae_dev->priv;
9793 struct hclge_vport *vport;
9794 int i, ret;
9795
9796 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9797 vport = &hdev->vport[i];
9798
9799 switch (client->type) {
9800 case HNAE3_CLIENT_KNIC:
9801 hdev->nic_client = client;
9802 vport->nic.client = client;
9803 ret = hclge_init_nic_client_instance(ae_dev, vport);
9804 if (ret)
9805 goto clear_nic;
9806
9807 ret = hclge_init_roce_client_instance(ae_dev, vport);
9808 if (ret)
9809 goto clear_roce;
9810
9811 break;
9812 case HNAE3_CLIENT_ROCE:
9813 if (hnae3_dev_roce_supported(hdev)) {
9814 hdev->roce_client = client;
9815 vport->roce.client = client;
9816 }
9817
9818 ret = hclge_init_roce_client_instance(ae_dev, vport);
9819 if (ret)
9820 goto clear_roce;
9821
9822 break;
9823 default:
9824 return -EINVAL;
9825 }
9826 }
9827
9828 return 0;
9829
9830 clear_nic:
9831 hdev->nic_client = NULL;
9832 vport->nic.client = NULL;
9833 return ret;
9834 clear_roce:
9835 hdev->roce_client = NULL;
9836 vport->roce.client = NULL;
9837 return ret;
9838 }
9839
9840 static void hclge_uninit_client_instance(struct hnae3_client *client,
9841 struct hnae3_ae_dev *ae_dev)
9842 {
9843 struct hclge_dev *hdev = ae_dev->priv;
9844 struct hclge_vport *vport;
9845 int i;
9846
9847 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9848 vport = &hdev->vport[i];
9849 if (hdev->roce_client) {
9850 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9851 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9852 msleep(HCLGE_WAIT_RESET_DONE);
9853
9854 hdev->roce_client->ops->uninit_instance(&vport->roce,
9855 0);
9856 hdev->roce_client = NULL;
9857 vport->roce.client = NULL;
9858 }
9859 if (client->type == HNAE3_CLIENT_ROCE)
9860 return;
9861 if (hdev->nic_client && client->ops->uninit_instance) {
9862 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9863 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9864 msleep(HCLGE_WAIT_RESET_DONE);
9865
9866 client->ops->uninit_instance(&vport->nic, 0);
9867 hdev->nic_client = NULL;
9868 vport->nic.client = NULL;
9869 }
9870 }
9871 }
9872
9873 static int hclge_pci_init(struct hclge_dev *hdev)
9874 {
9875 struct pci_dev *pdev = hdev->pdev;
9876 struct hclge_hw *hw;
9877 int ret;
9878
9879 ret = pci_enable_device(pdev);
9880 if (ret) {
9881 dev_err(&pdev->dev, "failed to enable PCI device\n");
9882 return ret;
9883 }
9884
9885 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9886 if (ret) {
9887 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9888 if (ret) {
9889 dev_err(&pdev->dev,
9890 "can't set consistent PCI DMA");
9891 goto err_disable_device;
9892 }
9893 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9894 }
9895
9896 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9897 if (ret) {
9898 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9899 goto err_disable_device;
9900 }
9901
9902 pci_set_master(pdev);
9903 hw = &hdev->hw;
9904 hw->io_base = pcim_iomap(pdev, 2, 0);
9905 if (!hw->io_base) {
9906 dev_err(&pdev->dev, "Can't map configuration register space\n");
9907 ret = -ENOMEM;
9908 goto err_clr_master;
9909 }
9910
9911 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9912
9913 return 0;
9914 err_clr_master:
9915 pci_clear_master(pdev);
9916 pci_release_regions(pdev);
9917 err_disable_device:
9918 pci_disable_device(pdev);
9919
9920 return ret;
9921 }
9922
9923 static void hclge_pci_uninit(struct hclge_dev *hdev)
9924 {
9925 struct pci_dev *pdev = hdev->pdev;
9926
9927 pcim_iounmap(pdev, hdev->hw.io_base);
9928 pci_free_irq_vectors(pdev);
9929 pci_clear_master(pdev);
9930 pci_release_mem_regions(pdev);
9931 pci_disable_device(pdev);
9932 }
9933
9934 static void hclge_state_init(struct hclge_dev *hdev)
9935 {
9936 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9937 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9938 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9939 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9940 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9941 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9942 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9943 }
9944
9945 static void hclge_state_uninit(struct hclge_dev *hdev)
9946 {
9947 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9948 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9949
9950 if (hdev->reset_timer.function)
9951 del_timer_sync(&hdev->reset_timer);
9952 if (hdev->service_task.work.func)
9953 cancel_delayed_work_sync(&hdev->service_task);
9954 }
9955
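/* Prepare the function for an FLR: take the reset semaphore and run the reset
 * preparation, retrying with a 500 ms delay when preparation fails while
 * another reset is pending or fewer than HCLGE_FLR_RETRY_CNT attempts have
 * been made, then disable the misc vector and mark the command queue disabled
 * before the FLR itself proceeds.
 */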
9956 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9957 {
9958 #define HCLGE_FLR_RETRY_WAIT_MS 500
9959 #define HCLGE_FLR_RETRY_CNT 5
9960
9961 struct hclge_dev *hdev = ae_dev->priv;
9962 int retry_cnt = 0;
9963 int ret;
9964
9965 retry:
9966 down(&hdev->reset_sem);
9967 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9968 hdev->reset_type = HNAE3_FLR_RESET;
9969 ret = hclge_reset_prepare(hdev);
9970 if (ret || hdev->reset_pending) {
9971 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9972 ret);
9973 if (hdev->reset_pending ||
9974 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9975 dev_err(&hdev->pdev->dev,
9976 "reset_pending:0x%lx, retry_cnt:%d\n",
9977 hdev->reset_pending, retry_cnt);
9978 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9979 up(&hdev->reset_sem);
9980 msleep(HCLGE_FLR_RETRY_WAIT_MS);
9981 goto retry;
9982 }
9983 }
9984
9985 /* disable misc vector before FLR done */
9986 hclge_enable_vector(&hdev->misc_vector, false);
9987 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9988 hdev->rst_stats.flr_rst_cnt++;
9989 }
9990
9991 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9992 {
9993 struct hclge_dev *hdev = ae_dev->priv;
9994 int ret;
9995
9996 hclge_enable_vector(&hdev->misc_vector, true);
9997
9998 ret = hclge_reset_rebuild(hdev);
9999 if (ret)
10000 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10001
10002 hdev->reset_type = HNAE3_NONE_RESET;
10003 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10004 up(&hdev->reset_sem);
10005 }
10006
10007 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10008 {
10009 u16 i;
10010
10011 for (i = 0; i < hdev->num_alloc_vport; i++) {
10012 struct hclge_vport *vport = &hdev->vport[i];
10013 int ret;
10014
10015 /* Send cmd to clear VF's FUNC_RST_ING */
10016 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10017 if (ret)
10018 dev_warn(&hdev->pdev->dev,
10019 "clear vf(%u) rst failed %d!\n",
10020 vport->vport_id, ret);
10021 }
10022 }
10023
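/* Full PF initialization for the ae_dev init path: bring up PCI, the firmware
 * command queue, capabilities and device specs, MSI and the misc interrupt,
 * TQPs/vports/MDIO, then the MAC, TSO, GRO, VLAN, TM scheduling, RSS, manager
 * table and flow director, and finally arm the reset timer and schedule the
 * service task. Errors unwind in reverse order through the goto labels at the
 * bottom of the function.
 */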
10024 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10025 {
10026 struct pci_dev *pdev = ae_dev->pdev;
10027 struct hclge_dev *hdev;
10028 int ret;
10029
10030 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10031 if (!hdev)
10032 return -ENOMEM;
10033
10034 hdev->pdev = pdev;
10035 hdev->ae_dev = ae_dev;
10036 hdev->reset_type = HNAE3_NONE_RESET;
10037 hdev->reset_level = HNAE3_FUNC_RESET;
10038 ae_dev->priv = hdev;
10039
10040 	/* HW supports 2 layers of vlan */
10041 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10042
10043 mutex_init(&hdev->vport_lock);
10044 spin_lock_init(&hdev->fd_rule_lock);
10045 sema_init(&hdev->reset_sem, 1);
10046
10047 ret = hclge_pci_init(hdev);
10048 if (ret)
10049 goto out;
10050
10051 	/* Initialize the firmware command queue */
10052 ret = hclge_cmd_queue_init(hdev);
10053 if (ret)
10054 goto err_pci_uninit;
10055
10056 	/* Initialize the firmware command interface */
10057 ret = hclge_cmd_init(hdev);
10058 if (ret)
10059 goto err_cmd_uninit;
10060
10061 ret = hclge_get_cap(hdev);
10062 if (ret)
10063 goto err_cmd_uninit;
10064
10065 ret = hclge_query_dev_specs(hdev);
10066 if (ret) {
10067 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10068 ret);
10069 goto err_cmd_uninit;
10070 }
10071
10072 ret = hclge_configure(hdev);
10073 if (ret) {
10074 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10075 goto err_cmd_uninit;
10076 }
10077
10078 ret = hclge_init_msi(hdev);
10079 if (ret) {
10080 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10081 goto err_cmd_uninit;
10082 }
10083
10084 ret = hclge_misc_irq_init(hdev);
10085 if (ret)
10086 goto err_msi_uninit;
10087
10088 ret = hclge_alloc_tqps(hdev);
10089 if (ret) {
10090 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10091 goto err_msi_irq_uninit;
10092 }
10093
10094 ret = hclge_alloc_vport(hdev);
10095 if (ret)
10096 goto err_msi_irq_uninit;
10097
10098 ret = hclge_map_tqp(hdev);
10099 if (ret)
10100 goto err_msi_irq_uninit;
10101
10102 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10103 ret = hclge_mac_mdio_config(hdev);
10104 if (ret)
10105 goto err_msi_irq_uninit;
10106 }
10107
10108 ret = hclge_init_umv_space(hdev);
10109 if (ret)
10110 goto err_mdiobus_unreg;
10111
10112 ret = hclge_mac_init(hdev);
10113 if (ret) {
10114 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10115 goto err_mdiobus_unreg;
10116 }
10117
10118 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10119 if (ret) {
10120 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10121 goto err_mdiobus_unreg;
10122 }
10123
10124 ret = hclge_config_gro(hdev, true);
10125 if (ret)
10126 goto err_mdiobus_unreg;
10127
10128 ret = hclge_init_vlan_config(hdev);
10129 if (ret) {
10130 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10131 goto err_mdiobus_unreg;
10132 }
10133
10134 ret = hclge_tm_schd_init(hdev);
10135 if (ret) {
10136 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10137 goto err_mdiobus_unreg;
10138 }
10139
10140 hclge_rss_init_cfg(hdev);
10141 ret = hclge_rss_init_hw(hdev);
10142 if (ret) {
10143 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10144 goto err_mdiobus_unreg;
10145 }
10146
10147 ret = init_mgr_tbl(hdev);
10148 if (ret) {
10149 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10150 goto err_mdiobus_unreg;
10151 }
10152
10153 ret = hclge_init_fd_config(hdev);
10154 if (ret) {
10155 dev_err(&pdev->dev,
10156 "fd table init fail, ret=%d\n", ret);
10157 goto err_mdiobus_unreg;
10158 }
10159
10160 INIT_KFIFO(hdev->mac_tnl_log);
10161
10162 hclge_dcb_ops_set(hdev);
10163
10164 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10165 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10166
10167 /* Setup affinity after service timer setup because add_timer_on
10168 * is called in affinity notify.
10169 */
10170 hclge_misc_affinity_setup(hdev);
10171
10172 hclge_clear_all_event_cause(hdev);
10173 hclge_clear_resetting_state(hdev);
10174
10175 	/* Log and clear the hw errors that have already occurred */
10176 hclge_handle_all_hns_hw_errors(ae_dev);
10177
10178 	/* Request a delayed reset for error recovery, because an immediate
10179 	 * global reset on a PF would affect the pending initialization of other PFs.
10180 */
10181 if (ae_dev->hw_err_reset_req) {
10182 enum hnae3_reset_type reset_level;
10183
10184 reset_level = hclge_get_reset_level(ae_dev,
10185 &ae_dev->hw_err_reset_req);
10186 hclge_set_def_reset_request(ae_dev, reset_level);
10187 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10188 }
10189
10190 /* Enable MISC vector(vector0) */
10191 hclge_enable_vector(&hdev->misc_vector, true);
10192
10193 hclge_state_init(hdev);
10194 hdev->last_reset_time = jiffies;
10195
10196 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10197 HCLGE_DRIVER_NAME);
10198
10199 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10200
10201 return 0;
10202
10203 err_mdiobus_unreg:
10204 if (hdev->hw.mac.phydev)
10205 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10206 err_msi_irq_uninit:
10207 hclge_misc_irq_uninit(hdev);
10208 err_msi_uninit:
10209 pci_free_irq_vectors(pdev);
10210 err_cmd_uninit:
10211 hclge_cmd_uninit(hdev);
10212 err_pci_uninit:
10213 pcim_iounmap(pdev, hdev->hw.io_base);
10214 pci_clear_master(pdev);
10215 pci_release_regions(pdev);
10216 pci_disable_device(pdev);
10217 out:
10218 mutex_destroy(&hdev->vport_lock);
10219 return ret;
10220 }
10221
10222 static void hclge_stats_clear(struct hclge_dev *hdev)
10223 {
10224 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10225 }
10226
10227 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10228 {
10229 return hclge_config_switch_param(hdev, vf, enable,
10230 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10231 }
10232
10233 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10234 {
10235 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10236 HCLGE_FILTER_FE_NIC_INGRESS_B,
10237 enable, vf);
10238 }
10239
10240 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10241 {
10242 int ret;
10243
10244 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10245 if (ret) {
10246 dev_err(&hdev->pdev->dev,
10247 "Set vf %d mac spoof check %s failed, ret=%d\n",
10248 vf, enable ? "on" : "off", ret);
10249 return ret;
10250 }
10251
10252 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10253 if (ret)
10254 dev_err(&hdev->pdev->dev,
10255 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10256 vf, enable ? "on" : "off", ret);
10257
10258 return ret;
10259 }
10260
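/* Enable or disable spoof checking for a VF (the PF-side backing for the
 * usual "ip link set <pf> vf <n> spoofchk on|off" control; that caller is an
 * assumption about the standard plumbing, not visible here). Both the MAC
 * anti-spoof switch parameter and the NIC ingress VLAN filter are toggled,
 * and a warning is printed when the VF's VLAN or MAC table is already full,
 * since spoof checking may then cause that VF's packets to be dropped.
 */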
10261 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10262 bool enable)
10263 {
10264 struct hclge_vport *vport = hclge_get_vport(handle);
10265 struct hclge_dev *hdev = vport->back;
10266 u32 new_spoofchk = enable ? 1 : 0;
10267 int ret;
10268
10269 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10270 return -EOPNOTSUPP;
10271
10272 vport = hclge_get_vf_vport(hdev, vf);
10273 if (!vport)
10274 return -EINVAL;
10275
10276 if (vport->vf_info.spoofchk == new_spoofchk)
10277 return 0;
10278
10279 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10280 dev_warn(&hdev->pdev->dev,
10281 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10282 vf);
10283 else if (enable && hclge_is_umv_space_full(vport, true))
10284 dev_warn(&hdev->pdev->dev,
10285 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10286 vf);
10287
10288 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10289 if (ret)
10290 return ret;
10291
10292 vport->vf_info.spoofchk = new_spoofchk;
10293 return 0;
10294 }
10295
10296 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10297 {
10298 struct hclge_vport *vport = hdev->vport;
10299 int ret;
10300 int i;
10301
10302 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10303 return 0;
10304
10305 /* resume the vf spoof check state after reset */
10306 for (i = 0; i < hdev->num_alloc_vport; i++) {
10307 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10308 vport->vf_info.spoofchk);
10309 if (ret)
10310 return ret;
10311
10312 vport++;
10313 }
10314
10315 return 0;
10316 }
10317
10318 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10319 {
10320 struct hclge_vport *vport = hclge_get_vport(handle);
10321 struct hclge_dev *hdev = vport->back;
10322 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10323 u32 new_trusted = enable ? 1 : 0;
10324 bool en_bc_pmc;
10325 int ret;
10326
10327 vport = hclge_get_vf_vport(hdev, vf);
10328 if (!vport)
10329 return -EINVAL;
10330
10331 if (vport->vf_info.trusted == new_trusted)
10332 return 0;
10333
10334 /* Disable promisc mode for VF if it is not trusted any more. */
10335 if (!enable && vport->vf_info.promisc_enable) {
10336 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10337 ret = hclge_set_vport_promisc_mode(vport, false, false,
10338 en_bc_pmc);
10339 if (ret)
10340 return ret;
10341 vport->vf_info.promisc_enable = 0;
10342 hclge_inform_vf_promisc_info(vport);
10343 }
10344
10345 vport->vf_info.trusted = new_trusted;
10346
10347 return 0;
10348 }
10349
10350 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10351 {
10352 int ret;
10353 int vf;
10354
10355 /* reset vf rate to default value */
10356 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10357 struct hclge_vport *vport = &hdev->vport[vf];
10358
10359 vport->vf_info.max_tx_rate = 0;
10360 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10361 if (ret)
10362 dev_err(&hdev->pdev->dev,
10363 "vf%d failed to reset to default, ret=%d\n",
10364 vf - HCLGE_VF_VPORT_START_NUM, ret);
10365 }
10366 }
10367
10368 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10369 int min_tx_rate, int max_tx_rate)
10370 {
10371 if (min_tx_rate != 0 ||
10372 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10373 dev_err(&hdev->pdev->dev,
10374 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10375 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10376 return -EINVAL;
10377 }
10378
10379 return 0;
10380 }
10381
10382 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10383 int min_tx_rate, int max_tx_rate, bool force)
10384 {
10385 struct hclge_vport *vport = hclge_get_vport(handle);
10386 struct hclge_dev *hdev = vport->back;
10387 int ret;
10388
10389 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10390 if (ret)
10391 return ret;
10392
10393 vport = hclge_get_vf_vport(hdev, vf);
10394 if (!vport)
10395 return -EINVAL;
10396
10397 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10398 return 0;
10399
10400 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10401 if (ret)
10402 return ret;
10403
10404 vport->vf_info.max_tx_rate = max_tx_rate;
10405
10406 return 0;
10407 }
10408
10409 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10410 {
10411 struct hnae3_handle *handle = &hdev->vport->nic;
10412 struct hclge_vport *vport;
10413 int ret;
10414 int vf;
10415
10416 /* resume the vf max_tx_rate after reset */
10417 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10418 vport = hclge_get_vf_vport(hdev, vf);
10419 if (!vport)
10420 return -EINVAL;
10421
10422 		/* Zero means max rate. After reset, the firmware has already
10423 		 * set it to the max rate, so just continue.
10424 */
10425 if (!vport->vf_info.max_tx_rate)
10426 continue;
10427
10428 ret = hclge_set_vf_rate(handle, vf, 0,
10429 vport->vf_info.max_tx_rate, true);
10430 if (ret) {
10431 dev_err(&hdev->pdev->dev,
10432 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10433 vf, vport->vf_info.max_tx_rate, ret);
10434 return ret;
10435 }
10436 }
10437
10438 return 0;
10439 }
10440
10441 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10442 {
10443 struct hclge_vport *vport = hdev->vport;
10444 int i;
10445
10446 for (i = 0; i < hdev->num_alloc_vport; i++) {
10447 hclge_vport_stop(vport);
10448 vport++;
10449 }
10450 }
10451
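/* Re-initialize the hardware after a reset. For IMP and global resets the
 * VLAN tables and UMV space are cleared first, since hardware has lost them;
 * the command queue, TQP mapping, MAC, TSO, GRO, VLAN, TM, RSS, manager table
 * and flow director configuration are then rebuilt, hardware error interrupts
 * are re-enabled, and the per-VF spoof-check and rate settings are restored.
 */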
10452 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10453 {
10454 struct hclge_dev *hdev = ae_dev->priv;
10455 struct pci_dev *pdev = ae_dev->pdev;
10456 int ret;
10457
10458 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10459
10460 hclge_stats_clear(hdev);
10461 	/* NOTE: pf reset does not need to clear or restore the pf and vf table
10462 	 * entries, so the tables in memory should not be cleaned here.
10463 */
10464 if (hdev->reset_type == HNAE3_IMP_RESET ||
10465 hdev->reset_type == HNAE3_GLOBAL_RESET) {
10466 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10467 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10468 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10469 hclge_reset_umv_space(hdev);
10470 }
10471
10472 ret = hclge_cmd_init(hdev);
10473 if (ret) {
10474 dev_err(&pdev->dev, "Cmd queue init failed\n");
10475 return ret;
10476 }
10477
10478 ret = hclge_map_tqp(hdev);
10479 if (ret) {
10480 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10481 return ret;
10482 }
10483
10484 ret = hclge_mac_init(hdev);
10485 if (ret) {
10486 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10487 return ret;
10488 }
10489
10490 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10491 if (ret) {
10492 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10493 return ret;
10494 }
10495
10496 ret = hclge_config_gro(hdev, true);
10497 if (ret)
10498 return ret;
10499
10500 ret = hclge_init_vlan_config(hdev);
10501 if (ret) {
10502 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10503 return ret;
10504 }
10505
10506 ret = hclge_tm_init_hw(hdev, true);
10507 if (ret) {
10508 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10509 return ret;
10510 }
10511
10512 ret = hclge_rss_init_hw(hdev);
10513 if (ret) {
10514 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10515 return ret;
10516 }
10517
10518 ret = init_mgr_tbl(hdev);
10519 if (ret) {
10520 dev_err(&pdev->dev,
10521 "failed to reinit manager table, ret = %d\n", ret);
10522 return ret;
10523 }
10524
10525 ret = hclge_init_fd_config(hdev);
10526 if (ret) {
10527 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10528 return ret;
10529 }
10530
10531 	/* Log and clear the hw errors that have already occurred */
10532 hclge_handle_all_hns_hw_errors(ae_dev);
10533
10534 /* Re-enable the hw error interrupts because
10535 * the interrupts get disabled on global reset.
10536 */
10537 ret = hclge_config_nic_hw_error(hdev, true);
10538 if (ret) {
10539 dev_err(&pdev->dev,
10540 "fail(%d) to re-enable NIC hw error interrupts\n",
10541 ret);
10542 return ret;
10543 }
10544
10545 if (hdev->roce_client) {
10546 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10547 if (ret) {
10548 dev_err(&pdev->dev,
10549 "fail(%d) to re-enable roce ras interrupts\n",
10550 ret);
10551 return ret;
10552 }
10553 }
10554
10555 hclge_reset_vport_state(hdev);
10556 ret = hclge_reset_vport_spoofchk(hdev);
10557 if (ret)
10558 return ret;
10559
10560 ret = hclge_resume_vf_rate(hdev);
10561 if (ret)
10562 return ret;
10563
10564 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10565 HCLGE_DRIVER_NAME);
10566
10567 return 0;
10568 }
10569
10570 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10571 {
10572 struct hclge_dev *hdev = ae_dev->priv;
10573 struct hclge_mac *mac = &hdev->hw.mac;
10574
10575 hclge_reset_vf_rate(hdev);
10576 hclge_clear_vf_vlan(hdev);
10577 hclge_misc_affinity_teardown(hdev);
10578 hclge_state_uninit(hdev);
10579 hclge_uninit_mac_table(hdev);
10580
10581 if (mac->phydev)
10582 mdiobus_unregister(mac->mdio_bus);
10583
10584 /* Disable MISC vector(vector0) */
10585 hclge_enable_vector(&hdev->misc_vector, false);
10586 synchronize_irq(hdev->misc_vector.vector_irq);
10587
10588 /* Disable all hw interrupts */
10589 hclge_config_mac_tnl_int(hdev, false);
10590 hclge_config_nic_hw_error(hdev, false);
10591 hclge_config_rocee_ras_interrupt(hdev, false);
10592
10593 hclge_cmd_uninit(hdev);
10594 hclge_misc_irq_uninit(hdev);
10595 hclge_pci_uninit(hdev);
10596 mutex_destroy(&hdev->vport_lock);
10597 hclge_uninit_vport_vlan_table(hdev);
10598 ae_dev->priv = NULL;
10599 }
10600
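/* Maximum number of combined channels the user may request: bounded by
 * the hardware RSS size and by the TQPs allocated to this vport divided
 * across its TCs.
 */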
10601 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10602 {
10603 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10604 struct hclge_vport *vport = hclge_get_vport(handle);
10605 struct hclge_dev *hdev = vport->back;
10606
10607 return min_t(u32, hdev->rss_size_max,
10608 vport->alloc_tqps / kinfo->num_tc);
10609 }
10610
10611 static void hclge_get_channels(struct hnae3_handle *handle,
10612 struct ethtool_channels *ch)
10613 {
10614 ch->max_combined = hclge_get_max_channels(handle);
10615 ch->other_count = 1;
10616 ch->max_other = 1;
10617 ch->combined_count = handle->kinfo.rss_size;
10618 }
10619
10620 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10621 u16 *alloc_tqps, u16 *max_rss_size)
10622 {
10623 struct hclge_vport *vport = hclge_get_vport(handle);
10624 struct hclge_dev *hdev = vport->back;
10625
10626 *alloc_tqps = vport->alloc_tqps;
10627 *max_rss_size = hdev->rss_size_max;
10628 }
10629
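/* Change the channel (TQP) count: record the requested RSS size, remap
 * the vport in the scheduler, reprogram the RSS TC mode, and rebuild the
 * RSS indirection table unless the user has configured one explicitly.
 */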
10630 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10631 bool rxfh_configured)
10632 {
10633 struct hclge_vport *vport = hclge_get_vport(handle);
10634 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10635 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10636 struct hclge_dev *hdev = vport->back;
10637 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10638 u16 cur_rss_size = kinfo->rss_size;
10639 u16 cur_tqps = kinfo->num_tqps;
10640 u16 tc_valid[HCLGE_MAX_TC_NUM];
10641 u16 roundup_size;
10642 u32 *rss_indir;
10643 unsigned int i;
10644 int ret;
10645
10646 kinfo->req_rss_size = new_tqps_num;
10647
10648 ret = hclge_tm_vport_map_update(hdev);
10649 if (ret) {
10650 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10651 return ret;
10652 }
10653
10654 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10655 roundup_size = ilog2(roundup_size);
10656 /* Set the RSS TC mode according to the new RSS size */
10657 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10658 tc_valid[i] = 0;
10659
10660 if (!(hdev->hw_tc_map & BIT(i)))
10661 continue;
10662
10663 tc_valid[i] = 1;
10664 tc_size[i] = roundup_size;
10665 tc_offset[i] = kinfo->rss_size * i;
10666 }
10667 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10668 if (ret)
10669 return ret;
10670
10671 	/* RSS indirection table has been configured by user */
10672 if (rxfh_configured)
10673 goto out;
10674
10675 	/* Reinitialize the RSS indirection table according to the new RSS size */
10676 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10677 if (!rss_indir)
10678 return -ENOMEM;
10679
10680 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10681 rss_indir[i] = i % kinfo->rss_size;
10682
10683 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10684 if (ret)
10685 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10686 ret);
10687
10688 kfree(rss_indir);
10689
10690 out:
10691 if (!ret)
10692 dev_info(&hdev->pdev->dev,
10693 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10694 cur_rss_size, kinfo->rss_size,
10695 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10696
10697 return ret;
10698 }
10699
10700 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10701 u32 *regs_num_64_bit)
10702 {
10703 struct hclge_desc desc;
10704 u32 total_num;
10705 int ret;
10706
10707 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10708 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10709 if (ret) {
10710 dev_err(&hdev->pdev->dev,
10711 "Query register number cmd failed, ret = %d.\n", ret);
10712 return ret;
10713 }
10714
10715 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10716 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10717
10718 total_num = *regs_num_32_bit + *regs_num_64_bit;
10719 if (!total_num)
10720 return -EINVAL;
10721
10722 return 0;
10723 }
10724
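/* Query the 32-bit register set from firmware. The values are spread
 * across several command descriptors; the first descriptor carries
 * HCLGE_32_BIT_DESC_NODATA_LEN fewer values than the following ones.
 */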
10725 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10726 void *data)
10727 {
10728 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10729 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10730
10731 struct hclge_desc *desc;
10732 u32 *reg_val = data;
10733 __le32 *desc_data;
10734 int nodata_num;
10735 int cmd_num;
10736 int i, k, n;
10737 int ret;
10738
10739 if (regs_num == 0)
10740 return 0;
10741
10742 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10743 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10744 HCLGE_32_BIT_REG_RTN_DATANUM);
10745 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10746 if (!desc)
10747 return -ENOMEM;
10748
10749 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10750 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10751 if (ret) {
10752 dev_err(&hdev->pdev->dev,
10753 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10754 kfree(desc);
10755 return ret;
10756 }
10757
10758 for (i = 0; i < cmd_num; i++) {
10759 if (i == 0) {
10760 desc_data = (__le32 *)(&desc[i].data[0]);
10761 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10762 } else {
10763 desc_data = (__le32 *)(&desc[i]);
10764 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10765 }
10766 for (k = 0; k < n; k++) {
10767 *reg_val++ = le32_to_cpu(*desc_data++);
10768
10769 regs_num--;
10770 if (!regs_num)
10771 break;
10772 }
10773 }
10774
10775 kfree(desc);
10776 return 0;
10777 }
10778
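/* Query the 64-bit register set from firmware, using the same multi-BD
 * layout as the 32-bit query but with 64-bit values per entry.
 */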
10779 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10780 void *data)
10781 {
10782 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10783 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10784
10785 struct hclge_desc *desc;
10786 u64 *reg_val = data;
10787 __le64 *desc_data;
10788 int nodata_len;
10789 int cmd_num;
10790 int i, k, n;
10791 int ret;
10792
10793 if (regs_num == 0)
10794 return 0;
10795
10796 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10797 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10798 HCLGE_64_BIT_REG_RTN_DATANUM);
10799 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10800 if (!desc)
10801 return -ENOMEM;
10802
10803 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10804 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10805 if (ret) {
10806 dev_err(&hdev->pdev->dev,
10807 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10808 kfree(desc);
10809 return ret;
10810 }
10811
10812 for (i = 0; i < cmd_num; i++) {
10813 if (i == 0) {
10814 desc_data = (__le64 *)(&desc[i].data[0]);
10815 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10816 } else {
10817 desc_data = (__le64 *)(&desc[i]);
10818 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10819 }
10820 for (k = 0; k < n; k++) {
10821 *reg_val++ = le64_to_cpu(*desc_data++);
10822
10823 regs_num--;
10824 if (!regs_num)
10825 break;
10826 }
10827 }
10828
10829 kfree(desc);
10830 return 0;
10831 }
10832
10833 #define MAX_SEPARATE_NUM 4
10834 #define SEPARATOR_VALUE 0xFDFCFBFA
10835 #define REG_NUM_PER_LINE 4
10836 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10837 #define REG_SEPARATOR_LINE 1
10838 #define REG_NUM_REMAIN_MASK 3
10839 #define BD_LIST_MAX_NUM 30
10840
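/* Query the number of buffer descriptors required by each DFX register
 * type. All HCLGE_GET_DFX_REG_TYPE_CNT BDs are sent as one chained
 * command, so every BD except the last carries the NEXT flag.
 */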
10841 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10842 {
10843 int i;
10844
10845 	/* initialize all command BDs except the last one */
10846 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10847 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10848 true);
10849 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10850 }
10851
10852 /* initialize the last command BD */
10853 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10854
10855 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10856 }
10857
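/* Convert the BD-number query result into a per-type list: each entry of
 * hclge_dfx_bd_offset_list indexes one 32-bit field in the returned
 * descriptors.
 */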
10858 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10859 int *bd_num_list,
10860 u32 type_num)
10861 {
10862 u32 entries_per_desc, desc_index, index, offset, i;
10863 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10864 int ret;
10865
10866 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10867 if (ret) {
10868 dev_err(&hdev->pdev->dev,
10869 "Get dfx bd num fail, status is %d.\n", ret);
10870 return ret;
10871 }
10872
10873 entries_per_desc = ARRAY_SIZE(desc[0].data);
10874 for (i = 0; i < type_num; i++) {
10875 offset = hclge_dfx_bd_offset_list[i];
10876 index = offset % entries_per_desc;
10877 desc_index = offset / entries_per_desc;
10878 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10879 }
10880
10881 return ret;
10882 }
10883
10884 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10885 struct hclge_desc *desc_src, int bd_num,
10886 enum hclge_opcode_type cmd)
10887 {
10888 struct hclge_desc *desc = desc_src;
10889 int i, ret;
10890
10891 hclge_cmd_setup_basic_desc(desc, cmd, true);
10892 for (i = 0; i < bd_num - 1; i++) {
10893 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10894 desc++;
10895 hclge_cmd_setup_basic_desc(desc, cmd, true);
10896 }
10897
10898 desc = desc_src;
10899 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10900 if (ret)
10901 dev_err(&hdev->pdev->dev,
10902 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10903 cmd, ret);
10904
10905 return ret;
10906 }
10907
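/* Copy the DFX register values from the descriptors into the dump buffer
 * and append SEPARATOR_VALUE padding words. Returns the number of 32-bit
 * words written.
 */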
10908 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10909 void *data)
10910 {
10911 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10912 struct hclge_desc *desc = desc_src;
10913 u32 *reg = data;
10914
10915 entries_per_desc = ARRAY_SIZE(desc->data);
10916 reg_num = entries_per_desc * bd_num;
10917 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10918 for (i = 0; i < reg_num; i++) {
10919 index = i % entries_per_desc;
10920 desc_index = i / entries_per_desc;
10921 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10922 }
10923 for (i = 0; i < separator_num; i++)
10924 *reg++ = SEPARATOR_VALUE;
10925
10926 return reg_num + separator_num;
10927 }
10928
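/* Compute the buffer length needed for the DFX register dump, including
 * the per-type separator padding.
 */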
10929 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10930 {
10931 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10932 int data_len_per_desc, bd_num, i;
10933 int bd_num_list[BD_LIST_MAX_NUM];
10934 u32 data_len;
10935 int ret;
10936
10937 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10938 if (ret) {
10939 dev_err(&hdev->pdev->dev,
10940 "Get dfx reg bd num fail, status is %d.\n", ret);
10941 return ret;
10942 }
10943
10944 data_len_per_desc = sizeof_field(struct hclge_desc, data);
10945 *len = 0;
10946 for (i = 0; i < dfx_reg_type_num; i++) {
10947 bd_num = bd_num_list[i];
10948 data_len = data_len_per_desc * bd_num;
10949 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10950 }
10951
10952 return ret;
10953 }
10954
10955 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10956 {
10957 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10958 int bd_num, bd_num_max, buf_len, i;
10959 int bd_num_list[BD_LIST_MAX_NUM];
10960 struct hclge_desc *desc_src;
10961 u32 *reg = data;
10962 int ret;
10963
10964 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10965 if (ret) {
10966 dev_err(&hdev->pdev->dev,
10967 "Get dfx reg bd num fail, status is %d.\n", ret);
10968 return ret;
10969 }
10970
10971 bd_num_max = bd_num_list[0];
10972 for (i = 1; i < dfx_reg_type_num; i++)
10973 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10974
10975 buf_len = sizeof(*desc_src) * bd_num_max;
10976 desc_src = kzalloc(buf_len, GFP_KERNEL);
10977 if (!desc_src)
10978 return -ENOMEM;
10979
10980 for (i = 0; i < dfx_reg_type_num; i++) {
10981 bd_num = bd_num_list[i];
10982 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10983 hclge_dfx_reg_opcode_list[i]);
10984 if (ret) {
10985 dev_err(&hdev->pdev->dev,
10986 "Get dfx reg fail, status is %d.\n", ret);
10987 break;
10988 }
10989
10990 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10991 }
10992
10993 kfree(desc_src);
10994 return ret;
10995 }
10996
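/* Dump the directly readable PF registers: the command queue and common
 * blocks once, the ring registers for each TQP and the TQP interrupt
 * registers for each in-use vector, each block followed by separator
 * words. Returns the number of 32-bit words written.
 */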
10997 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10998 struct hnae3_knic_private_info *kinfo)
10999 {
11000 #define HCLGE_RING_REG_OFFSET 0x200
11001 #define HCLGE_RING_INT_REG_OFFSET 0x4
11002
11003 int i, j, reg_num, separator_num;
11004 int data_num_sum;
11005 u32 *reg = data;
11006
11007 	/* fetch per-PF register values from the PF PCIe register space */
11008 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11009 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11010 for (i = 0; i < reg_num; i++)
11011 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11012 for (i = 0; i < separator_num; i++)
11013 *reg++ = SEPARATOR_VALUE;
11014 data_num_sum = reg_num + separator_num;
11015
11016 reg_num = ARRAY_SIZE(common_reg_addr_list);
11017 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11018 for (i = 0; i < reg_num; i++)
11019 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11020 for (i = 0; i < separator_num; i++)
11021 *reg++ = SEPARATOR_VALUE;
11022 data_num_sum += reg_num + separator_num;
11023
11024 reg_num = ARRAY_SIZE(ring_reg_addr_list);
11025 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11026 for (j = 0; j < kinfo->num_tqps; j++) {
11027 for (i = 0; i < reg_num; i++)
11028 *reg++ = hclge_read_dev(&hdev->hw,
11029 ring_reg_addr_list[i] +
11030 HCLGE_RING_REG_OFFSET * j);
11031 for (i = 0; i < separator_num; i++)
11032 *reg++ = SEPARATOR_VALUE;
11033 }
11034 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11035
11036 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11037 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11038 for (j = 0; j < hdev->num_msi_used - 1; j++) {
11039 for (i = 0; i < reg_num; i++)
11040 *reg++ = hclge_read_dev(&hdev->hw,
11041 tqp_intr_reg_addr_list[i] +
11042 HCLGE_RING_INT_REG_OFFSET * j);
11043 for (i = 0; i < separator_num; i++)
11044 *reg++ = SEPARATOR_VALUE;
11045 }
11046 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11047
11048 return data_num_sum;
11049 }
11050
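/* Report the size of the ethtool register dump: the directly readable
 * blocks, the 32-bit and 64-bit register sets queried from firmware and
 * the DFX registers, laid out in REG_LEN_PER_LINE sized lines with
 * separator lines in between.
 */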
11051 static int hclge_get_regs_len(struct hnae3_handle *handle)
11052 {
11053 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11054 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11055 struct hclge_vport *vport = hclge_get_vport(handle);
11056 struct hclge_dev *hdev = vport->back;
11057 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11058 int regs_lines_32_bit, regs_lines_64_bit;
11059 int ret;
11060
11061 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11062 if (ret) {
11063 dev_err(&hdev->pdev->dev,
11064 "Get register number failed, ret = %d.\n", ret);
11065 return ret;
11066 }
11067
11068 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11069 if (ret) {
11070 dev_err(&hdev->pdev->dev,
11071 "Get dfx reg len failed, ret = %d.\n", ret);
11072 return ret;
11073 }
11074
11075 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11076 REG_SEPARATOR_LINE;
11077 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11078 REG_SEPARATOR_LINE;
11079 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11080 REG_SEPARATOR_LINE;
11081 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11082 REG_SEPARATOR_LINE;
11083 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11084 REG_SEPARATOR_LINE;
11085 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11086 REG_SEPARATOR_LINE;
11087
11088 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11089 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11090 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11091 }
11092
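/* Fill the ethtool register dump in the same order used to compute its
 * length: PF PCIe registers, the 32-bit and 64-bit firmware register
 * sets, then the DFX registers, with separator padding between sections.
 */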
11093 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11094 void *data)
11095 {
11096 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11097 struct hclge_vport *vport = hclge_get_vport(handle);
11098 struct hclge_dev *hdev = vport->back;
11099 u32 regs_num_32_bit, regs_num_64_bit;
11100 int i, reg_num, separator_num, ret;
11101 u32 *reg = data;
11102
11103 *version = hdev->fw_version;
11104
11105 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11106 if (ret) {
11107 dev_err(&hdev->pdev->dev,
11108 "Get register number failed, ret = %d.\n", ret);
11109 return;
11110 }
11111
11112 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11113
11114 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11115 if (ret) {
11116 dev_err(&hdev->pdev->dev,
11117 "Get 32 bit register failed, ret = %d.\n", ret);
11118 return;
11119 }
11120 reg_num = regs_num_32_bit;
11121 reg += reg_num;
11122 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11123 for (i = 0; i < separator_num; i++)
11124 *reg++ = SEPARATOR_VALUE;
11125
11126 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11127 if (ret) {
11128 dev_err(&hdev->pdev->dev,
11129 "Get 64 bit register failed, ret = %d.\n", ret);
11130 return;
11131 }
11132 reg_num = regs_num_64_bit * 2;
11133 reg += reg_num;
11134 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11135 for (i = 0; i < separator_num; i++)
11136 *reg++ = SEPARATOR_VALUE;
11137
11138 ret = hclge_get_dfx_reg(hdev, reg);
11139 if (ret)
11140 dev_err(&hdev->pdev->dev,
11141 "Get dfx register failed, ret = %d.\n", ret);
11142 }
11143
11144 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11145 {
11146 struct hclge_set_led_state_cmd *req;
11147 struct hclge_desc desc;
11148 int ret;
11149
11150 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11151
11152 req = (struct hclge_set_led_state_cmd *)desc.data;
11153 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11154 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11155
11156 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11157 if (ret)
11158 dev_err(&hdev->pdev->dev,
11159 "Send set led state cmd error, ret =%d\n", ret);
11160
11161 return ret;
11162 }
11163
11164 enum hclge_led_status {
11165 HCLGE_LED_OFF,
11166 HCLGE_LED_ON,
11167 HCLGE_LED_NO_CHANGE = 0xFF,
11168 };
11169
11170 static int hclge_set_led_id(struct hnae3_handle *handle,
11171 enum ethtool_phys_id_state status)
11172 {
11173 struct hclge_vport *vport = hclge_get_vport(handle);
11174 struct hclge_dev *hdev = vport->back;
11175
11176 switch (status) {
11177 case ETHTOOL_ID_ACTIVE:
11178 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11179 case ETHTOOL_ID_INACTIVE:
11180 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11181 default:
11182 return -EINVAL;
11183 }
11184 }
11185
11186 static void hclge_get_link_mode(struct hnae3_handle *handle,
11187 unsigned long *supported,
11188 unsigned long *advertising)
11189 {
11190 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11191 struct hclge_vport *vport = hclge_get_vport(handle);
11192 struct hclge_dev *hdev = vport->back;
11193 unsigned int idx = 0;
11194
11195 for (; idx < size; idx++) {
11196 supported[idx] = hdev->hw.mac.supported[idx];
11197 advertising[idx] = hdev->hw.mac.advertising[idx];
11198 }
11199 }
11200
11201 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11202 {
11203 struct hclge_vport *vport = hclge_get_vport(handle);
11204 struct hclge_dev *hdev = vport->back;
11205
11206 return hclge_config_gro(hdev, enable);
11207 }
11208
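/* Periodic sync of the PF promiscuous mode: when the overflow promiscuous
 * flags change, merge them with the netdev flags, reprogram unicast and
 * multicast promiscuous mode and refresh the VLAN filter state.
 */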
11209 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11210 {
11211 struct hclge_vport *vport = &hdev->vport[0];
11212 struct hnae3_handle *handle = &vport->nic;
11213 u8 tmp_flags;
11214 int ret;
11215
11216 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11217 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11218 vport->last_promisc_flags = vport->overflow_promisc_flags;
11219 }
11220
11221 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11222 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11223 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11224 tmp_flags & HNAE3_MPE);
11225 if (!ret) {
11226 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11227 hclge_enable_vlan_filter(handle,
11228 tmp_flags & HNAE3_VLAN_FLTR);
11229 }
11230 }
11231 }
11232
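/* Ask the firmware whether an SFP module is present; a command failure is
 * treated as "no module".
 */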
11233 static bool hclge_module_existed(struct hclge_dev *hdev)
11234 {
11235 struct hclge_desc desc;
11236 u32 existed;
11237 int ret;
11238
11239 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11240 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11241 if (ret) {
11242 dev_err(&hdev->pdev->dev,
11243 "failed to get SFP exist state, ret = %d\n", ret);
11244 return false;
11245 }
11246
11247 existed = le32_to_cpu(desc.data[0]);
11248
11249 return existed != 0;
11250 }
11251
11252 /* Read up to 6 BDs (140 bytes in total) in one firmware query.
11253  * Return the number of bytes actually read; 0 means the read failed.
11254  */
11255 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11256 u32 len, u8 *data)
11257 {
11258 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11259 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11260 u16 read_len;
11261 u16 copy_len;
11262 int ret;
11263 int i;
11264
11265 /* setup all 6 bds to read module eeprom info. */
11266 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11267 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11268 true);
11269
11270 /* bd0~bd4 need next flag */
11271 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11272 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11273 }
11274
11275 /* setup bd0, this bd contains offset and read length. */
11276 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11277 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11278 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11279 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11280
11281 ret = hclge_cmd_send(&hdev->hw, desc, i);
11282 if (ret) {
11283 dev_err(&hdev->pdev->dev,
11284 "failed to get SFP eeprom info, ret = %d\n", ret);
11285 return 0;
11286 }
11287
11288 /* copy sfp info from bd0 to out buffer. */
11289 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11290 memcpy(data, sfp_info_bd0->data, copy_len);
11291 read_len = copy_len;
11292
11293 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11294 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11295 if (read_len >= len)
11296 return read_len;
11297
11298 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11299 memcpy(data + read_len, desc[i].data, copy_len);
11300 read_len += copy_len;
11301 }
11302
11303 return read_len;
11304 }
11305
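/* ethtool module EEPROM read: only supported on fibre media with a module
 * plugged in. Reads in firmware-sized chunks until the requested length
 * is filled; a zero-length read is reported as -EIO.
 */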
11306 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11307 u32 len, u8 *data)
11308 {
11309 struct hclge_vport *vport = hclge_get_vport(handle);
11310 struct hclge_dev *hdev = vport->back;
11311 u32 read_len = 0;
11312 u16 data_len;
11313
11314 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11315 return -EOPNOTSUPP;
11316
11317 if (!hclge_module_existed(hdev))
11318 return -ENXIO;
11319
11320 while (read_len < len) {
11321 data_len = hclge_get_sfp_eeprom_info(hdev,
11322 offset + read_len,
11323 len - read_len,
11324 data + read_len);
11325 if (!data_len)
11326 return -EIO;
11327
11328 read_len += data_len;
11329 }
11330
11331 return 0;
11332 }
11333
11334 static const struct hnae3_ae_ops hclge_ops = {
11335 .init_ae_dev = hclge_init_ae_dev,
11336 .uninit_ae_dev = hclge_uninit_ae_dev,
11337 .flr_prepare = hclge_flr_prepare,
11338 .flr_done = hclge_flr_done,
11339 .init_client_instance = hclge_init_client_instance,
11340 .uninit_client_instance = hclge_uninit_client_instance,
11341 .map_ring_to_vector = hclge_map_ring_to_vector,
11342 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11343 .get_vector = hclge_get_vector,
11344 .put_vector = hclge_put_vector,
11345 .set_promisc_mode = hclge_set_promisc_mode,
11346 .request_update_promisc_mode = hclge_request_update_promisc_mode,
11347 .set_loopback = hclge_set_loopback,
11348 .start = hclge_ae_start,
11349 .stop = hclge_ae_stop,
11350 .client_start = hclge_client_start,
11351 .client_stop = hclge_client_stop,
11352 .get_status = hclge_get_status,
11353 .get_ksettings_an_result = hclge_get_ksettings_an_result,
11354 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11355 .get_media_type = hclge_get_media_type,
11356 .check_port_speed = hclge_check_port_speed,
11357 .get_fec = hclge_get_fec,
11358 .set_fec = hclge_set_fec,
11359 .get_rss_key_size = hclge_get_rss_key_size,
11360 .get_rss_indir_size = hclge_get_rss_indir_size,
11361 .get_rss = hclge_get_rss,
11362 .set_rss = hclge_set_rss,
11363 .set_rss_tuple = hclge_set_rss_tuple,
11364 .get_rss_tuple = hclge_get_rss_tuple,
11365 .get_tc_size = hclge_get_tc_size,
11366 .get_mac_addr = hclge_get_mac_addr,
11367 .set_mac_addr = hclge_set_mac_addr,
11368 .do_ioctl = hclge_do_ioctl,
11369 .add_uc_addr = hclge_add_uc_addr,
11370 .rm_uc_addr = hclge_rm_uc_addr,
11371 .add_mc_addr = hclge_add_mc_addr,
11372 .rm_mc_addr = hclge_rm_mc_addr,
11373 .set_autoneg = hclge_set_autoneg,
11374 .get_autoneg = hclge_get_autoneg,
11375 .restart_autoneg = hclge_restart_autoneg,
11376 .halt_autoneg = hclge_halt_autoneg,
11377 .get_pauseparam = hclge_get_pauseparam,
11378 .set_pauseparam = hclge_set_pauseparam,
11379 .set_mtu = hclge_set_mtu,
11380 .reset_queue = hclge_reset_tqp,
11381 .get_stats = hclge_get_stats,
11382 .get_mac_stats = hclge_get_mac_stat,
11383 .update_stats = hclge_update_stats,
11384 .get_strings = hclge_get_strings,
11385 .get_sset_count = hclge_get_sset_count,
11386 .get_fw_version = hclge_get_fw_version,
11387 .get_mdix_mode = hclge_get_mdix_mode,
11388 .enable_vlan_filter = hclge_enable_vlan_filter,
11389 .set_vlan_filter = hclge_set_vlan_filter,
11390 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11391 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11392 .reset_event = hclge_reset_event,
11393 .get_reset_level = hclge_get_reset_level,
11394 .set_default_reset_request = hclge_set_def_reset_request,
11395 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11396 .set_channels = hclge_set_channels,
11397 .get_channels = hclge_get_channels,
11398 .get_regs_len = hclge_get_regs_len,
11399 .get_regs = hclge_get_regs,
11400 .set_led_id = hclge_set_led_id,
11401 .get_link_mode = hclge_get_link_mode,
11402 .add_fd_entry = hclge_add_fd_entry,
11403 .del_fd_entry = hclge_del_fd_entry,
11404 .del_all_fd_entries = hclge_del_all_fd_entries,
11405 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11406 .get_fd_rule_info = hclge_get_fd_rule_info,
11407 .get_fd_all_rules = hclge_get_all_rules,
11408 .enable_fd = hclge_enable_fd,
11409 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11410 .dbg_run_cmd = hclge_dbg_run_cmd,
11411 .handle_hw_ras_error = hclge_handle_hw_ras_error,
11412 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11413 .ae_dev_resetting = hclge_ae_dev_resetting,
11414 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11415 .set_gro_en = hclge_gro_en,
11416 .get_global_queue_id = hclge_covert_handle_qid_global,
11417 .set_timer_task = hclge_set_timer_task,
11418 .mac_connect_phy = hclge_mac_connect_phy,
11419 .mac_disconnect_phy = hclge_mac_disconnect_phy,
11420 .get_vf_config = hclge_get_vf_config,
11421 .set_vf_link_state = hclge_set_vf_link_state,
11422 .set_vf_spoofchk = hclge_set_vf_spoofchk,
11423 .set_vf_trust = hclge_set_vf_trust,
11424 .set_vf_rate = hclge_set_vf_rate,
11425 .set_vf_mac = hclge_set_vf_mac,
11426 .get_module_eeprom = hclge_get_module_eeprom,
11427 .get_cmdq_stat = hclge_get_cmdq_stat,
11428 };
11429
11430 static struct hnae3_ae_algo ae_algo = {
11431 .ops = &hclge_ops,
11432 .pdev_id_table = ae_algo_pci_tbl,
11433 };
11434
11435 static int hclge_init(void)
11436 {
11437 pr_info("%s is initializing\n", HCLGE_NAME);
11438
11439 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11440 if (!hclge_wq) {
11441 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11442 return -ENOMEM;
11443 }
11444
11445 hnae3_register_ae_algo(&ae_algo);
11446
11447 return 0;
11448 }
11449
11450 static void hclge_exit(void)
11451 {
11452 hnae3_unregister_ae_algo(&ae_algo);
11453 destroy_workqueue(hclge_wq);
11454 }
11455 module_init(hclge_init);
11456 module_exit(hclge_exit);
11457
11458 MODULE_LICENSE("GPL");
11459 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11460 MODULE_DESCRIPTION("HCLGE Driver");
11461 MODULE_VERSION(HCLGE_MOD_VERSION);
11462