1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
59 static int hclge_init_vlan_config(struct hclge_dev *hdev);
60 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
61 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
62 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
63 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
64 			       u16 *allocated_size, bool is_alloc);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static struct hnae3_ae_algo ae_algo;
72 
73 static const struct pci_device_id ae_algo_pci_tbl[] = {
74 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
75 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
76 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
77 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
81 	/* required last entry */
82 	{0, }
83 };
84 
85 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
86 
87 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
88 					 HCLGE_CMDQ_TX_ADDR_H_REG,
89 					 HCLGE_CMDQ_TX_DEPTH_REG,
90 					 HCLGE_CMDQ_TX_TAIL_REG,
91 					 HCLGE_CMDQ_TX_HEAD_REG,
92 					 HCLGE_CMDQ_RX_ADDR_L_REG,
93 					 HCLGE_CMDQ_RX_ADDR_H_REG,
94 					 HCLGE_CMDQ_RX_DEPTH_REG,
95 					 HCLGE_CMDQ_RX_TAIL_REG,
96 					 HCLGE_CMDQ_RX_HEAD_REG,
97 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
98 					 HCLGE_CMDQ_INTR_STS_REG,
99 					 HCLGE_CMDQ_INTR_EN_REG,
100 					 HCLGE_CMDQ_INTR_GEN_REG};
101 
102 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
103 					   HCLGE_VECTOR0_OTER_EN_REG,
104 					   HCLGE_MISC_RESET_STS_REG,
105 					   HCLGE_MISC_VECTOR_INT_STS,
106 					   HCLGE_GLOBAL_RESET_REG,
107 					   HCLGE_FUN_RST_ING,
108 					   HCLGE_GRO_EN_REG};
109 
110 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
111 					 HCLGE_RING_RX_ADDR_H_REG,
112 					 HCLGE_RING_RX_BD_NUM_REG,
113 					 HCLGE_RING_RX_BD_LENGTH_REG,
114 					 HCLGE_RING_RX_MERGE_EN_REG,
115 					 HCLGE_RING_RX_TAIL_REG,
116 					 HCLGE_RING_RX_HEAD_REG,
117 					 HCLGE_RING_RX_FBD_NUM_REG,
118 					 HCLGE_RING_RX_OFFSET_REG,
119 					 HCLGE_RING_RX_FBD_OFFSET_REG,
120 					 HCLGE_RING_RX_STASH_REG,
121 					 HCLGE_RING_RX_BD_ERR_REG,
122 					 HCLGE_RING_TX_ADDR_L_REG,
123 					 HCLGE_RING_TX_ADDR_H_REG,
124 					 HCLGE_RING_TX_BD_NUM_REG,
125 					 HCLGE_RING_TX_PRIORITY_REG,
126 					 HCLGE_RING_TX_TC_REG,
127 					 HCLGE_RING_TX_MERGE_EN_REG,
128 					 HCLGE_RING_TX_TAIL_REG,
129 					 HCLGE_RING_TX_HEAD_REG,
130 					 HCLGE_RING_TX_FBD_NUM_REG,
131 					 HCLGE_RING_TX_OFFSET_REG,
132 					 HCLGE_RING_TX_EBD_NUM_REG,
133 					 HCLGE_RING_TX_EBD_OFFSET_REG,
134 					 HCLGE_RING_TX_BD_ERR_REG,
135 					 HCLGE_RING_EN_REG};
136 
137 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
138 					     HCLGE_TQP_INTR_GL0_REG,
139 					     HCLGE_TQP_INTR_GL1_REG,
140 					     HCLGE_TQP_INTR_GL2_REG,
141 					     HCLGE_TQP_INTR_RL_REG};
142 
143 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
144 	"App    Loopback test",
145 	"Serdes serial Loopback test",
146 	"Serdes parallel Loopback test",
147 	"Phy    Loopback test"
148 };
149 
150 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
151 	{"mac_tx_mac_pause_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
153 	{"mac_rx_mac_pause_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
155 	{"mac_tx_control_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
157 	{"mac_rx_control_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
159 	{"mac_tx_pfc_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
161 	{"mac_tx_pfc_pri0_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
163 	{"mac_tx_pfc_pri1_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
165 	{"mac_tx_pfc_pri2_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
167 	{"mac_tx_pfc_pri3_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
169 	{"mac_tx_pfc_pri4_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
171 	{"mac_tx_pfc_pri5_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
173 	{"mac_tx_pfc_pri6_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
175 	{"mac_tx_pfc_pri7_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
177 	{"mac_rx_pfc_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
179 	{"mac_rx_pfc_pri0_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
181 	{"mac_rx_pfc_pri1_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
183 	{"mac_rx_pfc_pri2_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
185 	{"mac_rx_pfc_pri3_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
187 	{"mac_rx_pfc_pri4_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
189 	{"mac_rx_pfc_pri5_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
191 	{"mac_rx_pfc_pri6_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
193 	{"mac_rx_pfc_pri7_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
195 	{"mac_tx_total_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
197 	{"mac_tx_total_oct_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
199 	{"mac_tx_good_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
201 	{"mac_tx_bad_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
203 	{"mac_tx_good_oct_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
205 	{"mac_tx_bad_oct_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
207 	{"mac_tx_uni_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
209 	{"mac_tx_multi_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
211 	{"mac_tx_broad_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
213 	{"mac_tx_undersize_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
215 	{"mac_tx_oversize_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
217 	{"mac_tx_64_oct_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
219 	{"mac_tx_65_127_oct_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
221 	{"mac_tx_128_255_oct_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
223 	{"mac_tx_256_511_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
225 	{"mac_tx_512_1023_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
227 	{"mac_tx_1024_1518_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
229 	{"mac_tx_1519_2047_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
231 	{"mac_tx_2048_4095_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
233 	{"mac_tx_4096_8191_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
235 	{"mac_tx_8192_9216_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
237 	{"mac_tx_9217_12287_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
239 	{"mac_tx_12288_16383_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
241 	{"mac_tx_1519_max_good_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
243 	{"mac_tx_1519_max_bad_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
245 	{"mac_rx_total_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
247 	{"mac_rx_total_oct_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
249 	{"mac_rx_good_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
251 	{"mac_rx_bad_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
253 	{"mac_rx_good_oct_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
255 	{"mac_rx_bad_oct_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
257 	{"mac_rx_uni_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
259 	{"mac_rx_multi_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
261 	{"mac_rx_broad_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
263 	{"mac_rx_undersize_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
265 	{"mac_rx_oversize_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
267 	{"mac_rx_64_oct_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
269 	{"mac_rx_65_127_oct_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
271 	{"mac_rx_128_255_oct_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
273 	{"mac_rx_256_511_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
275 	{"mac_rx_512_1023_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
277 	{"mac_rx_1024_1518_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
279 	{"mac_rx_1519_2047_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
281 	{"mac_rx_2048_4095_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
283 	{"mac_rx_4096_8191_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
285 	{"mac_rx_8192_9216_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
287 	{"mac_rx_9217_12287_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
289 	{"mac_rx_12288_16383_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
291 	{"mac_rx_1519_max_good_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
293 	{"mac_rx_1519_max_bad_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
295 
296 	{"mac_tx_fragment_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
298 	{"mac_tx_undermin_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
300 	{"mac_tx_jabber_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
302 	{"mac_tx_err_all_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
304 	{"mac_tx_from_app_good_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
306 	{"mac_tx_from_app_bad_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
308 	{"mac_rx_fragment_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
310 	{"mac_rx_undermin_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
312 	{"mac_rx_jabber_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
314 	{"mac_rx_fcs_err_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
316 	{"mac_rx_send_app_good_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
318 	{"mac_rx_send_app_bad_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
320 };
321 
322 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
323 	{
324 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
325 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
326 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
327 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
328 		.i_port_bitmap = 0x1,
329 	},
330 };
331 
332 static const u8 hclge_hash_key[] = {
333 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
334 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
335 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
336 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
337 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
338 };
339 
340 static const u32 hclge_dfx_bd_offset_list[] = {
341 	HCLGE_DFX_BIOS_BD_OFFSET,
342 	HCLGE_DFX_SSU_0_BD_OFFSET,
343 	HCLGE_DFX_SSU_1_BD_OFFSET,
344 	HCLGE_DFX_IGU_BD_OFFSET,
345 	HCLGE_DFX_RPU_0_BD_OFFSET,
346 	HCLGE_DFX_RPU_1_BD_OFFSET,
347 	HCLGE_DFX_NCSI_BD_OFFSET,
348 	HCLGE_DFX_RTC_BD_OFFSET,
349 	HCLGE_DFX_PPP_BD_OFFSET,
350 	HCLGE_DFX_RCB_BD_OFFSET,
351 	HCLGE_DFX_TQP_BD_OFFSET,
352 	HCLGE_DFX_SSU_2_BD_OFFSET
353 };
354 
355 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
356 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
357 	HCLGE_OPC_DFX_SSU_REG_0,
358 	HCLGE_OPC_DFX_SSU_REG_1,
359 	HCLGE_OPC_DFX_IGU_EGU_REG,
360 	HCLGE_OPC_DFX_RPU_REG_0,
361 	HCLGE_OPC_DFX_RPU_REG_1,
362 	HCLGE_OPC_DFX_NCSI_REG,
363 	HCLGE_OPC_DFX_RTC_REG,
364 	HCLGE_OPC_DFX_PPP_REG,
365 	HCLGE_OPC_DFX_RCB_REG,
366 	HCLGE_OPC_DFX_TQP_REG,
367 	HCLGE_OPC_DFX_SSU_REG_2
368 };
369 
370 static const struct key_info meta_data_key_info[] = {
371 	{ PACKET_TYPE_ID, 6},
372 	{ IP_FRAGEMENT, 1},
373 	{ ROCE_TYPE, 1},
374 	{ NEXT_KEY, 5},
375 	{ VLAN_NUMBER, 2},
376 	{ SRC_VPORT, 12},
377 	{ DST_VPORT, 12},
378 	{ TUNNEL_PACKET, 1},
379 };
380 
381 static const struct key_info tuple_key_info[] = {
382 	{ OUTER_DST_MAC, 48},
383 	{ OUTER_SRC_MAC, 48},
384 	{ OUTER_VLAN_TAG_FST, 16},
385 	{ OUTER_VLAN_TAG_SEC, 16},
386 	{ OUTER_ETH_TYPE, 16},
387 	{ OUTER_L2_RSV, 16},
388 	{ OUTER_IP_TOS, 8},
389 	{ OUTER_IP_PROTO, 8},
390 	{ OUTER_SRC_IP, 32},
391 	{ OUTER_DST_IP, 32},
392 	{ OUTER_L3_RSV, 16},
393 	{ OUTER_SRC_PORT, 16},
394 	{ OUTER_DST_PORT, 16},
395 	{ OUTER_L4_RSV, 32},
396 	{ OUTER_TUN_VNI, 24},
397 	{ OUTER_TUN_FLOW_ID, 8},
398 	{ INNER_DST_MAC, 48},
399 	{ INNER_SRC_MAC, 48},
400 	{ INNER_VLAN_TAG_FST, 16},
401 	{ INNER_VLAN_TAG_SEC, 16},
402 	{ INNER_ETH_TYPE, 16},
403 	{ INNER_L2_RSV, 16},
404 	{ INNER_IP_TOS, 8},
405 	{ INNER_IP_PROTO, 8},
406 	{ INNER_SRC_IP, 32},
407 	{ INNER_DST_IP, 32},
408 	{ INNER_L3_RSV, 16},
409 	{ INNER_SRC_PORT, 16},
410 	{ INNER_DST_PORT, 16},
411 	{ INNER_L4_RSV, 32},
412 };
413 
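/* Read MAC statistics using the legacy fixed-length command: send
 * HCLGE_MAC_CMD_NUM descriptors in one go and accumulate each returned
 * 64-bit counter into hdev->hw_stats.mac_stats.
 */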
414 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
415 {
416 #define HCLGE_MAC_CMD_NUM 21
417 
418 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
419 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
420 	__le64 *desc_data;
421 	int i, k, n;
422 	int ret;
423 
424 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
425 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
426 	if (ret) {
427 		dev_err(&hdev->pdev->dev,
428 			"Get MAC pkt stats fail, status = %d.\n", ret);
429 
430 		return ret;
431 	}
432 
433 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
434 		/* for special opcode 0032, only the first desc has the head */
435 		if (unlikely(i == 0)) {
436 			desc_data = (__le64 *)(&desc[i].data[0]);
437 			n = HCLGE_RD_FIRST_STATS_NUM;
438 		} else {
439 			desc_data = (__le64 *)(&desc[i]);
440 			n = HCLGE_RD_OTHER_STATS_NUM;
441 		}
442 
443 		for (k = 0; k < n; k++) {
444 			*data += le64_to_cpu(*desc_data);
445 			data++;
446 			desc_data++;
447 		}
448 	}
449 
450 	return 0;
451 }
452 
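/* Read MAC statistics using the extended command: the caller supplies the
 * descriptor count queried from firmware, the descriptor array is allocated
 * dynamically, and each returned counter is accumulated as above.
 */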
453 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
454 {
455 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
456 	struct hclge_desc *desc;
457 	__le64 *desc_data;
458 	u16 i, k, n;
459 	int ret;
460 
461 	/* This may be called inside atomic sections,
462 	 * so GFP_ATOMIC is more suitable here
463 	 */
464 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
465 	if (!desc)
466 		return -ENOMEM;
467 
468 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
469 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
470 	if (ret) {
471 		kfree(desc);
472 		return ret;
473 	}
474 
475 	for (i = 0; i < desc_num; i++) {
476 		/* for special opcode 0034, only the first desc has the head */
477 		if (i == 0) {
478 			desc_data = (__le64 *)(&desc[i].data[0]);
479 			n = HCLGE_RD_FIRST_STATS_NUM;
480 		} else {
481 			desc_data = (__le64 *)(&desc[i]);
482 			n = HCLGE_RD_OTHER_STATS_NUM;
483 		}
484 
485 		for (k = 0; k < n; k++) {
486 			*data += le64_to_cpu(*desc_data);
487 			data++;
488 			desc_data++;
489 		}
490 	}
491 
492 	kfree(desc);
493 
494 	return 0;
495 }
496 
497 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
498 {
499 	struct hclge_desc desc;
500 	__le32 *desc_data;
501 	u32 reg_num;
502 	int ret;
503 
504 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
505 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
506 	if (ret)
507 		return ret;
508 
509 	desc_data = (__le32 *)(&desc.data[0]);
510 	reg_num = le32_to_cpu(*desc_data);
511 
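	/* The first descriptor holds the command head and carries fewer stats
	 * than the rest, so the conversion below is
	 * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4).
	 */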
512 	*desc_num = 1 + ((reg_num - 3) >> 2) +
513 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
514 
515 	return 0;
516 }
517 
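/* Update MAC statistics, preferring the register-count based method when
 * the firmware supports it and falling back to the legacy fixed-length
 * command on -EOPNOTSUPP.
 */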
518 static int hclge_mac_update_stats(struct hclge_dev *hdev)
519 {
520 	u32 desc_num;
521 	int ret;
522 
523 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
524 
525 	/* The firmware supports the new statistics acquisition method */
526 	if (!ret)
527 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
528 	else if (ret == -EOPNOTSUPP)
529 		ret = hclge_mac_update_stats_defective(hdev);
530 	else
531 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
532 
533 	return ret;
534 }
535 
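/* Accumulate the RX and TX packet counters of every TQP owned by this
 * handle by querying the queue status descriptors one queue at a time.
 */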
536 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
537 {
538 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
539 	struct hclge_vport *vport = hclge_get_vport(handle);
540 	struct hclge_dev *hdev = vport->back;
541 	struct hnae3_queue *queue;
542 	struct hclge_desc desc[1];
543 	struct hclge_tqp *tqp;
544 	int ret, i;
545 
546 	for (i = 0; i < kinfo->num_tqps; i++) {
547 		queue = handle->kinfo.tqp[i];
548 		tqp = container_of(queue, struct hclge_tqp, q);
549 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
550 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
551 					   true);
552 
553 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
554 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
555 		if (ret) {
556 			dev_err(&hdev->pdev->dev,
557 				"Query tqp stat fail, status = %d,queue = %d\n",
558 				ret, i);
559 			return ret;
560 		}
561 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
562 			le32_to_cpu(desc[0].data[1]);
563 	}
564 
565 	for (i = 0; i < kinfo->num_tqps; i++) {
566 		queue = handle->kinfo.tqp[i];
567 		tqp = container_of(queue, struct hclge_tqp, q);
568 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
569 		hclge_cmd_setup_basic_desc(&desc[0],
570 					   HCLGE_OPC_QUERY_TX_STATUS,
571 					   true);
572 
573 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
574 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
575 		if (ret) {
576 			dev_err(&hdev->pdev->dev,
577 				"Query tqp stat fail, status = %d,queue = %d\n",
578 				ret, i);
579 			return ret;
580 		}
581 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
582 			le32_to_cpu(desc[0].data[1]);
583 	}
584 
585 	return 0;
586 }
587 
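/* Copy the accumulated per-queue TX counters followed by the RX counters
 * into @data and return the position just past the copied values.
 */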
588 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
589 {
590 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
591 	struct hclge_tqp *tqp;
592 	u64 *buff = data;
593 	int i;
594 
595 	for (i = 0; i < kinfo->num_tqps; i++) {
596 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
597 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
598 	}
599 
600 	for (i = 0; i < kinfo->num_tqps; i++) {
601 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
603 	}
604 
605 	return buff;
606 }
607 
608 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
609 {
610 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
611 
612 	/* each tqp has a TX queue and an RX queue */
613 	return kinfo->num_tqps * (2);
614 }
615 
616 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
617 {
618 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
619 	u8 *buff = data;
620 	int i = 0;
621 
622 	for (i = 0; i < kinfo->num_tqps; i++) {
623 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
624 			struct hclge_tqp, q);
625 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
626 			 tqp->index);
627 		buff = buff + ETH_GSTRING_LEN;
628 	}
629 
630 	for (i = 0; i < kinfo->num_tqps; i++) {
631 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
632 			struct hclge_tqp, q);
633 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
634 			 tqp->index);
635 		buff = buff + ETH_GSTRING_LEN;
636 	}
637 
638 	return buff;
639 }
640 
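/* Gather the counters described by @strs from @comm_stats into @data using
 * each entry's byte offset; returns the next free slot in @data.
 */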
641 static u64 *hclge_comm_get_stats(const void *comm_stats,
642 				 const struct hclge_comm_stats_str strs[],
643 				 int size, u64 *data)
644 {
645 	u64 *buf = data;
646 	u32 i;
647 
648 	for (i = 0; i < size; i++)
649 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
650 
651 	return buf + size;
652 }
653 
654 static u8 *hclge_comm_get_strings(u32 stringset,
655 				  const struct hclge_comm_stats_str strs[],
656 				  int size, u8 *data)
657 {
658 	char *buff = (char *)data;
659 	u32 i;
660 
661 	if (stringset != ETH_SS_STATS)
662 		return buff;
663 
664 	for (i = 0; i < size; i++) {
665 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return (u8 *)buff;
670 }
671 
672 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
673 {
674 	struct hnae3_handle *handle;
675 	int status;
676 
677 	handle = &hdev->vport[0].nic;
678 	if (handle->client) {
679 		status = hclge_tqps_update_stats(handle);
680 		if (status) {
681 			dev_err(&hdev->pdev->dev,
682 				"Update TQPS stats fail, status = %d.\n",
683 				status);
684 		}
685 	}
686 
687 	status = hclge_mac_update_stats(hdev);
688 	if (status)
689 		dev_err(&hdev->pdev->dev,
690 			"Update MAC stats fail, status = %d.\n", status);
691 }
692 
693 static void hclge_update_stats(struct hnae3_handle *handle,
694 			       struct net_device_stats *net_stats)
695 {
696 	struct hclge_vport *vport = hclge_get_vport(handle);
697 	struct hclge_dev *hdev = vport->back;
698 	int status;
699 
700 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
701 		return;
702 
703 	status = hclge_mac_update_stats(hdev);
704 	if (status)
705 		dev_err(&hdev->pdev->dev,
706 			"Update MAC stats fail, status = %d.\n",
707 			status);
708 
709 	status = hclge_tqps_update_stats(handle);
710 	if (status)
711 		dev_err(&hdev->pdev->dev,
712 			"Update TQPS stats fail, status = %d.\n",
713 			status);
714 
715 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
716 }
717 
718 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
719 {
720 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
721 		HNAE3_SUPPORT_PHY_LOOPBACK |\
722 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
723 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
724 
725 	struct hclge_vport *vport = hclge_get_vport(handle);
726 	struct hclge_dev *hdev = vport->back;
727 	int count = 0;
728 
729 	/* Loopback test support rules:
730 	 * mac: only supported in GE mode
731 	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
732 	 * phy: only supported when a PHY device exists on the board
733 	 */
734 	if (stringset == ETH_SS_TEST) {
735 		/* clear loopback bit flags at first */
736 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
737 		if (hdev->pdev->revision >= 0x21 ||
738 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
739 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
740 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
741 			count += 1;
742 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
743 		}
744 
745 		count += 2;
746 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
747 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
748 
749 		if (hdev->hw.mac.phydev) {
750 			count += 1;
751 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
752 		}
753 
754 	} else if (stringset == ETH_SS_STATS) {
755 		count = ARRAY_SIZE(g_mac_stats_string) +
756 			hclge_tqps_get_sset_count(handle, stringset);
757 	}
758 
759 	return count;
760 }
761 
762 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
763 			      u8 *data)
764 {
765 	u8 *p = (char *)data;
766 	int size;
767 
768 	if (stringset == ETH_SS_STATS) {
769 		size = ARRAY_SIZE(g_mac_stats_string);
770 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
771 					   size, p);
772 		p = hclge_tqps_get_strings(handle, p);
773 	} else if (stringset == ETH_SS_TEST) {
774 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
775 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
776 			       ETH_GSTRING_LEN);
777 			p += ETH_GSTRING_LEN;
778 		}
779 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
780 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
781 			       ETH_GSTRING_LEN);
782 			p += ETH_GSTRING_LEN;
783 		}
784 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
785 			memcpy(p,
786 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
787 			       ETH_GSTRING_LEN);
788 			p += ETH_GSTRING_LEN;
789 		}
790 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
791 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
792 			       ETH_GSTRING_LEN);
793 			p += ETH_GSTRING_LEN;
794 		}
795 	}
796 }
797 
798 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
799 {
800 	struct hclge_vport *vport = hclge_get_vport(handle);
801 	struct hclge_dev *hdev = vport->back;
802 	u64 *p;
803 
804 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
805 				 ARRAY_SIZE(g_mac_stats_string), data);
806 	p = hclge_tqps_get_stats(handle, p);
807 }
808 
809 static void hclge_get_mac_stat(struct hnae3_handle *handle,
810 			       struct hns3_mac_stats *mac_stats)
811 {
812 	struct hclge_vport *vport = hclge_get_vport(handle);
813 	struct hclge_dev *hdev = vport->back;
814 
815 	hclge_update_stats(handle, NULL);
816 
817 	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
818 	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
819 }
820 
821 static int hclge_parse_func_status(struct hclge_dev *hdev,
822 				   struct hclge_func_status_cmd *status)
823 {
824 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
825 		return -EINVAL;
826 
827 	/* Set the pf to main pf */
828 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
829 		hdev->flag |= HCLGE_FLAG_MAIN;
830 	else
831 		hdev->flag &= ~HCLGE_FLAG_MAIN;
832 
833 	return 0;
834 }
835 
836 static int hclge_query_function_status(struct hclge_dev *hdev)
837 {
838 #define HCLGE_QUERY_MAX_CNT	5
839 
840 	struct hclge_func_status_cmd *req;
841 	struct hclge_desc desc;
842 	int timeout = 0;
843 	int ret;
844 
845 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
846 	req = (struct hclge_func_status_cmd *)desc.data;
847 
848 	do {
849 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
850 		if (ret) {
851 			dev_err(&hdev->pdev->dev,
852 				"query function status failed %d.\n", ret);
853 			return ret;
854 		}
855 
856 		/* Check whether PF reset is done */
857 		if (req->pf_state)
858 			break;
859 		usleep_range(1000, 2000);
860 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
861 
862 	ret = hclge_parse_func_status(hdev, req);
863 
864 	return ret;
865 }
866 
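/* Query the PF's hardware resources (TQP count, packet/TX/DV buffer sizes
 * and MSI-X vectors) and derive the NIC/RoCE vector split.
 */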
867 static int hclge_query_pf_resource(struct hclge_dev *hdev)
868 {
869 	struct hclge_pf_res_cmd *req;
870 	struct hclge_desc desc;
871 	int ret;
872 
873 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
874 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
875 	if (ret) {
876 		dev_err(&hdev->pdev->dev,
877 			"query pf resource failed %d.\n", ret);
878 		return ret;
879 	}
880 
881 	req = (struct hclge_pf_res_cmd *)desc.data;
882 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
883 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
884 
885 	if (req->tx_buf_size)
886 		hdev->tx_buf_size =
887 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
888 	else
889 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
890 
891 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
892 
893 	if (req->dv_buf_size)
894 		hdev->dv_buf_size =
895 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
896 	else
897 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
898 
899 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
900 
901 	if (hnae3_dev_roce_supported(hdev)) {
902 		hdev->roce_base_msix_offset =
903 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
904 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
905 		hdev->num_roce_msi =
906 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
907 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
908 
909 		/* the NIC's MSI-X vector count always equals the RoCE's. */
910 		hdev->num_nic_msi = hdev->num_roce_msi;
911 
912 		/* PF should have NIC vectors and Roce vectors,
913 		 * NIC vectors are queued before Roce vectors.
914 		 */
915 		hdev->num_msi = hdev->num_roce_msi +
916 				hdev->roce_base_msix_offset;
917 	} else {
918 		hdev->num_msi =
919 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
920 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
921 
922 		hdev->num_nic_msi = hdev->num_msi;
923 	}
924 
925 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
926 		dev_err(&hdev->pdev->dev,
927 			"Just %u msi resources, not enough for pf(min:2).\n",
928 			hdev->num_nic_msi);
929 		return -EINVAL;
930 	}
931 
932 	return 0;
933 }
934 
935 static int hclge_parse_speed(int speed_cmd, int *speed)
936 {
937 	switch (speed_cmd) {
938 	case 6:
939 		*speed = HCLGE_MAC_SPEED_10M;
940 		break;
941 	case 7:
942 		*speed = HCLGE_MAC_SPEED_100M;
943 		break;
944 	case 0:
945 		*speed = HCLGE_MAC_SPEED_1G;
946 		break;
947 	case 1:
948 		*speed = HCLGE_MAC_SPEED_10G;
949 		break;
950 	case 2:
951 		*speed = HCLGE_MAC_SPEED_25G;
952 		break;
953 	case 3:
954 		*speed = HCLGE_MAC_SPEED_40G;
955 		break;
956 	case 4:
957 		*speed = HCLGE_MAC_SPEED_50G;
958 		break;
959 	case 5:
960 		*speed = HCLGE_MAC_SPEED_100G;
961 		break;
962 	default:
963 		return -EINVAL;
964 	}
965 
966 	return 0;
967 }
968 
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971 	struct hclge_vport *vport = hclge_get_vport(handle);
972 	struct hclge_dev *hdev = vport->back;
973 	u32 speed_ability = hdev->hw.mac.speed_ability;
974 	u32 speed_bit = 0;
975 
976 	switch (speed) {
977 	case HCLGE_MAC_SPEED_10M:
978 		speed_bit = HCLGE_SUPPORT_10M_BIT;
979 		break;
980 	case HCLGE_MAC_SPEED_100M:
981 		speed_bit = HCLGE_SUPPORT_100M_BIT;
982 		break;
983 	case HCLGE_MAC_SPEED_1G:
984 		speed_bit = HCLGE_SUPPORT_1G_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_10G:
987 		speed_bit = HCLGE_SUPPORT_10G_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_25G:
990 		speed_bit = HCLGE_SUPPORT_25G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_40G:
993 		speed_bit = HCLGE_SUPPORT_40G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_50G:
996 		speed_bit = HCLGE_SUPPORT_50G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_100G:
999 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1000 		break;
1001 	default:
1002 		return -EINVAL;
1003 	}
1004 
1005 	if (speed_bit & speed_ability)
1006 		return 0;
1007 
1008 	return -EINVAL;
1009 }
1010 
1011 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1012 {
1013 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1014 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1015 				 mac->supported);
1016 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1017 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1018 				 mac->supported);
1019 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1027 				 mac->supported);
1028 }
1029 
1030 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1031 {
1032 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1033 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1034 				 mac->supported);
1035 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1036 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1037 				 mac->supported);
1038 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1046 				 mac->supported);
1047 }
1048 
1049 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1050 {
1051 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1059 				 mac->supported);
1060 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1061 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1062 				 mac->supported);
1063 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1064 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1065 				 mac->supported);
1066 }
1067 
1068 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1069 {
1070 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1080 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1081 				 mac->supported);
1082 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1083 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1084 				 mac->supported);
1085 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1086 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1087 				 mac->supported);
1088 }
1089 
1090 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1091 {
1092 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1093 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1094 
1095 	switch (mac->speed) {
1096 	case HCLGE_MAC_SPEED_10G:
1097 	case HCLGE_MAC_SPEED_40G:
1098 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1099 				 mac->supported);
1100 		mac->fec_ability =
1101 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1102 		break;
1103 	case HCLGE_MAC_SPEED_25G:
1104 	case HCLGE_MAC_SPEED_50G:
1105 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1106 				 mac->supported);
1107 		mac->fec_ability =
1108 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1109 			BIT(HNAE3_FEC_AUTO);
1110 		break;
1111 	case HCLGE_MAC_SPEED_100G:
1112 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1113 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1114 		break;
1115 	default:
1116 		mac->fec_ability = 0;
1117 		break;
1118 	}
1119 }
1120 
1121 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1122 					u8 speed_ability)
1123 {
1124 	struct hclge_mac *mac = &hdev->hw.mac;
1125 
1126 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1127 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1128 				 mac->supported);
1129 
1130 	hclge_convert_setting_sr(mac, speed_ability);
1131 	hclge_convert_setting_lr(mac, speed_ability);
1132 	hclge_convert_setting_cr(mac, speed_ability);
1133 	if (hdev->pdev->revision >= 0x21)
1134 		hclge_convert_setting_fec(mac);
1135 
1136 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1137 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1138 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1139 }
1140 
1141 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1142 					    u8 speed_ability)
1143 {
1144 	struct hclge_mac *mac = &hdev->hw.mac;
1145 
1146 	hclge_convert_setting_kr(mac, speed_ability);
1147 	if (hdev->pdev->revision >= 0x21)
1148 		hclge_convert_setting_fec(mac);
1149 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1150 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1151 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1152 }
1153 
1154 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1155 					 u8 speed_ability)
1156 {
1157 	unsigned long *supported = hdev->hw.mac.supported;
1158 
1159 	/* default to supporting all speeds for a GE port */
1160 	if (!speed_ability)
1161 		speed_ability = HCLGE_SUPPORT_GE;
1162 
1163 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1164 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1165 				 supported);
1166 
1167 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1168 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1169 				 supported);
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1171 				 supported);
1172 	}
1173 
1174 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1175 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1176 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1177 	}
1178 
1179 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1180 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1181 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1183 }
1184 
1185 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1186 {
1187 	u8 media_type = hdev->hw.mac.media_type;
1188 
1189 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1190 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1191 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1192 		hclge_parse_copper_link_mode(hdev, speed_ability);
1193 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1194 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1195 }
1196 
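/* Decode the configuration block returned in two descriptors into
 * struct hclge_cfg (TC/TQP numbers, MAC address, speed and UMV space).
 */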
1197 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1198 {
1199 	struct hclge_cfg_param_cmd *req;
1200 	u64 mac_addr_tmp_high;
1201 	u64 mac_addr_tmp;
1202 	unsigned int i;
1203 
1204 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1205 
1206 	/* get the configuration */
1207 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1208 					      HCLGE_CFG_VMDQ_M,
1209 					      HCLGE_CFG_VMDQ_S);
1210 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1211 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1212 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1213 					    HCLGE_CFG_TQP_DESC_N_M,
1214 					    HCLGE_CFG_TQP_DESC_N_S);
1215 
1216 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1217 					HCLGE_CFG_PHY_ADDR_M,
1218 					HCLGE_CFG_PHY_ADDR_S);
1219 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1220 					  HCLGE_CFG_MEDIA_TP_M,
1221 					  HCLGE_CFG_MEDIA_TP_S);
1222 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1223 					  HCLGE_CFG_RX_BUF_LEN_M,
1224 					  HCLGE_CFG_RX_BUF_LEN_S);
1225 	/* get mac_address */
1226 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1227 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1228 					    HCLGE_CFG_MAC_ADDR_H_M,
1229 					    HCLGE_CFG_MAC_ADDR_H_S);
1230 
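	/* place the high 16 bits of the MAC address above the low 32 bits;
	 * the 32-bit shift is split into << 31 and << 1
	 */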
1231 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1232 
1233 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1234 					     HCLGE_CFG_DEFAULT_SPEED_M,
1235 					     HCLGE_CFG_DEFAULT_SPEED_S);
1236 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1237 					    HCLGE_CFG_RSS_SIZE_M,
1238 					    HCLGE_CFG_RSS_SIZE_S);
1239 
1240 	for (i = 0; i < ETH_ALEN; i++)
1241 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1242 
1243 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1244 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1245 
1246 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247 					     HCLGE_CFG_SPEED_ABILITY_M,
1248 					     HCLGE_CFG_SPEED_ABILITY_S);
1249 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1251 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1252 	if (!cfg->umv_space)
1253 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1254 }
1255 
1256 /* hclge_get_cfg: query the static parameters from flash
1257  * @hdev: pointer to struct hclge_dev
1258  * @hcfg: the config structure to be filled
1259  */
1260 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1261 {
1262 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1263 	struct hclge_cfg_param_cmd *req;
1264 	unsigned int i;
1265 	int ret;
1266 
1267 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1268 		u32 offset = 0;
1269 
1270 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1271 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1272 					   true);
1273 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1274 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1275 		/* the length field is in units of 4 bytes when sent to hardware */
1276 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1277 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1278 		req->offset = cpu_to_le32(offset);
1279 	}
1280 
1281 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1282 	if (ret) {
1283 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1284 		return ret;
1285 	}
1286 
1287 	hclge_parse_cfg(hcfg, desc);
1288 
1289 	return 0;
1290 }
1291 
1292 static int hclge_get_cap(struct hclge_dev *hdev)
1293 {
1294 	int ret;
1295 
1296 	ret = hclge_query_function_status(hdev);
1297 	if (ret) {
1298 		dev_err(&hdev->pdev->dev,
1299 			"query function status error %d.\n", ret);
1300 		return ret;
1301 	}
1302 
1303 	/* get pf resource */
1304 	ret = hclge_query_pf_resource(hdev);
1305 	if (ret)
1306 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1307 
1308 	return ret;
1309 }
1310 
1311 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1312 {
1313 #define HCLGE_MIN_TX_DESC	64
1314 #define HCLGE_MIN_RX_DESC	64
1315 
1316 	if (!is_kdump_kernel())
1317 		return;
1318 
1319 	dev_info(&hdev->pdev->dev,
1320 		 "Running kdump kernel. Using minimal resources\n");
1321 
1322 	/* the minimum number of queue pairs equals the number of vports */
1323 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1324 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1325 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1326 }
1327 
1328 static int hclge_configure(struct hclge_dev *hdev)
1329 {
1330 	struct hclge_cfg cfg;
1331 	unsigned int i;
1332 	int ret;
1333 
1334 	ret = hclge_get_cfg(hdev, &cfg);
1335 	if (ret) {
1336 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1337 		return ret;
1338 	}
1339 
1340 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1341 	hdev->base_tqp_pid = 0;
1342 	hdev->rss_size_max = cfg.rss_size_max;
1343 	hdev->rx_buf_len = cfg.rx_buf_len;
1344 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1345 	hdev->hw.mac.media_type = cfg.media_type;
1346 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1347 	hdev->num_tx_desc = cfg.tqp_desc_num;
1348 	hdev->num_rx_desc = cfg.tqp_desc_num;
1349 	hdev->tm_info.num_pg = 1;
1350 	hdev->tc_max = cfg.tc_num;
1351 	hdev->tm_info.hw_pfc_map = 0;
1352 	hdev->wanted_umv_size = cfg.umv_space;
1353 
1354 	if (hnae3_dev_fd_supported(hdev)) {
1355 		hdev->fd_en = true;
1356 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1357 	}
1358 
1359 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1360 	if (ret) {
1361 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1362 		return ret;
1363 	}
1364 
1365 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1366 
1367 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1368 	    (hdev->tc_max < 1)) {
1369 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1370 			 hdev->tc_max);
1371 		hdev->tc_max = 1;
1372 	}
1373 
1374 	/* Dev does not support DCB */
1375 	if (!hnae3_dev_dcb_supported(hdev)) {
1376 		hdev->tc_max = 1;
1377 		hdev->pfc_max = 0;
1378 	} else {
1379 		hdev->pfc_max = hdev->tc_max;
1380 	}
1381 
1382 	hdev->tm_info.num_tc = 1;
1383 
1384 	/* Non-contiguous TCs are currently not supported */
1385 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1386 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1387 
1388 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1389 
1390 	hclge_init_kdump_kernel_config(hdev);
1391 
1392 	/* Set the initial affinity based on the PCI function number */
1393 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1394 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1395 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1396 			&hdev->affinity_mask);
1397 
1398 	return ret;
1399 }
1400 
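/* Program the minimum and maximum TSO MSS limits into the hardware. */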
1401 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1402 			    unsigned int tso_mss_max)
1403 {
1404 	struct hclge_cfg_tso_status_cmd *req;
1405 	struct hclge_desc desc;
1406 	u16 tso_mss;
1407 
1408 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1409 
1410 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1411 
1412 	tso_mss = 0;
1413 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1414 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1415 	req->tso_mss_min = cpu_to_le16(tso_mss);
1416 
1417 	tso_mss = 0;
1418 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1419 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1420 	req->tso_mss_max = cpu_to_le16(tso_mss);
1421 
1422 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1423 }
1424 
1425 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1426 {
1427 	struct hclge_cfg_gro_status_cmd *req;
1428 	struct hclge_desc desc;
1429 	int ret;
1430 
1431 	if (!hnae3_dev_gro_supported(hdev))
1432 		return 0;
1433 
1434 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1435 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1436 
1437 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1438 
1439 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1440 	if (ret)
1441 		dev_err(&hdev->pdev->dev,
1442 			"GRO hardware config cmd failed, ret = %d\n", ret);
1443 
1444 	return ret;
1445 }
1446 
1447 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1448 {
1449 	struct hclge_tqp *tqp;
1450 	int i;
1451 
1452 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1453 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1454 	if (!hdev->htqp)
1455 		return -ENOMEM;
1456 
1457 	tqp = hdev->htqp;
1458 
1459 	for (i = 0; i < hdev->num_tqps; i++) {
1460 		tqp->dev = &hdev->pdev->dev;
1461 		tqp->index = i;
1462 
1463 		tqp->q.ae_algo = &ae_algo;
1464 		tqp->q.buf_size = hdev->rx_buf_len;
1465 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1466 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1467 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1468 			i * HCLGE_TQP_REG_SIZE;
1469 
1470 		tqp++;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
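/* Bind physical queue @tqp_pid to function @func_id as that function's
 * virtual queue @tqp_vid; @is_pf selects the PF/VF mapping type bit.
 */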
1476 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1477 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1478 {
1479 	struct hclge_tqp_map_cmd *req;
1480 	struct hclge_desc desc;
1481 	int ret;
1482 
1483 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1484 
1485 	req = (struct hclge_tqp_map_cmd *)desc.data;
1486 	req->tqp_id = cpu_to_le16(tqp_pid);
1487 	req->tqp_vf = func_id;
1488 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1489 	if (!is_pf)
1490 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1491 	req->tqp_vid = cpu_to_le16(tqp_vid);
1492 
1493 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1494 	if (ret)
1495 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1496 
1497 	return ret;
1498 }
1499 
1500 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1501 {
1502 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1503 	struct hclge_dev *hdev = vport->back;
1504 	int i, alloced;
1505 
1506 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1507 	     alloced < num_tqps; i++) {
1508 		if (!hdev->htqp[i].alloced) {
1509 			hdev->htqp[i].q.handle = &vport->nic;
1510 			hdev->htqp[i].q.tqp_index = alloced;
1511 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1512 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1513 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1514 			hdev->htqp[i].alloced = true;
1515 			alloced++;
1516 		}
1517 	}
1518 	vport->alloc_tqps = alloced;
1519 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1520 				vport->alloc_tqps / hdev->tm_info.num_tc);
1521 
1522 	/* ensure a one-to-one mapping between irq and queue by default */
1523 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1524 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1525 
1526 	return 0;
1527 }
1528 
1529 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1530 			    u16 num_tx_desc, u16 num_rx_desc)
1531 
1532 {
1533 	struct hnae3_handle *nic = &vport->nic;
1534 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1535 	struct hclge_dev *hdev = vport->back;
1536 	int ret;
1537 
1538 	kinfo->num_tx_desc = num_tx_desc;
1539 	kinfo->num_rx_desc = num_rx_desc;
1540 
1541 	kinfo->rx_buf_len = hdev->rx_buf_len;
1542 
1543 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1544 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1545 	if (!kinfo->tqp)
1546 		return -ENOMEM;
1547 
1548 	ret = hclge_assign_tqp(vport, num_tqps);
1549 	if (ret)
1550 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1551 
1552 	return ret;
1553 }
1554 
1555 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1556 				  struct hclge_vport *vport)
1557 {
1558 	struct hnae3_handle *nic = &vport->nic;
1559 	struct hnae3_knic_private_info *kinfo;
1560 	u16 i;
1561 
1562 	kinfo = &nic->kinfo;
1563 	for (i = 0; i < vport->alloc_tqps; i++) {
1564 		struct hclge_tqp *q =
1565 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1566 		bool is_pf;
1567 		int ret;
1568 
1569 		is_pf = !(vport->vport_id);
1570 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1571 					     i, is_pf);
1572 		if (ret)
1573 			return ret;
1574 	}
1575 
1576 	return 0;
1577 }
1578 
1579 static int hclge_map_tqp(struct hclge_dev *hdev)
1580 {
1581 	struct hclge_vport *vport = hdev->vport;
1582 	u16 i, num_vport;
1583 
1584 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1585 	for (i = 0; i < num_vport; i++)	{
1586 		int ret;
1587 
1588 		ret = hclge_map_tqp_to_vport(hdev, vport);
1589 		if (ret)
1590 			return ret;
1591 
1592 		vport++;
1593 	}
1594 
1595 	return 0;
1596 }
1597 
1598 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1599 {
1600 	struct hnae3_handle *nic = &vport->nic;
1601 	struct hclge_dev *hdev = vport->back;
1602 	int ret;
1603 
1604 	nic->pdev = hdev->pdev;
1605 	nic->ae_algo = &ae_algo;
1606 	nic->numa_node_mask = hdev->numa_node_mask;
1607 
1608 	ret = hclge_knic_setup(vport, num_tqps,
1609 			       hdev->num_tx_desc, hdev->num_rx_desc);
1610 	if (ret)
1611 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1612 
1613 	return ret;
1614 }
1615 
1616 static int hclge_alloc_vport(struct hclge_dev *hdev)
1617 {
1618 	struct pci_dev *pdev = hdev->pdev;
1619 	struct hclge_vport *vport;
1620 	u32 tqp_main_vport;
1621 	u32 tqp_per_vport;
1622 	int num_vport, i;
1623 	int ret;
1624 
1625 	/* We need to alloc a vport for main NIC of PF */
1626 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1627 
1628 	if (hdev->num_tqps < num_vport) {
1629 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1630 			hdev->num_tqps, num_vport);
1631 		return -EINVAL;
1632 	}
1633 
1634 	/* Alloc the same number of TQPs for every vport */
1635 	tqp_per_vport = hdev->num_tqps / num_vport;
1636 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1637 
1638 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1639 			     GFP_KERNEL);
1640 	if (!vport)
1641 		return -ENOMEM;
1642 
1643 	hdev->vport = vport;
1644 	hdev->num_alloc_vport = num_vport;
1645 
1646 	if (IS_ENABLED(CONFIG_PCI_IOV))
1647 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1648 
1649 	for (i = 0; i < num_vport; i++) {
1650 		vport->back = hdev;
1651 		vport->vport_id = i;
1652 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1653 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1654 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1655 		INIT_LIST_HEAD(&vport->vlan_list);
1656 		INIT_LIST_HEAD(&vport->uc_mac_list);
1657 		INIT_LIST_HEAD(&vport->mc_mac_list);
1658 
1659 		if (i == 0)
1660 			ret = hclge_vport_setup(vport, tqp_main_vport);
1661 		else
1662 			ret = hclge_vport_setup(vport, tqp_per_vport);
1663 		if (ret) {
1664 			dev_err(&pdev->dev,
1665 				"vport setup failed for vport %d, %d\n",
1666 				i, ret);
1667 			return ret;
1668 		}
1669 
1670 		vport++;
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1677 				    struct hclge_pkt_buf_alloc *buf_alloc)
1678 {
1679 /* TX buffer size is allocated in units of 128 bytes */
1680 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1681 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1682 	struct hclge_tx_buff_alloc_cmd *req;
1683 	struct hclge_desc desc;
1684 	int ret;
1685 	u8 i;
1686 
1687 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1688 
1689 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1690 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1691 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1692 
1693 		req->tx_pkt_buff[i] =
1694 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1695 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1696 	}
1697 
1698 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1699 	if (ret)
1700 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1701 			ret);
1702 
1703 	return ret;
1704 }
1705 
1706 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1707 				 struct hclge_pkt_buf_alloc *buf_alloc)
1708 {
1709 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1710 
1711 	if (ret)
1712 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1713 
1714 	return ret;
1715 }
1716 
1717 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1718 {
1719 	unsigned int i;
1720 	u32 cnt = 0;
1721 
1722 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1723 		if (hdev->hw_tc_map & BIT(i))
1724 			cnt++;
1725 	return cnt;
1726 }
1727 
1728 /* Get the number of pfc enabled TCs, which have private buffer */
1729 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1730 				  struct hclge_pkt_buf_alloc *buf_alloc)
1731 {
1732 	struct hclge_priv_buf *priv;
1733 	unsigned int i;
1734 	int cnt = 0;
1735 
1736 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1737 		priv = &buf_alloc->priv_buf[i];
1738 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1739 		    priv->enable)
1740 			cnt++;
1741 	}
1742 
1743 	return cnt;
1744 }
1745 
1746 /* Get the number of pfc disabled TCs, which have private buffer */
1747 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1748 				     struct hclge_pkt_buf_alloc *buf_alloc)
1749 {
1750 	struct hclge_priv_buf *priv;
1751 	unsigned int i;
1752 	int cnt = 0;
1753 
1754 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1755 		priv = &buf_alloc->priv_buf[i];
1756 		if (hdev->hw_tc_map & BIT(i) &&
1757 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1758 		    priv->enable)
1759 			cnt++;
1760 	}
1761 
1762 	return cnt;
1763 }
1764 
1765 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767 	struct hclge_priv_buf *priv;
1768 	u32 rx_priv = 0;
1769 	int i;
1770 
1771 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772 		priv = &buf_alloc->priv_buf[i];
1773 		if (priv->enable)
1774 			rx_priv += priv->buf_size;
1775 	}
1776 	return rx_priv;
1777 }
1778 
1779 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1780 {
1781 	u32 i, total_tx_size = 0;
1782 
1783 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1784 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1785 
1786 	return total_tx_size;
1787 }
1788 
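/* Check whether the rx buffer budget (@rx_all) can hold the private
 * buffers of all TCs plus the required shared buffer. If it fits, fill
 * in the shared buffer size and its per-TC high/low thresholds.
 */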
1789 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1790 				struct hclge_pkt_buf_alloc *buf_alloc,
1791 				u32 rx_all)
1792 {
1793 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1794 	u32 tc_num = hclge_get_tc_num(hdev);
1795 	u32 shared_buf, aligned_mps;
1796 	u32 rx_priv;
1797 	int i;
1798 
1799 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1800 
1801 	if (hnae3_dev_dcb_supported(hdev))
1802 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1803 					hdev->dv_buf_size;
1804 	else
1805 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1806 					+ hdev->dv_buf_size;
1807 
1808 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1809 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1810 			     HCLGE_BUF_SIZE_UNIT);
1811 
1812 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1813 	if (rx_all < rx_priv + shared_std)
1814 		return false;
1815 
1816 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1817 	buf_alloc->s_buf.buf_size = shared_buf;
1818 	if (hnae3_dev_dcb_supported(hdev)) {
1819 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1820 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1821 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1822 				  HCLGE_BUF_SIZE_UNIT);
1823 	} else {
1824 		buf_alloc->s_buf.self.high = aligned_mps +
1825 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1826 		buf_alloc->s_buf.self.low = aligned_mps;
1827 	}
1828 
1829 	if (hnae3_dev_dcb_supported(hdev)) {
1830 		hi_thrd = shared_buf - hdev->dv_buf_size;
1831 
1832 		if (tc_num <= NEED_RESERVE_TC_NUM)
1833 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1834 					/ BUF_MAX_PERCENT;
1835 
1836 		if (tc_num)
1837 			hi_thrd = hi_thrd / tc_num;
1838 
1839 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1840 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1841 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1842 	} else {
1843 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1844 		lo_thrd = aligned_mps;
1845 	}
1846 
1847 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1848 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1849 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1850 	}
1851 
1852 	return true;
1853 }
1854 
1855 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1856 				struct hclge_pkt_buf_alloc *buf_alloc)
1857 {
1858 	u32 i, total_size;
1859 
1860 	total_size = hdev->pkt_buf_size;
1861 
1862 	/* alloc tx buffer for all enabled tc */
1863 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1864 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1865 
1866 		if (hdev->hw_tc_map & BIT(i)) {
1867 			if (total_size < hdev->tx_buf_size)
1868 				return -ENOMEM;
1869 
1870 			priv->tx_buf_size = hdev->tx_buf_size;
1871 		} else {
1872 			priv->tx_buf_size = 0;
1873 		}
1874 
1875 		total_size -= priv->tx_buf_size;
1876 	}
1877 
1878 	return 0;
1879 }
1880 
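/* Assign rx private buffer size and waterlines for every enabled TC,
 * using the larger waterlines when @max is true, then check whether the
 * result still fits into the remaining rx buffer.
 */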
1881 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1882 				  struct hclge_pkt_buf_alloc *buf_alloc)
1883 {
1884 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1885 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1886 	unsigned int i;
1887 
1888 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1890 
1891 		priv->enable = 0;
1892 		priv->wl.low = 0;
1893 		priv->wl.high = 0;
1894 		priv->buf_size = 0;
1895 
1896 		if (!(hdev->hw_tc_map & BIT(i)))
1897 			continue;
1898 
1899 		priv->enable = 1;
1900 
1901 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1902 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1903 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1904 						HCLGE_BUF_SIZE_UNIT);
1905 		} else {
1906 			priv->wl.low = 0;
1907 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1908 					aligned_mps;
1909 		}
1910 
1911 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1912 	}
1913 
1914 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1915 }
1916 
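/* Release the private buffers of the non-pfc TCs, starting from the
 * last TC, until the remaining rx buffer is enough for the rest.
 */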
1917 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1918 					  struct hclge_pkt_buf_alloc *buf_alloc)
1919 {
1920 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1921 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1922 	int i;
1923 
1924 	/* let the last TC be cleared first */
1925 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1926 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1927 		unsigned int mask = BIT((unsigned int)i);
1928 
1929 		if (hdev->hw_tc_map & mask &&
1930 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1931 			/* Clear the no pfc TC private buffer */
1932 			priv->wl.low = 0;
1933 			priv->wl.high = 0;
1934 			priv->buf_size = 0;
1935 			priv->enable = 0;
1936 			no_pfc_priv_num--;
1937 		}
1938 
1939 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1940 		    no_pfc_priv_num == 0)
1941 			break;
1942 	}
1943 
1944 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1945 }
1946 
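/* Release the private buffers of the pfc enabled TCs, starting from the
 * last TC, until the remaining rx buffer is enough for the rest.
 */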
1947 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1948 					struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1951 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1952 	int i;
1953 
1954 	/* let the last TC be cleared first */
1955 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1956 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1957 		unsigned int mask = BIT((unsigned int)i);
1958 
1959 		if (hdev->hw_tc_map & mask &&
1960 		    hdev->tm_info.hw_pfc_map & mask) {
1961 			/* Reduce the number of pfc TC with private buffer */
1962 			priv->wl.low = 0;
1963 			priv->enable = 0;
1964 			priv->wl.high = 0;
1965 			priv->buf_size = 0;
1966 			pfc_priv_num--;
1967 		}
1968 
1969 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1970 		    pfc_priv_num == 0)
1971 			break;
1972 	}
1973 
1974 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1975 }
1976 
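/* Try to split the whole rx buffer (minus the tx part) into private
 * buffers for the enabled TCs without any shared buffer. Returns false
 * when the per-TC share would be below the minimum required size.
 */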
1977 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1978 				      struct hclge_pkt_buf_alloc *buf_alloc)
1979 {
1980 #define COMPENSATE_BUFFER	0x3C00
1981 #define COMPENSATE_HALF_MPS_NUM	5
1982 #define PRIV_WL_GAP		0x1800
1983 
1984 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1985 	u32 tc_num = hclge_get_tc_num(hdev);
1986 	u32 half_mps = hdev->mps >> 1;
1987 	u32 min_rx_priv;
1988 	unsigned int i;
1989 
1990 	if (tc_num)
1991 		rx_priv = rx_priv / tc_num;
1992 
1993 	if (tc_num <= NEED_RESERVE_TC_NUM)
1994 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1995 
1996 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1997 			COMPENSATE_HALF_MPS_NUM * half_mps;
1998 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1999 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2000 
2001 	if (rx_priv < min_rx_priv)
2002 		return false;
2003 
2004 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2005 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2006 
2007 		priv->enable = 0;
2008 		priv->wl.low = 0;
2009 		priv->wl.high = 0;
2010 		priv->buf_size = 0;
2011 
2012 		if (!(hdev->hw_tc_map & BIT(i)))
2013 			continue;
2014 
2015 		priv->enable = 1;
2016 		priv->buf_size = rx_priv;
2017 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2018 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2019 	}
2020 
2021 	buf_alloc->s_buf.buf_size = 0;
2022 
2023 	return true;
2024 }
2025 
2026 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2027  * @hdev: pointer to struct hclge_dev
2028  * @buf_alloc: pointer to buffer calculation data
2029  * @return: 0: calculation successful, negative: fail
2030  */
2031 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2032 				struct hclge_pkt_buf_alloc *buf_alloc)
2033 {
2034 	/* When DCB is not supported, rx private buffer is not allocated. */
2035 	if (!hnae3_dev_dcb_supported(hdev)) {
2036 		u32 rx_all = hdev->pkt_buf_size;
2037 
2038 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2039 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2040 			return -ENOMEM;
2041 
2042 		return 0;
2043 	}
2044 
2045 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2046 		return 0;
2047 
2048 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2049 		return 0;
2050 
2051 	/* try to decrease the buffer size */
2052 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2053 		return 0;
2054 
2055 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2056 		return 0;
2057 
2058 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2059 		return 0;
2060 
2061 	return -ENOMEM;
2062 }
2063 
2064 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2065 				   struct hclge_pkt_buf_alloc *buf_alloc)
2066 {
2067 	struct hclge_rx_priv_buff_cmd *req;
2068 	struct hclge_desc desc;
2069 	int ret;
2070 	int i;
2071 
2072 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2073 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2074 
2075 	/* Alloc private buffer TCs */
2076 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2077 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2078 
2079 		req->buf_num[i] =
2080 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2081 		req->buf_num[i] |=
2082 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2083 	}
2084 
2085 	req->shared_buf =
2086 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2087 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2088 
2089 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2090 	if (ret)
2091 		dev_err(&hdev->pdev->dev,
2092 			"rx private buffer alloc cmd failed %d\n", ret);
2093 
2094 	return ret;
2095 }
2096 
2097 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2098 				   struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100 	struct hclge_rx_priv_wl_buf *req;
2101 	struct hclge_priv_buf *priv;
2102 	struct hclge_desc desc[2];
2103 	int i, j;
2104 	int ret;
2105 
2106 	for (i = 0; i < 2; i++) {
2107 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2108 					   false);
2109 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2110 
2111 		/* The first descriptor sets the NEXT bit to 1 */
2112 		if (i == 0)
2113 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2114 		else
2115 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2116 
2117 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2118 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2119 
2120 			priv = &buf_alloc->priv_buf[idx];
2121 			req->tc_wl[j].high =
2122 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2123 			req->tc_wl[j].high |=
2124 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2125 			req->tc_wl[j].low =
2126 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2127 			req->tc_wl[j].low |=
2128 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2129 		}
2130 	}
2131 
2132 	/* Send 2 descriptors at one time */
2133 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2134 	if (ret)
2135 		dev_err(&hdev->pdev->dev,
2136 			"rx private waterline config cmd failed %d\n",
2137 			ret);
2138 	return ret;
2139 }
2140 
2141 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2142 				    struct hclge_pkt_buf_alloc *buf_alloc)
2143 {
2144 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2145 	struct hclge_rx_com_thrd *req;
2146 	struct hclge_desc desc[2];
2147 	struct hclge_tc_thrd *tc;
2148 	int i, j;
2149 	int ret;
2150 
2151 	for (i = 0; i < 2; i++) {
2152 		hclge_cmd_setup_basic_desc(&desc[i],
2153 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2154 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2155 
2156 		/* The first descriptor sets the NEXT bit to 1 */
2157 		if (i == 0)
2158 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2159 		else
2160 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2161 
2162 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2163 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2164 
2165 			req->com_thrd[j].high =
2166 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2167 			req->com_thrd[j].high |=
2168 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2169 			req->com_thrd[j].low =
2170 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2171 			req->com_thrd[j].low |=
2172 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2173 		}
2174 	}
2175 
2176 	/* Send 2 descriptors at one time */
2177 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2178 	if (ret)
2179 		dev_err(&hdev->pdev->dev,
2180 			"common threshold config cmd failed %d\n", ret);
2181 	return ret;
2182 }
2183 
2184 static int hclge_common_wl_config(struct hclge_dev *hdev,
2185 				  struct hclge_pkt_buf_alloc *buf_alloc)
2186 {
2187 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2188 	struct hclge_rx_com_wl *req;
2189 	struct hclge_desc desc;
2190 	int ret;
2191 
2192 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2193 
2194 	req = (struct hclge_rx_com_wl *)desc.data;
2195 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2196 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2197 
2198 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2199 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2200 
2201 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2202 	if (ret)
2203 		dev_err(&hdev->pdev->dev,
2204 			"common waterline config cmd failed %d\n", ret);
2205 
2206 	return ret;
2207 }
2208 
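/* Calculate the tx/rx packet buffer allocation and program it into the
 * hardware, including the private/shared buffer sizes, waterlines and
 * common thresholds.
 */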
2209 int hclge_buffer_alloc(struct hclge_dev *hdev)
2210 {
2211 	struct hclge_pkt_buf_alloc *pkt_buf;
2212 	int ret;
2213 
2214 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2215 	if (!pkt_buf)
2216 		return -ENOMEM;
2217 
2218 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2219 	if (ret) {
2220 		dev_err(&hdev->pdev->dev,
2221 			"could not calc tx buffer size for all TCs %d\n", ret);
2222 		goto out;
2223 	}
2224 
2225 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2226 	if (ret) {
2227 		dev_err(&hdev->pdev->dev,
2228 			"could not alloc tx buffers %d\n", ret);
2229 		goto out;
2230 	}
2231 
2232 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2233 	if (ret) {
2234 		dev_err(&hdev->pdev->dev,
2235 			"could not calc rx priv buffer size for all TCs %d\n",
2236 			ret);
2237 		goto out;
2238 	}
2239 
2240 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2241 	if (ret) {
2242 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2243 			ret);
2244 		goto out;
2245 	}
2246 
2247 	if (hnae3_dev_dcb_supported(hdev)) {
2248 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2249 		if (ret) {
2250 			dev_err(&hdev->pdev->dev,
2251 				"could not configure rx private waterline %d\n",
2252 				ret);
2253 			goto out;
2254 		}
2255 
2256 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2257 		if (ret) {
2258 			dev_err(&hdev->pdev->dev,
2259 				"could not configure common threshold %d\n",
2260 				ret);
2261 			goto out;
2262 		}
2263 	}
2264 
2265 	ret = hclge_common_wl_config(hdev, pkt_buf);
2266 	if (ret)
2267 		dev_err(&hdev->pdev->dev,
2268 			"could not configure common waterline %d\n", ret);
2269 
2270 out:
2271 	kfree(pkt_buf);
2272 	return ret;
2273 }
2274 
2275 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2276 {
2277 	struct hnae3_handle *roce = &vport->roce;
2278 	struct hnae3_handle *nic = &vport->nic;
2279 
2280 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2281 
2282 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2283 	    vport->back->num_msi_left == 0)
2284 		return -EINVAL;
2285 
2286 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2287 
2288 	roce->rinfo.netdev = nic->kinfo.netdev;
2289 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2290 
2291 	roce->pdev = nic->pdev;
2292 	roce->ae_algo = nic->ae_algo;
2293 	roce->numa_node_mask = nic->numa_node_mask;
2294 
2295 	return 0;
2296 }
2297 
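/* Allocate the MSI/MSI-X vectors for this PF and set up the
 * vector_status/vector_irq bookkeeping arrays.
 */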
2298 static int hclge_init_msi(struct hclge_dev *hdev)
2299 {
2300 	struct pci_dev *pdev = hdev->pdev;
2301 	int vectors;
2302 	int i;
2303 
2304 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2305 					hdev->num_msi,
2306 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2307 	if (vectors < 0) {
2308 		dev_err(&pdev->dev,
2309 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2310 			vectors);
2311 		return vectors;
2312 	}
2313 	if (vectors < hdev->num_msi)
2314 		dev_warn(&hdev->pdev->dev,
2315 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2316 			 hdev->num_msi, vectors);
2317 
2318 	hdev->num_msi = vectors;
2319 	hdev->num_msi_left = vectors;
2320 
2321 	hdev->base_msi_vector = pdev->irq;
2322 	hdev->roce_base_vector = hdev->base_msi_vector +
2323 				hdev->roce_base_msix_offset;
2324 
2325 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2326 					   sizeof(u16), GFP_KERNEL);
2327 	if (!hdev->vector_status) {
2328 		pci_free_irq_vectors(pdev);
2329 		return -ENOMEM;
2330 	}
2331 
2332 	for (i = 0; i < hdev->num_msi; i++)
2333 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2334 
2335 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2336 					sizeof(int), GFP_KERNEL);
2337 	if (!hdev->vector_irq) {
2338 		pci_free_irq_vectors(pdev);
2339 		return -ENOMEM;
2340 	}
2341 
2342 	return 0;
2343 }
2344 
2345 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2346 {
2347 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2348 		duplex = HCLGE_MAC_FULL;
2349 
2350 	return duplex;
2351 }
2352 
2353 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2354 				      u8 duplex)
2355 {
2356 	struct hclge_config_mac_speed_dup_cmd *req;
2357 	struct hclge_desc desc;
2358 	int ret;
2359 
2360 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2361 
2362 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2363 
2364 	if (duplex)
2365 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2366 
2367 	switch (speed) {
2368 	case HCLGE_MAC_SPEED_10M:
2369 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2370 				HCLGE_CFG_SPEED_S, 6);
2371 		break;
2372 	case HCLGE_MAC_SPEED_100M:
2373 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2374 				HCLGE_CFG_SPEED_S, 7);
2375 		break;
2376 	case HCLGE_MAC_SPEED_1G:
2377 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2378 				HCLGE_CFG_SPEED_S, 0);
2379 		break;
2380 	case HCLGE_MAC_SPEED_10G:
2381 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2382 				HCLGE_CFG_SPEED_S, 1);
2383 		break;
2384 	case HCLGE_MAC_SPEED_25G:
2385 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2386 				HCLGE_CFG_SPEED_S, 2);
2387 		break;
2388 	case HCLGE_MAC_SPEED_40G:
2389 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2390 				HCLGE_CFG_SPEED_S, 3);
2391 		break;
2392 	case HCLGE_MAC_SPEED_50G:
2393 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2394 				HCLGE_CFG_SPEED_S, 4);
2395 		break;
2396 	case HCLGE_MAC_SPEED_100G:
2397 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2398 				HCLGE_CFG_SPEED_S, 5);
2399 		break;
2400 	default:
2401 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2402 		return -EINVAL;
2403 	}
2404 
2405 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2406 		      1);
2407 
2408 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2409 	if (ret) {
2410 		dev_err(&hdev->pdev->dev,
2411 			"mac speed/duplex config cmd failed %d.\n", ret);
2412 		return ret;
2413 	}
2414 
2415 	return 0;
2416 }
2417 
2418 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2419 {
2420 	int ret;
2421 
2422 	duplex = hclge_check_speed_dup(duplex, speed);
2423 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2424 		return 0;
2425 
2426 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2427 	if (ret)
2428 		return ret;
2429 
2430 	hdev->hw.mac.speed = speed;
2431 	hdev->hw.mac.duplex = duplex;
2432 
2433 	return 0;
2434 }
2435 
2436 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2437 				     u8 duplex)
2438 {
2439 	struct hclge_vport *vport = hclge_get_vport(handle);
2440 	struct hclge_dev *hdev = vport->back;
2441 
2442 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2443 }
2444 
2445 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2446 {
2447 	struct hclge_config_auto_neg_cmd *req;
2448 	struct hclge_desc desc;
2449 	u32 flag = 0;
2450 	int ret;
2451 
2452 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2453 
2454 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2455 	if (enable)
2456 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2457 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2458 
2459 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2460 	if (ret)
2461 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2462 			ret);
2463 
2464 	return ret;
2465 }
2466 
2467 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2468 {
2469 	struct hclge_vport *vport = hclge_get_vport(handle);
2470 	struct hclge_dev *hdev = vport->back;
2471 
2472 	if (!hdev->hw.mac.support_autoneg) {
2473 		if (enable) {
2474 			dev_err(&hdev->pdev->dev,
2475 				"autoneg is not supported by current port\n");
2476 			return -EOPNOTSUPP;
2477 		} else {
2478 			return 0;
2479 		}
2480 	}
2481 
2482 	return hclge_set_autoneg_en(hdev, enable);
2483 }
2484 
2485 static int hclge_get_autoneg(struct hnae3_handle *handle)
2486 {
2487 	struct hclge_vport *vport = hclge_get_vport(handle);
2488 	struct hclge_dev *hdev = vport->back;
2489 	struct phy_device *phydev = hdev->hw.mac.phydev;
2490 
2491 	if (phydev)
2492 		return phydev->autoneg;
2493 
2494 	return hdev->hw.mac.autoneg;
2495 }
2496 
2497 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2498 {
2499 	struct hclge_vport *vport = hclge_get_vport(handle);
2500 	struct hclge_dev *hdev = vport->back;
2501 	int ret;
2502 
2503 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2504 
2505 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2506 	if (ret)
2507 		return ret;
2508 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2509 }
2510 
2511 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2512 {
2513 	struct hclge_vport *vport = hclge_get_vport(handle);
2514 	struct hclge_dev *hdev = vport->back;
2515 
2516 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2517 		return hclge_set_autoneg_en(hdev, !halt);
2518 
2519 	return 0;
2520 }
2521 
2522 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2523 {
2524 	struct hclge_config_fec_cmd *req;
2525 	struct hclge_desc desc;
2526 	int ret;
2527 
2528 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2529 
2530 	req = (struct hclge_config_fec_cmd *)desc.data;
2531 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2532 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2533 	if (fec_mode & BIT(HNAE3_FEC_RS))
2534 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2535 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2536 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2537 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2538 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2539 
2540 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2541 	if (ret)
2542 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2543 
2544 	return ret;
2545 }
2546 
2547 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2548 {
2549 	struct hclge_vport *vport = hclge_get_vport(handle);
2550 	struct hclge_dev *hdev = vport->back;
2551 	struct hclge_mac *mac = &hdev->hw.mac;
2552 	int ret;
2553 
2554 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2555 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2556 		return -EINVAL;
2557 	}
2558 
2559 	ret = hclge_set_fec_hw(hdev, fec_mode);
2560 	if (ret)
2561 		return ret;
2562 
2563 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2564 	return 0;
2565 }
2566 
2567 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2568 			  u8 *fec_mode)
2569 {
2570 	struct hclge_vport *vport = hclge_get_vport(handle);
2571 	struct hclge_dev *hdev = vport->back;
2572 	struct hclge_mac *mac = &hdev->hw.mac;
2573 
2574 	if (fec_ability)
2575 		*fec_ability = mac->fec_ability;
2576 	if (fec_mode)
2577 		*fec_mode = mac->fec_mode;
2578 }
2579 
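/* Apply the initial MAC configuration: speed/duplex, autoneg, FEC, MTU,
 * default loopback and the packet buffer allocation.
 */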
2580 static int hclge_mac_init(struct hclge_dev *hdev)
2581 {
2582 	struct hclge_mac *mac = &hdev->hw.mac;
2583 	int ret;
2584 
2585 	hdev->support_sfp_query = true;
2586 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2587 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2588 					 hdev->hw.mac.duplex);
2589 	if (ret) {
2590 		dev_err(&hdev->pdev->dev,
2591 			"Config mac speed dup fail ret=%d\n", ret);
2592 		return ret;
2593 	}
2594 
2595 	if (hdev->hw.mac.support_autoneg) {
2596 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2597 		if (ret) {
2598 			dev_err(&hdev->pdev->dev,
2599 				"Config mac autoneg fail ret=%d\n", ret);
2600 			return ret;
2601 		}
2602 	}
2603 
2604 	mac->link = 0;
2605 
2606 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2607 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2608 		if (ret) {
2609 			dev_err(&hdev->pdev->dev,
2610 				"Fec mode init fail, ret = %d\n", ret);
2611 			return ret;
2612 		}
2613 	}
2614 
2615 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2616 	if (ret) {
2617 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2618 		return ret;
2619 	}
2620 
2621 	ret = hclge_set_default_loopback(hdev);
2622 	if (ret)
2623 		return ret;
2624 
2625 	ret = hclge_buffer_alloc(hdev);
2626 	if (ret)
2627 		dev_err(&hdev->pdev->dev,
2628 			"allocate buffer fail, ret=%d\n", ret);
2629 
2630 	return ret;
2631 }
2632 
2633 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2634 {
2635 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2636 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2637 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2638 			      &hdev->mbx_service_task);
2639 }
2640 
2641 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2642 {
2643 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2644 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2645 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2646 			      &hdev->rst_service_task);
2647 }
2648 
2649 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2650 {
2651 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2652 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2653 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2654 		hdev->hw_stats.stats_timer++;
2655 		hdev->fd_arfs_expire_timer++;
2656 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2657 				    system_wq, &hdev->service_task,
2658 				    delay_time);
2659 	}
2660 }
2661 
2662 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2663 {
2664 	struct hclge_link_status_cmd *req;
2665 	struct hclge_desc desc;
2666 	int link_status;
2667 	int ret;
2668 
2669 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2670 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2671 	if (ret) {
2672 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2673 			ret);
2674 		return ret;
2675 	}
2676 
2677 	req = (struct hclge_link_status_cmd *)desc.data;
2678 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2679 
2680 	return !!link_status;
2681 }
2682 
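/* Combine the MAC link status with the PHY state (when a PHY is
 * attached) to decide the overall link state. Returns 0 when the
 * device is down.
 */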
2683 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2684 {
2685 	unsigned int mac_state;
2686 	int link_stat;
2687 
2688 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2689 		return 0;
2690 
2691 	mac_state = hclge_get_mac_link_status(hdev);
2692 
2693 	if (hdev->hw.mac.phydev) {
2694 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2695 			link_stat = mac_state &
2696 				hdev->hw.mac.phydev->link;
2697 		else
2698 			link_stat = 0;
2699 
2700 	} else {
2701 		link_stat = mac_state;
2702 	}
2703 
2704 	return !!link_stat;
2705 }
2706 
2707 static void hclge_update_link_status(struct hclge_dev *hdev)
2708 {
2709 	struct hnae3_client *rclient = hdev->roce_client;
2710 	struct hnae3_client *client = hdev->nic_client;
2711 	struct hnae3_handle *rhandle;
2712 	struct hnae3_handle *handle;
2713 	int state;
2714 	int i;
2715 
2716 	if (!client)
2717 		return;
2718 	state = hclge_get_mac_phy_link(hdev);
2719 	if (state != hdev->hw.mac.link) {
2720 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2721 			handle = &hdev->vport[i].nic;
2722 			client->ops->link_status_change(handle, state);
2723 			hclge_config_mac_tnl_int(hdev, state);
2724 			rhandle = &hdev->vport[i].roce;
2725 			if (rclient && rclient->ops->link_status_change)
2726 				rclient->ops->link_status_change(rhandle,
2727 								 state);
2728 		}
2729 		hdev->hw.mac.link = state;
2730 	}
2731 }
2732 
2733 static void hclge_update_port_capability(struct hclge_mac *mac)
2734 {
2735 	/* update fec ability by speed */
2736 	hclge_convert_setting_fec(mac);
2737 
2738 	/* firmware can not identify the backplane type, so the media type
2739 	 * read from the configuration can help to deal with it
2740 	 */
2741 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2742 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2743 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2744 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2745 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2746 
2747 	if (mac->support_autoneg) {
2748 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2749 		linkmode_copy(mac->advertising, mac->supported);
2750 	} else {
2751 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2752 				   mac->supported);
2753 		linkmode_zero(mac->advertising);
2754 	}
2755 }
2756 
2757 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2758 {
2759 	struct hclge_sfp_info_cmd *resp;
2760 	struct hclge_desc desc;
2761 	int ret;
2762 
2763 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2764 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2765 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2766 	if (ret == -EOPNOTSUPP) {
2767 		dev_warn(&hdev->pdev->dev,
2768 			 "IMP do not support get SFP speed %d\n", ret);
2769 		return ret;
2770 	} else if (ret) {
2771 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2772 		return ret;
2773 	}
2774 
2775 	*speed = le32_to_cpu(resp->speed);
2776 
2777 	return 0;
2778 }
2779 
2780 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2781 {
2782 	struct hclge_sfp_info_cmd *resp;
2783 	struct hclge_desc desc;
2784 	int ret;
2785 
2786 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2787 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2788 
2789 	resp->query_type = QUERY_ACTIVE_SPEED;
2790 
2791 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2792 	if (ret == -EOPNOTSUPP) {
2793 		dev_warn(&hdev->pdev->dev,
2794 			 "IMP does not support get SFP info %d\n", ret);
2795 		return ret;
2796 	} else if (ret) {
2797 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2798 		return ret;
2799 	}
2800 
2801 	mac->speed = le32_to_cpu(resp->speed);
2802 	/* if resp->speed_ability is 0, it means the firmware is an old
2803 	 * version, so do not update these params
2804 	 */
2805 	if (resp->speed_ability) {
2806 		mac->module_type = le32_to_cpu(resp->module_type);
2807 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2808 		mac->autoneg = resp->autoneg;
2809 		mac->support_autoneg = resp->autoneg_ability;
2810 		mac->speed_type = QUERY_ACTIVE_SPEED;
2811 		if (!resp->active_fec)
2812 			mac->fec_mode = 0;
2813 		else
2814 			mac->fec_mode = BIT(resp->active_fec);
2815 	} else {
2816 		mac->speed_type = QUERY_SFP_SPEED;
2817 	}
2818 
2819 	return 0;
2820 }
2821 
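/* Refresh the port information of a non-copper port from the SFP module
 * via firmware: on revision 0x21 and later the full info (speed, FEC,
 * autoneg) is queried, older revisions only report the speed. The MAC
 * is then reconfigured accordingly.
 */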
2822 static int hclge_update_port_info(struct hclge_dev *hdev)
2823 {
2824 	struct hclge_mac *mac = &hdev->hw.mac;
2825 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2826 	int ret;
2827 
2828 	/* get the port info from SFP cmd if not copper port */
2829 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2830 		return 0;
2831 
2832 	/* if the IMP does not support getting SFP/qSFP info, return directly */
2833 	if (!hdev->support_sfp_query)
2834 		return 0;
2835 
2836 	if (hdev->pdev->revision >= 0x21)
2837 		ret = hclge_get_sfp_info(hdev, mac);
2838 	else
2839 		ret = hclge_get_sfp_speed(hdev, &speed);
2840 
2841 	if (ret == -EOPNOTSUPP) {
2842 		hdev->support_sfp_query = false;
2843 		return ret;
2844 	} else if (ret) {
2845 		return ret;
2846 	}
2847 
2848 	if (hdev->pdev->revision >= 0x21) {
2849 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2850 			hclge_update_port_capability(mac);
2851 			return 0;
2852 		}
2853 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2854 					       HCLGE_MAC_FULL);
2855 	} else {
2856 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2857 			return 0; /* do nothing if no SFP */
2858 
2859 		/* must config full duplex for SFP */
2860 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2861 	}
2862 }
2863 
2864 static int hclge_get_status(struct hnae3_handle *handle)
2865 {
2866 	struct hclge_vport *vport = hclge_get_vport(handle);
2867 	struct hclge_dev *hdev = vport->back;
2868 
2869 	hclge_update_link_status(hdev);
2870 
2871 	return hdev->hw.mac.link;
2872 }
2873 
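/* Read the reset, CMDQ and MSI-X status registers and return which
 * vector0 event should be handled. The bits that need to be cleared
 * are written to @clearval.
 */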
2874 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2875 {
2876 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2877 
2878 	/* fetch the events from their corresponding regs */
2879 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2880 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2881 	msix_src_reg = hclge_read_dev(&hdev->hw,
2882 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2883 
2884 	/* Assumption: if by any chance reset and mailbox events are reported
2885 	 * together, then we will only process the reset event in this pass
2886 	 * and will defer the processing of the mailbox events. Since we have
2887 	 * not cleared the RX CMDQ event this time, we will receive another
2888 	 * interrupt from H/W just for the mailbox.
2889 	 *
2890 	 * check for vector0 reset event sources
2891 	 */
2892 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2893 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2894 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2895 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2896 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2897 		hdev->rst_stats.imp_rst_cnt++;
2898 		return HCLGE_VECTOR0_EVENT_RST;
2899 	}
2900 
2901 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2902 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2903 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2904 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2905 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2906 		hdev->rst_stats.global_rst_cnt++;
2907 		return HCLGE_VECTOR0_EVENT_RST;
2908 	}
2909 
2910 	/* check for vector0 msix event source */
2911 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2912 		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2913 			 msix_src_reg);
2914 		*clearval = msix_src_reg;
2915 		return HCLGE_VECTOR0_EVENT_ERR;
2916 	}
2917 
2918 	/* check for vector0 mailbox(=CMDQ RX) event source */
2919 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2920 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2921 		*clearval = cmdq_src_reg;
2922 		return HCLGE_VECTOR0_EVENT_MBX;
2923 	}
2924 
2925 	/* print other vector0 event source */
2926 	dev_info(&hdev->pdev->dev,
2927 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2928 		 cmdq_src_reg, msix_src_reg);
2929 	*clearval = msix_src_reg;
2930 
2931 	return HCLGE_VECTOR0_EVENT_OTHER;
2932 }
2933 
2934 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2935 				    u32 regclr)
2936 {
2937 	switch (event_type) {
2938 	case HCLGE_VECTOR0_EVENT_RST:
2939 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2940 		break;
2941 	case HCLGE_VECTOR0_EVENT_MBX:
2942 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2943 		break;
2944 	default:
2945 		break;
2946 	}
2947 }
2948 
2949 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2950 {
2951 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2952 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2953 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2954 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2955 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2956 }
2957 
2958 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2959 {
2960 	writel(enable ? 1 : 0, vector->addr);
2961 }
2962 
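/* Misc (vector0) interrupt handler: disable the vector, decode the
 * event (reset, hardware error or mailbox), schedule the matching
 * service task and clear the event cause. The vector is re-enabled
 * before returning only for mailbox events, or when the cause was
 * already cleared by hardware.
 */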
2963 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2964 {
2965 	struct hclge_dev *hdev = data;
2966 	u32 clearval = 0;
2967 	u32 event_cause;
2968 
2969 	hclge_enable_vector(&hdev->misc_vector, false);
2970 	event_cause = hclge_check_event_cause(hdev, &clearval);
2971 
2972 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2973 	switch (event_cause) {
2974 	case HCLGE_VECTOR0_EVENT_ERR:
2975 		/* we do not know what type of reset is required now. This could
2976 		 * only be decided after we fetch the type of errors which
2977 		 * caused this event. Therefore, we will do the following for now:
2978 		 * 1. Assert the HNAE3_UNKNOWN_RESET type of reset. This means
2979 		 *    we have deferred the choice of reset type to be used.
2980 		 * 2. Schedule the reset service task.
2981 		 * 3. When the service task receives the HNAE3_UNKNOWN_RESET
2982 		 *    type, it will fetch the correct type of reset. This would
2983 		 *    be done by first decoding the types of errors.
2984 		 */
2985 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2986 		/* fall through */
2987 	case HCLGE_VECTOR0_EVENT_RST:
2988 		hclge_reset_task_schedule(hdev);
2989 		break;
2990 	case HCLGE_VECTOR0_EVENT_MBX:
2991 		/* If we are here then,
2992 		 * 1. either we are not handling any mbx task and none is
2993 		 *    scheduled,
2994 		 *                        OR
2995 		 * 2. we could be handling an mbx task but nothing more is
2996 		 *    scheduled.
2997 		 * In both cases, we should schedule the mbx task as there are
2998 		 * more mbx messages reported by this interrupt.
2999 		 */
3000 		hclge_mbx_task_schedule(hdev);
3001 		break;
3002 	default:
3003 		dev_warn(&hdev->pdev->dev,
3004 			 "received unknown or unhandled event of vector0\n");
3005 		break;
3006 	}
3007 
3008 	hclge_clear_event_cause(hdev, event_cause, clearval);
3009 
3010 	/* Enable the interrupt if it is not caused by reset. And when
3011 	 * clearval is equal to 0, it means the interrupt status may have been
3012 	 * cleared by hardware before the driver read the status register.
3013 	 * In this case, the vector0 interrupt should also be enabled.
3014 	 */
3015 	if (!clearval ||
3016 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3017 		hclge_enable_vector(&hdev->misc_vector, true);
3018 	}
3019 
3020 	return IRQ_HANDLED;
3021 }
3022 
3023 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3024 {
3025 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3026 		dev_warn(&hdev->pdev->dev,
3027 			 "vector(vector_id %d) has been freed.\n", vector_id);
3028 		return;
3029 	}
3030 
3031 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3032 	hdev->num_msi_left += 1;
3033 	hdev->num_msi_used -= 1;
3034 }
3035 
3036 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3037 {
3038 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3039 
3040 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3041 
3042 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3043 	hdev->vector_status[0] = 0;
3044 
3045 	hdev->num_msi_left -= 1;
3046 	hdev->num_msi_used += 1;
3047 }
3048 
3049 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3050 				      const cpumask_t *mask)
3051 {
3052 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3053 					      affinity_notify);
3054 
3055 	cpumask_copy(&hdev->affinity_mask, mask);
3056 }
3057 
3058 static void hclge_irq_affinity_release(struct kref *ref)
3059 {
3060 }
3061 
3062 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3063 {
3064 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3065 			      &hdev->affinity_mask);
3066 
3067 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3068 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3069 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3070 				  &hdev->affinity_notify);
3071 }
3072 
3073 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3074 {
3075 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3076 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3077 }
3078 
3079 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3080 {
3081 	int ret;
3082 
3083 	hclge_get_misc_vector(hdev);
3084 
3085 	/* this would be explicitly freed in the end */
3086 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3087 			  0, "hclge_misc", hdev);
3088 	if (ret) {
3089 		hclge_free_vector(hdev, 0);
3090 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3091 			hdev->misc_vector.vector_irq);
3092 	}
3093 
3094 	return ret;
3095 }
3096 
3097 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3098 {
3099 	free_irq(hdev->misc_vector.vector_irq, hdev);
3100 	hclge_free_vector(hdev, 0);
3101 }
3102 
3103 int hclge_notify_client(struct hclge_dev *hdev,
3104 			enum hnae3_reset_notify_type type)
3105 {
3106 	struct hnae3_client *client = hdev->nic_client;
3107 	u16 i;
3108 
3109 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3110 		return 0;
3111 
3112 	if (!client->ops->reset_notify)
3113 		return -EOPNOTSUPP;
3114 
3115 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3116 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3117 		int ret;
3118 
3119 		ret = client->ops->reset_notify(handle, type);
3120 		if (ret) {
3121 			dev_err(&hdev->pdev->dev,
3122 				"notify nic client failed %d(%d)\n", type, ret);
3123 			return ret;
3124 		}
3125 	}
3126 
3127 	return 0;
3128 }
3129 
3130 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3131 				    enum hnae3_reset_notify_type type)
3132 {
3133 	struct hnae3_client *client = hdev->roce_client;
3134 	int ret = 0;
3135 	u16 i;
3136 
3137 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3138 		return 0;
3139 
3140 	if (!client->ops->reset_notify)
3141 		return -EOPNOTSUPP;
3142 
3143 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3144 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3145 
3146 		ret = client->ops->reset_notify(handle, type);
3147 		if (ret) {
3148 			dev_err(&hdev->pdev->dev,
3149 				"notify roce client failed %d(%d)",
3150 				type, ret);
3151 			return ret;
3152 		}
3153 	}
3154 
3155 	return ret;
3156 }
3157 
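/* Poll the hardware reset status register (or the FLR done flag) until
 * the current reset completes, or give up after HCLGE_RESET_WAIT_CNT
 * polling rounds.
 */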
3158 static int hclge_reset_wait(struct hclge_dev *hdev)
3159 {
3160 #define HCLGE_RESET_WATI_MS	100
3161 #define HCLGE_RESET_WAIT_CNT	200
3162 	u32 val, reg, reg_bit;
3163 	u32 cnt = 0;
3164 
3165 	switch (hdev->reset_type) {
3166 	case HNAE3_IMP_RESET:
3167 		reg = HCLGE_GLOBAL_RESET_REG;
3168 		reg_bit = HCLGE_IMP_RESET_BIT;
3169 		break;
3170 	case HNAE3_GLOBAL_RESET:
3171 		reg = HCLGE_GLOBAL_RESET_REG;
3172 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3173 		break;
3174 	case HNAE3_FUNC_RESET:
3175 		reg = HCLGE_FUN_RST_ING;
3176 		reg_bit = HCLGE_FUN_RST_ING_B;
3177 		break;
3178 	case HNAE3_FLR_RESET:
3179 		break;
3180 	default:
3181 		dev_err(&hdev->pdev->dev,
3182 			"Wait for unsupported reset type: %d\n",
3183 			hdev->reset_type);
3184 		return -EINVAL;
3185 	}
3186 
3187 	if (hdev->reset_type == HNAE3_FLR_RESET) {
3188 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3189 		       cnt++ < HCLGE_RESET_WAIT_CNT)
3190 			msleep(HCLGE_RESET_WATI_MS);
3191 
3192 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3193 			dev_err(&hdev->pdev->dev,
3194 				"flr wait timeout: %d\n", cnt);
3195 			return -EBUSY;
3196 		}
3197 
3198 		return 0;
3199 	}
3200 
3201 	val = hclge_read_dev(&hdev->hw, reg);
3202 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3203 		msleep(HCLGE_RESET_WATI_MS);
3204 		val = hclge_read_dev(&hdev->hw, reg);
3205 		cnt++;
3206 	}
3207 
3208 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3209 		dev_warn(&hdev->pdev->dev,
3210 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3211 		return -EBUSY;
3212 	}
3213 
3214 	return 0;
3215 }
3216 
3217 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3218 {
3219 	struct hclge_vf_rst_cmd *req;
3220 	struct hclge_desc desc;
3221 
3222 	req = (struct hclge_vf_rst_cmd *)desc.data;
3223 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3224 	req->dest_vfid = func_id;
3225 
3226 	if (reset)
3227 		req->vf_rst = 0x1;
3228 
3229 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3230 }
3231 
3232 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3233 {
3234 	int i;
3235 
3236 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3237 		struct hclge_vport *vport = &hdev->vport[i];
3238 		int ret;
3239 
3240 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3241 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3242 		if (ret) {
3243 			dev_err(&hdev->pdev->dev,
3244 				"set vf(%d) rst failed %d!\n",
3245 				vport->vport_id, ret);
3246 			return ret;
3247 		}
3248 
3249 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3250 			continue;
3251 
3252 		/* Inform VF to process the reset.
3253 		 * hclge_inform_reset_assert_to_vf may fail if VF
3254 		 * driver is not loaded.
3255 		 */
3256 		ret = hclge_inform_reset_assert_to_vf(vport);
3257 		if (ret)
3258 			dev_warn(&hdev->pdev->dev,
3259 				 "inform reset to vf(%d) failed %d!\n",
3260 				 vport->vport_id, ret);
3261 	}
3262 
3263 	return 0;
3264 }
3265 
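/* Poll the firmware until every VF reports that it has stopped IO,
 * retrying up to HCLGE_PF_RESET_SYNC_CNT times. Old firmware that does
 * not support the query gets a fixed delay instead.
 */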
3266 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3267 {
3268 	struct hclge_pf_rst_sync_cmd *req;
3269 	struct hclge_desc desc;
3270 	int cnt = 0;
3271 	int ret;
3272 
3273 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3274 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3275 
3276 	do {
3277 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3278 		/* for compatibility with old firmware, wait
3279 		 * 100 ms for the VF to stop IO
3280 		 */
3281 		if (ret == -EOPNOTSUPP) {
3282 			msleep(HCLGE_RESET_SYNC_TIME);
3283 			return 0;
3284 		} else if (ret) {
3285 			dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3286 				ret);
3287 			return ret;
3288 		} else if (req->all_vf_ready) {
3289 			return 0;
3290 		}
3291 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3292 		hclge_cmd_reuse_desc(&desc, true);
3293 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3294 
3295 	dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3296 	return -ETIME;
3297 }
3298 
3299 void hclge_report_hw_error(struct hclge_dev *hdev,
3300 			   enum hnae3_hw_error_type type)
3301 {
3302 	struct hnae3_client *client = hdev->nic_client;
3303 	u16 i;
3304 
3305 	if (!client || !client->ops->process_hw_error ||
3306 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3307 		return;
3308 
3309 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3310 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3311 }
3312 
3313 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3314 {
3315 	u32 reg_val;
3316 
3317 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3318 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3319 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3320 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3321 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3322 	}
3323 
3324 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3325 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3326 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3327 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3328 	}
3329 }
3330 
3331 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3332 {
3333 	struct hclge_desc desc;
3334 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3335 	int ret;
3336 
3337 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3338 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3339 	req->fun_reset_vfid = func_id;
3340 
3341 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3342 	if (ret)
3343 		dev_err(&hdev->pdev->dev,
3344 			"send function reset cmd fail, status =%d\n", ret);
3345 
3346 	return ret;
3347 }
3348 
3349 static void hclge_do_reset(struct hclge_dev *hdev)
3350 {
3351 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3352 	struct pci_dev *pdev = hdev->pdev;
3353 	u32 val;
3354 
3355 	if (hclge_get_hw_reset_stat(handle)) {
3356 		dev_info(&pdev->dev, "Hardware reset not finish\n");
3357 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3358 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3359 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3360 		return;
3361 	}
3362 
3363 	switch (hdev->reset_type) {
3364 	case HNAE3_GLOBAL_RESET:
3365 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3366 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3367 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3368 		dev_info(&pdev->dev, "Global Reset requested\n");
3369 		break;
3370 	case HNAE3_FUNC_RESET:
3371 		dev_info(&pdev->dev, "PF Reset requested\n");
3372 		/* schedule again to check later */
3373 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3374 		hclge_reset_task_schedule(hdev);
3375 		break;
3376 	case HNAE3_FLR_RESET:
3377 		dev_info(&pdev->dev, "FLR requested\n");
3378 		/* schedule again to check later */
3379 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3380 		hclge_reset_task_schedule(hdev);
3381 		break;
3382 	default:
3383 		dev_warn(&pdev->dev,
3384 			 "Unsupported reset type: %d\n", hdev->reset_type);
3385 		break;
3386 	}
3387 }
3388 
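/* Pick the highest-priority pending reset level from the bitmap in addr and
 * clear it, together with the lower levels it covers. An UNKNOWN reset is
 * first resolved by handling the deferred MSI-X errors and re-enabling the
 * misc vector. If a reset of a higher level than the one found here is
 * already in progress, HNAE3_NONE_RESET is returned instead.
 */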
3389 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3390 						   unsigned long *addr)
3391 {
3392 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3393 	struct hclge_dev *hdev = ae_dev->priv;
3394 
3395 	/* first, resolve any unknown reset type to the known type(s) */
3396 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3397 		/* we will intentionally ignore any errors from this function
3398 		 * as we will end up in *some* reset request in any case
3399 		 */
3400 		hclge_handle_hw_msix_error(hdev, addr);
3401 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3402 		/* We deferred the clearing of the error event which caused
3403 		 * the interrupt since it was not possible to do that in
3404 		 * interrupt context (and this is the reason we introduced
3405 		 * the new UNKNOWN reset type). Now that the errors have been
3406 		 * handled and cleared in hardware, we can safely enable
3407 		 * interrupts. This is an exception to the norm.
3408 		 */
3409 		hclge_enable_vector(&hdev->misc_vector, true);
3410 	}
3411 
3412 	/* return the highest priority reset level amongst all */
3413 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3414 		rst_level = HNAE3_IMP_RESET;
3415 		clear_bit(HNAE3_IMP_RESET, addr);
3416 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3417 		clear_bit(HNAE3_FUNC_RESET, addr);
3418 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3419 		rst_level = HNAE3_GLOBAL_RESET;
3420 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3421 		clear_bit(HNAE3_FUNC_RESET, addr);
3422 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3423 		rst_level = HNAE3_FUNC_RESET;
3424 		clear_bit(HNAE3_FUNC_RESET, addr);
3425 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3426 		rst_level = HNAE3_FLR_RESET;
3427 		clear_bit(HNAE3_FLR_RESET, addr);
3428 	}
3429 
3430 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3431 	    rst_level < hdev->reset_type)
3432 		return HNAE3_NONE_RESET;
3433 
3434 	return rst_level;
3435 }
3436 
3437 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3438 {
3439 	u32 clearval = 0;
3440 
3441 	switch (hdev->reset_type) {
3442 	case HNAE3_IMP_RESET:
3443 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3444 		break;
3445 	case HNAE3_GLOBAL_RESET:
3446 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3447 		break;
3448 	default:
3449 		break;
3450 	}
3451 
3452 	if (!clearval)
3453 		return;
3454 
3455 	/* For revision 0x20, the reset interrupt source
3456 	 * can only be cleared after the hardware reset is done
3457 	 */
3458 	if (hdev->pdev->revision == 0x20)
3459 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3460 				clearval);
3461 
3462 	hclge_enable_vector(&hdev->misc_vector, true);
3463 }
3464 
3465 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3466 {
3467 	int ret = 0;
3468 
3469 	switch (hdev->reset_type) {
3470 	case HNAE3_FUNC_RESET:
3471 		/* fall through */
3472 	case HNAE3_FLR_RESET:
3473 		ret = hclge_set_all_vf_rst(hdev, true);
3474 		break;
3475 	default:
3476 		break;
3477 	}
3478 
3479 	return ret;
3480 }
3481 
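/* Set or clear the HCLGE_NIC_SW_RST_RDY bit in the command-queue depth
 * register. This bit is the software side of the reset handshake: it is set
 * once the driver has finished its preparatory work so the reset may
 * proceed, and cleared again after re-initialization is done.
 */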
3482 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3483 {
3484 	u32 reg_val;
3485 
3486 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3487 	if (enable)
3488 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3489 	else
3490 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3491 
3492 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3493 }
3494 
3495 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3496 {
3497 	u32 reg_val;
3498 	int ret = 0;
3499 
3500 	switch (hdev->reset_type) {
3501 	case HNAE3_FUNC_RESET:
3502 		/* confirm whether all running VFs are ready
3503 		 * before requesting the PF reset
3504 		 */
3505 		ret = hclge_func_reset_sync_vf(hdev);
3506 		if (ret)
3507 			return ret;
3508 
3509 		ret = hclge_func_reset_cmd(hdev, 0);
3510 		if (ret) {
3511 			dev_err(&hdev->pdev->dev,
3512 				"asserting function reset fail %d!\n", ret);
3513 			return ret;
3514 		}
3515 
3516 		/* After performing the PF reset, it is not necessary to do the
3517 		 * mailbox handling or send any command to firmware, because
3518 		 * any mailbox handling or command to firmware is only valid
3519 		 * after hclge_cmd_init is called.
3520 		 */
3521 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3522 		hdev->rst_stats.pf_rst_cnt++;
3523 		break;
3524 	case HNAE3_FLR_RESET:
3525 		/* confirm whether all running VFs are ready
3526 		 * before requesting the PF reset
3527 		 */
3528 		ret = hclge_func_reset_sync_vf(hdev);
3529 		if (ret)
3530 			return ret;
3531 
3532 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3533 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3534 		hdev->rst_stats.flr_rst_cnt++;
3535 		break;
3536 	case HNAE3_IMP_RESET:
3537 		hclge_handle_imp_error(hdev);
3538 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3539 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3540 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3541 		break;
3542 	default:
3543 		break;
3544 	}
3545 
3546 	/* inform hardware that preparatory work is done */
3547 	msleep(HCLGE_RESET_SYNC_TIME);
3548 	hclge_reset_handshake(hdev, true);
3549 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3550 
3551 	return ret;
3552 }
3553 
3554 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3555 {
3556 #define MAX_RESET_FAIL_CNT 5
3557 
3558 	if (hdev->reset_pending) {
3559 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3560 			 hdev->reset_pending);
3561 		return true;
3562 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3563 		   HCLGE_RESET_INT_M) {
3564 		dev_info(&hdev->pdev->dev,
3565 			 "reset failed because new reset interrupt\n");
3566 		hclge_clear_reset_cause(hdev);
3567 		return false;
3568 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3569 		hdev->rst_stats.reset_fail_cnt++;
3570 		set_bit(hdev->reset_type, &hdev->reset_pending);
3571 		dev_info(&hdev->pdev->dev,
3572 			 "re-schedule reset task(%d)\n",
3573 			 hdev->rst_stats.reset_fail_cnt);
3574 		return true;
3575 	}
3576 
3577 	hclge_clear_reset_cause(hdev);
3578 
3579 	/* recover the handshake status when reset fail */
3580 	hclge_reset_handshake(hdev, true);
3581 
3582 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3583 	return false;
3584 }
3585 
3586 static int hclge_set_rst_done(struct hclge_dev *hdev)
3587 {
3588 	struct hclge_pf_rst_done_cmd *req;
3589 	struct hclge_desc desc;
3590 	int ret;
3591 
3592 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3593 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3594 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3595 
3596 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3597 	/* To be compatible with the old firmware, which does not support
3598 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3599 	 * return success
3600 	 */
3601 	if (ret == -EOPNOTSUPP) {
3602 		dev_warn(&hdev->pdev->dev,
3603 			 "current firmware does not support command(0x%x)!\n",
3604 			 HCLGE_OPC_PF_RST_DONE);
3605 		return 0;
3606 	} else if (ret) {
3607 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3608 			ret);
3609 	}
3610 
3611 	return ret;
3612 }
3613 
3614 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3615 {
3616 	int ret = 0;
3617 
3618 	switch (hdev->reset_type) {
3619 	case HNAE3_FUNC_RESET:
3620 		/* fall through */
3621 	case HNAE3_FLR_RESET:
3622 		ret = hclge_set_all_vf_rst(hdev, false);
3623 		break;
3624 	case HNAE3_GLOBAL_RESET:
3625 		/* fall through */
3626 	case HNAE3_IMP_RESET:
3627 		ret = hclge_set_rst_done(hdev);
3628 		break;
3629 	default:
3630 		break;
3631 	}
3632 
3633 	/* clear the handshake status after re-initialization is done */
3634 	hclge_reset_handshake(hdev, false);
3635 
3636 	return ret;
3637 }
3638 
3639 static int hclge_reset_stack(struct hclge_dev *hdev)
3640 {
3641 	int ret;
3642 
3643 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3644 	if (ret)
3645 		return ret;
3646 
3647 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3648 	if (ret)
3649 		return ret;
3650 
3651 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3652 	if (ret)
3653 		return ret;
3654 
3655 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3656 }
3657 
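/* Top-level PF reset flow: bring the RoCE and NIC clients down, assert the
 * reset (or wait for the hardware one), wait for the hardware to finish,
 * rebuild the stack via hclge_reset_stack(), clear the reset cause, and then
 * bring the clients back up. Any failure falls through to
 * hclge_reset_err_handle(), which decides whether to reschedule the reset.
 */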
3658 static void hclge_reset(struct hclge_dev *hdev)
3659 {
3660 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3661 	enum hnae3_reset_type reset_level;
3662 	int ret;
3663 
3664 	/* Initialize ae_dev reset status as well, in case enet layer wants to
3665 	 * know if device is undergoing reset
3666 	 */
3667 	ae_dev->reset_type = hdev->reset_type;
3668 	hdev->rst_stats.reset_cnt++;
3669 	/* perform reset of the stack & ae device for a client */
3670 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3671 	if (ret)
3672 		goto err_reset;
3673 
3674 	ret = hclge_reset_prepare_down(hdev);
3675 	if (ret)
3676 		goto err_reset;
3677 
3678 	rtnl_lock();
3679 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3680 	if (ret)
3681 		goto err_reset_lock;
3682 
3683 	rtnl_unlock();
3684 
3685 	ret = hclge_reset_prepare_wait(hdev);
3686 	if (ret)
3687 		goto err_reset;
3688 
3689 	if (hclge_reset_wait(hdev))
3690 		goto err_reset;
3691 
3692 	hdev->rst_stats.hw_reset_done_cnt++;
3693 
3694 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3695 	if (ret)
3696 		goto err_reset;
3697 
3698 	rtnl_lock();
3699 
3700 	ret = hclge_reset_stack(hdev);
3701 	if (ret)
3702 		goto err_reset_lock;
3703 
3704 	hclge_clear_reset_cause(hdev);
3705 
3706 	ret = hclge_reset_prepare_up(hdev);
3707 	if (ret)
3708 		goto err_reset_lock;
3709 
3710 	rtnl_unlock();
3711 
3712 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3713 	/* ignore the RoCE notify error only if it has already failed
3714 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3715 	 */
3716 	if (ret &&
3717 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3718 		goto err_reset;
3719 
3720 	rtnl_lock();
3721 
3722 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3723 	if (ret)
3724 		goto err_reset_lock;
3725 
3726 	rtnl_unlock();
3727 
3728 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3729 	if (ret)
3730 		goto err_reset;
3731 
3732 	hdev->last_reset_time = jiffies;
3733 	hdev->rst_stats.reset_fail_cnt = 0;
3734 	hdev->rst_stats.reset_done_cnt++;
3735 	ae_dev->reset_type = HNAE3_NONE_RESET;
3736 
3737 	/* if default_reset_request has a higher level reset request,
3738 	 * it should be handled as soon as possible, since some errors
3739 	 * need this kind of reset to be fixed.
3740 	 */
3741 	reset_level = hclge_get_reset_level(ae_dev,
3742 					    &hdev->default_reset_request);
3743 	if (reset_level != HNAE3_NONE_RESET)
3744 		set_bit(reset_level, &hdev->reset_request);
3745 
3746 	return;
3747 
3748 err_reset_lock:
3749 	rtnl_unlock();
3750 err_reset:
3751 	if (hclge_reset_err_handle(hdev))
3752 		hclge_reset_task_schedule(hdev);
3753 }
3754 
3755 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3756 {
3757 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3758 	struct hclge_dev *hdev = ae_dev->priv;
3759 
3760 	/* We might end up getting called broadly because of the 2 cases below:
3761 	 * 1. A recoverable error was conveyed through APEI and the only way
3762 	 *    to bring back normalcy is to reset.
3763 	 * 2. A new reset request from the stack due to timeout
3764 	 *
3765 	 * For the first case, the error event might not have an ae handle
3766 	 * available. Check if this is a new reset request and we are not here
3767 	 * just because the last reset attempt did not succeed and the
3768 	 * watchdog hit us again. We will know this if the last reset request
3769 	 * did not occur very recently (watchdog timer = 5*HZ, so check after
3770 	 * a sufficiently large time, say 4*5*HZ).
3771 	 * In case of a new request we reset the "reset level" to PF reset.
3772 	 * And if it is a repeat reset request of the most recent one then we
3773 	 * want to make sure we throttle the reset request. Therefore, we will
3774 	 * not allow it again before 3*HZ times.
3775 	 */
3775 	if (!handle)
3776 		handle = &hdev->vport[0].nic;
3777 
3778 	if (time_before(jiffies, (hdev->last_reset_time +
3779 				  HCLGE_RESET_INTERVAL))) {
3780 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3781 		return;
3782 	} else if (hdev->default_reset_request)
3783 		hdev->reset_level =
3784 			hclge_get_reset_level(ae_dev,
3785 					      &hdev->default_reset_request);
3786 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3787 		hdev->reset_level = HNAE3_FUNC_RESET;
3788 
3789 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3790 		 hdev->reset_level);
3791 
3792 	/* request reset & schedule reset task */
3793 	set_bit(hdev->reset_level, &hdev->reset_request);
3794 	hclge_reset_task_schedule(hdev);
3795 
3796 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3797 		hdev->reset_level++;
3798 }
3799 
3800 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3801 					enum hnae3_reset_type rst_type)
3802 {
3803 	struct hclge_dev *hdev = ae_dev->priv;
3804 
3805 	set_bit(rst_type, &hdev->default_reset_request);
3806 }
3807 
3808 static void hclge_reset_timer(struct timer_list *t)
3809 {
3810 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3811 
3812 	/* if default_reset_request has no value, it means that this reset
3813 	 * request has already been handled, so just return here
3814 	 */
3815 	if (!hdev->default_reset_request)
3816 		return;
3817 
3818 	dev_info(&hdev->pdev->dev,
3819 		 "triggering reset in reset timer\n");
3820 	hclge_reset_event(hdev->pdev, NULL);
3821 }
3822 
3823 static void hclge_reset_subtask(struct hclge_dev *hdev)
3824 {
3825 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3826 
3827 	/* check if there is any ongoing reset in the hardware. This status can
3828 	 * be checked from reset_pending. If there is, then we need to wait for
3829 	 * the hardware to complete the reset.
3830 	 *    a. If we are able to figure out in reasonable time that the
3831 	 *       hardware has fully reset, then we can proceed with the driver
3832 	 *       and client reset.
3833 	 *    b. else, we can come back later to check this status, so
3834 	 *       re-schedule now.
3835 	 */
3836 	hdev->last_reset_time = jiffies;
3837 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3838 	if (hdev->reset_type != HNAE3_NONE_RESET)
3839 		hclge_reset(hdev);
3840 
3841 	/* check if we got any *new* reset requests to be honored */
3842 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3843 	if (hdev->reset_type != HNAE3_NONE_RESET)
3844 		hclge_do_reset(hdev);
3845 
3846 	hdev->reset_type = HNAE3_NONE_RESET;
3847 }
3848 
3849 static void hclge_reset_service_task(struct work_struct *work)
3850 {
3851 	struct hclge_dev *hdev =
3852 		container_of(work, struct hclge_dev, rst_service_task);
3853 
3854 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3855 		return;
3856 
3857 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3858 
3859 	hclge_reset_subtask(hdev);
3860 
3861 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3862 }
3863 
3864 static void hclge_mailbox_service_task(struct work_struct *work)
3865 {
3866 	struct hclge_dev *hdev =
3867 		container_of(work, struct hclge_dev, mbx_service_task);
3868 
3869 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3870 		return;
3871 
3872 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3873 
3874 	hclge_mbx_handler(hdev);
3875 
3876 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3877 }
3878 
3879 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3880 {
3881 	int i;
3882 
3883 	/* start from vport 1, since vport 0 (the PF) is always alive */
3884 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3885 		struct hclge_vport *vport = &hdev->vport[i];
3886 
3887 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3888 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3889 
3890 		/* If the VF is not alive, set its MPS to the default value */
3891 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3892 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3893 	}
3894 }
3895 
3896 static void hclge_service_task(struct work_struct *work)
3897 {
3898 	struct hclge_dev *hdev =
3899 		container_of(work, struct hclge_dev, service_task.work);
3900 
3901 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3902 
3903 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3904 		hclge_update_stats_for_all(hdev);
3905 		hdev->hw_stats.stats_timer = 0;
3906 	}
3907 
3908 	hclge_update_port_info(hdev);
3909 	hclge_update_link_status(hdev);
3910 	hclge_update_vport_alive(hdev);
3911 	hclge_sync_vlan_filter(hdev);
3912 	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3913 		hclge_rfs_filter_expire(hdev);
3914 		hdev->fd_arfs_expire_timer = 0;
3915 	}
3916 
3917 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3918 }
3919 
3920 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3921 {
3922 	/* VF handle has no client */
3923 	if (!handle->client)
3924 		return container_of(handle, struct hclge_vport, nic);
3925 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3926 		return container_of(handle, struct hclge_vport, roce);
3927 	else
3928 		return container_of(handle, struct hclge_vport, nic);
3929 }
3930 
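/* Allocate up to vector_num MSI vectors for this vport, bounded by the
 * vectors reserved for the NIC (num_nic_msi - 1) and by num_msi_left.
 * Vector 0 is skipped since it is reserved for the misc vector; for each
 * free vector the IRQ number and the per-vector interrupt register address
 * are filled into vector_info. Returns the number of vectors allocated.
 */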
3931 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3932 			    struct hnae3_vector_info *vector_info)
3933 {
3934 	struct hclge_vport *vport = hclge_get_vport(handle);
3935 	struct hnae3_vector_info *vector = vector_info;
3936 	struct hclge_dev *hdev = vport->back;
3937 	int alloc = 0;
3938 	int i, j;
3939 
3940 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
3941 	vector_num = min(hdev->num_msi_left, vector_num);
3942 
3943 	for (j = 0; j < vector_num; j++) {
3944 		for (i = 1; i < hdev->num_msi; i++) {
3945 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3946 				vector->vector = pci_irq_vector(hdev->pdev, i);
3947 				vector->io_addr = hdev->hw.io_base +
3948 					HCLGE_VECTOR_REG_BASE +
3949 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3950 					vport->vport_id *
3951 					HCLGE_VECTOR_VF_OFFSET;
3952 				hdev->vector_status[i] = vport->vport_id;
3953 				hdev->vector_irq[i] = vector->vector;
3954 
3955 				vector++;
3956 				alloc++;
3957 
3958 				break;
3959 			}
3960 		}
3961 	}
3962 	hdev->num_msi_left -= alloc;
3963 	hdev->num_msi_used += alloc;
3964 
3965 	return alloc;
3966 }
3967 
3968 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3969 {
3970 	int i;
3971 
3972 	for (i = 0; i < hdev->num_msi; i++)
3973 		if (vector == hdev->vector_irq[i])
3974 			return i;
3975 
3976 	return -EINVAL;
3977 }
3978 
3979 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3980 {
3981 	struct hclge_vport *vport = hclge_get_vport(handle);
3982 	struct hclge_dev *hdev = vport->back;
3983 	int vector_id;
3984 
3985 	vector_id = hclge_get_vector_index(hdev, vector);
3986 	if (vector_id < 0) {
3987 		dev_err(&hdev->pdev->dev,
3988 			"Get vector index fail. vector_id =%d\n", vector_id);
3989 		return vector_id;
3990 	}
3991 
3992 	hclge_free_vector(hdev, vector_id);
3993 
3994 	return 0;
3995 }
3996 
3997 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3998 {
3999 	return HCLGE_RSS_KEY_SIZE;
4000 }
4001 
4002 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4003 {
4004 	return HCLGE_RSS_IND_TBL_SIZE;
4005 }
4006 
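/* Write the RSS hash algorithm and the HCLGE_RSS_KEY_SIZE-byte hash key to
 * hardware. The key does not fit in a single descriptor, so it is sent in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, with the chunk offset encoded in
 * hash_config alongside the algorithm.
 */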
4007 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4008 				  const u8 hfunc, const u8 *key)
4009 {
4010 	struct hclge_rss_config_cmd *req;
4011 	unsigned int key_offset = 0;
4012 	struct hclge_desc desc;
4013 	int key_counts;
4014 	int key_size;
4015 	int ret;
4016 
4017 	key_counts = HCLGE_RSS_KEY_SIZE;
4018 	req = (struct hclge_rss_config_cmd *)desc.data;
4019 
4020 	while (key_counts) {
4021 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4022 					   false);
4023 
4024 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4025 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4026 
4027 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4028 		memcpy(req->hash_key,
4029 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4030 
4031 		key_counts -= key_size;
4032 		key_offset++;
4033 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4034 		if (ret) {
4035 			dev_err(&hdev->pdev->dev,
4036 				"Configure RSS config fail, status = %d\n",
4037 				ret);
4038 			return ret;
4039 		}
4040 	}
4041 	return 0;
4042 }
4043 
4044 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4045 {
4046 	struct hclge_rss_indirection_table_cmd *req;
4047 	struct hclge_desc desc;
4048 	int i, j;
4049 	int ret;
4050 
4051 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4052 
4053 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4054 		hclge_cmd_setup_basic_desc
4055 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4056 
4057 		req->start_table_index =
4058 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4059 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4060 
4061 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4062 			req->rss_result[j] =
4063 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4064 
4065 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4066 		if (ret) {
4067 			dev_err(&hdev->pdev->dev,
4068 				"Configure rss indir table fail,status = %d\n",
4069 				ret);
4070 			return ret;
4071 		}
4072 	}
4073 	return 0;
4074 }
4075 
4076 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4077 				 u16 *tc_size, u16 *tc_offset)
4078 {
4079 	struct hclge_rss_tc_mode_cmd *req;
4080 	struct hclge_desc desc;
4081 	int ret;
4082 	int i;
4083 
4084 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4085 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4086 
4087 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4088 		u16 mode = 0;
4089 
4090 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4091 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4092 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4093 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4094 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4095 
4096 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4097 	}
4098 
4099 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4100 	if (ret)
4101 		dev_err(&hdev->pdev->dev,
4102 			"Configure rss tc mode fail, status = %d\n", ret);
4103 
4104 	return ret;
4105 }
4106 
4107 static void hclge_get_rss_type(struct hclge_vport *vport)
4108 {
4109 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4110 	    vport->rss_tuple_sets.ipv4_udp_en ||
4111 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4112 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4113 	    vport->rss_tuple_sets.ipv6_udp_en ||
4114 	    vport->rss_tuple_sets.ipv6_sctp_en)
4115 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4116 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4117 		 vport->rss_tuple_sets.ipv6_fragment_en)
4118 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4119 	else
4120 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4121 }
4122 
4123 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4124 {
4125 	struct hclge_rss_input_tuple_cmd *req;
4126 	struct hclge_desc desc;
4127 	int ret;
4128 
4129 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4130 
4131 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4132 
4133 	/* Get the tuple cfg from pf */
4134 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4135 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4136 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4137 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4138 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4139 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4140 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4141 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4142 	hclge_get_rss_type(&hdev->vport[0]);
4143 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4144 	if (ret)
4145 		dev_err(&hdev->pdev->dev,
4146 			"Configure rss input fail, status = %d\n", ret);
4147 	return ret;
4148 }
4149 
4150 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4151 			 u8 *key, u8 *hfunc)
4152 {
4153 	struct hclge_vport *vport = hclge_get_vport(handle);
4154 	int i;
4155 
4156 	/* Get hash algorithm */
4157 	if (hfunc) {
4158 		switch (vport->rss_algo) {
4159 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4160 			*hfunc = ETH_RSS_HASH_TOP;
4161 			break;
4162 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4163 			*hfunc = ETH_RSS_HASH_XOR;
4164 			break;
4165 		default:
4166 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4167 			break;
4168 		}
4169 	}
4170 
4171 	/* Get the RSS Key required by the user */
4172 	if (key)
4173 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4174 
4175 	/* Get indirect table */
4176 	if (indir)
4177 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4178 			indir[i] =  vport->rss_indirection_tbl[i];
4179 
4180 	return 0;
4181 }
4182 
4183 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4184 			 const  u8 *key, const  u8 hfunc)
4185 {
4186 	struct hclge_vport *vport = hclge_get_vport(handle);
4187 	struct hclge_dev *hdev = vport->back;
4188 	u8 hash_algo;
4189 	int ret, i;
4190 
4191 	/* Set the RSS hash key if specified by the user */
4192 	if (key) {
4193 		switch (hfunc) {
4194 		case ETH_RSS_HASH_TOP:
4195 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4196 			break;
4197 		case ETH_RSS_HASH_XOR:
4198 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4199 			break;
4200 		case ETH_RSS_HASH_NO_CHANGE:
4201 			hash_algo = vport->rss_algo;
4202 			break;
4203 		default:
4204 			return -EINVAL;
4205 		}
4206 
4207 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4208 		if (ret)
4209 			return ret;
4210 
4211 		/* Update the shadow RSS key with the user specified key */
4212 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4213 		vport->rss_algo = hash_algo;
4214 	}
4215 
4216 	/* Update the shadow RSS table with user specified qids */
4217 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4218 		vport->rss_indirection_tbl[i] = indir[i];
4219 
4220 	/* Update the hardware */
4221 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4222 }
4223 
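/* Translate the ethtool RXH_* flags in nfc->data into the hardware tuple
 * enable bits (source/destination IP and L4 port). For SCTP flows the
 * verification-tag bit (HCLGE_V_TAG_BIT) is enabled as well.
 */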
4224 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4225 {
4226 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4227 
4228 	if (nfc->data & RXH_L4_B_2_3)
4229 		hash_sets |= HCLGE_D_PORT_BIT;
4230 	else
4231 		hash_sets &= ~HCLGE_D_PORT_BIT;
4232 
4233 	if (nfc->data & RXH_IP_SRC)
4234 		hash_sets |= HCLGE_S_IP_BIT;
4235 	else
4236 		hash_sets &= ~HCLGE_S_IP_BIT;
4237 
4238 	if (nfc->data & RXH_IP_DST)
4239 		hash_sets |= HCLGE_D_IP_BIT;
4240 	else
4241 		hash_sets &= ~HCLGE_D_IP_BIT;
4242 
4243 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4244 		hash_sets |= HCLGE_V_TAG_BIT;
4245 
4246 	return hash_sets;
4247 }
4248 
4249 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4250 			       struct ethtool_rxnfc *nfc)
4251 {
4252 	struct hclge_vport *vport = hclge_get_vport(handle);
4253 	struct hclge_dev *hdev = vport->back;
4254 	struct hclge_rss_input_tuple_cmd *req;
4255 	struct hclge_desc desc;
4256 	u8 tuple_sets;
4257 	int ret;
4258 
4259 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4260 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4261 		return -EINVAL;
4262 
4263 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4264 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4265 
4266 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4267 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4268 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4269 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4270 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4271 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4272 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4273 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4274 
4275 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4276 	switch (nfc->flow_type) {
4277 	case TCP_V4_FLOW:
4278 		req->ipv4_tcp_en = tuple_sets;
4279 		break;
4280 	case TCP_V6_FLOW:
4281 		req->ipv6_tcp_en = tuple_sets;
4282 		break;
4283 	case UDP_V4_FLOW:
4284 		req->ipv4_udp_en = tuple_sets;
4285 		break;
4286 	case UDP_V6_FLOW:
4287 		req->ipv6_udp_en = tuple_sets;
4288 		break;
4289 	case SCTP_V4_FLOW:
4290 		req->ipv4_sctp_en = tuple_sets;
4291 		break;
4292 	case SCTP_V6_FLOW:
4293 		if ((nfc->data & RXH_L4_B_0_1) ||
4294 		    (nfc->data & RXH_L4_B_2_3))
4295 			return -EINVAL;
4296 
4297 		req->ipv6_sctp_en = tuple_sets;
4298 		break;
4299 	case IPV4_FLOW:
4300 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4301 		break;
4302 	case IPV6_FLOW:
4303 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4304 		break;
4305 	default:
4306 		return -EINVAL;
4307 	}
4308 
4309 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4310 	if (ret) {
4311 		dev_err(&hdev->pdev->dev,
4312 			"Set rss tuple fail, status = %d\n", ret);
4313 		return ret;
4314 	}
4315 
4316 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4317 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4318 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4319 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4320 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4321 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4322 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4323 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4324 	hclge_get_rss_type(vport);
4325 	return 0;
4326 }
4327 
4328 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4329 			       struct ethtool_rxnfc *nfc)
4330 {
4331 	struct hclge_vport *vport = hclge_get_vport(handle);
4332 	u8 tuple_sets;
4333 
4334 	nfc->data = 0;
4335 
4336 	switch (nfc->flow_type) {
4337 	case TCP_V4_FLOW:
4338 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4339 		break;
4340 	case UDP_V4_FLOW:
4341 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4342 		break;
4343 	case TCP_V6_FLOW:
4344 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4345 		break;
4346 	case UDP_V6_FLOW:
4347 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4348 		break;
4349 	case SCTP_V4_FLOW:
4350 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4351 		break;
4352 	case SCTP_V6_FLOW:
4353 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4354 		break;
4355 	case IPV4_FLOW:
4356 	case IPV6_FLOW:
4357 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4358 		break;
4359 	default:
4360 		return -EINVAL;
4361 	}
4362 
4363 	if (!tuple_sets)
4364 		return 0;
4365 
4366 	if (tuple_sets & HCLGE_D_PORT_BIT)
4367 		nfc->data |= RXH_L4_B_2_3;
4368 	if (tuple_sets & HCLGE_S_PORT_BIT)
4369 		nfc->data |= RXH_L4_B_0_1;
4370 	if (tuple_sets & HCLGE_D_IP_BIT)
4371 		nfc->data |= RXH_IP_DST;
4372 	if (tuple_sets & HCLGE_S_IP_BIT)
4373 		nfc->data |= RXH_IP_SRC;
4374 
4375 	return 0;
4376 }
4377 
4378 static int hclge_get_tc_size(struct hnae3_handle *handle)
4379 {
4380 	struct hclge_vport *vport = hclge_get_vport(handle);
4381 	struct hclge_dev *hdev = vport->back;
4382 
4383 	return hdev->rss_size_max;
4384 }
4385 
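/* Program the RSS configuration of vport 0 into hardware: the indirection
 * table, the hash key and algorithm, the input tuples, and the per-TC RSS
 * mode, where tc_size is the log2 of rss_size rounded up to a power of two
 * and tc_offset is the first queue of each TC.
 */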
4386 int hclge_rss_init_hw(struct hclge_dev *hdev)
4387 {
4388 	struct hclge_vport *vport = hdev->vport;
4389 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4390 	u16 rss_size = vport[0].alloc_rss_size;
4391 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4392 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4393 	u8 *key = vport[0].rss_hash_key;
4394 	u8 hfunc = vport[0].rss_algo;
4395 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4396 	u16 roundup_size;
4397 	unsigned int i;
4398 	int ret;
4399 
4400 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4401 	if (ret)
4402 		return ret;
4403 
4404 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4405 	if (ret)
4406 		return ret;
4407 
4408 	ret = hclge_set_rss_input_tuple(hdev);
4409 	if (ret)
4410 		return ret;
4411 
4412 	/* Each TC has the same queue size, and the tc_size set to hardware is
4413 	 * the log2 of rss_size rounded up to a power of two; the actual queue
4414 	 * size is limited by the indirection table.
4415 	 */
4416 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4417 		dev_err(&hdev->pdev->dev,
4418 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
4419 			rss_size);
4420 		return -EINVAL;
4421 	}
4422 
4423 	roundup_size = roundup_pow_of_two(rss_size);
4424 	roundup_size = ilog2(roundup_size);
4425 
4426 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4427 		tc_valid[i] = 0;
4428 
4429 		if (!(hdev->hw_tc_map & BIT(i)))
4430 			continue;
4431 
4432 		tc_valid[i] = 1;
4433 		tc_size[i] = roundup_size;
4434 		tc_offset[i] = rss_size * i;
4435 	}
4436 
4437 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4438 }
4439 
4440 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4441 {
4442 	struct hclge_vport *vport = hdev->vport;
4443 	int i, j;
4444 
4445 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4446 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4447 			vport[j].rss_indirection_tbl[i] =
4448 				i % vport[j].alloc_rss_size;
4449 	}
4450 }
4451 
4452 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4453 {
4454 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4455 	struct hclge_vport *vport = hdev->vport;
4456 
4457 	if (hdev->pdev->revision >= 0x21)
4458 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4459 
4460 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4461 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4462 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4463 		vport[i].rss_tuple_sets.ipv4_udp_en =
4464 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4465 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4466 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4467 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4468 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4469 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4470 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4471 		vport[i].rss_tuple_sets.ipv6_udp_en =
4472 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4473 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4474 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4475 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4476 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4477 
4478 		vport[i].rss_algo = rss_algo;
4479 
4480 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4481 		       HCLGE_RSS_KEY_SIZE);
4482 	}
4483 
4484 	hclge_rss_indir_init_cfg(hdev);
4485 }
4486 
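/* Map (when en is true) or unmap the rings in ring_chain to/from the given
 * interrupt vector for this vport. Each descriptor holds up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so longer chains are flushed
 * to hardware in multiple commands.
 */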
4487 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4488 				int vector_id, bool en,
4489 				struct hnae3_ring_chain_node *ring_chain)
4490 {
4491 	struct hclge_dev *hdev = vport->back;
4492 	struct hnae3_ring_chain_node *node;
4493 	struct hclge_desc desc;
4494 	struct hclge_ctrl_vector_chain_cmd *req =
4495 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4496 	enum hclge_cmd_status status;
4497 	enum hclge_opcode_type op;
4498 	u16 tqp_type_and_id;
4499 	int i;
4500 
4501 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4502 	hclge_cmd_setup_basic_desc(&desc, op, false);
4503 	req->int_vector_id = vector_id;
4504 
4505 	i = 0;
4506 	for (node = ring_chain; node; node = node->next) {
4507 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4508 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4509 				HCLGE_INT_TYPE_S,
4510 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4511 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4512 				HCLGE_TQP_ID_S, node->tqp_index);
4513 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4514 				HCLGE_INT_GL_IDX_S,
4515 				hnae3_get_field(node->int_gl_idx,
4516 						HNAE3_RING_GL_IDX_M,
4517 						HNAE3_RING_GL_IDX_S));
4518 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4519 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4520 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4521 			req->vfid = vport->vport_id;
4522 
4523 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4524 			if (status) {
4525 				dev_err(&hdev->pdev->dev,
4526 					"Map TQP fail, status is %d.\n",
4527 					status);
4528 				return -EIO;
4529 			}
4530 			i = 0;
4531 
4532 			hclge_cmd_setup_basic_desc(&desc,
4533 						   op,
4534 						   false);
4535 			req->int_vector_id = vector_id;
4536 		}
4537 	}
4538 
4539 	if (i > 0) {
4540 		req->int_cause_num = i;
4541 		req->vfid = vport->vport_id;
4542 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4543 		if (status) {
4544 			dev_err(&hdev->pdev->dev,
4545 				"Map TQP fail, status is %d.\n", status);
4546 			return -EIO;
4547 		}
4548 	}
4549 
4550 	return 0;
4551 }
4552 
4553 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4554 				    struct hnae3_ring_chain_node *ring_chain)
4555 {
4556 	struct hclge_vport *vport = hclge_get_vport(handle);
4557 	struct hclge_dev *hdev = vport->back;
4558 	int vector_id;
4559 
4560 	vector_id = hclge_get_vector_index(hdev, vector);
4561 	if (vector_id < 0) {
4562 		dev_err(&hdev->pdev->dev,
4563 			"Get vector index fail. vector_id =%d\n", vector_id);
4564 		return vector_id;
4565 	}
4566 
4567 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4568 }
4569 
4570 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4571 				       struct hnae3_ring_chain_node *ring_chain)
4572 {
4573 	struct hclge_vport *vport = hclge_get_vport(handle);
4574 	struct hclge_dev *hdev = vport->back;
4575 	int vector_id, ret;
4576 
4577 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4578 		return 0;
4579 
4580 	vector_id = hclge_get_vector_index(hdev, vector);
4581 	if (vector_id < 0) {
4582 		dev_err(&handle->pdev->dev,
4583 			"Get vector index fail. ret =%d\n", vector_id);
4584 		return vector_id;
4585 	}
4586 
4587 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4588 	if (ret)
4589 		dev_err(&handle->pdev->dev,
4590 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4591 			vector_id, ret);
4592 
4593 	return ret;
4594 }
4595 
4596 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4597 			       struct hclge_promisc_param *param)
4598 {
4599 	struct hclge_promisc_cfg_cmd *req;
4600 	struct hclge_desc desc;
4601 	int ret;
4602 
4603 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4604 
4605 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4606 	req->vf_id = param->vf_id;
4607 
4608 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4609 	 * pdev revision 0x20; newer revisions support them. Setting these
4610 	 * two fields does not cause an error when the driver sends the
4611 	 * command to the firmware on revision 0x20.
4612 	 */
4613 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4614 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4615 
4616 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4617 	if (ret)
4618 		dev_err(&hdev->pdev->dev,
4619 			"Set promisc mode fail, status is %d.\n", ret);
4620 
4621 	return ret;
4622 }
4623 
4624 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4625 			      bool en_mc, bool en_bc, int vport_id)
4626 {
4627 	if (!param)
4628 		return;
4629 
4630 	memset(param, 0, sizeof(struct hclge_promisc_param));
4631 	if (en_uc)
4632 		param->enable = HCLGE_PROMISC_EN_UC;
4633 	if (en_mc)
4634 		param->enable |= HCLGE_PROMISC_EN_MC;
4635 	if (en_bc)
4636 		param->enable |= HCLGE_PROMISC_EN_BC;
4637 	param->vf_id = vport_id;
4638 }
4639 
4640 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4641 				  bool en_mc_pmc)
4642 {
4643 	struct hclge_vport *vport = hclge_get_vport(handle);
4644 	struct hclge_dev *hdev = vport->back;
4645 	struct hclge_promisc_param param;
4646 	bool en_bc_pmc = true;
4647 
4648 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4649 	 * is always bypassed. So broadcast promisc should be disabled until
4650 	 * the user enables promisc mode.
4651 	 */
4652 	if (handle->pdev->revision == 0x20)
4653 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4654 
4655 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4656 				 vport->vport_id);
4657 	return hclge_cmd_set_promisc_mode(hdev, &param);
4658 }
4659 
4660 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4661 {
4662 	struct hclge_get_fd_mode_cmd *req;
4663 	struct hclge_desc desc;
4664 	int ret;
4665 
4666 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4667 
4668 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4669 
4670 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4671 	if (ret) {
4672 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4673 		return ret;
4674 	}
4675 
4676 	*fd_mode = req->mode;
4677 
4678 	return ret;
4679 }
4680 
4681 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4682 				   u32 *stage1_entry_num,
4683 				   u32 *stage2_entry_num,
4684 				   u16 *stage1_counter_num,
4685 				   u16 *stage2_counter_num)
4686 {
4687 	struct hclge_get_fd_allocation_cmd *req;
4688 	struct hclge_desc desc;
4689 	int ret;
4690 
4691 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4692 
4693 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4694 
4695 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4696 	if (ret) {
4697 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4698 			ret);
4699 		return ret;
4700 	}
4701 
4702 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4703 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4704 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4705 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4706 
4707 	return ret;
4708 }
4709 
4710 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4711 {
4712 	struct hclge_set_fd_key_config_cmd *req;
4713 	struct hclge_fd_key_cfg *stage;
4714 	struct hclge_desc desc;
4715 	int ret;
4716 
4717 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4718 
4719 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4720 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4721 	req->stage = stage_num;
4722 	req->key_select = stage->key_sel;
4723 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4724 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4725 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4726 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4727 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4728 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4729 
4730 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4731 	if (ret)
4732 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4733 
4734 	return ret;
4735 }
4736 
4737 static int hclge_init_fd_config(struct hclge_dev *hdev)
4738 {
4739 #define LOW_2_WORDS		0x03
4740 	struct hclge_fd_key_cfg *key_cfg;
4741 	int ret;
4742 
4743 	if (!hnae3_dev_fd_supported(hdev))
4744 		return 0;
4745 
4746 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4747 	if (ret)
4748 		return ret;
4749 
4750 	switch (hdev->fd_cfg.fd_mode) {
4751 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4752 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4753 		break;
4754 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4755 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4756 		break;
4757 	default:
4758 		dev_err(&hdev->pdev->dev,
4759 			"Unsupported flow director mode %d\n",
4760 			hdev->fd_cfg.fd_mode);
4761 		return -EOPNOTSUPP;
4762 	}
4763 
4764 	hdev->fd_cfg.proto_support =
4765 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4766 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4767 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4768 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4769 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4770 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4771 	key_cfg->outer_sipv6_word_en = 0;
4772 	key_cfg->outer_dipv6_word_en = 0;
4773 
4774 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4775 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4776 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4777 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4778 
4779 	/* If the max 400-bit key is used, we can also support tuples for ether type */
4780 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4781 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4782 		key_cfg->tuple_active |=
4783 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4784 	}
4785 
4786 	/* roce_type is used to filter RoCE frames,
4787 	 * dst_vport is used to specify the rule
4788 	 */
4789 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4790 
4791 	ret = hclge_get_fd_allocation(hdev,
4792 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4793 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4794 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4795 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4796 	if (ret)
4797 		return ret;
4798 
4799 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4800 }
4801 
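/* Write one TCAM entry (either the x or the y half, selected by sel_x) for
 * the given stage and location. The key is split across the tcam_data
 * fields of three chained descriptors; the entry-valid flag is only set on
 * the x half, and only when adding a rule.
 */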
4802 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4803 				int loc, u8 *key, bool is_add)
4804 {
4805 	struct hclge_fd_tcam_config_1_cmd *req1;
4806 	struct hclge_fd_tcam_config_2_cmd *req2;
4807 	struct hclge_fd_tcam_config_3_cmd *req3;
4808 	struct hclge_desc desc[3];
4809 	int ret;
4810 
4811 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4812 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4813 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4814 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4815 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4816 
4817 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4818 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4819 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4820 
4821 	req1->stage = stage;
4822 	req1->xy_sel = sel_x ? 1 : 0;
4823 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4824 	req1->index = cpu_to_le32(loc);
4825 	req1->entry_vld = sel_x ? is_add : 0;
4826 
4827 	if (key) {
4828 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4829 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4830 		       sizeof(req2->tcam_data));
4831 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4832 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4833 	}
4834 
4835 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4836 	if (ret)
4837 		dev_err(&hdev->pdev->dev,
4838 			"config tcam key fail, ret=%d\n",
4839 			ret);
4840 
4841 	return ret;
4842 }
4843 
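/* Build the 64-bit action data for a flow director rule: the rule id to
 * write back, the drop/forward flags, the destination queue id, the counter
 * selection and the next-stage bits, then write it to the action table at
 * index loc.
 */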
4844 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4845 			      struct hclge_fd_ad_data *action)
4846 {
4847 	struct hclge_fd_ad_config_cmd *req;
4848 	struct hclge_desc desc;
4849 	u64 ad_data = 0;
4850 	int ret;
4851 
4852 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4853 
4854 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4855 	req->index = cpu_to_le32(loc);
4856 	req->stage = stage;
4857 
4858 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4859 		      action->write_rule_id_to_bd);
4860 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4861 			action->rule_id);
4862 	ad_data <<= 32;
4863 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4864 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4865 		      action->forward_to_direct_queue);
4866 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4867 			action->queue_id);
4868 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4869 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4870 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4871 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4872 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4873 			action->counter_id);
4874 
4875 	req->ad_data = cpu_to_le64(ad_data);
4876 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4877 	if (ret)
4878 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4879 
4880 	return ret;
4881 }
4882 
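/* Convert one tuple of the flow director rule into the TCAM x/y key format.
 * calc_x()/calc_y() combine the tuple value and its mask into the x/y
 * pattern pair the TCAM matches against. Returns true if the tuple occupies
 * space in the key (the key bytes stay zero when the tuple is unused in the
 * rule), and false if the tuple is not active at all, so no key space is
 * consumed.
 */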
4883 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4884 				   struct hclge_fd_rule *rule)
4885 {
4886 	u16 tmp_x_s, tmp_y_s;
4887 	u32 tmp_x_l, tmp_y_l;
4888 	int i;
4889 
4890 	if (rule->unused_tuple & tuple_bit)
4891 		return true;
4892 
4893 	switch (tuple_bit) {
4894 	case 0:
4895 		return false;
4896 	case BIT(INNER_DST_MAC):
4897 		for (i = 0; i < ETH_ALEN; i++) {
4898 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4899 			       rule->tuples_mask.dst_mac[i]);
4900 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4901 			       rule->tuples_mask.dst_mac[i]);
4902 		}
4903 
4904 		return true;
4905 	case BIT(INNER_SRC_MAC):
4906 		for (i = 0; i < ETH_ALEN; i++) {
4907 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4908 			       rule->tuples_mask.src_mac[i]);
4909 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4910 			       rule->tuples_mask.src_mac[i]);
4911 		}
4912 
4913 		return true;
4914 	case BIT(INNER_VLAN_TAG_FST):
4915 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4916 		       rule->tuples_mask.vlan_tag1);
4917 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4918 		       rule->tuples_mask.vlan_tag1);
4919 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4920 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4921 
4922 		return true;
4923 	case BIT(INNER_ETH_TYPE):
4924 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4925 		       rule->tuples_mask.ether_proto);
4926 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4927 		       rule->tuples_mask.ether_proto);
4928 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4929 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4930 
4931 		return true;
4932 	case BIT(INNER_IP_TOS):
4933 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4934 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4935 
4936 		return true;
4937 	case BIT(INNER_IP_PROTO):
4938 		calc_x(*key_x, rule->tuples.ip_proto,
4939 		       rule->tuples_mask.ip_proto);
4940 		calc_y(*key_y, rule->tuples.ip_proto,
4941 		       rule->tuples_mask.ip_proto);
4942 
4943 		return true;
4944 	case BIT(INNER_SRC_IP):
4945 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4946 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4947 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4948 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4949 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4950 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4951 
4952 		return true;
4953 	case BIT(INNER_DST_IP):
4954 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4955 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4956 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4957 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4958 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4959 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4960 
4961 		return true;
4962 	case BIT(INNER_SRC_PORT):
4963 		calc_x(tmp_x_s, rule->tuples.src_port,
4964 		       rule->tuples_mask.src_port);
4965 		calc_y(tmp_y_s, rule->tuples.src_port,
4966 		       rule->tuples_mask.src_port);
4967 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4968 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4969 
4970 		return true;
4971 	case BIT(INNER_DST_PORT):
4972 		calc_x(tmp_x_s, rule->tuples.dst_port,
4973 		       rule->tuples_mask.dst_port);
4974 		calc_y(tmp_y_s, rule->tuples.dst_port,
4975 		       rule->tuples_mask.dst_port);
4976 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4977 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4978 
4979 		return true;
4980 	default:
4981 		return false;
4982 	}
4983 }
4984 
4985 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4986 				 u8 vf_id, u8 network_port_id)
4987 {
4988 	u32 port_number = 0;
4989 
4990 	if (port_type == HOST_PORT) {
4991 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4992 				pf_id);
4993 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4994 				vf_id);
4995 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4996 	} else {
4997 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4998 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4999 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5000 	}
5001 
5002 	return port_number;
5003 }
5004 
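/* Pack the active meta data tuples (e.g. the NIC/RoCE packet type bit and
 * the destination vport number) into a single 32-bit word, convert it to
 * the TCAM x/y pair and left-justify it so that it lands in the MSB region
 * of the key (see the layout comment before hclge_config_key()).
 */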
5005 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5006 				       __le32 *key_x, __le32 *key_y,
5007 				       struct hclge_fd_rule *rule)
5008 {
5009 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5010 	u8 cur_pos = 0, tuple_size, shift_bits;
5011 	unsigned int i;
5012 
5013 	for (i = 0; i < MAX_META_DATA; i++) {
5014 		tuple_size = meta_data_key_info[i].key_length;
5015 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5016 
5017 		switch (tuple_bit) {
5018 		case BIT(ROCE_TYPE):
5019 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5020 			cur_pos += tuple_size;
5021 			break;
5022 		case BIT(DST_VPORT):
5023 			port_number = hclge_get_port_number(HOST_PORT, 0,
5024 							    rule->vf_id, 0);
5025 			hnae3_set_field(meta_data,
5026 					GENMASK(cur_pos + tuple_size, cur_pos),
5027 					cur_pos, port_number);
5028 			cur_pos += tuple_size;
5029 			break;
5030 		default:
5031 			break;
5032 		}
5033 	}
5034 
5035 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5036 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5037 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5038 
5039 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5040 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5041 }
5042 
5043 /* A complete key consists of a meta data key and a tuple key.
5044  * The meta data key is stored in the MSB region and the tuple key in the
5045  * LSB region; unused bits are filled with 0.
5046  */
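/* For example, assuming a 400-bit maximum key and a 32-bit meta data field
 * (the actual sizes come from hdev->fd_cfg.max_key_length and
 * MAX_META_DATA_LENGTH), meta_data_region below would be 400 / 8 - 32 / 8 =
 * 46, i.e. the meta data x/y words are written at byte offset 46 of the
 * 50-byte key while the tuple key fills the bytes below it.
 */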
5047 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5048 			    struct hclge_fd_rule *rule)
5049 {
5050 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5051 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5052 	u8 *cur_key_x, *cur_key_y;
5053 	unsigned int i;
5054 	int ret, tuple_size;
5055 	u8 meta_data_region;
5056 
5057 	memset(key_x, 0, sizeof(key_x));
5058 	memset(key_y, 0, sizeof(key_y));
5059 	cur_key_x = key_x;
5060 	cur_key_y = key_y;
5061 
5062 	for (i = 0; i < MAX_TUPLE; i++) {
5063 		bool tuple_valid;
5064 		u32 check_tuple;
5065 
5066 		tuple_size = tuple_key_info[i].key_length / 8;
5067 		check_tuple = key_cfg->tuple_active & BIT(i);
5068 
5069 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5070 						     cur_key_y, rule);
5071 		if (tuple_valid) {
5072 			cur_key_x += tuple_size;
5073 			cur_key_y += tuple_size;
5074 		}
5075 	}
5076 
5077 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5078 			MAX_META_DATA_LENGTH / 8;
5079 
5080 	hclge_fd_convert_meta_data(key_cfg,
5081 				   (__le32 *)(key_x + meta_data_region),
5082 				   (__le32 *)(key_y + meta_data_region),
5083 				   rule);
5084 
5085 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5086 				   true);
5087 	if (ret) {
5088 		dev_err(&hdev->pdev->dev,
5089 			"fd key_y config fail, loc=%d, ret=%d\n",
5090 			rule->location, ret);
5091 		return ret;
5092 	}
5093 
5094 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5095 				   true);
5096 	if (ret)
5097 		dev_err(&hdev->pdev->dev,
5098 			"fd key_x config fail, loc=%d, ret=%d\n",
5099 			rule->location, ret);
5100 	return ret;
5101 }
5102 
5103 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5104 			       struct hclge_fd_rule *rule)
5105 {
5106 	struct hclge_fd_ad_data ad_data;
5107 
5108 	ad_data.ad_id = rule->location;
5109 
5110 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5111 		ad_data.drop_packet = true;
5112 		ad_data.forward_to_direct_queue = false;
5113 		ad_data.queue_id = 0;
5114 	} else {
5115 		ad_data.drop_packet = false;
5116 		ad_data.forward_to_direct_queue = true;
5117 		ad_data.queue_id = rule->queue_id;
5118 	}
5119 
5120 	ad_data.use_counter = false;
5121 	ad_data.counter_id = 0;
5122 
5123 	ad_data.use_next_stage = false;
5124 	ad_data.next_input_key = 0;
5125 
5126 	ad_data.write_rule_id_to_bd = true;
5127 	ad_data.rule_id = rule->location;
5128 
5129 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5130 }
5131 
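/* Validate an ethtool flow spec before it is turned into an fd rule:
 * reject locations beyond the stage-1 rule space and flow types the
 * hardware does not support, and mark every tuple that the spec leaves
 * zero (or that the flow type cannot carry) in the 'unused' bitmap so
 * that hclge_fd_convert_tuple() treats it as "don't care".
 */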
5132 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5133 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5134 {
5135 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5136 	struct ethtool_usrip4_spec *usr_ip4_spec;
5137 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5138 	struct ethtool_usrip6_spec *usr_ip6_spec;
5139 	struct ethhdr *ether_spec;
5140 
5141 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5142 		return -EINVAL;
5143 
5144 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5145 		return -EOPNOTSUPP;
5146 
5147 	if ((fs->flow_type & FLOW_EXT) &&
5148 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5149 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5150 		return -EOPNOTSUPP;
5151 	}
5152 
5153 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5154 	case SCTP_V4_FLOW:
5155 	case TCP_V4_FLOW:
5156 	case UDP_V4_FLOW:
5157 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5158 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5159 
5160 		if (!tcp_ip4_spec->ip4src)
5161 			*unused |= BIT(INNER_SRC_IP);
5162 
5163 		if (!tcp_ip4_spec->ip4dst)
5164 			*unused |= BIT(INNER_DST_IP);
5165 
5166 		if (!tcp_ip4_spec->psrc)
5167 			*unused |= BIT(INNER_SRC_PORT);
5168 
5169 		if (!tcp_ip4_spec->pdst)
5170 			*unused |= BIT(INNER_DST_PORT);
5171 
5172 		if (!tcp_ip4_spec->tos)
5173 			*unused |= BIT(INNER_IP_TOS);
5174 
5175 		break;
5176 	case IP_USER_FLOW:
5177 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5178 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5179 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5180 
5181 		if (!usr_ip4_spec->ip4src)
5182 			*unused |= BIT(INNER_SRC_IP);
5183 
5184 		if (!usr_ip4_spec->ip4dst)
5185 			*unused |= BIT(INNER_DST_IP);
5186 
5187 		if (!usr_ip4_spec->tos)
5188 			*unused |= BIT(INNER_IP_TOS);
5189 
5190 		if (!usr_ip4_spec->proto)
5191 			*unused |= BIT(INNER_IP_PROTO);
5192 
5193 		if (usr_ip4_spec->l4_4_bytes)
5194 			return -EOPNOTSUPP;
5195 
5196 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5197 			return -EOPNOTSUPP;
5198 
5199 		break;
5200 	case SCTP_V6_FLOW:
5201 	case TCP_V6_FLOW:
5202 	case UDP_V6_FLOW:
5203 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5204 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5205 			BIT(INNER_IP_TOS);
5206 
5207 		/* check whether the src/dst ip addresses are used */
5208 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5209 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5210 			*unused |= BIT(INNER_SRC_IP);
5211 
5212 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5213 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5214 			*unused |= BIT(INNER_DST_IP);
5215 
5216 		if (!tcp_ip6_spec->psrc)
5217 			*unused |= BIT(INNER_SRC_PORT);
5218 
5219 		if (!tcp_ip6_spec->pdst)
5220 			*unused |= BIT(INNER_DST_PORT);
5221 
5222 		if (tcp_ip6_spec->tclass)
5223 			return -EOPNOTSUPP;
5224 
5225 		break;
5226 	case IPV6_USER_FLOW:
5227 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5228 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5229 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5230 			BIT(INNER_DST_PORT);
5231 
5232 		/* check whether the src/dst ip addresses are used */
5233 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5234 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5235 			*unused |= BIT(INNER_SRC_IP);
5236 
5237 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5238 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5239 			*unused |= BIT(INNER_DST_IP);
5240 
5241 		if (!usr_ip6_spec->l4_proto)
5242 			*unused |= BIT(INNER_IP_PROTO);
5243 
5244 		if (usr_ip6_spec->tclass)
5245 			return -EOPNOTSUPP;
5246 
5247 		if (usr_ip6_spec->l4_4_bytes)
5248 			return -EOPNOTSUPP;
5249 
5250 		break;
5251 	case ETHER_FLOW:
5252 		ether_spec = &fs->h_u.ether_spec;
5253 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5254 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5255 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5256 
5257 		if (is_zero_ether_addr(ether_spec->h_source))
5258 			*unused |= BIT(INNER_SRC_MAC);
5259 
5260 		if (is_zero_ether_addr(ether_spec->h_dest))
5261 			*unused |= BIT(INNER_DST_MAC);
5262 
5263 		if (!ether_spec->h_proto)
5264 			*unused |= BIT(INNER_ETH_TYPE);
5265 
5266 		break;
5267 	default:
5268 		return -EOPNOTSUPP;
5269 	}
5270 
5271 	if ((fs->flow_type & FLOW_EXT)) {
5272 		if (fs->h_ext.vlan_etype)
5273 			return -EOPNOTSUPP;
5274 		if (!fs->h_ext.vlan_tci)
5275 			*unused |= BIT(INNER_VLAN_TAG_FST);
5276 
5277 		if (fs->m_ext.vlan_tci) {
5278 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5279 				return -EINVAL;
5280 		}
5281 	} else {
5282 		*unused |= BIT(INNER_VLAN_TAG_FST);
5283 	}
5284 
5285 	if (fs->flow_type & FLOW_MAC_EXT) {
5286 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5287 			return -EOPNOTSUPP;
5288 
5289 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5290 			*unused |= BIT(INNER_DST_MAC);
5291 		else
5292 			*unused &= ~(BIT(INNER_DST_MAC));
5293 	}
5294 
5295 	return 0;
5296 }
5297 
5298 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5299 {
5300 	struct hclge_fd_rule *rule = NULL;
5301 	struct hlist_node *node2;
5302 
5303 	spin_lock_bh(&hdev->fd_rule_lock);
5304 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5305 		if (rule->location >= location)
5306 			break;
5307 	}
5308 
5309 	spin_unlock_bh(&hdev->fd_rule_lock);
5310 
5311 	return rule && rule->location == location;
5312 }
5313 
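/* The rule list is kept sorted by location. For an add, any existing rule
 * at the same location is replaced; for a delete (is_add == false), the
 * rule must already exist, and its location bit is cleared from fd_bmap.
 */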
5314 /* the caller must hold fd_rule_lock when calling this function */
5315 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5316 				     struct hclge_fd_rule *new_rule,
5317 				     u16 location,
5318 				     bool is_add)
5319 {
5320 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5321 	struct hlist_node *node2;
5322 
5323 	if (is_add && !new_rule)
5324 		return -EINVAL;
5325 
5326 	hlist_for_each_entry_safe(rule, node2,
5327 				  &hdev->fd_rule_list, rule_node) {
5328 		if (rule->location >= location)
5329 			break;
5330 		parent = rule;
5331 	}
5332 
5333 	if (rule && rule->location == location) {
5334 		hlist_del(&rule->rule_node);
5335 		kfree(rule);
5336 		hdev->hclge_fd_rule_num--;
5337 
5338 		if (!is_add) {
5339 			if (!hdev->hclge_fd_rule_num)
5340 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5341 			clear_bit(location, hdev->fd_bmap);
5342 
5343 			return 0;
5344 		}
5345 	} else if (!is_add) {
5346 		dev_err(&hdev->pdev->dev,
5347 			"delete fail, rule %d does not exist\n",
5348 			location);
5349 		return -EINVAL;
5350 	}
5351 
5352 	INIT_HLIST_NODE(&new_rule->rule_node);
5353 
5354 	if (parent)
5355 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5356 	else
5357 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5358 
5359 	set_bit(location, hdev->fd_bmap);
5360 	hdev->hclge_fd_rule_num++;
5361 	hdev->fd_active_type = new_rule->rule_type;
5362 
5363 	return 0;
5364 }
5365 
5366 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5367 			      struct ethtool_rx_flow_spec *fs,
5368 			      struct hclge_fd_rule *rule)
5369 {
5370 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5371 
5372 	switch (flow_type) {
5373 	case SCTP_V4_FLOW:
5374 	case TCP_V4_FLOW:
5375 	case UDP_V4_FLOW:
5376 		rule->tuples.src_ip[IPV4_INDEX] =
5377 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5378 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5379 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5380 
5381 		rule->tuples.dst_ip[IPV4_INDEX] =
5382 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5383 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5384 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5385 
5386 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5387 		rule->tuples_mask.src_port =
5388 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5389 
5390 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5391 		rule->tuples_mask.dst_port =
5392 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5393 
5394 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5395 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5396 
5397 		rule->tuples.ether_proto = ETH_P_IP;
5398 		rule->tuples_mask.ether_proto = 0xFFFF;
5399 
5400 		break;
5401 	case IP_USER_FLOW:
5402 		rule->tuples.src_ip[IPV4_INDEX] =
5403 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5404 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5405 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5406 
5407 		rule->tuples.dst_ip[IPV4_INDEX] =
5408 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5409 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5410 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5411 
5412 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5413 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5414 
5415 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5416 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5417 
5418 		rule->tuples.ether_proto = ETH_P_IP;
5419 		rule->tuples_mask.ether_proto = 0xFFFF;
5420 
5421 		break;
5422 	case SCTP_V6_FLOW:
5423 	case TCP_V6_FLOW:
5424 	case UDP_V6_FLOW:
5425 		be32_to_cpu_array(rule->tuples.src_ip,
5426 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5427 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5428 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5429 
5430 		be32_to_cpu_array(rule->tuples.dst_ip,
5431 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5432 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5433 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5434 
5435 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5436 		rule->tuples_mask.src_port =
5437 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5438 
5439 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5440 		rule->tuples_mask.dst_port =
5441 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5442 
5443 		rule->tuples.ether_proto = ETH_P_IPV6;
5444 		rule->tuples_mask.ether_proto = 0xFFFF;
5445 
5446 		break;
5447 	case IPV6_USER_FLOW:
5448 		be32_to_cpu_array(rule->tuples.src_ip,
5449 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5450 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5451 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5452 
5453 		be32_to_cpu_array(rule->tuples.dst_ip,
5454 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5455 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5456 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5457 
5458 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5459 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5460 
5461 		rule->tuples.ether_proto = ETH_P_IPV6;
5462 		rule->tuples_mask.ether_proto = 0xFFFF;
5463 
5464 		break;
5465 	case ETHER_FLOW:
5466 		ether_addr_copy(rule->tuples.src_mac,
5467 				fs->h_u.ether_spec.h_source);
5468 		ether_addr_copy(rule->tuples_mask.src_mac,
5469 				fs->m_u.ether_spec.h_source);
5470 
5471 		ether_addr_copy(rule->tuples.dst_mac,
5472 				fs->h_u.ether_spec.h_dest);
5473 		ether_addr_copy(rule->tuples_mask.dst_mac,
5474 				fs->m_u.ether_spec.h_dest);
5475 
5476 		rule->tuples.ether_proto =
5477 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5478 		rule->tuples_mask.ether_proto =
5479 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5480 
5481 		break;
5482 	default:
5483 		return -EOPNOTSUPP;
5484 	}
5485 
5486 	switch (flow_type) {
5487 	case SCTP_V4_FLOW:
5488 	case SCTP_V6_FLOW:
5489 		rule->tuples.ip_proto = IPPROTO_SCTP;
5490 		rule->tuples_mask.ip_proto = 0xFF;
5491 		break;
5492 	case TCP_V4_FLOW:
5493 	case TCP_V6_FLOW:
5494 		rule->tuples.ip_proto = IPPROTO_TCP;
5495 		rule->tuples_mask.ip_proto = 0xFF;
5496 		break;
5497 	case UDP_V4_FLOW:
5498 	case UDP_V6_FLOW:
5499 		rule->tuples.ip_proto = IPPROTO_UDP;
5500 		rule->tuples_mask.ip_proto = 0xFF;
5501 		break;
5502 	default:
5503 		break;
5504 	}
5505 
5506 	if ((fs->flow_type & FLOW_EXT)) {
5507 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5508 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5509 	}
5510 
5511 	if (fs->flow_type & FLOW_MAC_EXT) {
5512 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5513 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5514 	}
5515 
5516 	return 0;
5517 }
5518 
5519 /* the caller must hold fd_rule_lock when calling this function */
5520 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5521 				struct hclge_fd_rule *rule)
5522 {
5523 	int ret;
5524 
5525 	if (!rule) {
5526 		dev_err(&hdev->pdev->dev,
5527 			"The flow director rule is NULL\n");
5528 		return -EINVAL;
5529 	}
5530 
5531 	/* it will never fail here, so there is no need to check the return value */
5532 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5533 
5534 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5535 	if (ret)
5536 		goto clear_rule;
5537 
5538 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5539 	if (ret)
5540 		goto clear_rule;
5541 
5542 	return 0;
5543 
5544 clear_rule:
5545 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5546 	return ret;
5547 }
5548 
5549 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5550 			      struct ethtool_rxnfc *cmd)
5551 {
5552 	struct hclge_vport *vport = hclge_get_vport(handle);
5553 	struct hclge_dev *hdev = vport->back;
5554 	u16 dst_vport_id = 0, q_index = 0;
5555 	struct ethtool_rx_flow_spec *fs;
5556 	struct hclge_fd_rule *rule;
5557 	u32 unused = 0;
5558 	u8 action;
5559 	int ret;
5560 
5561 	if (!hnae3_dev_fd_supported(hdev))
5562 		return -EOPNOTSUPP;
5563 
5564 	if (!hdev->fd_en) {
5565 		dev_warn(&hdev->pdev->dev,
5566 			 "Please enable flow director first\n");
5567 		return -EOPNOTSUPP;
5568 	}
5569 
5570 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5571 
5572 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5573 	if (ret) {
5574 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5575 		return ret;
5576 	}
5577 
5578 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5579 		action = HCLGE_FD_ACTION_DROP_PACKET;
5580 	} else {
5581 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5582 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5583 		u16 tqps;
5584 
5585 		if (vf > hdev->num_req_vfs) {
5586 			dev_err(&hdev->pdev->dev,
5587 				"Error: vf id (%d) > max vf num (%d)\n",
5588 				vf, hdev->num_req_vfs);
5589 			return -EINVAL;
5590 		}
5591 
5592 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5593 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5594 
5595 		if (ring >= tqps) {
5596 			dev_err(&hdev->pdev->dev,
5597 				"Error: queue id (%d) > max tqp num (%d)\n",
5598 				ring, tqps - 1);
5599 			return -EINVAL;
5600 		}
5601 
5602 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5603 		q_index = ring;
5604 	}
5605 
5606 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5607 	if (!rule)
5608 		return -ENOMEM;
5609 
5610 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5611 	if (ret) {
5612 		kfree(rule);
5613 		return ret;
5614 	}
5615 
5616 	rule->flow_type = fs->flow_type;
5617 
5618 	rule->location = fs->location;
5619 	rule->unused_tuple = unused;
5620 	rule->vf_id = dst_vport_id;
5621 	rule->queue_id = q_index;
5622 	rule->action = action;
5623 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5624 
5625 	/* to avoid rule conflict, when the user configures a rule via
5626 	 * ethtool, we need to clear all arfs rules
5627 	 */
5628 	hclge_clear_arfs_rules(handle);
5629 
5630 	spin_lock_bh(&hdev->fd_rule_lock);
5631 	ret = hclge_fd_config_rule(hdev, rule);
5632 
5633 	spin_unlock_bh(&hdev->fd_rule_lock);
5634 
5635 	return ret;
5636 }
5637 
5638 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5639 			      struct ethtool_rxnfc *cmd)
5640 {
5641 	struct hclge_vport *vport = hclge_get_vport(handle);
5642 	struct hclge_dev *hdev = vport->back;
5643 	struct ethtool_rx_flow_spec *fs;
5644 	int ret;
5645 
5646 	if (!hnae3_dev_fd_supported(hdev))
5647 		return -EOPNOTSUPP;
5648 
5649 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5650 
5651 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5652 		return -EINVAL;
5653 
5654 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5655 		dev_err(&hdev->pdev->dev,
5656 			"Delete fail, rule %d does not exist\n", fs->location);
5657 		return -ENOENT;
5658 	}
5659 
5660 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5661 				   NULL, false);
5662 	if (ret)
5663 		return ret;
5664 
5665 	spin_lock_bh(&hdev->fd_rule_lock);
5666 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5667 
5668 	spin_unlock_bh(&hdev->fd_rule_lock);
5669 
5670 	return ret;
5671 }
5672 
5673 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5674 				     bool clear_list)
5675 {
5676 	struct hclge_vport *vport = hclge_get_vport(handle);
5677 	struct hclge_dev *hdev = vport->back;
5678 	struct hclge_fd_rule *rule;
5679 	struct hlist_node *node;
5680 	u16 location;
5681 
5682 	if (!hnae3_dev_fd_supported(hdev))
5683 		return;
5684 
5685 	spin_lock_bh(&hdev->fd_rule_lock);
5686 	for_each_set_bit(location, hdev->fd_bmap,
5687 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5688 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5689 				     NULL, false);
5690 
5691 	if (clear_list) {
5692 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5693 					  rule_node) {
5694 			hlist_del(&rule->rule_node);
5695 			kfree(rule);
5696 		}
5697 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5698 		hdev->hclge_fd_rule_num = 0;
5699 		bitmap_zero(hdev->fd_bmap,
5700 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5701 	}
5702 
5703 	spin_unlock_bh(&hdev->fd_rule_lock);
5704 }
5705 
5706 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5707 {
5708 	struct hclge_vport *vport = hclge_get_vport(handle);
5709 	struct hclge_dev *hdev = vport->back;
5710 	struct hclge_fd_rule *rule;
5711 	struct hlist_node *node;
5712 	int ret;
5713 
5714 	/* Return ok here, because reset error handling will check this
5715 	 * return value. If error is returned here, the reset process will
5716 	 * fail.
5717 	 */
5718 	if (!hnae3_dev_fd_supported(hdev))
5719 		return 0;
5720 
5721 	/* if fd is disabled, the rules should not be restored during reset */
5722 	if (!hdev->fd_en)
5723 		return 0;
5724 
5725 	spin_lock_bh(&hdev->fd_rule_lock);
5726 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5727 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5728 		if (!ret)
5729 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5730 
5731 		if (ret) {
5732 			dev_warn(&hdev->pdev->dev,
5733 				 "Restore rule %d failed, remove it\n",
5734 				 rule->location);
5735 			clear_bit(rule->location, hdev->fd_bmap);
5736 			hlist_del(&rule->rule_node);
5737 			kfree(rule);
5738 			hdev->hclge_fd_rule_num--;
5739 		}
5740 	}
5741 
5742 	if (hdev->hclge_fd_rule_num)
5743 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5744 
5745 	spin_unlock_bh(&hdev->fd_rule_lock);
5746 
5747 	return 0;
5748 }
5749 
5750 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5751 				 struct ethtool_rxnfc *cmd)
5752 {
5753 	struct hclge_vport *vport = hclge_get_vport(handle);
5754 	struct hclge_dev *hdev = vport->back;
5755 
5756 	if (!hnae3_dev_fd_supported(hdev))
5757 		return -EOPNOTSUPP;
5758 
5759 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5760 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5761 
5762 	return 0;
5763 }
5764 
5765 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5766 				  struct ethtool_rxnfc *cmd)
5767 {
5768 	struct hclge_vport *vport = hclge_get_vport(handle);
5769 	struct hclge_fd_rule *rule = NULL;
5770 	struct hclge_dev *hdev = vport->back;
5771 	struct ethtool_rx_flow_spec *fs;
5772 	struct hlist_node *node2;
5773 
5774 	if (!hnae3_dev_fd_supported(hdev))
5775 		return -EOPNOTSUPP;
5776 
5777 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5778 
5779 	spin_lock_bh(&hdev->fd_rule_lock);
5780 
5781 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5782 		if (rule->location >= fs->location)
5783 			break;
5784 	}
5785 
5786 	if (!rule || fs->location != rule->location) {
5787 		spin_unlock_bh(&hdev->fd_rule_lock);
5788 
5789 		return -ENOENT;
5790 	}
5791 
5792 	fs->flow_type = rule->flow_type;
5793 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5794 	case SCTP_V4_FLOW:
5795 	case TCP_V4_FLOW:
5796 	case UDP_V4_FLOW:
5797 		fs->h_u.tcp_ip4_spec.ip4src =
5798 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5799 		fs->m_u.tcp_ip4_spec.ip4src =
5800 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5801 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5802 
5803 		fs->h_u.tcp_ip4_spec.ip4dst =
5804 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5805 		fs->m_u.tcp_ip4_spec.ip4dst =
5806 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5807 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5808 
5809 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5810 		fs->m_u.tcp_ip4_spec.psrc =
5811 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5812 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5813 
5814 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5815 		fs->m_u.tcp_ip4_spec.pdst =
5816 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5817 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5818 
5819 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5820 		fs->m_u.tcp_ip4_spec.tos =
5821 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5822 				0 : rule->tuples_mask.ip_tos;
5823 
5824 		break;
5825 	case IP_USER_FLOW:
5826 		fs->h_u.usr_ip4_spec.ip4src =
5827 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5828 		fs->m_u.tcp_ip4_spec.ip4src =
5829 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5830 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5831 
5832 		fs->h_u.usr_ip4_spec.ip4dst =
5833 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5834 		fs->m_u.usr_ip4_spec.ip4dst =
5835 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5836 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5837 
5838 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5839 		fs->m_u.usr_ip4_spec.tos =
5840 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5841 				0 : rule->tuples_mask.ip_tos;
5842 
5843 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5844 		fs->m_u.usr_ip4_spec.proto =
5845 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5846 				0 : rule->tuples_mask.ip_proto;
5847 
5848 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5849 
5850 		break;
5851 	case SCTP_V6_FLOW:
5852 	case TCP_V6_FLOW:
5853 	case UDP_V6_FLOW:
5854 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5855 				  rule->tuples.src_ip, IPV6_SIZE);
5856 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5857 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5858 			       sizeof(int) * IPV6_SIZE);
5859 		else
5860 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5861 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5862 
5863 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5864 				  rule->tuples.dst_ip, IPV6_SIZE);
5865 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5866 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5867 			       sizeof(int) * IPV6_SIZE);
5868 		else
5869 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5870 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5871 
5872 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5873 		fs->m_u.tcp_ip6_spec.psrc =
5874 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5875 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5876 
5877 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5878 		fs->m_u.tcp_ip6_spec.pdst =
5879 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5880 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5881 
5882 		break;
5883 	case IPV6_USER_FLOW:
5884 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5885 				  rule->tuples.src_ip, IPV6_SIZE);
5886 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5887 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5888 			       sizeof(int) * IPV6_SIZE);
5889 		else
5890 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5891 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5892 
5893 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5894 				  rule->tuples.dst_ip, IPV6_SIZE);
5895 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5896 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5897 			       sizeof(int) * IPV6_SIZE);
5898 		else
5899 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5900 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5901 
5902 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5903 		fs->m_u.usr_ip6_spec.l4_proto =
5904 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5905 				0 : rule->tuples_mask.ip_proto;
5906 
5907 		break;
5908 	case ETHER_FLOW:
5909 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5910 				rule->tuples.src_mac);
5911 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5912 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5913 		else
5914 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5915 					rule->tuples_mask.src_mac);
5916 
5917 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5918 				rule->tuples.dst_mac);
5919 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5920 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5921 		else
5922 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5923 					rule->tuples_mask.dst_mac);
5924 
5925 		fs->h_u.ether_spec.h_proto =
5926 				cpu_to_be16(rule->tuples.ether_proto);
5927 		fs->m_u.ether_spec.h_proto =
5928 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5929 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5930 
5931 		break;
5932 	default:
5933 		spin_unlock_bh(&hdev->fd_rule_lock);
5934 		return -EOPNOTSUPP;
5935 	}
5936 
5937 	if (fs->flow_type & FLOW_EXT) {
5938 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5939 		fs->m_ext.vlan_tci =
5940 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5941 				cpu_to_be16(VLAN_VID_MASK) :
5942 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5943 	}
5944 
5945 	if (fs->flow_type & FLOW_MAC_EXT) {
5946 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5947 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5948 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5949 		else
5950 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5951 					rule->tuples_mask.dst_mac);
5952 	}
5953 
5954 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5955 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5956 	} else {
5957 		u64 vf_id;
5958 
5959 		fs->ring_cookie = rule->queue_id;
5960 		vf_id = rule->vf_id;
5961 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5962 		fs->ring_cookie |= vf_id;
5963 	}
5964 
5965 	spin_unlock_bh(&hdev->fd_rule_lock);
5966 
5967 	return 0;
5968 }
5969 
5970 static int hclge_get_all_rules(struct hnae3_handle *handle,
5971 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5972 {
5973 	struct hclge_vport *vport = hclge_get_vport(handle);
5974 	struct hclge_dev *hdev = vport->back;
5975 	struct hclge_fd_rule *rule;
5976 	struct hlist_node *node2;
5977 	int cnt = 0;
5978 
5979 	if (!hnae3_dev_fd_supported(hdev))
5980 		return -EOPNOTSUPP;
5981 
5982 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5983 
5984 	spin_lock_bh(&hdev->fd_rule_lock);
5985 	hlist_for_each_entry_safe(rule, node2,
5986 				  &hdev->fd_rule_list, rule_node) {
5987 		if (cnt == cmd->rule_cnt) {
5988 			spin_unlock_bh(&hdev->fd_rule_lock);
5989 			return -EMSGSIZE;
5990 		}
5991 
5992 		rule_locs[cnt] = rule->location;
5993 		cnt++;
5994 	}
5995 
5996 	spin_unlock_bh(&hdev->fd_rule_lock);
5997 
5998 	cmd->rule_cnt = cnt;
5999 
6000 	return 0;
6001 }
6002 
6003 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6004 				     struct hclge_fd_rule_tuples *tuples)
6005 {
6006 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6007 	tuples->ip_proto = fkeys->basic.ip_proto;
6008 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6009 
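	/* For IPv4 the address occupies only the last u32 of the IPv6-sized
	 * array (index 3, which is presumably what IPV4_INDEX refers to
	 * elsewhere in this file); for IPv6 all four words are copied.
	 */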
6010 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6011 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6012 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6013 	} else {
6014 		memcpy(tuples->src_ip,
6015 		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6016 		       sizeof(tuples->src_ip));
6017 		memcpy(tuples->dst_ip,
6018 		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6019 		       sizeof(tuples->dst_ip));
6020 	}
6021 }
6022 
6023 /* traverse all rules, checking whether an existing rule has the same tuples */
6024 static struct hclge_fd_rule *
6025 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6026 			  const struct hclge_fd_rule_tuples *tuples)
6027 {
6028 	struct hclge_fd_rule *rule = NULL;
6029 	struct hlist_node *node;
6030 
6031 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6032 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6033 			return rule;
6034 	}
6035 
6036 	return NULL;
6037 }
6038 
6039 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6040 				     struct hclge_fd_rule *rule)
6041 {
6042 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6043 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6044 			     BIT(INNER_SRC_PORT);
6045 	rule->action = 0;
6046 	rule->vf_id = 0;
6047 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6048 	if (tuples->ether_proto == ETH_P_IP) {
6049 		if (tuples->ip_proto == IPPROTO_TCP)
6050 			rule->flow_type = TCP_V4_FLOW;
6051 		else
6052 			rule->flow_type = UDP_V4_FLOW;
6053 	} else {
6054 		if (tuples->ip_proto == IPPROTO_TCP)
6055 			rule->flow_type = TCP_V6_FLOW;
6056 		else
6057 			rule->flow_type = UDP_V6_FLOW;
6058 	}
6059 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6060 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6061 }
6062 
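/* aRFS entry point: look up an existing rule with the same flow tuples.
 * If none exists, allocate a free location from fd_bmap and program a new
 * rule; if one exists but steers to a different queue, rewrite only its
 * action; if it already points at the requested queue, do nothing.
 * Returns the rule location on success so the stack can track the flow.
 */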
6063 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6064 				      u16 flow_id, struct flow_keys *fkeys)
6065 {
6066 	struct hclge_vport *vport = hclge_get_vport(handle);
6067 	struct hclge_fd_rule_tuples new_tuples;
6068 	struct hclge_dev *hdev = vport->back;
6069 	struct hclge_fd_rule *rule;
6070 	u16 tmp_queue_id;
6071 	u16 bit_id;
6072 	int ret;
6073 
6074 	if (!hnae3_dev_fd_supported(hdev))
6075 		return -EOPNOTSUPP;
6076 
6077 	memset(&new_tuples, 0, sizeof(new_tuples));
6078 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6079 
6080 	spin_lock_bh(&hdev->fd_rule_lock);
6081 
6082 	/* when there is already an fd rule added by the user,
6083 	 * arfs should not work
6084 	 */
6085 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6086 		spin_unlock_bh(&hdev->fd_rule_lock);
6087 
6088 		return -EOPNOTSUPP;
6089 	}
6090 
6091 	/* check whether a flow director filter already exists for this flow:
6092 	 * if not, create a new filter for it;
6093 	 * if a filter exists with a different queue id, modify the filter;
6094 	 * if a filter exists with the same queue id, do nothing
6095 	 */
6096 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6097 	if (!rule) {
6098 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6099 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6100 			spin_unlock_bh(&hdev->fd_rule_lock);
6101 
6102 			return -ENOSPC;
6103 		}
6104 
6105 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6106 		if (!rule) {
6107 			spin_unlock_bh(&hdev->fd_rule_lock);
6108 
6109 			return -ENOMEM;
6110 		}
6111 
6112 		set_bit(bit_id, hdev->fd_bmap);
6113 		rule->location = bit_id;
6114 		rule->flow_id = flow_id;
6115 		rule->queue_id = queue_id;
6116 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6117 		ret = hclge_fd_config_rule(hdev, rule);
6118 
6119 		spin_unlock_bh(&hdev->fd_rule_lock);
6120 
6121 		if (ret)
6122 			return ret;
6123 
6124 		return rule->location;
6125 	}
6126 
6127 	spin_unlock_bh(&hdev->fd_rule_lock);
6128 
6129 	if (rule->queue_id == queue_id)
6130 		return rule->location;
6131 
6132 	tmp_queue_id = rule->queue_id;
6133 	rule->queue_id = queue_id;
6134 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6135 	if (ret) {
6136 		rule->queue_id = tmp_queue_id;
6137 		return ret;
6138 	}
6139 
6140 	return rule->location;
6141 }
6142 
6143 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6144 {
6145 #ifdef CONFIG_RFS_ACCEL
6146 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6147 	struct hclge_fd_rule *rule;
6148 	struct hlist_node *node;
6149 	HLIST_HEAD(del_list);
6150 
6151 	spin_lock_bh(&hdev->fd_rule_lock);
6152 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6153 		spin_unlock_bh(&hdev->fd_rule_lock);
6154 		return;
6155 	}
6156 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6157 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6158 					rule->flow_id, rule->location)) {
6159 			hlist_del_init(&rule->rule_node);
6160 			hlist_add_head(&rule->rule_node, &del_list);
6161 			hdev->hclge_fd_rule_num--;
6162 			clear_bit(rule->location, hdev->fd_bmap);
6163 		}
6164 	}
6165 	spin_unlock_bh(&hdev->fd_rule_lock);
6166 
6167 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6168 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6169 				     rule->location, NULL, false);
6170 		kfree(rule);
6171 	}
6172 #endif
6173 }
6174 
6175 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6176 {
6177 #ifdef CONFIG_RFS_ACCEL
6178 	struct hclge_vport *vport = hclge_get_vport(handle);
6179 	struct hclge_dev *hdev = vport->back;
6180 
6181 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6182 		hclge_del_all_fd_entries(handle, true);
6183 #endif
6184 }
6185 
6186 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6187 {
6188 	struct hclge_vport *vport = hclge_get_vport(handle);
6189 	struct hclge_dev *hdev = vport->back;
6190 
6191 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6192 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6193 }
6194 
6195 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6196 {
6197 	struct hclge_vport *vport = hclge_get_vport(handle);
6198 	struct hclge_dev *hdev = vport->back;
6199 
6200 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6201 }
6202 
6203 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6204 {
6205 	struct hclge_vport *vport = hclge_get_vport(handle);
6206 	struct hclge_dev *hdev = vport->back;
6207 
6208 	return hdev->rst_stats.hw_reset_done_cnt;
6209 }
6210 
6211 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6212 {
6213 	struct hclge_vport *vport = hclge_get_vport(handle);
6214 	struct hclge_dev *hdev = vport->back;
6215 	bool clear;
6216 
6217 	hdev->fd_en = enable;
6218 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6219 	if (!enable)
6220 		hclge_del_all_fd_entries(handle, clear);
6221 	else
6222 		hclge_restore_fd_entries(handle);
6223 }
6224 
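/* Build the MAC mode word and write it with a single command: when
 * enabling, turn on TX/RX, padding, FCS insertion/stripping and the
 * oversize/undersize handling bits; when disabling, write an all-zero
 * word so the MAC stops passing traffic.
 */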
6225 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6226 {
6227 	struct hclge_desc desc;
6228 	struct hclge_config_mac_mode_cmd *req =
6229 		(struct hclge_config_mac_mode_cmd *)desc.data;
6230 	u32 loop_en = 0;
6231 	int ret;
6232 
6233 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6234 
6235 	if (enable) {
6236 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6237 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6238 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6239 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6240 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6241 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6242 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6243 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6244 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6245 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6246 	}
6247 
6248 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6249 
6250 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6251 	if (ret)
6252 		dev_err(&hdev->pdev->dev,
6253 			"mac enable fail, ret =%d.\n", ret);
6254 }
6255 
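/* Read-modify-write one function's MAC VLAN switch parameter: fetch the
 * current value for the function identified by hclge_get_port_number(),
 * merge switch_param into it and write it back together with param_mask
 * using the same descriptor.
 */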
6256 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6257 				     u8 switch_param, u8 param_mask)
6258 {
6259 	struct hclge_mac_vlan_switch_cmd *req;
6260 	struct hclge_desc desc;
6261 	u32 func_id;
6262 	int ret;
6263 
6264 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6265 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6266 
6267 	/* read current config parameter */
6268 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6269 				   true);
6270 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6271 	req->func_id = cpu_to_le32(func_id);
6272 
6273 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6274 	if (ret) {
6275 		dev_err(&hdev->pdev->dev,
6276 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6277 		return ret;
6278 	}
6279 
6280 	/* modify and write new config parameter */
6281 	hclge_cmd_reuse_desc(&desc, false);
6282 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6283 	req->param_mask = param_mask;
6284 
6285 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6286 	if (ret)
6287 		dev_err(&hdev->pdev->dev,
6288 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6289 	return ret;
6290 }
6291 
6292 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6293 				       int link_ret)
6294 {
6295 #define HCLGE_PHY_LINK_STATUS_NUM  200
6296 
6297 	struct phy_device *phydev = hdev->hw.mac.phydev;
6298 	int i = 0;
6299 	int ret;
6300 
6301 	do {
6302 		ret = phy_read_status(phydev);
6303 		if (ret) {
6304 			dev_err(&hdev->pdev->dev,
6305 				"phy update link status fail, ret = %d\n", ret);
6306 			return;
6307 		}
6308 
6309 		if (phydev->link == link_ret)
6310 			break;
6311 
6312 		msleep(HCLGE_LINK_STATUS_MS);
6313 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6314 }
6315 
6316 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6317 {
6318 #define HCLGE_MAC_LINK_STATUS_NUM  100
6319 
6320 	int i = 0;
6321 	int ret;
6322 
6323 	do {
6324 		ret = hclge_get_mac_link_status(hdev);
6325 		if (ret < 0)
6326 			return ret;
6327 		else if (ret == link_ret)
6328 			return 0;
6329 
6330 		msleep(HCLGE_LINK_STATUS_MS);
6331 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6332 	return -EBUSY;
6333 }
6334 
6335 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6336 					  bool is_phy)
6337 {
6338 #define HCLGE_LINK_STATUS_DOWN 0
6339 #define HCLGE_LINK_STATUS_UP   1
6340 
6341 	int link_ret;
6342 
6343 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6344 
6345 	if (is_phy)
6346 		hclge_phy_link_status_wait(hdev, link_ret);
6347 
6348 	return hclge_mac_link_status_wait(hdev, link_ret);
6349 }
6350 
6351 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6352 {
6353 	struct hclge_config_mac_mode_cmd *req;
6354 	struct hclge_desc desc;
6355 	u32 loop_en;
6356 	int ret;
6357 
6358 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6359 	/* 1 Read out the MAC mode config first */
6360 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6361 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6362 	if (ret) {
6363 		dev_err(&hdev->pdev->dev,
6364 			"mac loopback get fail, ret =%d.\n", ret);
6365 		return ret;
6366 	}
6367 
6368 	/* 2 Then set up the loopback flag */
6369 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6370 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6371 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6372 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6373 
6374 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6375 
6376 	/* 3 Configure the MAC work mode with the loopback flag
6377 	 * and its original configuration parameters
6378 	 */
6379 	hclge_cmd_reuse_desc(&desc, false);
6380 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6381 	if (ret)
6382 		dev_err(&hdev->pdev->dev,
6383 			"mac loopback set fail, ret =%d.\n", ret);
6384 	return ret;
6385 }
6386 
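/* Configure serial or parallel serdes (internal) loopback, then poll the
 * command result for up to HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS
 * milliseconds until the firmware reports DONE, and check the SUCCESS bit.
 */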
6387 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6388 				     enum hnae3_loop loop_mode)
6389 {
6390 #define HCLGE_SERDES_RETRY_MS	10
6391 #define HCLGE_SERDES_RETRY_NUM	100
6392 
6393 	struct hclge_serdes_lb_cmd *req;
6394 	struct hclge_desc desc;
6395 	int ret, i = 0;
6396 	u8 loop_mode_b;
6397 
6398 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6399 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6400 
6401 	switch (loop_mode) {
6402 	case HNAE3_LOOP_SERIAL_SERDES:
6403 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6404 		break;
6405 	case HNAE3_LOOP_PARALLEL_SERDES:
6406 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6407 		break;
6408 	default:
6409 		dev_err(&hdev->pdev->dev,
6410 			"unsupported serdes loopback mode %d\n", loop_mode);
6411 		return -ENOTSUPP;
6412 	}
6413 
6414 	if (en) {
6415 		req->enable = loop_mode_b;
6416 		req->mask = loop_mode_b;
6417 	} else {
6418 		req->mask = loop_mode_b;
6419 	}
6420 
6421 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6422 	if (ret) {
6423 		dev_err(&hdev->pdev->dev,
6424 			"serdes loopback set fail, ret = %d\n", ret);
6425 		return ret;
6426 	}
6427 
6428 	do {
6429 		msleep(HCLGE_SERDES_RETRY_MS);
6430 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6431 					   true);
6432 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6433 		if (ret) {
6434 			dev_err(&hdev->pdev->dev,
6435 				"serdes loopback get fail, ret = %d\n", ret);
6436 			return ret;
6437 		}
6438 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6439 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6440 
6441 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6442 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6443 		return -EBUSY;
6444 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6445 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6446 		return -EIO;
6447 	}
6448 	return ret;
6449 }
6450 
6451 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6452 				     enum hnae3_loop loop_mode)
6453 {
6454 	int ret;
6455 
6456 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6457 	if (ret)
6458 		return ret;
6459 
6460 	hclge_cfg_mac_mode(hdev, en);
6461 
6462 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6463 	if (ret)
6464 		dev_err(&hdev->pdev->dev,
6465 			"serdes loopback config mac mode timeout\n");
6466 
6467 	return ret;
6468 }
6469 
6470 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6471 				     struct phy_device *phydev)
6472 {
6473 	int ret;
6474 
6475 	if (!phydev->suspended) {
6476 		ret = phy_suspend(phydev);
6477 		if (ret)
6478 			return ret;
6479 	}
6480 
6481 	ret = phy_resume(phydev);
6482 	if (ret)
6483 		return ret;
6484 
6485 	return phy_loopback(phydev, true);
6486 }
6487 
6488 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6489 				      struct phy_device *phydev)
6490 {
6491 	int ret;
6492 
6493 	ret = phy_loopback(phydev, false);
6494 	if (ret)
6495 		return ret;
6496 
6497 	return phy_suspend(phydev);
6498 }
6499 
6500 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6501 {
6502 	struct phy_device *phydev = hdev->hw.mac.phydev;
6503 	int ret;
6504 
6505 	if (!phydev)
6506 		return -ENOTSUPP;
6507 
6508 	if (en)
6509 		ret = hclge_enable_phy_loopback(hdev, phydev);
6510 	else
6511 		ret = hclge_disable_phy_loopback(hdev, phydev);
6512 	if (ret) {
6513 		dev_err(&hdev->pdev->dev,
6514 			"set phy loopback fail, ret = %d\n", ret);
6515 		return ret;
6516 	}
6517 
6518 	hclge_cfg_mac_mode(hdev, en);
6519 
6520 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6521 	if (ret)
6522 		dev_err(&hdev->pdev->dev,
6523 			"phy loopback config mac mode timeout\n");
6524 
6525 	return ret;
6526 }
6527 
6528 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6529 			    int stream_id, bool enable)
6530 {
6531 	struct hclge_desc desc;
6532 	struct hclge_cfg_com_tqp_queue_cmd *req =
6533 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6534 	int ret;
6535 
6536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6537 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6538 	req->stream_id = cpu_to_le16(stream_id);
6539 	if (enable)
6540 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6541 
6542 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6543 	if (ret)
6544 		dev_err(&hdev->pdev->dev,
6545 			"Tqp enable fail, status =%d.\n", ret);
6546 	return ret;
6547 }
6548 
6549 static int hclge_set_loopback(struct hnae3_handle *handle,
6550 			      enum hnae3_loop loop_mode, bool en)
6551 {
6552 	struct hclge_vport *vport = hclge_get_vport(handle);
6553 	struct hnae3_knic_private_info *kinfo;
6554 	struct hclge_dev *hdev = vport->back;
6555 	int i, ret;
6556 
6557 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6558 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6559 	 * the same, the packets are looped back in the SSU. If SSU loopback
6560 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6561 	 */
6562 	if (hdev->pdev->revision >= 0x21) {
6563 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6564 
6565 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6566 						HCLGE_SWITCH_ALW_LPBK_MASK);
6567 		if (ret)
6568 			return ret;
6569 	}
6570 
6571 	switch (loop_mode) {
6572 	case HNAE3_LOOP_APP:
6573 		ret = hclge_set_app_loopback(hdev, en);
6574 		break;
6575 	case HNAE3_LOOP_SERIAL_SERDES:
6576 	case HNAE3_LOOP_PARALLEL_SERDES:
6577 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6578 		break;
6579 	case HNAE3_LOOP_PHY:
6580 		ret = hclge_set_phy_loopback(hdev, en);
6581 		break;
6582 	default:
6583 		ret = -ENOTSUPP;
6584 		dev_err(&hdev->pdev->dev,
6585 			"loop_mode %d is not supported\n", loop_mode);
6586 		break;
6587 	}
6588 
6589 	if (ret)
6590 		return ret;
6591 
6592 	kinfo = &vport->nic.kinfo;
6593 	for (i = 0; i < kinfo->num_tqps; i++) {
6594 		ret = hclge_tqp_enable(hdev, i, 0, en);
6595 		if (ret)
6596 			return ret;
6597 	}
6598 
6599 	return 0;
6600 }
6601 
6602 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6603 {
6604 	int ret;
6605 
6606 	ret = hclge_set_app_loopback(hdev, false);
6607 	if (ret)
6608 		return ret;
6609 
6610 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6611 	if (ret)
6612 		return ret;
6613 
6614 	return hclge_cfg_serdes_loopback(hdev, false,
6615 					 HNAE3_LOOP_PARALLEL_SERDES);
6616 }
6617 
6618 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6619 {
6620 	struct hclge_vport *vport = hclge_get_vport(handle);
6621 	struct hnae3_knic_private_info *kinfo;
6622 	struct hnae3_queue *queue;
6623 	struct hclge_tqp *tqp;
6624 	int i;
6625 
6626 	kinfo = &vport->nic.kinfo;
6627 	for (i = 0; i < kinfo->num_tqps; i++) {
6628 		queue = handle->kinfo.tqp[i];
6629 		tqp = container_of(queue, struct hclge_tqp, q);
6630 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6631 	}
6632 }
6633 
6634 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6635 {
6636 	struct hclge_vport *vport = hclge_get_vport(handle);
6637 	struct hclge_dev *hdev = vport->back;
6638 
6639 	if (enable) {
6640 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6641 	} else {
6642 		/* Set the DOWN flag here to prevent the service task from
6643 		 * being scheduled again
6644 		 */
6645 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6646 		cancel_delayed_work_sync(&hdev->service_task);
6647 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6648 	}
6649 }
6650 
6651 static int hclge_ae_start(struct hnae3_handle *handle)
6652 {
6653 	struct hclge_vport *vport = hclge_get_vport(handle);
6654 	struct hclge_dev *hdev = vport->back;
6655 
6656 	/* mac enable */
6657 	hclge_cfg_mac_mode(hdev, true);
6658 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6659 	hdev->hw.mac.link = 0;
6660 
6661 	/* reset tqp stats */
6662 	hclge_reset_tqp_stats(handle);
6663 
6664 	hclge_mac_start_phy(hdev);
6665 
6666 	return 0;
6667 }
6668 
6669 static void hclge_ae_stop(struct hnae3_handle *handle)
6670 {
6671 	struct hclge_vport *vport = hclge_get_vport(handle);
6672 	struct hclge_dev *hdev = vport->back;
6673 	int i;
6674 
6675 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6676 
6677 	hclge_clear_arfs_rules(handle);
6678 
6679 	/* If it is not a PF reset, the firmware will disable the MAC,
6680 	 * so we only need to stop the phy here.
6681 	 */
6682 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6683 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6684 		hclge_mac_stop_phy(hdev);
6685 		hclge_update_link_status(hdev);
6686 		return;
6687 	}
6688 
6689 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6690 		hclge_reset_tqp(handle, i);
6691 
6692 	hclge_config_mac_tnl_int(hdev, false);
6693 
6694 	/* Mac disable */
6695 	hclge_cfg_mac_mode(hdev, false);
6696 
6697 	hclge_mac_stop_phy(hdev);
6698 
6699 	/* reset tqp stats */
6700 	hclge_reset_tqp_stats(handle);
6701 	hclge_update_link_status(hdev);
6702 }
6703 
6704 int hclge_vport_start(struct hclge_vport *vport)
6705 {
6706 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6707 	vport->last_active_jiffies = jiffies;
6708 	return 0;
6709 }
6710 
6711 void hclge_vport_stop(struct hclge_vport *vport)
6712 {
6713 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6714 }
6715 
6716 static int hclge_client_start(struct hnae3_handle *handle)
6717 {
6718 	struct hclge_vport *vport = hclge_get_vport(handle);
6719 
6720 	return hclge_vport_start(vport);
6721 }
6722 
6723 static void hclge_client_stop(struct hnae3_handle *handle)
6724 {
6725 	struct hclge_vport *vport = hclge_get_vport(handle);
6726 
6727 	hclge_vport_stop(vport);
6728 }
6729 
6730 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6731 					 u16 cmdq_resp, u8  resp_code,
6732 					 enum hclge_mac_vlan_tbl_opcode op)
6733 {
6734 	struct hclge_dev *hdev = vport->back;
6735 
6736 	if (cmdq_resp) {
6737 		dev_err(&hdev->pdev->dev,
6738 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6739 			cmdq_resp);
6740 		return -EIO;
6741 	}
6742 
6743 	if (op == HCLGE_MAC_VLAN_ADD) {
6744 		if ((!resp_code) || (resp_code == 1)) {
6745 			return 0;
6746 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6747 			dev_err(&hdev->pdev->dev,
6748 				"add mac addr failed for uc_overflow.\n");
6749 			return -ENOSPC;
6750 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6751 			dev_err(&hdev->pdev->dev,
6752 				"add mac addr failed for mc_overflow.\n");
6753 			return -ENOSPC;
6754 		}
6755 
6756 		dev_err(&hdev->pdev->dev,
6757 			"add mac addr failed for undefined, code=%u.\n",
6758 			resp_code);
6759 		return -EIO;
6760 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6761 		if (!resp_code) {
6762 			return 0;
6763 		} else if (resp_code == 1) {
6764 			dev_dbg(&hdev->pdev->dev,
6765 				"remove mac addr failed for miss.\n");
6766 			return -ENOENT;
6767 		}
6768 
6769 		dev_err(&hdev->pdev->dev,
6770 			"remove mac addr failed for undefined, code=%u.\n",
6771 			resp_code);
6772 		return -EIO;
6773 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6774 		if (!resp_code) {
6775 			return 0;
6776 		} else if (resp_code == 1) {
6777 			dev_dbg(&hdev->pdev->dev,
6778 				"lookup mac addr failed for miss.\n");
6779 			return -ENOENT;
6780 		}
6781 
6782 		dev_err(&hdev->pdev->dev,
6783 			"lookup mac addr failed for undefined, code=%u.\n",
6784 			resp_code);
6785 		return -EIO;
6786 	}
6787 
6788 	dev_err(&hdev->pdev->dev,
6789 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6790 
6791 	return -EINVAL;
6792 }
6793 
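/* A multicast MAC_VLAN entry carries a function bitmap spread over
 * descriptors 1 and 2 (the first 192 function ids live in desc[1], the
 * rest in desc[2]). Set or clear the bit for @vfid in that bitmap.
 */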
6794 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6795 {
6796 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6797 
6798 	unsigned int word_num;
6799 	unsigned int bit_num;
6800 
6801 	if (vfid > 255 || vfid < 0)
6802 		return -EIO;
6803 
6804 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6805 		word_num = vfid / 32;
6806 		bit_num  = vfid % 32;
6807 		if (clr)
6808 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6809 		else
6810 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6811 	} else {
6812 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6813 		bit_num  = vfid % 32;
6814 		if (clr)
6815 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6816 		else
6817 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6818 	}
6819 
6820 	return 0;
6821 }
6822 
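/* Return true when no function id bit is left set in the bitmap held by
 * descriptors 1 and 2, i.e. the multicast entry is no longer referenced
 * by any function and can be removed from the table.
 */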
6823 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6824 {
6825 #define HCLGE_DESC_NUMBER 3
6826 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6827 	int i, j;
6828 
6829 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6830 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6831 			if (desc[i].data[j])
6832 				return false;
6833 
6834 	return true;
6835 }
6836 
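/* Pack the 6-byte MAC address into the entry's little-endian fields:
 * bytes 0-3 go into mac_addr_hi32 (byte 0 in the least significant
 * position) and bytes 4-5 into mac_addr_lo16.
 */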
6837 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6838 				   const u8 *addr, bool is_mc)
6839 {
6840 	const unsigned char *mac_addr = addr;
6841 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6842 		       (mac_addr[0]) | (mac_addr[1] << 8);
6843 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6844 
6845 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6846 	if (is_mc) {
6847 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6848 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6849 	}
6850 
6851 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6852 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6853 }
6854 
6855 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6856 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6857 {
6858 	struct hclge_dev *hdev = vport->back;
6859 	struct hclge_desc desc;
6860 	u8 resp_code;
6861 	u16 retval;
6862 	int ret;
6863 
6864 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6865 
6866 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6867 
6868 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6869 	if (ret) {
6870 		dev_err(&hdev->pdev->dev,
6871 			"del mac addr failed for cmd_send, ret =%d.\n",
6872 			ret);
6873 		return ret;
6874 	}
6875 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6876 	retval = le16_to_cpu(desc.retval);
6877 
6878 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6879 					     HCLGE_MAC_VLAN_REMOVE);
6880 }
6881 
6882 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6883 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
6884 				     struct hclge_desc *desc,
6885 				     bool is_mc)
6886 {
6887 	struct hclge_dev *hdev = vport->back;
6888 	u8 resp_code;
6889 	u16 retval;
6890 	int ret;
6891 
6892 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6893 	if (is_mc) {
6894 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6895 		memcpy(desc[0].data,
6896 		       req,
6897 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6898 		hclge_cmd_setup_basic_desc(&desc[1],
6899 					   HCLGE_OPC_MAC_VLAN_ADD,
6900 					   true);
6901 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6902 		hclge_cmd_setup_basic_desc(&desc[2],
6903 					   HCLGE_OPC_MAC_VLAN_ADD,
6904 					   true);
6905 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
6906 	} else {
6907 		memcpy(desc[0].data,
6908 		       req,
6909 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6910 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
6911 	}
6912 	if (ret) {
6913 		dev_err(&hdev->pdev->dev,
6914 			"lookup mac addr failed for cmd_send, ret =%d.\n",
6915 			ret);
6916 		return ret;
6917 	}
6918 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6919 	retval = le16_to_cpu(desc[0].retval);
6920 
6921 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6922 					     HCLGE_MAC_VLAN_LKUP);
6923 }
6924 
6925 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6926 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
6927 				  struct hclge_desc *mc_desc)
6928 {
6929 	struct hclge_dev *hdev = vport->back;
6930 	int cfg_status;
6931 	u8 resp_code;
6932 	u16 retval;
6933 	int ret;
6934 
6935 	if (!mc_desc) {
6936 		struct hclge_desc desc;
6937 
6938 		hclge_cmd_setup_basic_desc(&desc,
6939 					   HCLGE_OPC_MAC_VLAN_ADD,
6940 					   false);
6941 		memcpy(desc.data, req,
6942 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6943 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6944 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6945 		retval = le16_to_cpu(desc.retval);
6946 
6947 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6948 							   resp_code,
6949 							   HCLGE_MAC_VLAN_ADD);
6950 	} else {
6951 		hclge_cmd_reuse_desc(&mc_desc[0], false);
6952 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6953 		hclge_cmd_reuse_desc(&mc_desc[1], false);
6954 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6955 		hclge_cmd_reuse_desc(&mc_desc[2], false);
6956 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6957 		memcpy(mc_desc[0].data, req,
6958 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6959 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6960 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6961 		retval = le16_to_cpu(mc_desc[0].retval);
6962 
6963 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6964 							   resp_code,
6965 							   HCLGE_MAC_VLAN_ADD);
6966 	}
6967 
6968 	if (ret) {
6969 		dev_err(&hdev->pdev->dev,
6970 			"add mac addr failed for cmd_send, ret =%d.\n",
6971 			ret);
6972 		return ret;
6973 	}
6974 
6975 	return cfg_status;
6976 }
6977 
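/* Partition the unicast MAC (UMV) space allocated by firmware: the PF and
 * each requested VF get a private quota of priv_umv_size entries, and the
 * rest forms a pool shared by all of them (share_umv_size).
 */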
6978 static int hclge_init_umv_space(struct hclge_dev *hdev)
6979 {
6980 	u16 allocated_size = 0;
6981 	int ret;
6982 
6983 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6984 				  true);
6985 	if (ret)
6986 		return ret;
6987 
6988 	if (allocated_size < hdev->wanted_umv_size)
6989 		dev_warn(&hdev->pdev->dev,
6990 			 "Alloc umv space failed, want %d, get %d\n",
6991 			 hdev->wanted_umv_size, allocated_size);
6992 
6993 	mutex_init(&hdev->umv_mutex);
6994 	hdev->max_umv_size = allocated_size;
6995 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6996 	 * preserve some unicast mac vlan table entries shared by pf
6997 	 * and its vfs.
6998 	 */
6999 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7000 	hdev->share_umv_size = hdev->priv_umv_size +
7001 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7002 
7003 	return 0;
7004 }
7005 
7006 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7007 {
7008 	int ret;
7009 
7010 	if (hdev->max_umv_size > 0) {
7011 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7012 					  false);
7013 		if (ret)
7014 			return ret;
7015 		hdev->max_umv_size = 0;
7016 	}
7017 	mutex_destroy(&hdev->umv_mutex);
7018 
7019 	return 0;
7020 }
7021 
7022 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7023 			       u16 *allocated_size, bool is_alloc)
7024 {
7025 	struct hclge_umv_spc_alc_cmd *req;
7026 	struct hclge_desc desc;
7027 	int ret;
7028 
7029 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7030 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7031 	if (!is_alloc)
7032 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7033 
7034 	req->space_size = cpu_to_le32(space_size);
7035 
7036 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7037 	if (ret) {
7038 		dev_err(&hdev->pdev->dev,
7039 			"%s umv space failed for cmd_send, ret =%d\n",
7040 			is_alloc ? "allocate" : "free", ret);
7041 		return ret;
7042 	}
7043 
7044 	if (is_alloc && allocated_size)
7045 		*allocated_size = le32_to_cpu(desc.data[1]);
7046 
7047 	return 0;
7048 }
7049 
7050 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7051 {
7052 	struct hclge_vport *vport;
7053 	int i;
7054 
7055 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7056 		vport = &hdev->vport[i];
7057 		vport->used_umv_num = 0;
7058 	}
7059 
7060 	mutex_lock(&hdev->umv_mutex);
7061 	hdev->share_umv_size = hdev->priv_umv_size +
7062 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7063 	mutex_unlock(&hdev->umv_mutex);
7064 }
7065 
7066 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7067 {
7068 	struct hclge_dev *hdev = vport->back;
7069 	bool is_full;
7070 
7071 	mutex_lock(&hdev->umv_mutex);
7072 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7073 		   hdev->share_umv_size == 0);
7074 	mutex_unlock(&hdev->umv_mutex);
7075 
7076 	return is_full;
7077 }
7078 
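/* Account for one unicast MAC entry being added (@is_free == false) or
 * removed (@is_free == true): a vport consumes its private quota first
 * and only then eats into the shared pool; freeing reverses this.
 */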
7079 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7080 {
7081 	struct hclge_dev *hdev = vport->back;
7082 
7083 	mutex_lock(&hdev->umv_mutex);
7084 	if (is_free) {
7085 		if (vport->used_umv_num > hdev->priv_umv_size)
7086 			hdev->share_umv_size++;
7087 
7088 		if (vport->used_umv_num > 0)
7089 			vport->used_umv_num--;
7090 	} else {
7091 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7092 		    hdev->share_umv_size > 0)
7093 			hdev->share_umv_size--;
7094 		vport->used_umv_num++;
7095 	}
7096 	mutex_unlock(&hdev->umv_mutex);
7097 }
7098 
7099 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7100 			     const unsigned char *addr)
7101 {
7102 	struct hclge_vport *vport = hclge_get_vport(handle);
7103 
7104 	return hclge_add_uc_addr_common(vport, addr);
7105 }
7106 
7107 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7108 			     const unsigned char *addr)
7109 {
7110 	struct hclge_dev *hdev = vport->back;
7111 	struct hclge_mac_vlan_tbl_entry_cmd req;
7112 	struct hclge_desc desc;
7113 	u16 egress_port = 0;
7114 	int ret;
7115 
7116 	/* mac addr check */
7117 	if (is_zero_ether_addr(addr) ||
7118 	    is_broadcast_ether_addr(addr) ||
7119 	    is_multicast_ether_addr(addr)) {
7120 		dev_err(&hdev->pdev->dev,
7121 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7122 			 addr, is_zero_ether_addr(addr),
7123 			 is_broadcast_ether_addr(addr),
7124 			 is_multicast_ether_addr(addr));
7125 		return -EINVAL;
7126 	}
7127 
7128 	memset(&req, 0, sizeof(req));
7129 
7130 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7131 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7132 
7133 	req.egress_port = cpu_to_le16(egress_port);
7134 
7135 	hclge_prepare_mac_addr(&req, addr, false);
7136 
7137 	/* Lookup the mac address in the mac_vlan table, and add
7138 	 * it if the entry does not exist. Duplicate unicast entries
7139 	 * are not allowed in the mac vlan table.
7140 	 */
7141 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7142 	if (ret == -ENOENT) {
7143 		if (!hclge_is_umv_space_full(vport)) {
7144 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7145 			if (!ret)
7146 				hclge_update_umv_space(vport, false);
7147 			return ret;
7148 		}
7149 
7150 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7151 			hdev->priv_umv_size);
7152 
7153 		return -ENOSPC;
7154 	}
7155 
7156 	/* check if we just hit the duplicate */
7157 	if (!ret) {
7158 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
7159 			 vport->vport_id, addr);
7160 		return 0;
7161 	}
7162 
7163 	dev_err(&hdev->pdev->dev,
7164 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7165 		addr);
7166 
7167 	return ret;
7168 }
7169 
7170 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7171 			    const unsigned char *addr)
7172 {
7173 	struct hclge_vport *vport = hclge_get_vport(handle);
7174 
7175 	return hclge_rm_uc_addr_common(vport, addr);
7176 }
7177 
7178 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7179 			    const unsigned char *addr)
7180 {
7181 	struct hclge_dev *hdev = vport->back;
7182 	struct hclge_mac_vlan_tbl_entry_cmd req;
7183 	int ret;
7184 
7185 	/* mac addr check */
7186 	if (is_zero_ether_addr(addr) ||
7187 	    is_broadcast_ether_addr(addr) ||
7188 	    is_multicast_ether_addr(addr)) {
7189 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7190 			addr);
7191 		return -EINVAL;
7192 	}
7193 
7194 	memset(&req, 0, sizeof(req));
7195 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7196 	hclge_prepare_mac_addr(&req, addr, false);
7197 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7198 	if (!ret)
7199 		hclge_update_umv_space(vport, true);
7200 
7201 	return ret;
7202 }
7203 
7204 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7205 			     const unsigned char *addr)
7206 {
7207 	struct hclge_vport *vport = hclge_get_vport(handle);
7208 
7209 	return hclge_add_mc_addr_common(vport, addr);
7210 }
7211 
7212 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7213 			     const unsigned char *addr)
7214 {
7215 	struct hclge_dev *hdev = vport->back;
7216 	struct hclge_mac_vlan_tbl_entry_cmd req;
7217 	struct hclge_desc desc[3];
7218 	int status;
7219 
7220 	/* mac addr check */
7221 	if (!is_multicast_ether_addr(addr)) {
7222 		dev_err(&hdev->pdev->dev,
7223 			"Add mc mac err! invalid mac:%pM.\n",
7224 			 addr);
7225 		return -EINVAL;
7226 	}
7227 	memset(&req, 0, sizeof(req));
7228 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7229 	hclge_prepare_mac_addr(&req, addr, true);
7230 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7231 	if (status) {
7232 		/* This mac addr does not exist, add a new entry for it */
7233 		memset(desc[0].data, 0, sizeof(desc[0].data));
7234 		memset(desc[1].data, 0, sizeof(desc[0].data));
7235 		memset(desc[2].data, 0, sizeof(desc[0].data));
7236 	}
7237 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7238 	if (status)
7239 		return status;
7240 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7241 
7242 	if (status == -ENOSPC)
7243 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7244 
7245 	return status;
7246 }
7247 
7248 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7249 			    const unsigned char *addr)
7250 {
7251 	struct hclge_vport *vport = hclge_get_vport(handle);
7252 
7253 	return hclge_rm_mc_addr_common(vport, addr);
7254 }
7255 
7256 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7257 			    const unsigned char *addr)
7258 {
7259 	struct hclge_dev *hdev = vport->back;
7260 	struct hclge_mac_vlan_tbl_entry_cmd req;
7261 	enum hclge_cmd_status status;
7262 	struct hclge_desc desc[3];
7263 
7264 	/* mac addr check */
7265 	if (!is_multicast_ether_addr(addr)) {
7266 		dev_dbg(&hdev->pdev->dev,
7267 			"Remove mc mac err! invalid mac:%pM.\n",
7268 			 addr);
7269 		return -EINVAL;
7270 	}
7271 
7272 	memset(&req, 0, sizeof(req));
7273 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7274 	hclge_prepare_mac_addr(&req, addr, true);
7275 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7276 	if (!status) {
7277 		/* This mac addr exists, remove this handle's VFID from it */
7278 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7279 		if (status)
7280 			return status;
7281 
7282 		if (hclge_is_all_function_id_zero(desc))
7283 			/* All the vfid is zero, so need to delete this entry */
7284 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7285 		else
7286 			/* Not all the vfid is zero, update the vfid */
7287 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7288 
7289 	} else {
7290 		/* Maybe this mac address is in mta table, but it cannot be
7291 		 * deleted here because an entry of mta represents an address
7292 		 * range rather than a specific address. The delete action on
7293 		 * all entries will take effect in update_mta_status, called by
7294 		 * hns3_nic_set_rx_mode.
7295 		 */
7296 		status = 0;
7297 	}
7298 
7299 	return status;
7300 }
7301 
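/* Record a MAC address configured through a VF in the vport's software
 * list (the PF itself, vport 0, is skipped), so the corresponding
 * hardware entries can be found and removed later.
 */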
7302 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7303 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
7304 {
7305 	struct hclge_vport_mac_addr_cfg *mac_cfg;
7306 	struct list_head *list;
7307 
7308 	if (!vport->vport_id)
7309 		return;
7310 
7311 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7312 	if (!mac_cfg)
7313 		return;
7314 
7315 	mac_cfg->hd_tbl_status = true;
7316 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7317 
7318 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7319 	       &vport->uc_mac_list : &vport->mc_mac_list;
7320 
7321 	list_add_tail(&mac_cfg->node, list);
7322 }
7323 
7324 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7325 			      bool is_write_tbl,
7326 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7327 {
7328 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7329 	struct list_head *list;
7330 	bool uc_flag, mc_flag;
7331 
7332 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7333 	       &vport->uc_mac_list : &vport->mc_mac_list;
7334 
7335 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7336 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7337 
7338 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7339 		if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
7340 			if (uc_flag && mac_cfg->hd_tbl_status)
7341 				hclge_rm_uc_addr_common(vport, mac_addr);
7342 
7343 			if (mc_flag && mac_cfg->hd_tbl_status)
7344 				hclge_rm_mc_addr_common(vport, mac_addr);
7345 
7346 			list_del(&mac_cfg->node);
7347 			kfree(mac_cfg);
7348 			break;
7349 		}
7350 	}
7351 }
7352 
7353 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7354 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7355 {
7356 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7357 	struct list_head *list;
7358 
7359 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7360 	       &vport->uc_mac_list : &vport->mc_mac_list;
7361 
7362 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7363 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7364 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7365 
7366 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7367 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7368 
7369 		mac_cfg->hd_tbl_status = false;
7370 		if (is_del_list) {
7371 			list_del(&mac_cfg->node);
7372 			kfree(mac_cfg);
7373 		}
7374 	}
7375 }
7376 
7377 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7378 {
7379 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7380 	struct hclge_vport *vport;
7381 	int i;
7382 
7383 	mutex_lock(&hdev->vport_cfg_mutex);
7384 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7385 		vport = &hdev->vport[i];
7386 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7387 			list_del(&mac->node);
7388 			kfree(mac);
7389 		}
7390 
7391 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7392 			list_del(&mac->node);
7393 			kfree(mac);
7394 		}
7395 	}
7396 	mutex_unlock(&hdev->vport_cfg_mutex);
7397 }
7398 
7399 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7400 					      u16 cmdq_resp, u8 resp_code)
7401 {
7402 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7403 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7404 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7405 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7406 
7407 	int return_status;
7408 
7409 	if (cmdq_resp) {
7410 		dev_err(&hdev->pdev->dev,
7411 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7412 			cmdq_resp);
7413 		return -EIO;
7414 	}
7415 
7416 	switch (resp_code) {
7417 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7418 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7419 		return_status = 0;
7420 		break;
7421 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7422 		dev_err(&hdev->pdev->dev,
7423 			"add mac ethertype failed for manager table overflow.\n");
7424 		return_status = -EIO;
7425 		break;
7426 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7427 		dev_err(&hdev->pdev->dev,
7428 			"add mac ethertype failed for key conflict.\n");
7429 		return_status = -EIO;
7430 		break;
7431 	default:
7432 		dev_err(&hdev->pdev->dev,
7433 			"add mac ethertype failed for undefined, code=%d.\n",
7434 			resp_code);
7435 		return_status = -EIO;
7436 	}
7437 
7438 	return return_status;
7439 }
7440 
7441 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7442 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7443 {
7444 	struct hclge_desc desc;
7445 	u8 resp_code;
7446 	u16 retval;
7447 	int ret;
7448 
7449 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7450 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7451 
7452 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7453 	if (ret) {
7454 		dev_err(&hdev->pdev->dev,
7455 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7456 			ret);
7457 		return ret;
7458 	}
7459 
7460 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7461 	retval = le16_to_cpu(desc.retval);
7462 
7463 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7464 }
7465 
7466 static int init_mgr_tbl(struct hclge_dev *hdev)
7467 {
7468 	int ret;
7469 	int i;
7470 
7471 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7472 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7473 		if (ret) {
7474 			dev_err(&hdev->pdev->dev,
7475 				"add mac ethertype failed, ret =%d.\n",
7476 				ret);
7477 			return ret;
7478 		}
7479 	}
7480 
7481 	return 0;
7482 }
7483 
7484 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7485 {
7486 	struct hclge_vport *vport = hclge_get_vport(handle);
7487 	struct hclge_dev *hdev = vport->back;
7488 
7489 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7490 }
7491 
7492 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7493 			      bool is_first)
7494 {
7495 	const unsigned char *new_addr = (const unsigned char *)p;
7496 	struct hclge_vport *vport = hclge_get_vport(handle);
7497 	struct hclge_dev *hdev = vport->back;
7498 	int ret;
7499 
7500 	/* mac addr check */
7501 	if (is_zero_ether_addr(new_addr) ||
7502 	    is_broadcast_ether_addr(new_addr) ||
7503 	    is_multicast_ether_addr(new_addr)) {
7504 		dev_err(&hdev->pdev->dev,
7505 			"Change uc mac err! invalid mac:%pM.\n",
7506 			 new_addr);
7507 		return -EINVAL;
7508 	}
7509 
7510 	if ((!is_first || is_kdump_kernel()) &&
7511 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7512 		dev_warn(&hdev->pdev->dev,
7513 			 "remove old uc mac address fail.\n");
7514 
7515 	ret = hclge_add_uc_addr(handle, new_addr);
7516 	if (ret) {
7517 		dev_err(&hdev->pdev->dev,
7518 			"add uc mac address fail, ret =%d.\n",
7519 			ret);
7520 
7521 		if (!is_first &&
7522 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7523 			dev_err(&hdev->pdev->dev,
7524 				"restore uc mac address fail.\n");
7525 
7526 		return -EIO;
7527 	}
7528 
7529 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7530 	if (ret) {
7531 		dev_err(&hdev->pdev->dev,
7532 			"configure mac pause address fail, ret =%d.\n",
7533 			ret);
7534 		return -EIO;
7535 	}
7536 
7537 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7538 
7539 	return 0;
7540 }
7541 
7542 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7543 			  int cmd)
7544 {
7545 	struct hclge_vport *vport = hclge_get_vport(handle);
7546 	struct hclge_dev *hdev = vport->back;
7547 
7548 	if (!hdev->hw.mac.phydev)
7549 		return -EOPNOTSUPP;
7550 
7551 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7552 }
7553 
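/* Enable or disable a VLAN filter stage. @fe_type is a bitmap of
 * filter-enable bits (NIC/RoCE, ingress/egress); when @filter_en is
 * false the bitmap written to hardware is simply cleared.
 */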
7554 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7555 				      u8 fe_type, bool filter_en, u8 vf_id)
7556 {
7557 	struct hclge_vlan_filter_ctrl_cmd *req;
7558 	struct hclge_desc desc;
7559 	int ret;
7560 
7561 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7562 
7563 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7564 	req->vlan_type = vlan_type;
7565 	req->vlan_fe = filter_en ? fe_type : 0;
7566 	req->vf_id = vf_id;
7567 
7568 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7569 	if (ret)
7570 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7571 			ret);
7572 
7573 	return ret;
7574 }
7575 
7576 #define HCLGE_FILTER_TYPE_VF		0
7577 #define HCLGE_FILTER_TYPE_PORT		1
7578 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7579 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7580 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7581 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7582 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7583 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7584 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7585 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7586 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7587 
7588 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7589 {
7590 	struct hclge_vport *vport = hclge_get_vport(handle);
7591 	struct hclge_dev *hdev = vport->back;
7592 
7593 	if (hdev->pdev->revision >= 0x21) {
7594 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7595 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7596 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7597 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7598 	} else {
7599 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7600 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7601 					   0);
7602 	}
7603 	if (enable)
7604 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7605 	else
7606 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7607 }
7608 
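/* Add or remove @vlan in the per-VF vlan filter. The command carries a
 * function bitmap split across two descriptors (16 bytes each); only the
 * bit for @vfid is set so the operation applies to that function alone.
 */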
7609 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7610 				    bool is_kill, u16 vlan,
7611 				    __be16 proto)
7612 {
7613 #define HCLGE_MAX_VF_BYTES  16
7614 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7615 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7616 	struct hclge_desc desc[2];
7617 	u8 vf_byte_val;
7618 	u8 vf_byte_off;
7619 	int ret;
7620 
7621 	/* if the vf vlan table is full, firmware closes the vf vlan filter, so
7622 	 * it is neither possible nor necessary to add a new vlan id to it
7623 	 */
7624 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7625 		return 0;
7626 
7627 	hclge_cmd_setup_basic_desc(&desc[0],
7628 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7629 	hclge_cmd_setup_basic_desc(&desc[1],
7630 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7631 
7632 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7633 
7634 	vf_byte_off = vfid / 8;
7635 	vf_byte_val = 1 << (vfid % 8);
7636 
7637 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7638 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7639 
7640 	req0->vlan_id  = cpu_to_le16(vlan);
7641 	req0->vlan_cfg = is_kill;
7642 
7643 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7644 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7645 	else
7646 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7647 
7648 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7649 	if (ret) {
7650 		dev_err(&hdev->pdev->dev,
7651 			"Send vf vlan command fail, ret =%d.\n",
7652 			ret);
7653 		return ret;
7654 	}
7655 
7656 	if (!is_kill) {
7657 #define HCLGE_VF_VLAN_NO_ENTRY	2
7658 		if (!req0->resp_code || req0->resp_code == 1)
7659 			return 0;
7660 
7661 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7662 			set_bit(vfid, hdev->vf_vlan_full);
7663 			dev_warn(&hdev->pdev->dev,
7664 				 "vf vlan table is full, vf vlan filter is disabled\n");
7665 			return 0;
7666 		}
7667 
7668 		dev_err(&hdev->pdev->dev,
7669 			"Add vf vlan filter fail, ret =%d.\n",
7670 			req0->resp_code);
7671 	} else {
7672 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7673 		if (!req0->resp_code)
7674 			return 0;
7675 
7676 		/* vf vlan filter is disabled when the vf vlan table is full,
7677 		 * so new vlan ids will not have been added to the vf vlan table.
7678 		 * Just return 0 without a warning to avoid massive verbose
7679 		 * logs when unloading.
7680 		 */
7681 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7682 			return 0;
7683 
7684 		dev_err(&hdev->pdev->dev,
7685 			"Kill vf vlan filter fail, ret =%d.\n",
7686 			req0->resp_code);
7687 	}
7688 
7689 	return -EIO;
7690 }
7691 
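/* Update the port level vlan filter table. The vlan id is addressed as a
 * block offset (vlan_id / 160) plus a byte/bit position inside that
 * 160-vlan block.
 */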
7692 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7693 				      u16 vlan_id, bool is_kill)
7694 {
7695 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7696 	struct hclge_desc desc;
7697 	u8 vlan_offset_byte_val;
7698 	u8 vlan_offset_byte;
7699 	u8 vlan_offset_160;
7700 	int ret;
7701 
7702 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7703 
7704 	vlan_offset_160 = vlan_id / 160;
7705 	vlan_offset_byte = (vlan_id % 160) / 8;
7706 	vlan_offset_byte_val = 1 << (vlan_id % 8);
7707 
7708 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7709 	req->vlan_offset = vlan_offset_160;
7710 	req->vlan_cfg = is_kill;
7711 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7712 
7713 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7714 	if (ret)
7715 		dev_err(&hdev->pdev->dev,
7716 			"port vlan command, send fail, ret =%d.\n", ret);
7717 	return ret;
7718 }
7719 
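/* Apply a vlan filter change for @vport_id. The per-VF filter is always
 * updated, while the shared port filter is only touched when the first
 * vport joins the vlan or the last one leaves it, as tracked by the
 * hdev->vlan_table bitmaps.
 */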
7720 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7721 				    u16 vport_id, u16 vlan_id,
7722 				    bool is_kill)
7723 {
7724 	u16 vport_idx, vport_num = 0;
7725 	int ret;
7726 
7727 	if (is_kill && !vlan_id)
7728 		return 0;
7729 
7730 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7731 				       proto);
7732 	if (ret) {
7733 		dev_err(&hdev->pdev->dev,
7734 			"Set %d vport vlan filter config fail, ret =%d.\n",
7735 			vport_id, ret);
7736 		return ret;
7737 	}
7738 
7739 	/* vlan 0 may be added twice when 8021q module is enabled */
7740 	if (!is_kill && !vlan_id &&
7741 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7742 		return 0;
7743 
7744 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7745 		dev_err(&hdev->pdev->dev,
7746 			"Add port vlan failed, vport %d is already in vlan %d\n",
7747 			vport_id, vlan_id);
7748 		return -EINVAL;
7749 	}
7750 
7751 	if (is_kill &&
7752 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7753 		dev_err(&hdev->pdev->dev,
7754 			"Delete port vlan failed, vport %d is not in vlan %d\n",
7755 			vport_id, vlan_id);
7756 		return -EINVAL;
7757 	}
7758 
7759 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7760 		vport_num++;
7761 
7762 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7763 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7764 						 is_kill);
7765 
7766 	return ret;
7767 }
7768 
7769 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7770 {
7771 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7772 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7773 	struct hclge_dev *hdev = vport->back;
7774 	struct hclge_desc desc;
7775 	u16 bmap_index;
7776 	int status;
7777 
7778 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7779 
7780 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7781 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7782 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7783 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7784 		      vcfg->accept_tag1 ? 1 : 0);
7785 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7786 		      vcfg->accept_untag1 ? 1 : 0);
7787 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7788 		      vcfg->accept_tag2 ? 1 : 0);
7789 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7790 		      vcfg->accept_untag2 ? 1 : 0);
7791 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7792 		      vcfg->insert_tag1_en ? 1 : 0);
7793 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7794 		      vcfg->insert_tag2_en ? 1 : 0);
7795 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7796 
7797 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7798 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7799 			HCLGE_VF_NUM_PER_BYTE;
7800 	req->vf_bitmap[bmap_index] =
7801 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7802 
7803 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7804 	if (status)
7805 		dev_err(&hdev->pdev->dev,
7806 			"Send port txvlan cfg command fail, ret =%d\n",
7807 			status);
7808 
7809 	return status;
7810 }
7811 
7812 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7813 {
7814 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7815 	struct hclge_vport_vtag_rx_cfg_cmd *req;
7816 	struct hclge_dev *hdev = vport->back;
7817 	struct hclge_desc desc;
7818 	u16 bmap_index;
7819 	int status;
7820 
7821 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7822 
7823 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7824 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7825 		      vcfg->strip_tag1_en ? 1 : 0);
7826 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7827 		      vcfg->strip_tag2_en ? 1 : 0);
7828 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7829 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
7830 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7831 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
7832 
7833 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7834 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7835 			HCLGE_VF_NUM_PER_BYTE;
7836 	req->vf_bitmap[bmap_index] =
7837 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7838 
7839 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7840 	if (status)
7841 		dev_err(&hdev->pdev->dev,
7842 			"Send port rxvlan cfg command fail, ret =%d\n",
7843 			status);
7844 
7845 	return status;
7846 }
7847 
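/* Program TX vlan tag insertion and RX tag stripping for the vport
 * according to the port based VLAN state: when it is disabled the stack's
 * tag1 is accepted as is, when it is enabled the hardware inserts
 * @vlan_tag as tag1 on transmit.
 */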
7848 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7849 				  u16 port_base_vlan_state,
7850 				  u16 vlan_tag)
7851 {
7852 	int ret;
7853 
7854 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7855 		vport->txvlan_cfg.accept_tag1 = true;
7856 		vport->txvlan_cfg.insert_tag1_en = false;
7857 		vport->txvlan_cfg.default_tag1 = 0;
7858 	} else {
7859 		vport->txvlan_cfg.accept_tag1 = false;
7860 		vport->txvlan_cfg.insert_tag1_en = true;
7861 		vport->txvlan_cfg.default_tag1 = vlan_tag;
7862 	}
7863 
7864 	vport->txvlan_cfg.accept_untag1 = true;
7865 
7866 	/* accept_tag2 and accept_untag2 are not supported on
7867 	 * pdev revision 0x20; newer revisions support them, but
7868 	 * these two fields cannot be configured by the user.
7869 	 */
7870 	vport->txvlan_cfg.accept_tag2 = true;
7871 	vport->txvlan_cfg.accept_untag2 = true;
7872 	vport->txvlan_cfg.insert_tag2_en = false;
7873 	vport->txvlan_cfg.default_tag2 = 0;
7874 
7875 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7876 		vport->rxvlan_cfg.strip_tag1_en = false;
7877 		vport->rxvlan_cfg.strip_tag2_en =
7878 				vport->rxvlan_cfg.rx_vlan_offload_en;
7879 	} else {
7880 		vport->rxvlan_cfg.strip_tag1_en =
7881 				vport->rxvlan_cfg.rx_vlan_offload_en;
7882 		vport->rxvlan_cfg.strip_tag2_en = true;
7883 	}
7884 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7885 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7886 
7887 	ret = hclge_set_vlan_tx_offload_cfg(vport);
7888 	if (ret)
7889 		return ret;
7890 
7891 	return hclge_set_vlan_rx_offload_cfg(vport);
7892 }
7893 
7894 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7895 {
7896 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7897 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7898 	struct hclge_desc desc;
7899 	int status;
7900 
7901 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7902 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7903 	rx_req->ot_fst_vlan_type =
7904 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7905 	rx_req->ot_sec_vlan_type =
7906 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7907 	rx_req->in_fst_vlan_type =
7908 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7909 	rx_req->in_sec_vlan_type =
7910 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7911 
7912 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7913 	if (status) {
7914 		dev_err(&hdev->pdev->dev,
7915 			"Send rxvlan protocol type command fail, ret =%d\n",
7916 			status);
7917 		return status;
7918 	}
7919 
7920 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7921 
7922 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7923 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7924 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7925 
7926 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7927 	if (status)
7928 		dev_err(&hdev->pdev->dev,
7929 			"Send txvlan protocol type command fail, ret =%d\n",
7930 			status);
7931 
7932 	return status;
7933 }
7934 
7935 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7936 {
7937 #define HCLGE_DEF_VLAN_TYPE		0x8100
7938 
7939 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7940 	struct hclge_vport *vport;
7941 	int ret;
7942 	int i;
7943 
7944 	if (hdev->pdev->revision >= 0x21) {
7945 		/* for revision 0x21, vf vlan filter is per function */
7946 		for (i = 0; i < hdev->num_alloc_vport; i++) {
7947 			vport = &hdev->vport[i];
7948 			ret = hclge_set_vlan_filter_ctrl(hdev,
7949 							 HCLGE_FILTER_TYPE_VF,
7950 							 HCLGE_FILTER_FE_EGRESS,
7951 							 true,
7952 							 vport->vport_id);
7953 			if (ret)
7954 				return ret;
7955 		}
7956 
7957 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7958 						 HCLGE_FILTER_FE_INGRESS, true,
7959 						 0);
7960 		if (ret)
7961 			return ret;
7962 	} else {
7963 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7964 						 HCLGE_FILTER_FE_EGRESS_V1_B,
7965 						 true, 0);
7966 		if (ret)
7967 			return ret;
7968 	}
7969 
7970 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
7971 
7972 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7973 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7974 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7975 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7976 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7977 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7978 
7979 	ret = hclge_set_vlan_protocol_type(hdev);
7980 	if (ret)
7981 		return ret;
7982 
7983 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7984 		u16 vlan_tag;
7985 
7986 		vport = &hdev->vport[i];
7987 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7988 
7989 		ret = hclge_vlan_offload_cfg(vport,
7990 					     vport->port_base_vlan_cfg.state,
7991 					     vlan_tag);
7992 		if (ret)
7993 			return ret;
7994 	}
7995 
7996 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7997 }
7998 
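/* Add a vlan id to the vport's software vlan list. hd_tbl_status records
 * whether the id has actually been written to the hardware filter, which
 * is deferred while a port based VLAN is in effect.
 */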
7999 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8000 				       bool writen_to_tbl)
8001 {
8002 	struct hclge_vport_vlan_cfg *vlan;
8003 
8004 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8005 	if (!vlan)
8006 		return;
8007 
8008 	vlan->hd_tbl_status = writen_to_tbl;
8009 	vlan->vlan_id = vlan_id;
8010 
8011 	list_add_tail(&vlan->node, &vport->vlan_list);
8012 }
8013 
8014 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8015 {
8016 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8017 	struct hclge_dev *hdev = vport->back;
8018 	int ret;
8019 
8020 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8021 		if (!vlan->hd_tbl_status) {
8022 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8023 						       vport->vport_id,
8024 						       vlan->vlan_id, false);
8025 			if (ret) {
8026 				dev_err(&hdev->pdev->dev,
8027 					"restore vport vlan list failed, ret=%d\n",
8028 					ret);
8029 				return ret;
8030 			}
8031 		}
8032 		vlan->hd_tbl_status = true;
8033 	}
8034 
8035 	return 0;
8036 }
8037 
8038 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8039 				      bool is_write_tbl)
8040 {
8041 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8042 	struct hclge_dev *hdev = vport->back;
8043 
8044 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8045 		if (vlan->vlan_id == vlan_id) {
8046 			if (is_write_tbl && vlan->hd_tbl_status)
8047 				hclge_set_vlan_filter_hw(hdev,
8048 							 htons(ETH_P_8021Q),
8049 							 vport->vport_id,
8050 							 vlan_id,
8051 							 true);
8052 
8053 			list_del(&vlan->node);
8054 			kfree(vlan);
8055 			break;
8056 		}
8057 	}
8058 }
8059 
8060 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8061 {
8062 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8063 	struct hclge_dev *hdev = vport->back;
8064 
8065 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8066 		if (vlan->hd_tbl_status)
8067 			hclge_set_vlan_filter_hw(hdev,
8068 						 htons(ETH_P_8021Q),
8069 						 vport->vport_id,
8070 						 vlan->vlan_id,
8071 						 true);
8072 
8073 		vlan->hd_tbl_status = false;
8074 		if (is_del_list) {
8075 			list_del(&vlan->node);
8076 			kfree(vlan);
8077 		}
8078 	}
8079 }
8080 
8081 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8082 {
8083 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8084 	struct hclge_vport *vport;
8085 	int i;
8086 
8087 	mutex_lock(&hdev->vport_cfg_mutex);
8088 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8089 		vport = &hdev->vport[i];
8090 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8091 			list_del(&vlan->node);
8092 			kfree(vlan);
8093 		}
8094 	}
8095 	mutex_unlock(&hdev->vport_cfg_mutex);
8096 }
8097 
8098 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8099 {
8100 	struct hclge_vport *vport = hclge_get_vport(handle);
8101 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8102 	struct hclge_dev *hdev = vport->back;
8103 	u16 vlan_proto;
8104 	u16 state, vlan_id;
8105 	int i;
8106 
8107 	mutex_lock(&hdev->vport_cfg_mutex);
8108 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8109 		vport = &hdev->vport[i];
8110 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8111 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8112 		state = vport->port_base_vlan_cfg.state;
8113 
8114 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8115 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8116 						 vport->vport_id, vlan_id,
8117 						 false);
8118 			continue;
8119 		}
8120 
8121 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8122 			if (vlan->hd_tbl_status)
8123 				hclge_set_vlan_filter_hw(hdev,
8124 							 htons(ETH_P_8021Q),
8125 							 vport->vport_id,
8126 							 vlan->vlan_id,
8127 							 false);
8128 		}
8129 	}
8130 
8131 	mutex_unlock(&hdev->vport_cfg_mutex);
8132 }
8133 
8134 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8135 {
8136 	struct hclge_vport *vport = hclge_get_vport(handle);
8137 
8138 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8139 		vport->rxvlan_cfg.strip_tag1_en = false;
8140 		vport->rxvlan_cfg.strip_tag2_en = enable;
8141 	} else {
8142 		vport->rxvlan_cfg.strip_tag1_en = enable;
8143 		vport->rxvlan_cfg.strip_tag2_en = true;
8144 	}
8145 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8146 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8147 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8148 
8149 	return hclge_set_vlan_rx_offload_cfg(vport);
8150 }
8151 
8152 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8153 					    u16 port_base_vlan_state,
8154 					    struct hclge_vlan_info *new_info,
8155 					    struct hclge_vlan_info *old_info)
8156 {
8157 	struct hclge_dev *hdev = vport->back;
8158 	int ret;
8159 
8160 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8161 		hclge_rm_vport_all_vlan_table(vport, false);
8162 		return hclge_set_vlan_filter_hw(hdev,
8163 						 htons(new_info->vlan_proto),
8164 						 vport->vport_id,
8165 						 new_info->vlan_tag,
8166 						 false);
8167 	}
8168 
8169 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8170 				       vport->vport_id, old_info->vlan_tag,
8171 				       true);
8172 	if (ret)
8173 		return ret;
8174 
8175 	return hclge_add_vport_all_vlan_table(vport);
8176 }
8177 
8178 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8179 				    struct hclge_vlan_info *vlan_info)
8180 {
8181 	struct hnae3_handle *nic = &vport->nic;
8182 	struct hclge_vlan_info *old_vlan_info;
8183 	struct hclge_dev *hdev = vport->back;
8184 	int ret;
8185 
8186 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8187 
8188 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8189 	if (ret)
8190 		return ret;
8191 
8192 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8193 		/* add new VLAN tag */
8194 		ret = hclge_set_vlan_filter_hw(hdev,
8195 					       htons(vlan_info->vlan_proto),
8196 					       vport->vport_id,
8197 					       vlan_info->vlan_tag,
8198 					       false);
8199 		if (ret)
8200 			return ret;
8201 
8202 		/* remove old VLAN tag */
8203 		ret = hclge_set_vlan_filter_hw(hdev,
8204 					       htons(old_vlan_info->vlan_proto),
8205 					       vport->vport_id,
8206 					       old_vlan_info->vlan_tag,
8207 					       true);
8208 		if (ret)
8209 			return ret;
8210 
8211 		goto update;
8212 	}
8213 
8214 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8215 					       old_vlan_info);
8216 	if (ret)
8217 		return ret;
8218 
8219 	/* update state only when disable/enable port based VLAN */
8220 	vport->port_base_vlan_cfg.state = state;
8221 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8222 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8223 	else
8224 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8225 
8226 update:
8227 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8228 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8229 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8230 
8231 	return 0;
8232 }
8233 
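/* Compare the requested vlan with the current port based VLAN state and
 * decide whether the configuration must be enabled, disabled, modified
 * or left unchanged.
 */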
8234 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8235 					  enum hnae3_port_base_vlan_state state,
8236 					  u16 vlan)
8237 {
8238 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8239 		if (!vlan)
8240 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8241 		else
8242 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8243 	} else {
8244 		if (!vlan)
8245 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8246 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8247 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8248 		else
8249 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8250 	}
8251 }
8252 
8253 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8254 				    u16 vlan, u8 qos, __be16 proto)
8255 {
8256 	struct hclge_vport *vport = hclge_get_vport(handle);
8257 	struct hclge_dev *hdev = vport->back;
8258 	struct hclge_vlan_info vlan_info;
8259 	u16 state;
8260 	int ret;
8261 
8262 	if (hdev->pdev->revision == 0x20)
8263 		return -EOPNOTSUPP;
8264 
8265 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8266 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
8267 		return -EINVAL;
8268 	if (proto != htons(ETH_P_8021Q))
8269 		return -EPROTONOSUPPORT;
8270 
8271 	vport = &hdev->vport[vfid];
8272 	state = hclge_get_port_base_vlan_state(vport,
8273 					       vport->port_base_vlan_cfg.state,
8274 					       vlan);
8275 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8276 		return 0;
8277 
8278 	vlan_info.vlan_tag = vlan;
8279 	vlan_info.qos = qos;
8280 	vlan_info.vlan_proto = ntohs(proto);
8281 
8282 	/* update port based VLAN for PF */
8283 	if (!vfid) {
8284 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8285 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8286 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8287 
8288 		return ret;
8289 	}
8290 
8291 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8292 		return hclge_update_port_base_vlan_cfg(vport, state,
8293 						       &vlan_info);
8294 	} else {
8295 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8296 							(u8)vfid, state,
8297 							vlan, qos,
8298 							ntohs(proto));
8299 		return ret;
8300 	}
8301 }
8302 
8303 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8304 			  u16 vlan_id, bool is_kill)
8305 {
8306 	struct hclge_vport *vport = hclge_get_vport(handle);
8307 	struct hclge_dev *hdev = vport->back;
8308 	bool writen_to_tbl = false;
8309 	int ret = 0;
8310 
8311 	/* When the device is resetting, firmware is unable to handle the
8312 	 * mailbox. Just record the vlan id, and remove it after the
8313 	 * reset has finished.
8314 	 */
8315 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8316 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8317 		return -EBUSY;
8318 	}
8319 
8320 	/* when port based vlan is enabled, we use the port based vlan as the
8321 	 * vlan filter entry. In this case, we don't update the vlan filter
8322 	 * table when the user adds or removes a vlan, we just update the
8323 	 * vport vlan list. The vlan ids in that list are not written to the
8324 	 * vlan filter table until port based vlan is disabled
8325 	 */
8326 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8327 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8328 					       vlan_id, is_kill);
8329 		writen_to_tbl = true;
8330 	}
8331 
8332 	if (!ret) {
8333 		if (is_kill)
8334 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8335 		else
8336 			hclge_add_vport_vlan_table(vport, vlan_id,
8337 						   writen_to_tbl);
8338 	} else if (is_kill) {
8339 		/* when removing the hw vlan filter fails, record the vlan id
8340 		 * and try to remove it from hw later, to stay consistent
8341 		 * with the stack
8342 		 */
8343 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8344 	}
8345 	return ret;
8346 }
8347 
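/* Retry the vlan deletions that could not be handled earlier (recorded in
 * each vport's vlan_del_fail_bmap, e.g. while a reset was in progress),
 * bounded by HCLGE_MAX_SYNC_COUNT removals per invocation.
 */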
8348 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8349 {
8350 #define HCLGE_MAX_SYNC_COUNT	60
8351 
8352 	int i, ret, sync_cnt = 0;
8353 	u16 vlan_id;
8354 
8355 	/* start from vport 1 for PF is always alive */
8356 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8357 		struct hclge_vport *vport = &hdev->vport[i];
8358 
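		/* retry the VLAN removals recorded in vlan_del_fail_bmap
		 * (e.g. deferred during reset), at most HCLGE_MAX_SYNC_COUNT
		 * entries per call
		 */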
8359 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8360 					 VLAN_N_VID);
8361 		while (vlan_id != VLAN_N_VID) {
8362 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8363 						       vport->vport_id, vlan_id,
8364 						       true);
8365 			if (ret && ret != -EINVAL)
8366 				return;
8367 
8368 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8369 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8370 
8371 			sync_cnt++;
8372 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8373 				return;
8374 
8375 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8376 						 VLAN_N_VID);
8377 		}
8378 	}
8379 }
8380 
8381 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8382 {
8383 	struct hclge_config_max_frm_size_cmd *req;
8384 	struct hclge_desc desc;
8385 
8386 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8387 
8388 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8389 	req->max_frm_size = cpu_to_le16(new_mps);
8390 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8391 
8392 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8393 }
8394 
8395 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8396 {
8397 	struct hclge_vport *vport = hclge_get_vport(handle);
8398 
8399 	return hclge_set_vport_mtu(vport, new_mtu);
8400 }
8401 
8402 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8403 {
8404 	struct hclge_dev *hdev = vport->back;
8405 	int i, max_frm_size, ret;
8406 
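	/* frame size = MTU + Ethernet header + FCS + two VLAN tags */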
8407 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8408 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8409 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8410 		return -EINVAL;
8411 
8412 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8413 	mutex_lock(&hdev->vport_lock);
8414 	/* VF's mps must fit within hdev->mps */
8415 	if (vport->vport_id && max_frm_size > hdev->mps) {
8416 		mutex_unlock(&hdev->vport_lock);
8417 		return -EINVAL;
8418 	} else if (vport->vport_id) {
8419 		vport->mps = max_frm_size;
8420 		mutex_unlock(&hdev->vport_lock);
8421 		return 0;
8422 	}
8423 
8424 	/* PF's mps must be greater than VF's mps */
8425 	for (i = 1; i < hdev->num_alloc_vport; i++)
8426 		if (max_frm_size < hdev->vport[i].mps) {
8427 			mutex_unlock(&hdev->vport_lock);
8428 			return -EINVAL;
8429 		}
8430 
8431 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8432 
8433 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8434 	if (ret) {
8435 		dev_err(&hdev->pdev->dev,
8436 			"Change mtu fail, ret =%d\n", ret);
8437 		goto out;
8438 	}
8439 
8440 	hdev->mps = max_frm_size;
8441 	vport->mps = max_frm_size;
8442 
8443 	ret = hclge_buffer_alloc(hdev);
8444 	if (ret)
8445 		dev_err(&hdev->pdev->dev,
8446 			"Allocate buffer fail, ret =%d\n", ret);
8447 
8448 out:
8449 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8450 	mutex_unlock(&hdev->vport_lock);
8451 	return ret;
8452 }
8453 
8454 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8455 				    bool enable)
8456 {
8457 	struct hclge_reset_tqp_queue_cmd *req;
8458 	struct hclge_desc desc;
8459 	int ret;
8460 
8461 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8462 
8463 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8464 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8465 	if (enable)
8466 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8467 
8468 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8469 	if (ret) {
8470 		dev_err(&hdev->pdev->dev,
8471 			"Send tqp reset cmd error, status =%d\n", ret);
8472 		return ret;
8473 	}
8474 
8475 	return 0;
8476 }
8477 
8478 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8479 {
8480 	struct hclge_reset_tqp_queue_cmd *req;
8481 	struct hclge_desc desc;
8482 	int ret;
8483 
8484 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8485 
8486 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8487 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8488 
8489 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8490 	if (ret) {
8491 		dev_err(&hdev->pdev->dev,
8492 			"Get reset status error, status =%d\n", ret);
8493 		return ret;
8494 	}
8495 
8496 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8497 }
8498 
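/* map a queue id local to the handle to the global TQP index used by hw */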
8499 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8500 {
8501 	struct hnae3_queue *queue;
8502 	struct hclge_tqp *tqp;
8503 
8504 	queue = handle->kinfo.tqp[queue_id];
8505 	tqp = container_of(queue, struct hclge_tqp, q);
8506 
8507 	return tqp->index;
8508 }
8509 
8510 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8511 {
8512 	struct hclge_vport *vport = hclge_get_vport(handle);
8513 	struct hclge_dev *hdev = vport->back;
8514 	int reset_try_times = 0;
8515 	int reset_status;
8516 	u16 queue_gid;
8517 	int ret;
8518 
8519 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8520 
8521 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8522 	if (ret) {
8523 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8524 		return ret;
8525 	}
8526 
8527 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8528 	if (ret) {
8529 		dev_err(&hdev->pdev->dev,
8530 			"Send reset tqp cmd fail, ret = %d\n", ret);
8531 		return ret;
8532 	}
8533 
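	/* poll the queue reset status until the hw reports it is done,
	 * giving up after HCLGE_TQP_RESET_TRY_TIMES tries
	 */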
8534 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8535 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8536 		if (reset_status)
8537 			break;
8538 
8539 		/* Wait for tqp hw reset */
8540 		usleep_range(1000, 1200);
8541 	}
8542 
8543 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8544 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8545 		return ret;
8546 	}
8547 
8548 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8549 	if (ret)
8550 		dev_err(&hdev->pdev->dev,
8551 			"Deassert the soft reset fail, ret = %d\n", ret);
8552 
8553 	return ret;
8554 }
8555 
8556 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8557 {
8558 	struct hclge_dev *hdev = vport->back;
8559 	int reset_try_times = 0;
8560 	int reset_status;
8561 	u16 queue_gid;
8562 	int ret;
8563 
8564 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8565 
8566 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8567 	if (ret) {
8568 		dev_warn(&hdev->pdev->dev,
8569 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8570 		return;
8571 	}
8572 
8573 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8574 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8575 		if (reset_status)
8576 			break;
8577 
8578 		/* Wait for tqp hw reset */
8579 		usleep_range(1000, 1200);
8580 	}
8581 
8582 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8583 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8584 		return;
8585 	}
8586 
8587 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8588 	if (ret)
8589 		dev_warn(&hdev->pdev->dev,
8590 			 "Deassert the soft reset fail, ret = %d\n", ret);
8591 }
8592 
8593 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8594 {
8595 	struct hclge_vport *vport = hclge_get_vport(handle);
8596 	struct hclge_dev *hdev = vport->back;
8597 
8598 	return hdev->fw_version;
8599 }
8600 
8601 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8602 {
8603 	struct phy_device *phydev = hdev->hw.mac.phydev;
8604 
8605 	if (!phydev)
8606 		return;
8607 
8608 	phy_set_asym_pause(phydev, rx_en, tx_en);
8609 }
8610 
8611 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8612 {
8613 	int ret;
8614 
8615 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8616 		return 0;
8617 
8618 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8619 	if (ret)
8620 		dev_err(&hdev->pdev->dev,
8621 			"configure pauseparam error, ret = %d.\n", ret);
8622 
8623 	return ret;
8624 }
8625 
8626 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8627 {
8628 	struct phy_device *phydev = hdev->hw.mac.phydev;
8629 	u16 remote_advertising = 0;
8630 	u16 local_advertising;
8631 	u32 rx_pause, tx_pause;
8632 	u8 flowctl;
8633 
8634 	if (!phydev->link || !phydev->autoneg)
8635 		return 0;
8636 
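	/* resolve the pause mode from the local and link partner
	 * advertisements, as in standard autoneg flow control resolution
	 */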
8637 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8638 
8639 	if (phydev->pause)
8640 		remote_advertising = LPA_PAUSE_CAP;
8641 
8642 	if (phydev->asym_pause)
8643 		remote_advertising |= LPA_PAUSE_ASYM;
8644 
8645 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8646 					   remote_advertising);
8647 	tx_pause = flowctl & FLOW_CTRL_TX;
8648 	rx_pause = flowctl & FLOW_CTRL_RX;
8649 
8650 	if (phydev->duplex == HCLGE_MAC_HALF) {
8651 		tx_pause = 0;
8652 		rx_pause = 0;
8653 	}
8654 
8655 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8656 }
8657 
8658 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8659 				 u32 *rx_en, u32 *tx_en)
8660 {
8661 	struct hclge_vport *vport = hclge_get_vport(handle);
8662 	struct hclge_dev *hdev = vport->back;
8663 	struct phy_device *phydev = hdev->hw.mac.phydev;
8664 
8665 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8666 
8667 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8668 		*rx_en = 0;
8669 		*tx_en = 0;
8670 		return;
8671 	}
8672 
8673 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8674 		*rx_en = 1;
8675 		*tx_en = 0;
8676 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8677 		*tx_en = 1;
8678 		*rx_en = 0;
8679 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8680 		*rx_en = 1;
8681 		*tx_en = 1;
8682 	} else {
8683 		*rx_en = 0;
8684 		*tx_en = 0;
8685 	}
8686 }
8687 
8688 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8689 					 u32 rx_en, u32 tx_en)
8690 {
8691 	if (rx_en && tx_en)
8692 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8693 	else if (rx_en && !tx_en)
8694 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8695 	else if (!rx_en && tx_en)
8696 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8697 	else
8698 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8699 
8700 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8701 }
8702 
8703 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8704 				u32 rx_en, u32 tx_en)
8705 {
8706 	struct hclge_vport *vport = hclge_get_vport(handle);
8707 	struct hclge_dev *hdev = vport->back;
8708 	struct phy_device *phydev = hdev->hw.mac.phydev;
8709 	u32 fc_autoneg;
8710 
8711 	if (phydev) {
8712 		fc_autoneg = hclge_get_autoneg(handle);
8713 		if (auto_neg != fc_autoneg) {
8714 			dev_info(&hdev->pdev->dev,
8715 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8716 			return -EOPNOTSUPP;
8717 		}
8718 	}
8719 
8720 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8721 		dev_info(&hdev->pdev->dev,
8722 			 "Priority flow control enabled. Cannot set link flow control.\n");
8723 		return -EOPNOTSUPP;
8724 	}
8725 
8726 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8727 
8728 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8729 
8730 	if (!auto_neg)
8731 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8732 
8733 	if (phydev)
8734 		return phy_start_aneg(phydev);
8735 
8736 	return -EOPNOTSUPP;
8737 }
8738 
8739 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8740 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8741 {
8742 	struct hclge_vport *vport = hclge_get_vport(handle);
8743 	struct hclge_dev *hdev = vport->back;
8744 
8745 	if (speed)
8746 		*speed = hdev->hw.mac.speed;
8747 	if (duplex)
8748 		*duplex = hdev->hw.mac.duplex;
8749 	if (auto_neg)
8750 		*auto_neg = hdev->hw.mac.autoneg;
8751 }
8752 
8753 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8754 				 u8 *module_type)
8755 {
8756 	struct hclge_vport *vport = hclge_get_vport(handle);
8757 	struct hclge_dev *hdev = vport->back;
8758 
8759 	if (media_type)
8760 		*media_type = hdev->hw.mac.media_type;
8761 
8762 	if (module_type)
8763 		*module_type = hdev->hw.mac.module_type;
8764 }
8765 
8766 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8767 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8768 {
8769 	struct hclge_vport *vport = hclge_get_vport(handle);
8770 	struct hclge_dev *hdev = vport->back;
8771 	struct phy_device *phydev = hdev->hw.mac.phydev;
8772 	int mdix_ctrl, mdix, is_resolved;
8773 	unsigned int retval;
8774 
8775 	if (!phydev) {
8776 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8777 		*tp_mdix = ETH_TP_MDI_INVALID;
8778 		return;
8779 	}
8780 
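	/* select the MDI/MDI-X page, read the control and status registers,
	 * then switch the PHY back to the copper page
	 */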
8781 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8782 
8783 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8784 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8785 				    HCLGE_PHY_MDIX_CTRL_S);
8786 
8787 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8788 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8789 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8790 
8791 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8792 
8793 	switch (mdix_ctrl) {
8794 	case 0x0:
8795 		*tp_mdix_ctrl = ETH_TP_MDI;
8796 		break;
8797 	case 0x1:
8798 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8799 		break;
8800 	case 0x3:
8801 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8802 		break;
8803 	default:
8804 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8805 		break;
8806 	}
8807 
8808 	if (!is_resolved)
8809 		*tp_mdix = ETH_TP_MDI_INVALID;
8810 	else if (mdix)
8811 		*tp_mdix = ETH_TP_MDI_X;
8812 	else
8813 		*tp_mdix = ETH_TP_MDI;
8814 }
8815 
8816 static void hclge_info_show(struct hclge_dev *hdev)
8817 {
8818 	struct device *dev = &hdev->pdev->dev;
8819 
8820 	dev_info(dev, "PF info begin:\n");
8821 
8822 	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8823 	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8824 	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8825 	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8826 	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8827 	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8828 	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8829 	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8830 	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8831 	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8832 	dev_info(dev, "This is %s PF\n",
8833 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8834 	dev_info(dev, "DCB %s\n",
8835 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8836 	dev_info(dev, "MQPRIO %s\n",
8837 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8838 
8839 	dev_info(dev, "PF info end.\n");
8840 }
8841 
8842 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8843 					  struct hclge_vport *vport)
8844 {
8845 	struct hnae3_client *client = vport->nic.client;
8846 	struct hclge_dev *hdev = ae_dev->priv;
8847 	int rst_cnt;
8848 	int ret;
8849 
8850 	rst_cnt = hdev->rst_stats.reset_cnt;
8851 	ret = client->ops->init_instance(&vport->nic);
8852 	if (ret)
8853 		return ret;
8854 
8855 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
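	/* if a reset was triggered while the client was initializing, roll
	 * back the registration and return -EBUSY
	 */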
8856 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8857 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8858 		ret = -EBUSY;
8859 		goto init_nic_err;
8860 	}
8861 
8862 	/* Enable nic hw error interrupts */
8863 	ret = hclge_config_nic_hw_error(hdev, true);
8864 	if (ret) {
8865 		dev_err(&ae_dev->pdev->dev,
8866 			"fail(%d) to enable hw error interrupts\n", ret);
8867 		goto init_nic_err;
8868 	}
8869 
8870 	hnae3_set_client_init_flag(client, ae_dev, 1);
8871 
8872 	if (netif_msg_drv(&hdev->vport->nic))
8873 		hclge_info_show(hdev);
8874 
8875 	return ret;
8876 
8877 init_nic_err:
8878 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8879 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8880 		msleep(HCLGE_WAIT_RESET_DONE);
8881 
8882 	client->ops->uninit_instance(&vport->nic, 0);
8883 
8884 	return ret;
8885 }
8886 
8887 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8888 					   struct hclge_vport *vport)
8889 {
8890 	struct hnae3_client *client = vport->roce.client;
8891 	struct hclge_dev *hdev = ae_dev->priv;
8892 	int rst_cnt;
8893 	int ret;
8894 
8895 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8896 	    !hdev->nic_client)
8897 		return 0;
8898 
8899 	client = hdev->roce_client;
8900 	ret = hclge_init_roce_base_info(vport);
8901 	if (ret)
8902 		return ret;
8903 
8904 	rst_cnt = hdev->rst_stats.reset_cnt;
8905 	ret = client->ops->init_instance(&vport->roce);
8906 	if (ret)
8907 		return ret;
8908 
8909 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8910 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8911 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8912 		ret = -EBUSY;
8913 		goto init_roce_err;
8914 	}
8915 
8916 	/* Enable roce ras interrupts */
8917 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
8918 	if (ret) {
8919 		dev_err(&ae_dev->pdev->dev,
8920 			"fail(%d) to enable roce ras interrupts\n", ret);
8921 		goto init_roce_err;
8922 	}
8923 
8924 	hnae3_set_client_init_flag(client, ae_dev, 1);
8925 
8926 	return 0;
8927 
8928 init_roce_err:
8929 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8930 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8931 		msleep(HCLGE_WAIT_RESET_DONE);
8932 
8933 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8934 
8935 	return ret;
8936 }
8937 
8938 static int hclge_init_client_instance(struct hnae3_client *client,
8939 				      struct hnae3_ae_dev *ae_dev)
8940 {
8941 	struct hclge_dev *hdev = ae_dev->priv;
8942 	struct hclge_vport *vport;
8943 	int i, ret;
8944 
8945 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8946 		vport = &hdev->vport[i];
8947 
8948 		switch (client->type) {
8949 		case HNAE3_CLIENT_KNIC:
8950 
8951 			hdev->nic_client = client;
8952 			vport->nic.client = client;
8953 			ret = hclge_init_nic_client_instance(ae_dev, vport);
8954 			if (ret)
8955 				goto clear_nic;
8956 
8957 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8958 			if (ret)
8959 				goto clear_roce;
8960 
8961 			break;
8962 		case HNAE3_CLIENT_ROCE:
8963 			if (hnae3_dev_roce_supported(hdev)) {
8964 				hdev->roce_client = client;
8965 				vport->roce.client = client;
8966 			}
8967 
8968 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8969 			if (ret)
8970 				goto clear_roce;
8971 
8972 			break;
8973 		default:
8974 			return -EINVAL;
8975 		}
8976 	}
8977 
8978 	return 0;
8979 
8980 clear_nic:
8981 	hdev->nic_client = NULL;
8982 	vport->nic.client = NULL;
8983 	return ret;
8984 clear_roce:
8985 	hdev->roce_client = NULL;
8986 	vport->roce.client = NULL;
8987 	return ret;
8988 }
8989 
8990 static void hclge_uninit_client_instance(struct hnae3_client *client,
8991 					 struct hnae3_ae_dev *ae_dev)
8992 {
8993 	struct hclge_dev *hdev = ae_dev->priv;
8994 	struct hclge_vport *vport;
8995 	int i;
8996 
8997 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8998 		vport = &hdev->vport[i];
8999 		if (hdev->roce_client) {
9000 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9001 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9002 				msleep(HCLGE_WAIT_RESET_DONE);
9003 
9004 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9005 								0);
9006 			hdev->roce_client = NULL;
9007 			vport->roce.client = NULL;
9008 		}
9009 		if (client->type == HNAE3_CLIENT_ROCE)
9010 			return;
9011 		if (hdev->nic_client && client->ops->uninit_instance) {
9012 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9013 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9014 				msleep(HCLGE_WAIT_RESET_DONE);
9015 
9016 			client->ops->uninit_instance(&vport->nic, 0);
9017 			hdev->nic_client = NULL;
9018 			vport->nic.client = NULL;
9019 		}
9020 	}
9021 }
9022 
9023 static int hclge_pci_init(struct hclge_dev *hdev)
9024 {
9025 	struct pci_dev *pdev = hdev->pdev;
9026 	struct hclge_hw *hw;
9027 	int ret;
9028 
9029 	ret = pci_enable_device(pdev);
9030 	if (ret) {
9031 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9032 		return ret;
9033 	}
9034 
9035 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9036 	if (ret) {
9037 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9038 		if (ret) {
9039 			dev_err(&pdev->dev,
9040 				"can't set consistent PCI DMA");
9041 			goto err_disable_device;
9042 		}
9043 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9044 	}
9045 
9046 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9047 	if (ret) {
9048 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9049 		goto err_disable_device;
9050 	}
9051 
9052 	pci_set_master(pdev);
9053 	hw = &hdev->hw;
9054 	hw->io_base = pcim_iomap(pdev, 2, 0);
9055 	if (!hw->io_base) {
9056 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9057 		ret = -ENOMEM;
9058 		goto err_clr_master;
9059 	}
9060 
9061 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9062 
9063 	return 0;
9064 err_clr_master:
9065 	pci_clear_master(pdev);
9066 	pci_release_regions(pdev);
9067 err_disable_device:
9068 	pci_disable_device(pdev);
9069 
9070 	return ret;
9071 }
9072 
9073 static void hclge_pci_uninit(struct hclge_dev *hdev)
9074 {
9075 	struct pci_dev *pdev = hdev->pdev;
9076 
9077 	pcim_iounmap(pdev, hdev->hw.io_base);
9078 	pci_free_irq_vectors(pdev);
9079 	pci_clear_master(pdev);
9080 	pci_release_mem_regions(pdev);
9081 	pci_disable_device(pdev);
9082 }
9083 
9084 static void hclge_state_init(struct hclge_dev *hdev)
9085 {
9086 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9087 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9088 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9089 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9090 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9091 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9092 }
9093 
9094 static void hclge_state_uninit(struct hclge_dev *hdev)
9095 {
9096 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9097 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9098 
9099 	if (hdev->reset_timer.function)
9100 		del_timer_sync(&hdev->reset_timer);
9101 	if (hdev->service_task.work.func)
9102 		cancel_delayed_work_sync(&hdev->service_task);
9103 	if (hdev->rst_service_task.func)
9104 		cancel_work_sync(&hdev->rst_service_task);
9105 	if (hdev->mbx_service_task.func)
9106 		cancel_work_sync(&hdev->mbx_service_task);
9107 }
9108 
9109 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9110 {
9111 #define HCLGE_FLR_WAIT_MS	100
9112 #define HCLGE_FLR_WAIT_CNT	50
9113 	struct hclge_dev *hdev = ae_dev->priv;
9114 	int cnt = 0;
9115 
9116 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9117 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9118 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9119 	hclge_reset_event(hdev->pdev, NULL);
9120 
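	/* wait up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds for
	 * the reset task to bring the function down
	 */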
9121 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9122 	       cnt++ < HCLGE_FLR_WAIT_CNT)
9123 		msleep(HCLGE_FLR_WAIT_MS);
9124 
9125 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9126 		dev_err(&hdev->pdev->dev,
9127 			"flr wait down timeout: %d\n", cnt);
9128 }
9129 
9130 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9131 {
9132 	struct hclge_dev *hdev = ae_dev->priv;
9133 
9134 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9135 }
9136 
9137 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9138 {
9139 	u16 i;
9140 
9141 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9142 		struct hclge_vport *vport = &hdev->vport[i];
9143 		int ret;
9144 
9145 		/* Send cmd to clear VF's FUNC_RST_ING */
9146 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9147 		if (ret)
9148 			dev_warn(&hdev->pdev->dev,
9149 				 "clear vf(%d) rst failed %d!\n",
9150 				 vport->vport_id, ret);
9151 	}
9152 }
9153 
9154 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9155 {
9156 	struct pci_dev *pdev = ae_dev->pdev;
9157 	struct hclge_dev *hdev;
9158 	int ret;
9159 
9160 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9161 	if (!hdev) {
9162 		ret = -ENOMEM;
9163 		goto out;
9164 	}
9165 
9166 	hdev->pdev = pdev;
9167 	hdev->ae_dev = ae_dev;
9168 	hdev->reset_type = HNAE3_NONE_RESET;
9169 	hdev->reset_level = HNAE3_FUNC_RESET;
9170 	ae_dev->priv = hdev;
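	/* default MPS: a standard Ethernet frame plus FCS and two VLAN tags */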
9171 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9172 
9173 	mutex_init(&hdev->vport_lock);
9174 	mutex_init(&hdev->vport_cfg_mutex);
9175 	spin_lock_init(&hdev->fd_rule_lock);
9176 
9177 	ret = hclge_pci_init(hdev);
9178 	if (ret) {
9179 		dev_err(&pdev->dev, "PCI init failed\n");
9180 		goto out;
9181 	}
9182 
9183 	/* Firmware command queue initialize */
9184 	ret = hclge_cmd_queue_init(hdev);
9185 	if (ret) {
9186 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9187 		goto err_pci_uninit;
9188 	}
9189 
9190 	/* Firmware command initialize */
9191 	ret = hclge_cmd_init(hdev);
9192 	if (ret)
9193 		goto err_cmd_uninit;
9194 
9195 	ret = hclge_get_cap(hdev);
9196 	if (ret) {
9197 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9198 			ret);
9199 		goto err_cmd_uninit;
9200 	}
9201 
9202 	ret = hclge_configure(hdev);
9203 	if (ret) {
9204 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9205 		goto err_cmd_uninit;
9206 	}
9207 
9208 	ret = hclge_init_msi(hdev);
9209 	if (ret) {
9210 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9211 		goto err_cmd_uninit;
9212 	}
9213 
9214 	ret = hclge_misc_irq_init(hdev);
9215 	if (ret) {
9216 		dev_err(&pdev->dev,
9217 			"Misc IRQ(vector0) init error, ret = %d.\n",
9218 			ret);
9219 		goto err_msi_uninit;
9220 	}
9221 
9222 	ret = hclge_alloc_tqps(hdev);
9223 	if (ret) {
9224 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9225 		goto err_msi_irq_uninit;
9226 	}
9227 
9228 	ret = hclge_alloc_vport(hdev);
9229 	if (ret) {
9230 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9231 		goto err_msi_irq_uninit;
9232 	}
9233 
9234 	ret = hclge_map_tqp(hdev);
9235 	if (ret) {
9236 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9237 		goto err_msi_irq_uninit;
9238 	}
9239 
9240 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9241 		ret = hclge_mac_mdio_config(hdev);
9242 		if (ret) {
9243 			dev_err(&hdev->pdev->dev,
9244 				"mdio config fail ret=%d\n", ret);
9245 			goto err_msi_irq_uninit;
9246 		}
9247 	}
9248 
9249 	ret = hclge_init_umv_space(hdev);
9250 	if (ret) {
9251 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9252 		goto err_mdiobus_unreg;
9253 	}
9254 
9255 	ret = hclge_mac_init(hdev);
9256 	if (ret) {
9257 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9258 		goto err_mdiobus_unreg;
9259 	}
9260 
9261 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9262 	if (ret) {
9263 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9264 		goto err_mdiobus_unreg;
9265 	}
9266 
9267 	ret = hclge_config_gro(hdev, true);
9268 	if (ret)
9269 		goto err_mdiobus_unreg;
9270 
9271 	ret = hclge_init_vlan_config(hdev);
9272 	if (ret) {
9273 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9274 		goto err_mdiobus_unreg;
9275 	}
9276 
9277 	ret = hclge_tm_schd_init(hdev);
9278 	if (ret) {
9279 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9280 		goto err_mdiobus_unreg;
9281 	}
9282 
9283 	hclge_rss_init_cfg(hdev);
9284 	ret = hclge_rss_init_hw(hdev);
9285 	if (ret) {
9286 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9287 		goto err_mdiobus_unreg;
9288 	}
9289 
9290 	ret = init_mgr_tbl(hdev);
9291 	if (ret) {
9292 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9293 		goto err_mdiobus_unreg;
9294 	}
9295 
9296 	ret = hclge_init_fd_config(hdev);
9297 	if (ret) {
9298 		dev_err(&pdev->dev,
9299 			"fd table init fail, ret=%d\n", ret);
9300 		goto err_mdiobus_unreg;
9301 	}
9302 
9303 	INIT_KFIFO(hdev->mac_tnl_log);
9304 
9305 	hclge_dcb_ops_set(hdev);
9306 
9307 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9308 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9309 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9310 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9311 
9312 	/* Setup affinity after service timer setup because add_timer_on
9313 	 * is called in affinity notify.
9314 	 */
9315 	hclge_misc_affinity_setup(hdev);
9316 
9317 	hclge_clear_all_event_cause(hdev);
9318 	hclge_clear_resetting_state(hdev);
9319 
9320 	/* Log and clear the hw errors those already occurred */
9321 	hclge_handle_all_hns_hw_errors(ae_dev);
9322 
9323 	/* request a delayed reset for the error recovery, since an immediate
9324 	 * global reset on a PF may affect the pending initialization of other PFs
9325 	 */
9326 	if (ae_dev->hw_err_reset_req) {
9327 		enum hnae3_reset_type reset_level;
9328 
9329 		reset_level = hclge_get_reset_level(ae_dev,
9330 						    &ae_dev->hw_err_reset_req);
9331 		hclge_set_def_reset_request(ae_dev, reset_level);
9332 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9333 	}
9334 
9335 	/* Enable MISC vector(vector0) */
9336 	hclge_enable_vector(&hdev->misc_vector, true);
9337 
9338 	hclge_state_init(hdev);
9339 	hdev->last_reset_time = jiffies;
9340 
9341 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9342 		 HCLGE_DRIVER_NAME);
9343 
9344 	return 0;
9345 
9346 err_mdiobus_unreg:
9347 	if (hdev->hw.mac.phydev)
9348 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9349 err_msi_irq_uninit:
9350 	hclge_misc_irq_uninit(hdev);
9351 err_msi_uninit:
9352 	pci_free_irq_vectors(pdev);
9353 err_cmd_uninit:
9354 	hclge_cmd_uninit(hdev);
9355 err_pci_uninit:
9356 	pcim_iounmap(pdev, hdev->hw.io_base);
9357 	pci_clear_master(pdev);
9358 	pci_release_regions(pdev);
9359 	pci_disable_device(pdev);
9360 out:
9361 	return ret;
9362 }
9363 
9364 static void hclge_stats_clear(struct hclge_dev *hdev)
9365 {
9366 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9367 }
9368 
9369 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9370 {
9371 	struct hclge_vport *vport = hdev->vport;
9372 	int i;
9373 
9374 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9375 		hclge_vport_stop(vport);
9376 		vport++;
9377 	}
9378 }
9379 
9380 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9381 {
9382 	struct hclge_dev *hdev = ae_dev->priv;
9383 	struct pci_dev *pdev = ae_dev->pdev;
9384 	int ret;
9385 
9386 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9387 
9388 	hclge_stats_clear(hdev);
9389 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9390 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9391 
9392 	ret = hclge_cmd_init(hdev);
9393 	if (ret) {
9394 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9395 		return ret;
9396 	}
9397 
9398 	ret = hclge_map_tqp(hdev);
9399 	if (ret) {
9400 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9401 		return ret;
9402 	}
9403 
9404 	hclge_reset_umv_space(hdev);
9405 
9406 	ret = hclge_mac_init(hdev);
9407 	if (ret) {
9408 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9409 		return ret;
9410 	}
9411 
9412 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9413 	if (ret) {
9414 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9415 		return ret;
9416 	}
9417 
9418 	ret = hclge_config_gro(hdev, true);
9419 	if (ret)
9420 		return ret;
9421 
9422 	ret = hclge_init_vlan_config(hdev);
9423 	if (ret) {
9424 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9425 		return ret;
9426 	}
9427 
9428 	ret = hclge_tm_init_hw(hdev, true);
9429 	if (ret) {
9430 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9431 		return ret;
9432 	}
9433 
9434 	ret = hclge_rss_init_hw(hdev);
9435 	if (ret) {
9436 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9437 		return ret;
9438 	}
9439 
9440 	ret = hclge_init_fd_config(hdev);
9441 	if (ret) {
9442 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9443 		return ret;
9444 	}
9445 
9446 	/* Re-enable the hw error interrupts because
9447 	 * the interrupts get disabled on global reset.
9448 	 */
9449 	ret = hclge_config_nic_hw_error(hdev, true);
9450 	if (ret) {
9451 		dev_err(&pdev->dev,
9452 			"fail(%d) to re-enable NIC hw error interrupts\n",
9453 			ret);
9454 		return ret;
9455 	}
9456 
9457 	if (hdev->roce_client) {
9458 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9459 		if (ret) {
9460 			dev_err(&pdev->dev,
9461 				"fail(%d) to re-enable roce ras interrupts\n",
9462 				ret);
9463 			return ret;
9464 		}
9465 	}
9466 
9467 	hclge_reset_vport_state(hdev);
9468 
9469 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9470 		 HCLGE_DRIVER_NAME);
9471 
9472 	return 0;
9473 }
9474 
9475 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9476 {
9477 	struct hclge_dev *hdev = ae_dev->priv;
9478 	struct hclge_mac *mac = &hdev->hw.mac;
9479 
9480 	hclge_misc_affinity_teardown(hdev);
9481 	hclge_state_uninit(hdev);
9482 
9483 	if (mac->phydev)
9484 		mdiobus_unregister(mac->mdio_bus);
9485 
9486 	hclge_uninit_umv_space(hdev);
9487 
9488 	/* Disable MISC vector(vector0) */
9489 	hclge_enable_vector(&hdev->misc_vector, false);
9490 	synchronize_irq(hdev->misc_vector.vector_irq);
9491 
9492 	/* Disable all hw interrupts */
9493 	hclge_config_mac_tnl_int(hdev, false);
9494 	hclge_config_nic_hw_error(hdev, false);
9495 	hclge_config_rocee_ras_interrupt(hdev, false);
9496 
9497 	hclge_cmd_uninit(hdev);
9498 	hclge_misc_irq_uninit(hdev);
9499 	hclge_pci_uninit(hdev);
9500 	mutex_destroy(&hdev->vport_lock);
9501 	hclge_uninit_vport_mac_table(hdev);
9502 	hclge_uninit_vport_vlan_table(hdev);
9503 	mutex_destroy(&hdev->vport_cfg_mutex);
9504 	ae_dev->priv = NULL;
9505 }
9506 
9507 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9508 {
9509 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9510 	struct hclge_vport *vport = hclge_get_vport(handle);
9511 	struct hclge_dev *hdev = vport->back;
9512 
9513 	return min_t(u32, hdev->rss_size_max,
9514 		     vport->alloc_tqps / kinfo->num_tc);
9515 }
9516 
9517 static void hclge_get_channels(struct hnae3_handle *handle,
9518 			       struct ethtool_channels *ch)
9519 {
9520 	ch->max_combined = hclge_get_max_channels(handle);
9521 	ch->other_count = 1;
9522 	ch->max_other = 1;
9523 	ch->combined_count = handle->kinfo.rss_size;
9524 }
9525 
9526 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9527 					u16 *alloc_tqps, u16 *max_rss_size)
9528 {
9529 	struct hclge_vport *vport = hclge_get_vport(handle);
9530 	struct hclge_dev *hdev = vport->back;
9531 
9532 	*alloc_tqps = vport->alloc_tqps;
9533 	*max_rss_size = hdev->rss_size_max;
9534 }
9535 
9536 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9537 			      bool rxfh_configured)
9538 {
9539 	struct hclge_vport *vport = hclge_get_vport(handle);
9540 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9541 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9542 	struct hclge_dev *hdev = vport->back;
9543 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9544 	int cur_rss_size = kinfo->rss_size;
9545 	int cur_tqps = kinfo->num_tqps;
9546 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9547 	u16 roundup_size;
9548 	u32 *rss_indir;
9549 	unsigned int i;
9550 	int ret;
9551 
9552 	kinfo->req_rss_size = new_tqps_num;
9553 
9554 	ret = hclge_tm_vport_map_update(hdev);
9555 	if (ret) {
9556 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9557 		return ret;
9558 	}
9559 
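	/* tc_size is passed to hw as log2 of the rss size rounded up to a
	 * power of two
	 */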
9560 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9561 	roundup_size = ilog2(roundup_size);
9562 	/* Set the RSS TC mode according to the new RSS size */
9563 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9564 		tc_valid[i] = 0;
9565 
9566 		if (!(hdev->hw_tc_map & BIT(i)))
9567 			continue;
9568 
9569 		tc_valid[i] = 1;
9570 		tc_size[i] = roundup_size;
9571 		tc_offset[i] = kinfo->rss_size * i;
9572 	}
9573 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9574 	if (ret)
9575 		return ret;
9576 
9577 	/* RSS indirection table has been configured by the user */
9578 	if (rxfh_configured)
9579 		goto out;
9580 
9581 	/* Reinitialize the RSS indirection table according to the new RSS size */
9582 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9583 	if (!rss_indir)
9584 		return -ENOMEM;
9585 
9586 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9587 		rss_indir[i] = i % kinfo->rss_size;
9588 
9589 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9590 	if (ret)
9591 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9592 			ret);
9593 
9594 	kfree(rss_indir);
9595 
9596 out:
9597 	if (!ret)
9598 		dev_info(&hdev->pdev->dev,
9599 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
9600 			 cur_rss_size, kinfo->rss_size,
9601 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9602 
9603 	return ret;
9604 }
9605 
9606 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9607 			      u32 *regs_num_64_bit)
9608 {
9609 	struct hclge_desc desc;
9610 	u32 total_num;
9611 	int ret;
9612 
9613 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9614 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9615 	if (ret) {
9616 		dev_err(&hdev->pdev->dev,
9617 			"Query register number cmd failed, ret = %d.\n", ret);
9618 		return ret;
9619 	}
9620 
9621 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
9622 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
9623 
9624 	total_num = *regs_num_32_bit + *regs_num_64_bit;
9625 	if (!total_num)
9626 		return -EINVAL;
9627 
9628 	return 0;
9629 }
9630 
9631 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9632 				 void *data)
9633 {
9634 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9635 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9636 
9637 	struct hclge_desc *desc;
9638 	u32 *reg_val = data;
9639 	__le32 *desc_data;
9640 	int nodata_num;
9641 	int cmd_num;
9642 	int i, k, n;
9643 	int ret;
9644 
9645 	if (regs_num == 0)
9646 		return 0;
9647 
9648 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9649 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9650 			       HCLGE_32_BIT_REG_RTN_DATANUM);
9651 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9652 	if (!desc)
9653 		return -ENOMEM;
9654 
9655 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9656 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9657 	if (ret) {
9658 		dev_err(&hdev->pdev->dev,
9659 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
9660 		kfree(desc);
9661 		return ret;
9662 	}
9663 
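	/* the first descriptor reserves HCLGE_32_BIT_DESC_NODATA_LEN u32 slots
	 * for command metadata; the following descriptors carry register
	 * values from their very start
	 */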
9664 	for (i = 0; i < cmd_num; i++) {
9665 		if (i == 0) {
9666 			desc_data = (__le32 *)(&desc[i].data[0]);
9667 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9668 		} else {
9669 			desc_data = (__le32 *)(&desc[i]);
9670 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
9671 		}
9672 		for (k = 0; k < n; k++) {
9673 			*reg_val++ = le32_to_cpu(*desc_data++);
9674 
9675 			regs_num--;
9676 			if (!regs_num)
9677 				break;
9678 		}
9679 	}
9680 
9681 	kfree(desc);
9682 	return 0;
9683 }
9684 
9685 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9686 				 void *data)
9687 {
9688 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9689 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9690 
9691 	struct hclge_desc *desc;
9692 	u64 *reg_val = data;
9693 	__le64 *desc_data;
9694 	int nodata_len;
9695 	int cmd_num;
9696 	int i, k, n;
9697 	int ret;
9698 
9699 	if (regs_num == 0)
9700 		return 0;
9701 
9702 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9703 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9704 			       HCLGE_64_BIT_REG_RTN_DATANUM);
9705 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9706 	if (!desc)
9707 		return -ENOMEM;
9708 
9709 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9710 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9711 	if (ret) {
9712 		dev_err(&hdev->pdev->dev,
9713 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
9714 		kfree(desc);
9715 		return ret;
9716 	}
9717 
9718 	for (i = 0; i < cmd_num; i++) {
9719 		if (i == 0) {
9720 			desc_data = (__le64 *)(&desc[i].data[0]);
9721 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9722 		} else {
9723 			desc_data = (__le64 *)(&desc[i]);
9724 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
9725 		}
9726 		for (k = 0; k < n; k++) {
9727 			*reg_val++ = le64_to_cpu(*desc_data++);
9728 
9729 			regs_num--;
9730 			if (!regs_num)
9731 				break;
9732 		}
9733 	}
9734 
9735 	kfree(desc);
9736 	return 0;
9737 }
9738 
9739 #define MAX_SEPARATE_NUM	4
9740 #define SEPARATOR_VALUE		0xFDFCFBFA
9741 #define REG_NUM_PER_LINE	4
9742 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
9743 #define REG_SEPARATOR_LINE	1
9744 #define REG_NUM_REMAIN_MASK	3
9745 #define BD_LIST_MAX_NUM		30
9746 
9747 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
9748 {
9749 	/* prepare 4 commands to query DFX BD number */
9750 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9751 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9752 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9753 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9754 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9755 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9756 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9757 
9758 	return hclge_cmd_send(&hdev->hw, desc, 4);
9759 }
9760 
9761 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9762 				    int *bd_num_list,
9763 				    u32 type_num)
9764 {
9765 #define HCLGE_DFX_REG_BD_NUM	4
9766 
9767 	u32 entries_per_desc, desc_index, index, offset, i;
9768 	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
9769 	int ret;
9770 
9771 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
9772 	if (ret) {
9773 		dev_err(&hdev->pdev->dev,
9774 			"Get dfx bd num fail, status is %d.\n", ret);
9775 		return ret;
9776 	}
9777 
9778 	entries_per_desc = ARRAY_SIZE(desc[0].data);
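	/* the BD numbers are packed across the descriptors' data words, so
	 * pick the descriptor and slot from each type's offset
	 */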
9779 	for (i = 0; i < type_num; i++) {
9780 		offset = hclge_dfx_bd_offset_list[i];
9781 		index = offset % entries_per_desc;
9782 		desc_index = offset / entries_per_desc;
9783 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9784 	}
9785 
9786 	return ret;
9787 }
9788 
9789 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9790 				  struct hclge_desc *desc_src, int bd_num,
9791 				  enum hclge_opcode_type cmd)
9792 {
9793 	struct hclge_desc *desc = desc_src;
9794 	int i, ret;
9795 
9796 	hclge_cmd_setup_basic_desc(desc, cmd, true);
9797 	for (i = 0; i < bd_num - 1; i++) {
9798 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9799 		desc++;
9800 		hclge_cmd_setup_basic_desc(desc, cmd, true);
9801 	}
9802 
9803 	desc = desc_src;
9804 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9805 	if (ret)
9806 		dev_err(&hdev->pdev->dev,
9807 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9808 			cmd, ret);
9809 
9810 	return ret;
9811 }
9812 
9813 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9814 				    void *data)
9815 {
9816 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9817 	struct hclge_desc *desc = desc_src;
9818 	u32 *reg = data;
9819 
9820 	entries_per_desc = ARRAY_SIZE(desc->data);
9821 	reg_num = entries_per_desc * bd_num;
9822 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
9823 	for (i = 0; i < reg_num; i++) {
9824 		index = i % entries_per_desc;
9825 		desc_index = i / entries_per_desc;
9826 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
9827 	}
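	/* pad with SEPARATOR_VALUE entries so the block ends on a
	 * REG_NUM_PER_LINE boundary
	 */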
9828 	for (i = 0; i < separator_num; i++)
9829 		*reg++ = SEPARATOR_VALUE;
9830 
9831 	return reg_num + separator_num;
9832 }
9833 
9834 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9835 {
9836 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9837 	int data_len_per_desc, data_len, bd_num, i;
9838 	int bd_num_list[BD_LIST_MAX_NUM];
9839 	int ret;
9840 
9841 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9842 	if (ret) {
9843 		dev_err(&hdev->pdev->dev,
9844 			"Get dfx reg bd num fail, status is %d.\n", ret);
9845 		return ret;
9846 	}
9847 
9848 	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9849 	*len = 0;
9850 	for (i = 0; i < dfx_reg_type_num; i++) {
9851 		bd_num = bd_num_list[i];
9852 		data_len = data_len_per_desc * bd_num;
9853 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9854 	}
9855 
9856 	return ret;
9857 }
9858 
9859 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9860 {
9861 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9862 	int bd_num, bd_num_max, buf_len, i;
9863 	int bd_num_list[BD_LIST_MAX_NUM];
9864 	struct hclge_desc *desc_src;
9865 	u32 *reg = data;
9866 	int ret;
9867 
9868 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9869 	if (ret) {
9870 		dev_err(&hdev->pdev->dev,
9871 			"Get dfx reg bd num fail, status is %d.\n", ret);
9872 		return ret;
9873 	}
9874 
9875 	bd_num_max = bd_num_list[0];
9876 	for (i = 1; i < dfx_reg_type_num; i++)
9877 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9878 
9879 	buf_len = sizeof(*desc_src) * bd_num_max;
9880 	desc_src = kzalloc(buf_len, GFP_KERNEL);
9881 	if (!desc_src) {
9882 		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9883 		return -ENOMEM;
9884 	}
9885 
9886 	for (i = 0; i < dfx_reg_type_num; i++) {
9887 		bd_num = bd_num_list[i];
9888 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9889 					     hclge_dfx_reg_opcode_list[i]);
9890 		if (ret) {
9891 			dev_err(&hdev->pdev->dev,
9892 				"Get dfx reg fail, status is %d.\n", ret);
9893 			break;
9894 		}
9895 
9896 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9897 	}
9898 
9899 	kfree(desc_src);
9900 	return ret;
9901 }
9902 
9903 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9904 			      struct hnae3_knic_private_info *kinfo)
9905 {
9906 #define HCLGE_RING_REG_OFFSET		0x200
9907 #define HCLGE_RING_INT_REG_OFFSET	0x4
9908 
9909 	int i, j, reg_num, separator_num;
9910 	int data_num_sum;
9911 	u32 *reg = data;
9912 
9913 	/* fetch per-PF register values from the PF PCIe register space */
9914 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9915 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9916 	for (i = 0; i < reg_num; i++)
9917 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9918 	for (i = 0; i < separator_num; i++)
9919 		*reg++ = SEPARATOR_VALUE;
9920 	data_num_sum = reg_num + separator_num;
9921 
9922 	reg_num = ARRAY_SIZE(common_reg_addr_list);
9923 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9924 	for (i = 0; i < reg_num; i++)
9925 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9926 	for (i = 0; i < separator_num; i++)
9927 		*reg++ = SEPARATOR_VALUE;
9928 	data_num_sum += reg_num + separator_num;
9929 
9930 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
9931 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
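	/* ring registers repeat per TQP at a stride of HCLGE_RING_REG_OFFSET */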
9932 	for (j = 0; j < kinfo->num_tqps; j++) {
9933 		for (i = 0; i < reg_num; i++)
9934 			*reg++ = hclge_read_dev(&hdev->hw,
9935 						ring_reg_addr_list[i] +
9936 						HCLGE_RING_REG_OFFSET * j);
9937 		for (i = 0; i < separator_num; i++)
9938 			*reg++ = SEPARATOR_VALUE;
9939 	}
9940 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
9941 
9942 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9943 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9944 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
9945 		for (i = 0; i < reg_num; i++)
9946 			*reg++ = hclge_read_dev(&hdev->hw,
9947 						tqp_intr_reg_addr_list[i] +
9948 						HCLGE_RING_INT_REG_OFFSET * j);
9949 		for (i = 0; i < separator_num; i++)
9950 			*reg++ = SEPARATOR_VALUE;
9951 	}
9952 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9953 
9954 	return data_num_sum;
9955 }
9956 
9957 static int hclge_get_regs_len(struct hnae3_handle *handle)
9958 {
9959 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9960 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9961 	struct hclge_vport *vport = hclge_get_vport(handle);
9962 	struct hclge_dev *hdev = vport->back;
9963 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
9964 	int regs_lines_32_bit, regs_lines_64_bit;
9965 	int ret;
9966 
9967 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9968 	if (ret) {
9969 		dev_err(&hdev->pdev->dev,
9970 			"Get register number failed, ret = %d.\n", ret);
9971 		return ret;
9972 	}
9973 
9974 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9975 	if (ret) {
9976 		dev_err(&hdev->pdev->dev,
9977 			"Get dfx reg len failed, ret = %d.\n", ret);
9978 		return ret;
9979 	}
9980 
9981 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9982 		REG_SEPARATOR_LINE;
9983 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9984 		REG_SEPARATOR_LINE;
9985 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
9986 		REG_SEPARATOR_LINE;
9987 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
9988 		REG_SEPARATOR_LINE;
9989 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
9990 		REG_SEPARATOR_LINE;
9991 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
9992 		REG_SEPARATOR_LINE;
9993 
9994 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9995 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
9996 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
9997 }
9998 
9999 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10000 			   void *data)
10001 {
10002 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10003 	struct hclge_vport *vport = hclge_get_vport(handle);
10004 	struct hclge_dev *hdev = vport->back;
10005 	u32 regs_num_32_bit, regs_num_64_bit;
10006 	int i, reg_num, separator_num, ret;
10007 	u32 *reg = data;
10008 
10009 	*version = hdev->fw_version;
10010 
10011 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10012 	if (ret) {
10013 		dev_err(&hdev->pdev->dev,
10014 			"Get register number failed, ret = %d.\n", ret);
10015 		return;
10016 	}
10017 
10018 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10019 
10020 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10021 	if (ret) {
10022 		dev_err(&hdev->pdev->dev,
10023 			"Get 32 bit register failed, ret = %d.\n", ret);
10024 		return;
10025 	}
10026 	reg_num = regs_num_32_bit;
10027 	reg += reg_num;
10028 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10029 	for (i = 0; i < separator_num; i++)
10030 		*reg++ = SEPARATOR_VALUE;
10031 
10032 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10033 	if (ret) {
10034 		dev_err(&hdev->pdev->dev,
10035 			"Get 64 bit register failed, ret = %d.\n", ret);
10036 		return;
10037 	}
10038 	reg_num = regs_num_64_bit * 2;
10039 	reg += reg_num;
10040 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10041 	for (i = 0; i < separator_num; i++)
10042 		*reg++ = SEPARATOR_VALUE;
10043 
10044 	ret = hclge_get_dfx_reg(hdev, reg);
10045 	if (ret)
10046 		dev_err(&hdev->pdev->dev,
10047 			"Get dfx register failed, ret = %d.\n", ret);
10048 }
10049 
10050 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10051 {
10052 	struct hclge_set_led_state_cmd *req;
10053 	struct hclge_desc desc;
10054 	int ret;
10055 
10056 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10057 
10058 	req = (struct hclge_set_led_state_cmd *)desc.data;
10059 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10060 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10061 
10062 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10063 	if (ret)
10064 		dev_err(&hdev->pdev->dev,
10065 			"Send set led state cmd error, ret =%d\n", ret);
10066 
10067 	return ret;
10068 }
10069 
10070 enum hclge_led_status {
10071 	HCLGE_LED_OFF,
10072 	HCLGE_LED_ON,
10073 	HCLGE_LED_NO_CHANGE = 0xFF,
10074 };
10075 
10076 static int hclge_set_led_id(struct hnae3_handle *handle,
10077 			    enum ethtool_phys_id_state status)
10078 {
10079 	struct hclge_vport *vport = hclge_get_vport(handle);
10080 	struct hclge_dev *hdev = vport->back;
10081 
10082 	switch (status) {
10083 	case ETHTOOL_ID_ACTIVE:
10084 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
10085 	case ETHTOOL_ID_INACTIVE:
10086 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10087 	default:
10088 		return -EINVAL;
10089 	}
10090 }
10091 
10092 static void hclge_get_link_mode(struct hnae3_handle *handle,
10093 				unsigned long *supported,
10094 				unsigned long *advertising)
10095 {
10096 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10097 	struct hclge_vport *vport = hclge_get_vport(handle);
10098 	struct hclge_dev *hdev = vport->back;
10099 	unsigned int idx = 0;
10100 
10101 	for (; idx < size; idx++) {
10102 		supported[idx] = hdev->hw.mac.supported[idx];
10103 		advertising[idx] = hdev->hw.mac.advertising[idx];
10104 	}
10105 }
10106 
10107 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10108 {
10109 	struct hclge_vport *vport = hclge_get_vport(handle);
10110 	struct hclge_dev *hdev = vport->back;
10111 
10112 	return hclge_config_gro(hdev, enable);
10113 }
10114 
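/* hnae3 operations implemented by the PF (hclge) driver */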
10115 static const struct hnae3_ae_ops hclge_ops = {
10116 	.init_ae_dev = hclge_init_ae_dev,
10117 	.uninit_ae_dev = hclge_uninit_ae_dev,
10118 	.flr_prepare = hclge_flr_prepare,
10119 	.flr_done = hclge_flr_done,
10120 	.init_client_instance = hclge_init_client_instance,
10121 	.uninit_client_instance = hclge_uninit_client_instance,
10122 	.map_ring_to_vector = hclge_map_ring_to_vector,
10123 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10124 	.get_vector = hclge_get_vector,
10125 	.put_vector = hclge_put_vector,
10126 	.set_promisc_mode = hclge_set_promisc_mode,
10127 	.set_loopback = hclge_set_loopback,
10128 	.start = hclge_ae_start,
10129 	.stop = hclge_ae_stop,
10130 	.client_start = hclge_client_start,
10131 	.client_stop = hclge_client_stop,
10132 	.get_status = hclge_get_status,
10133 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
10134 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10135 	.get_media_type = hclge_get_media_type,
10136 	.check_port_speed = hclge_check_port_speed,
10137 	.get_fec = hclge_get_fec,
10138 	.set_fec = hclge_set_fec,
10139 	.get_rss_key_size = hclge_get_rss_key_size,
10140 	.get_rss_indir_size = hclge_get_rss_indir_size,
10141 	.get_rss = hclge_get_rss,
10142 	.set_rss = hclge_set_rss,
10143 	.set_rss_tuple = hclge_set_rss_tuple,
10144 	.get_rss_tuple = hclge_get_rss_tuple,
10145 	.get_tc_size = hclge_get_tc_size,
10146 	.get_mac_addr = hclge_get_mac_addr,
10147 	.set_mac_addr = hclge_set_mac_addr,
10148 	.do_ioctl = hclge_do_ioctl,
10149 	.add_uc_addr = hclge_add_uc_addr,
10150 	.rm_uc_addr = hclge_rm_uc_addr,
10151 	.add_mc_addr = hclge_add_mc_addr,
10152 	.rm_mc_addr = hclge_rm_mc_addr,
10153 	.set_autoneg = hclge_set_autoneg,
10154 	.get_autoneg = hclge_get_autoneg,
10155 	.restart_autoneg = hclge_restart_autoneg,
10156 	.halt_autoneg = hclge_halt_autoneg,
10157 	.get_pauseparam = hclge_get_pauseparam,
10158 	.set_pauseparam = hclge_set_pauseparam,
10159 	.set_mtu = hclge_set_mtu,
10160 	.reset_queue = hclge_reset_tqp,
10161 	.get_stats = hclge_get_stats,
10162 	.get_mac_stats = hclge_get_mac_stat,
10163 	.update_stats = hclge_update_stats,
10164 	.get_strings = hclge_get_strings,
10165 	.get_sset_count = hclge_get_sset_count,
10166 	.get_fw_version = hclge_get_fw_version,
10167 	.get_mdix_mode = hclge_get_mdix_mode,
10168 	.enable_vlan_filter = hclge_enable_vlan_filter,
10169 	.set_vlan_filter = hclge_set_vlan_filter,
10170 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10171 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10172 	.reset_event = hclge_reset_event,
10173 	.get_reset_level = hclge_get_reset_level,
10174 	.set_default_reset_request = hclge_set_def_reset_request,
10175 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10176 	.set_channels = hclge_set_channels,
10177 	.get_channels = hclge_get_channels,
10178 	.get_regs_len = hclge_get_regs_len,
10179 	.get_regs = hclge_get_regs,
10180 	.set_led_id = hclge_set_led_id,
10181 	.get_link_mode = hclge_get_link_mode,
10182 	.add_fd_entry = hclge_add_fd_entry,
10183 	.del_fd_entry = hclge_del_fd_entry,
10184 	.del_all_fd_entries = hclge_del_all_fd_entries,
10185 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10186 	.get_fd_rule_info = hclge_get_fd_rule_info,
10187 	.get_fd_all_rules = hclge_get_all_rules,
10188 	.restore_fd_rules = hclge_restore_fd_entries,
10189 	.enable_fd = hclge_enable_fd,
10190 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
10191 	.dbg_run_cmd = hclge_dbg_run_cmd,
10192 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
10193 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
10194 	.ae_dev_resetting = hclge_ae_dev_resetting,
10195 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10196 	.set_gro_en = hclge_gro_en,
10197 	.get_global_queue_id = hclge_covert_handle_qid_global,
10198 	.set_timer_task = hclge_set_timer_task,
10199 	.mac_connect_phy = hclge_mac_connect_phy,
10200 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
10201 	.restore_vlan_table = hclge_restore_vlan_table,
10202 };
10203 
10204 static struct hnae3_ae_algo ae_algo = {
10205 	.ops = &hclge_ops,
10206 	.pdev_id_table = ae_algo_pci_tbl,
10207 };
10208 
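/* Register the PF algorithm with the hnae3 framework on module load */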
10209 static int hclge_init(void)
10210 {
10211 	pr_info("%s is initializing\n", HCLGE_NAME);
10212 
10213 	hnae3_register_ae_algo(&ae_algo);
10214 
10215 	return 0;
10216 }
10217 
10218 static void hclge_exit(void)
10219 {
10220 	hnae3_unregister_ae_algo(&ae_algo);
10221 }
10222 module_init(hclge_init);
10223 module_exit(hclge_exit);
10224 
10225 MODULE_LICENSE("GPL");
10226 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10227 MODULE_DESCRIPTION("HCLGE Driver");
10228 MODULE_VERSION(HCLGE_MOD_VERSION);
10229