1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27
28 #define HCLGE_NAME "hclge"
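/* helpers for reading a u64 statistic located at a byte offset
 * within struct hclge_mac_stats
 */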
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31
32 #define HCLGE_BUF_SIZE_UNIT 256U
33 #define HCLGE_BUF_MUL_BY 2
34 #define HCLGE_BUF_DIV_BY 2
35 #define NEED_RESERVE_TC_NUM 2
36 #define BUF_MAX_PERCENT 100
37 #define BUF_RESERVE_PERCENT 90
38
39 #define HCLGE_RESET_MAX_FAIL_CNT 5
40 #define HCLGE_RESET_SYNC_TIME 100
41 #define HCLGE_PF_RESET_SYNC_TIME 20
42 #define HCLGE_PF_RESET_SYNC_CNT 1500
43
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET 1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
48 #define HCLGE_DFX_IGU_BD_OFFSET 4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
51 #define HCLGE_DFX_NCSI_BD_OFFSET 7
52 #define HCLGE_DFX_RTC_BD_OFFSET 8
53 #define HCLGE_DFX_PPP_BD_OFFSET 9
54 #define HCLGE_DFX_RCB_BD_OFFSET 10
55 #define HCLGE_DFX_TQP_BD_OFFSET 11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
57
58 #define HCLGE_LINK_STATUS_MS 10
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75
76 static struct hnae3_ae_algo ae_algo;
77
78 static struct workqueue_struct *hclge_wq;
79
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89 /* required last entry */
90 {0, }
91 };
92
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96 HCLGE_NIC_CSQ_BASEADDR_H_REG,
97 HCLGE_NIC_CSQ_DEPTH_REG,
98 HCLGE_NIC_CSQ_TAIL_REG,
99 HCLGE_NIC_CSQ_HEAD_REG,
100 HCLGE_NIC_CRQ_BASEADDR_L_REG,
101 HCLGE_NIC_CRQ_BASEADDR_H_REG,
102 HCLGE_NIC_CRQ_DEPTH_REG,
103 HCLGE_NIC_CRQ_TAIL_REG,
104 HCLGE_NIC_CRQ_HEAD_REG,
105 HCLGE_VECTOR0_CMDQ_SRC_REG,
106 HCLGE_CMDQ_INTR_STS_REG,
107 HCLGE_CMDQ_INTR_EN_REG,
108 HCLGE_CMDQ_INTR_GEN_REG};
109
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111 HCLGE_PF_OTHER_INT_REG,
112 HCLGE_MISC_RESET_STS_REG,
113 HCLGE_MISC_VECTOR_INT_STS,
114 HCLGE_GLOBAL_RESET_REG,
115 HCLGE_FUN_RST_ING,
116 HCLGE_GRO_EN_REG};
117
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119 HCLGE_RING_RX_ADDR_H_REG,
120 HCLGE_RING_RX_BD_NUM_REG,
121 HCLGE_RING_RX_BD_LENGTH_REG,
122 HCLGE_RING_RX_MERGE_EN_REG,
123 HCLGE_RING_RX_TAIL_REG,
124 HCLGE_RING_RX_HEAD_REG,
125 HCLGE_RING_RX_FBD_NUM_REG,
126 HCLGE_RING_RX_OFFSET_REG,
127 HCLGE_RING_RX_FBD_OFFSET_REG,
128 HCLGE_RING_RX_STASH_REG,
129 HCLGE_RING_RX_BD_ERR_REG,
130 HCLGE_RING_TX_ADDR_L_REG,
131 HCLGE_RING_TX_ADDR_H_REG,
132 HCLGE_RING_TX_BD_NUM_REG,
133 HCLGE_RING_TX_PRIORITY_REG,
134 HCLGE_RING_TX_TC_REG,
135 HCLGE_RING_TX_MERGE_EN_REG,
136 HCLGE_RING_TX_TAIL_REG,
137 HCLGE_RING_TX_HEAD_REG,
138 HCLGE_RING_TX_FBD_NUM_REG,
139 HCLGE_RING_TX_OFFSET_REG,
140 HCLGE_RING_TX_EBD_NUM_REG,
141 HCLGE_RING_TX_EBD_OFFSET_REG,
142 HCLGE_RING_TX_BD_ERR_REG,
143 HCLGE_RING_EN_REG};
144
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146 HCLGE_TQP_INTR_GL0_REG,
147 HCLGE_TQP_INTR_GL1_REG,
148 HCLGE_TQP_INTR_GL2_REG,
149 HCLGE_TQP_INTR_RL_REG};
150
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152 "App Loopback test",
153 "Serdes serial Loopback test",
154 "Serdes parallel Loopback test",
155 "Phy Loopback test"
156 };
157
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159 {"mac_tx_mac_pause_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161 {"mac_rx_mac_pause_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163 {"mac_tx_control_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
165 {"mac_rx_control_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
167 {"mac_tx_pfc_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
169 {"mac_tx_pfc_pri0_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
171 {"mac_tx_pfc_pri1_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
173 {"mac_tx_pfc_pri2_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
175 {"mac_tx_pfc_pri3_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
177 {"mac_tx_pfc_pri4_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
179 {"mac_tx_pfc_pri5_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
181 {"mac_tx_pfc_pri6_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
183 {"mac_tx_pfc_pri7_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
185 {"mac_rx_pfc_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
187 {"mac_rx_pfc_pri0_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
189 {"mac_rx_pfc_pri1_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
191 {"mac_rx_pfc_pri2_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
193 {"mac_rx_pfc_pri3_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
195 {"mac_rx_pfc_pri4_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
197 {"mac_rx_pfc_pri5_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
199 {"mac_rx_pfc_pri6_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
201 {"mac_rx_pfc_pri7_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
203 {"mac_tx_total_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
205 {"mac_tx_total_oct_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
207 {"mac_tx_good_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
209 {"mac_tx_bad_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
211 {"mac_tx_good_oct_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
213 {"mac_tx_bad_oct_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
215 {"mac_tx_uni_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
217 {"mac_tx_multi_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
219 {"mac_tx_broad_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
221 {"mac_tx_undersize_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
223 {"mac_tx_oversize_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
225 {"mac_tx_64_oct_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
227 {"mac_tx_65_127_oct_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
229 {"mac_tx_128_255_oct_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
231 {"mac_tx_256_511_oct_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
233 {"mac_tx_512_1023_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
235 {"mac_tx_1024_1518_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
237 {"mac_tx_1519_2047_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
239 {"mac_tx_2048_4095_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
241 {"mac_tx_4096_8191_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
243 {"mac_tx_8192_9216_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
245 {"mac_tx_9217_12287_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
247 {"mac_tx_12288_16383_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
249 {"mac_tx_1519_max_good_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
251 {"mac_tx_1519_max_bad_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
253 {"mac_rx_total_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
255 {"mac_rx_total_oct_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
257 {"mac_rx_good_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
259 {"mac_rx_bad_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
261 {"mac_rx_good_oct_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
263 {"mac_rx_bad_oct_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
265 {"mac_rx_uni_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
267 {"mac_rx_multi_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
269 {"mac_rx_broad_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
271 {"mac_rx_undersize_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
273 {"mac_rx_oversize_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
275 {"mac_rx_64_oct_pkt_num",
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
277 {"mac_rx_65_127_oct_pkt_num",
278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
279 {"mac_rx_128_255_oct_pkt_num",
280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
281 {"mac_rx_256_511_oct_pkt_num",
282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
283 {"mac_rx_512_1023_oct_pkt_num",
284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
285 {"mac_rx_1024_1518_oct_pkt_num",
286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
287 {"mac_rx_1519_2047_oct_pkt_num",
288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
289 {"mac_rx_2048_4095_oct_pkt_num",
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
291 {"mac_rx_4096_8191_oct_pkt_num",
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
293 {"mac_rx_8192_9216_oct_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
295 {"mac_rx_9217_12287_oct_pkt_num",
296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
297 {"mac_rx_12288_16383_oct_pkt_num",
298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
299 {"mac_rx_1519_max_good_pkt_num",
300 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
301 {"mac_rx_1519_max_bad_pkt_num",
302 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303
304 {"mac_tx_fragment_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
306 {"mac_tx_undermin_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
308 {"mac_tx_jabber_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
310 {"mac_tx_err_all_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
312 {"mac_tx_from_app_good_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
314 {"mac_tx_from_app_bad_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
316 {"mac_rx_fragment_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
318 {"mac_rx_undermin_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
320 {"mac_rx_jabber_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
322 {"mac_rx_fcs_err_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
324 {"mac_rx_send_app_good_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
326 {"mac_rx_send_app_bad_pkt_num",
327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
328 };
329
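/* MAC manager table entry that matches LLDP frames (ethertype
 * ETH_P_LLDP sent to the nearest-bridge multicast address)
 */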
330 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331 {
332 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
333 .ethter_type = cpu_to_le16(ETH_P_LLDP),
334 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
335 .i_port_bitmap = 0x1,
336 },
337 };
338
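/* default RSS hash key */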
339 static const u8 hclge_hash_key[] = {
340 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
341 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
342 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
343 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
344 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
345 };
346
347 static const u32 hclge_dfx_bd_offset_list[] = {
348 HCLGE_DFX_BIOS_BD_OFFSET,
349 HCLGE_DFX_SSU_0_BD_OFFSET,
350 HCLGE_DFX_SSU_1_BD_OFFSET,
351 HCLGE_DFX_IGU_BD_OFFSET,
352 HCLGE_DFX_RPU_0_BD_OFFSET,
353 HCLGE_DFX_RPU_1_BD_OFFSET,
354 HCLGE_DFX_NCSI_BD_OFFSET,
355 HCLGE_DFX_RTC_BD_OFFSET,
356 HCLGE_DFX_PPP_BD_OFFSET,
357 HCLGE_DFX_RCB_BD_OFFSET,
358 HCLGE_DFX_TQP_BD_OFFSET,
359 HCLGE_DFX_SSU_2_BD_OFFSET
360 };
361
362 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
363 HCLGE_OPC_DFX_BIOS_COMMON_REG,
364 HCLGE_OPC_DFX_SSU_REG_0,
365 HCLGE_OPC_DFX_SSU_REG_1,
366 HCLGE_OPC_DFX_IGU_EGU_REG,
367 HCLGE_OPC_DFX_RPU_REG_0,
368 HCLGE_OPC_DFX_RPU_REG_1,
369 HCLGE_OPC_DFX_NCSI_REG,
370 HCLGE_OPC_DFX_RTC_REG,
371 HCLGE_OPC_DFX_PPP_REG,
372 HCLGE_OPC_DFX_RCB_REG,
373 HCLGE_OPC_DFX_TQP_REG,
374 HCLGE_OPC_DFX_SSU_REG_2
375 };
376
377 static const struct key_info meta_data_key_info[] = {
378 { PACKET_TYPE_ID, 6 },
379 { IP_FRAGEMENT, 1 },
380 { ROCE_TYPE, 1 },
381 { NEXT_KEY, 5 },
382 { VLAN_NUMBER, 2 },
383 { SRC_VPORT, 12 },
384 { DST_VPORT, 12 },
385 { TUNNEL_PACKET, 1 },
386 };
387
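/* Each entry describes one flow director tuple:
 * { tuple id, key width in bits, key option, offset of the tuple value
 *   in struct hclge_fd_rule, offset of the corresponding mask }.
 * An offset of -1 means the tuple has no field in struct hclge_fd_rule.
 */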
388 static const struct key_info tuple_key_info[] = {
389 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
391 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
392 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
393 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
394 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
395 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
396 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
397 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
398 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
399 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
400 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
402 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
403 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
404 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
405 { INNER_DST_MAC, 48, KEY_OPT_MAC,
406 offsetof(struct hclge_fd_rule, tuples.dst_mac),
407 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
408 { INNER_SRC_MAC, 48, KEY_OPT_MAC,
409 offsetof(struct hclge_fd_rule, tuples.src_mac),
410 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
411 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
412 offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
413 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
414 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
415 { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
416 offsetof(struct hclge_fd_rule, tuples.ether_proto),
417 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
418 { INNER_L2_RSV, 16, KEY_OPT_LE16,
419 offsetof(struct hclge_fd_rule, tuples.l2_user_def),
420 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
421 { INNER_IP_TOS, 8, KEY_OPT_U8,
422 offsetof(struct hclge_fd_rule, tuples.ip_tos),
423 offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
424 { INNER_IP_PROTO, 8, KEY_OPT_U8,
425 offsetof(struct hclge_fd_rule, tuples.ip_proto),
426 offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
427 { INNER_SRC_IP, 32, KEY_OPT_IP,
428 offsetof(struct hclge_fd_rule, tuples.src_ip),
429 offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
430 { INNER_DST_IP, 32, KEY_OPT_IP,
431 offsetof(struct hclge_fd_rule, tuples.dst_ip),
432 offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
433 { INNER_L3_RSV, 16, KEY_OPT_LE16,
434 offsetof(struct hclge_fd_rule, tuples.l3_user_def),
435 offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
436 { INNER_SRC_PORT, 16, KEY_OPT_LE16,
437 offsetof(struct hclge_fd_rule, tuples.src_port),
438 offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
439 { INNER_DST_PORT, 16, KEY_OPT_LE16,
440 offsetof(struct hclge_fd_rule, tuples.dst_port),
441 offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
442 { INNER_L4_RSV, 32, KEY_OPT_LE32,
443 offsetof(struct hclge_fd_rule, tuples.l4_user_def),
444 offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
445 };
446
447 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
448 {
449 #define HCLGE_MAC_CMD_NUM 21
450
451 u64 *data = (u64 *)(&hdev->mac_stats);
452 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
453 __le64 *desc_data;
454 int i, k, n;
455 int ret;
456
457 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
458 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
459 if (ret) {
460 dev_err(&hdev->pdev->dev,
461 "Get MAC pkt stats fail, status = %d.\n", ret);
462
463 return ret;
464 }
465
466 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
467 /* for special opcode 0032, only the first desc has the head */
468 if (unlikely(i == 0)) {
469 desc_data = (__le64 *)(&desc[i].data[0]);
470 n = HCLGE_RD_FIRST_STATS_NUM;
471 } else {
472 desc_data = (__le64 *)(&desc[i]);
473 n = HCLGE_RD_OTHER_STATS_NUM;
474 }
475
476 for (k = 0; k < n; k++) {
477 *data += le64_to_cpu(*desc_data);
478 data++;
479 desc_data++;
480 }
481 }
482
483 return 0;
484 }
485
486 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
487 {
488 u64 *data = (u64 *)(&hdev->mac_stats);
489 struct hclge_desc *desc;
490 __le64 *desc_data;
491 u16 i, k, n;
492 int ret;
493
494 /* This may be called inside atomic sections,
495 * so GFP_ATOMIC is more suitable here
496 */
497 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
498 if (!desc)
499 return -ENOMEM;
500
501 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
502 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
503 if (ret) {
504 kfree(desc);
505 return ret;
506 }
507
508 for (i = 0; i < desc_num; i++) {
509 /* for special opcode 0034, only the first desc has the head */
510 if (i == 0) {
511 desc_data = (__le64 *)(&desc[i].data[0]);
512 n = HCLGE_RD_FIRST_STATS_NUM;
513 } else {
514 desc_data = (__le64 *)(&desc[i]);
515 n = HCLGE_RD_OTHER_STATS_NUM;
516 }
517
518 for (k = 0; k < n; k++) {
519 *data += le64_to_cpu(*desc_data);
520 data++;
521 desc_data++;
522 }
523 }
524
525 kfree(desc);
526
527 return 0;
528 }
529
530 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
531 {
532 struct hclge_desc desc;
533 __le32 *desc_data;
534 u32 reg_num;
535 int ret;
536
537 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
538 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
539 if (ret)
540 return ret;
541
542 desc_data = (__le32 *)(&desc.data[0]);
543 reg_num = le32_to_cpu(*desc_data);
544
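/* one descriptor covers the first 3 registers and each additional
 * descriptor covers 4 more, so round the remainder up
 */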
545 *desc_num = 1 + ((reg_num - 3) >> 2) +
546 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
547
548 return 0;
549 }
550
551 static int hclge_mac_update_stats(struct hclge_dev *hdev)
552 {
553 u32 desc_num;
554 int ret;
555
556 ret = hclge_mac_query_reg_num(hdev, &desc_num);
557 /* The firmware supports the new statistics acquisition method */
558 if (!ret)
559 ret = hclge_mac_update_stats_complete(hdev, desc_num);
560 else if (ret == -EOPNOTSUPP)
561 ret = hclge_mac_update_stats_defective(hdev);
562 else
563 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
564
565 return ret;
566 }
567
568 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 {
570 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
571 struct hclge_vport *vport = hclge_get_vport(handle);
572 struct hclge_dev *hdev = vport->back;
573 struct hnae3_queue *queue;
574 struct hclge_desc desc[1];
575 struct hclge_tqp *tqp;
576 int ret, i;
577
578 for (i = 0; i < kinfo->num_tqps; i++) {
579 queue = handle->kinfo.tqp[i];
580 tqp = container_of(queue, struct hclge_tqp, q);
581 /* command : HCLGE_OPC_QUERY_IGU_STAT */
582 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
583 true);
584
585 desc[0].data[0] = cpu_to_le32(tqp->index);
586 ret = hclge_cmd_send(&hdev->hw, desc, 1);
587 if (ret) {
588 dev_err(&hdev->pdev->dev,
589 "Query tqp stat fail, status = %d,queue = %d\n",
590 ret, i);
591 return ret;
592 }
593 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
594 le32_to_cpu(desc[0].data[1]);
595 }
596
597 for (i = 0; i < kinfo->num_tqps; i++) {
598 queue = handle->kinfo.tqp[i];
599 tqp = container_of(queue, struct hclge_tqp, q);
600 /* command : HCLGE_OPC_QUERY_IGU_STAT */
601 hclge_cmd_setup_basic_desc(&desc[0],
602 HCLGE_OPC_QUERY_TX_STATS,
603 true);
604
605 desc[0].data[0] = cpu_to_le32(tqp->index);
606 ret = hclge_cmd_send(&hdev->hw, desc, 1);
607 if (ret) {
608 dev_err(&hdev->pdev->dev,
609 "Query tqp stat fail, status = %d,queue = %d\n",
610 ret, i);
611 return ret;
612 }
613 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
614 le32_to_cpu(desc[0].data[1]);
615 }
616
617 return 0;
618 }
619
620 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 {
622 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623 struct hclge_tqp *tqp;
624 u64 *buff = data;
625 int i;
626
627 for (i = 0; i < kinfo->num_tqps; i++) {
628 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
629 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
630 }
631
632 for (i = 0; i < kinfo->num_tqps; i++) {
633 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
634 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
635 }
636
637 return buff;
638 }
639
640 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 {
642 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643
644 /* each TQP provides one TX queue and one RX queue */
645 return kinfo->num_tqps * (2);
646 }
647
648 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 {
650 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
651 u8 *buff = data;
652 int i;
653
654 for (i = 0; i < kinfo->num_tqps; i++) {
655 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
656 struct hclge_tqp, q);
657 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658 tqp->index);
659 buff = buff + ETH_GSTRING_LEN;
660 }
661
662 for (i = 0; i < kinfo->num_tqps; i++) {
663 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
664 struct hclge_tqp, q);
665 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666 tqp->index);
667 buff = buff + ETH_GSTRING_LEN;
668 }
669
670 return buff;
671 }
672
673 static u64 *hclge_comm_get_stats(const void *comm_stats,
674 const struct hclge_comm_stats_str strs[],
675 int size, u64 *data)
676 {
677 u64 *buf = data;
678 u32 i;
679
680 for (i = 0; i < size; i++)
681 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
682
683 return buf + size;
684 }
685
686 static u8 *hclge_comm_get_strings(u32 stringset,
687 const struct hclge_comm_stats_str strs[],
688 int size, u8 *data)
689 {
690 char *buff = (char *)data;
691 u32 i;
692
693 if (stringset != ETH_SS_STATS)
694 return buff;
695
696 for (i = 0; i < size; i++) {
697 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
698 buff = buff + ETH_GSTRING_LEN;
699 }
700
701 return (u8 *)buff;
702 }
703
704 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 {
706 struct hnae3_handle *handle;
707 int status;
708
709 handle = &hdev->vport[0].nic;
710 if (handle->client) {
711 status = hclge_tqps_update_stats(handle);
712 if (status) {
713 dev_err(&hdev->pdev->dev,
714 "Update TQPS stats fail, status = %d.\n",
715 status);
716 }
717 }
718
719 status = hclge_mac_update_stats(hdev);
720 if (status)
721 dev_err(&hdev->pdev->dev,
722 "Update MAC stats fail, status = %d.\n", status);
723 }
724
725 static void hclge_update_stats(struct hnae3_handle *handle,
726 struct net_device_stats *net_stats)
727 {
728 struct hclge_vport *vport = hclge_get_vport(handle);
729 struct hclge_dev *hdev = vport->back;
730 int status;
731
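/* serialize statistics updates: if another update is already in
 * progress, skip this one
 */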
732 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
733 return;
734
735 status = hclge_mac_update_stats(hdev);
736 if (status)
737 dev_err(&hdev->pdev->dev,
738 "Update MAC stats fail, status = %d.\n",
739 status);
740
741 status = hclge_tqps_update_stats(handle);
742 if (status)
743 dev_err(&hdev->pdev->dev,
744 "Update TQPS stats fail, status = %d.\n",
745 status);
746
747 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
748 }
749
750 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 {
752 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
753 HNAE3_SUPPORT_PHY_LOOPBACK | \
754 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
755 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756
757 struct hclge_vport *vport = hclge_get_vport(handle);
758 struct hclge_dev *hdev = vport->back;
759 int count = 0;
760
761 /* Loopback test support rules:
762 * mac: supported only in GE mode
763 * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
764 * phy: supported only when a PHY device is present on the board
765 */
766 if (stringset == ETH_SS_TEST) {
767 /* clear loopback bit flags at first */
768 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
769 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
770 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
771 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
772 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773 count += 1;
774 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
775 }
776
777 count += 2;
778 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
779 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780
781 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
782 hdev->hw.mac.phydev->drv->set_loopback) ||
783 hnae3_dev_phy_imp_supported(hdev)) {
784 count += 1;
785 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786 }
787 } else if (stringset == ETH_SS_STATS) {
788 count = ARRAY_SIZE(g_mac_stats_string) +
789 hclge_tqps_get_sset_count(handle, stringset);
790 }
791
792 return count;
793 }
794
795 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
796 u8 *data)
797 {
798 u8 *p = (char *)data;
799 int size;
800
801 if (stringset == ETH_SS_STATS) {
802 size = ARRAY_SIZE(g_mac_stats_string);
803 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
804 size, p);
805 p = hclge_tqps_get_strings(handle, p);
806 } else if (stringset == ETH_SS_TEST) {
807 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
808 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
809 ETH_GSTRING_LEN);
810 p += ETH_GSTRING_LEN;
811 }
812 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
813 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
814 ETH_GSTRING_LEN);
815 p += ETH_GSTRING_LEN;
816 }
817 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
818 memcpy(p,
819 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
820 ETH_GSTRING_LEN);
821 p += ETH_GSTRING_LEN;
822 }
823 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
824 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
825 ETH_GSTRING_LEN);
826 p += ETH_GSTRING_LEN;
827 }
828 }
829 }
830
831 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
832 {
833 struct hclge_vport *vport = hclge_get_vport(handle);
834 struct hclge_dev *hdev = vport->back;
835 u64 *p;
836
837 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
838 ARRAY_SIZE(g_mac_stats_string), data);
839 p = hclge_tqps_get_stats(handle, p);
840 }
841
842 static void hclge_get_mac_stat(struct hnae3_handle *handle,
843 struct hns3_mac_stats *mac_stats)
844 {
845 struct hclge_vport *vport = hclge_get_vport(handle);
846 struct hclge_dev *hdev = vport->back;
847
848 hclge_update_stats(handle, NULL);
849
850 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
851 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
852 }
853
854 static int hclge_parse_func_status(struct hclge_dev *hdev,
855 struct hclge_func_status_cmd *status)
856 {
857 #define HCLGE_MAC_ID_MASK 0xF
858
859 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
860 return -EINVAL;
861
862 /* Set the pf to main pf */
863 if (status->pf_state & HCLGE_PF_STATE_MAIN)
864 hdev->flag |= HCLGE_FLAG_MAIN;
865 else
866 hdev->flag &= ~HCLGE_FLAG_MAIN;
867
868 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
869 return 0;
870 }
871
872 static int hclge_query_function_status(struct hclge_dev *hdev)
873 {
874 #define HCLGE_QUERY_MAX_CNT 5
875
876 struct hclge_func_status_cmd *req;
877 struct hclge_desc desc;
878 int timeout = 0;
879 int ret;
880
881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
882 req = (struct hclge_func_status_cmd *)desc.data;
883
884 do {
885 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886 if (ret) {
887 dev_err(&hdev->pdev->dev,
888 "query function status failed %d.\n", ret);
889 return ret;
890 }
891
892 /* Check pf reset is done */
893 if (req->pf_state)
894 break;
895 usleep_range(1000, 2000);
896 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
897
898 return hclge_parse_func_status(hdev, req);
899 }
900
901 static int hclge_query_pf_resource(struct hclge_dev *hdev)
902 {
903 struct hclge_pf_res_cmd *req;
904 struct hclge_desc desc;
905 int ret;
906
907 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
908 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
909 if (ret) {
910 dev_err(&hdev->pdev->dev,
911 "query pf resource failed %d.\n", ret);
912 return ret;
913 }
914
915 req = (struct hclge_pf_res_cmd *)desc.data;
916 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
917 le16_to_cpu(req->ext_tqp_num);
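/* buffer sizes are reported by firmware in units of
 * (1 << HCLGE_BUF_UNIT_S) bytes, convert them to bytes here
 */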
918 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
919
920 if (req->tx_buf_size)
921 hdev->tx_buf_size =
922 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
923 else
924 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
925
926 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
927
928 if (req->dv_buf_size)
929 hdev->dv_buf_size =
930 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
931 else
932 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
933
934 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
935
936 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
937 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
938 dev_err(&hdev->pdev->dev,
939 "only %u msi resources available, not enough for pf(min:2).\n",
940 hdev->num_nic_msi);
941 return -EINVAL;
942 }
943
944 if (hnae3_dev_roce_supported(hdev)) {
945 hdev->num_roce_msi =
946 le16_to_cpu(req->pf_intr_vector_number_roce);
947
948 /* The PF has both NIC vectors and RoCE vectors;
949 * the NIC vectors are allocated before the RoCE vectors.
950 */
951 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
952 } else {
953 hdev->num_msi = hdev->num_nic_msi;
954 }
955
956 return 0;
957 }
958
959 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
960 {
961 switch (speed_cmd) {
962 case HCLGE_FW_MAC_SPEED_10M:
963 *speed = HCLGE_MAC_SPEED_10M;
964 break;
965 case HCLGE_FW_MAC_SPEED_100M:
966 *speed = HCLGE_MAC_SPEED_100M;
967 break;
968 case HCLGE_FW_MAC_SPEED_1G:
969 *speed = HCLGE_MAC_SPEED_1G;
970 break;
971 case HCLGE_FW_MAC_SPEED_10G:
972 *speed = HCLGE_MAC_SPEED_10G;
973 break;
974 case HCLGE_FW_MAC_SPEED_25G:
975 *speed = HCLGE_MAC_SPEED_25G;
976 break;
977 case HCLGE_FW_MAC_SPEED_40G:
978 *speed = HCLGE_MAC_SPEED_40G;
979 break;
980 case HCLGE_FW_MAC_SPEED_50G:
981 *speed = HCLGE_MAC_SPEED_50G;
982 break;
983 case HCLGE_FW_MAC_SPEED_100G:
984 *speed = HCLGE_MAC_SPEED_100G;
985 break;
986 case HCLGE_FW_MAC_SPEED_200G:
987 *speed = HCLGE_MAC_SPEED_200G;
988 break;
989 default:
990 return -EINVAL;
991 }
992
993 return 0;
994 }
995
996 static const struct hclge_speed_bit_map speed_bit_map[] = {
997 {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
998 {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
999 {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1000 {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1001 {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1002 {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1003 {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1004 {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1005 {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1006 };
1007
1008 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1009 {
1010 u16 i;
1011
1012 for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1013 if (speed == speed_bit_map[i].speed) {
1014 *speed_bit = speed_bit_map[i].speed_bit;
1015 return 0;
1016 }
1017 }
1018
1019 return -EINVAL;
1020 }
1021
1022 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1023 {
1024 struct hclge_vport *vport = hclge_get_vport(handle);
1025 struct hclge_dev *hdev = vport->back;
1026 u32 speed_ability = hdev->hw.mac.speed_ability;
1027 u32 speed_bit = 0;
1028 int ret;
1029
1030 ret = hclge_get_speed_bit(speed, &speed_bit);
1031 if (ret)
1032 return ret;
1033
1034 if (speed_bit & speed_ability)
1035 return 0;
1036
1037 return -EINVAL;
1038 }
1039
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 mac->supported);
1045 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 mac->supported);
1048 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 mac->supported);
1051 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 mac->supported);
1054 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 mac->supported);
1057 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 mac->supported);
1060 }
1061
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 mac->supported);
1067 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 mac->supported);
1070 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 mac->supported);
1073 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 mac->supported);
1076 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 mac->supported);
1079 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 linkmode_set_bit(
1081 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 mac->supported);
1083 }
1084
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 mac->supported);
1090 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 mac->supported);
1093 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 mac->supported);
1096 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 mac->supported);
1099 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 mac->supported);
1102 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 mac->supported);
1105 }
1106
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 mac->supported);
1112 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 mac->supported);
1115 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 mac->supported);
1118 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 mac->supported);
1121 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 mac->supported);
1124 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 mac->supported);
1127 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 mac->supported);
1130 }
1131
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136
1137 switch (mac->speed) {
1138 case HCLGE_MAC_SPEED_10G:
1139 case HCLGE_MAC_SPEED_40G:
1140 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 mac->supported);
1142 mac->fec_ability =
1143 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 break;
1145 case HCLGE_MAC_SPEED_25G:
1146 case HCLGE_MAC_SPEED_50G:
1147 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 mac->supported);
1149 mac->fec_ability =
1150 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 BIT(HNAE3_FEC_AUTO);
1152 break;
1153 case HCLGE_MAC_SPEED_100G:
1154 case HCLGE_MAC_SPEED_200G:
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 break;
1158 default:
1159 mac->fec_ability = 0;
1160 break;
1161 }
1162 }
1163
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 u16 speed_ability)
1166 {
1167 struct hclge_mac *mac = &hdev->hw.mac;
1168
1169 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 mac->supported);
1172
1173 hclge_convert_setting_sr(mac, speed_ability);
1174 hclge_convert_setting_lr(mac, speed_ability);
1175 hclge_convert_setting_cr(mac, speed_ability);
1176 if (hnae3_dev_fec_supported(hdev))
1177 hclge_convert_setting_fec(mac);
1178
1179 if (hnae3_dev_pause_supported(hdev))
1180 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 u16 speed_ability)
1188 {
1189 struct hclge_mac *mac = &hdev->hw.mac;
1190
1191 hclge_convert_setting_kr(mac, speed_ability);
1192 if (hnae3_dev_fec_supported(hdev))
1193 hclge_convert_setting_fec(mac);
1194
1195 if (hnae3_dev_pause_supported(hdev))
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197
1198 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 u16 speed_ability)
1204 {
1205 unsigned long *supported = hdev->hw.mac.supported;
1206
1207 /* default to support all speed for GE port */
1208 if (!speed_ability)
1209 speed_ability = HCLGE_SUPPORT_GE;
1210
1211 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 supported);
1214
1215 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 supported);
1218 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 supported);
1220 }
1221
1222 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 }
1226
1227 if (hnae3_dev_pause_supported(hdev)) {
1228 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 }
1231
1232 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 u8 media_type = hdev->hw.mac.media_type;
1239
1240 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 hclge_parse_copper_link_mode(hdev, speed_ability);
1244 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 return HCLGE_MAC_SPEED_200G;
1252
1253 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 return HCLGE_MAC_SPEED_100G;
1255
1256 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 return HCLGE_MAC_SPEED_50G;
1258
1259 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 return HCLGE_MAC_SPEED_40G;
1261
1262 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 return HCLGE_MAC_SPEED_25G;
1264
1265 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 return HCLGE_MAC_SPEED_10G;
1267
1268 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 return HCLGE_MAC_SPEED_1G;
1270
1271 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 return HCLGE_MAC_SPEED_100M;
1273
1274 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 return HCLGE_MAC_SPEED_10M;
1276
1277 return HCLGE_MAC_SPEED_1G;
1278 }
1279
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define HCLGE_TX_SPARE_SIZE_UNIT 4096
1283 #define SPEED_ABILITY_EXT_SHIFT 8
1284
1285 struct hclge_cfg_param_cmd *req;
1286 u64 mac_addr_tmp_high;
1287 u16 speed_ability_ext;
1288 u64 mac_addr_tmp;
1289 unsigned int i;
1290
1291 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292
1293 /* get the configuration */
1294 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297 HCLGE_CFG_TQP_DESC_N_M,
1298 HCLGE_CFG_TQP_DESC_N_S);
1299
1300 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301 HCLGE_CFG_PHY_ADDR_M,
1302 HCLGE_CFG_PHY_ADDR_S);
1303 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304 HCLGE_CFG_MEDIA_TP_M,
1305 HCLGE_CFG_MEDIA_TP_S);
1306 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 HCLGE_CFG_RX_BUF_LEN_M,
1308 HCLGE_CFG_RX_BUF_LEN_S);
1309 /* get mac_address */
1310 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312 HCLGE_CFG_MAC_ADDR_H_M,
1313 HCLGE_CFG_MAC_ADDR_H_S);
1314
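/* param[2] holds the low 32 bits of the MAC address and param[3]
 * holds the high 16 bits; merge them into one value
 */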
1315 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1316
1317 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318 HCLGE_CFG_DEFAULT_SPEED_M,
1319 HCLGE_CFG_DEFAULT_SPEED_S);
1320 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321 HCLGE_CFG_RSS_SIZE_M,
1322 HCLGE_CFG_RSS_SIZE_S);
1323
1324 for (i = 0; i < ETH_ALEN; i++)
1325 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326
1327 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329
1330 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331 HCLGE_CFG_SPEED_ABILITY_M,
1332 HCLGE_CFG_SPEED_ABILITY_S);
1333 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1337
1338 cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339 HCLGE_CFG_VLAN_FLTR_CAP_M,
1340 HCLGE_CFG_VLAN_FLTR_CAP_S);
1341
1342 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343 HCLGE_CFG_UMV_TBL_SPACE_M,
1344 HCLGE_CFG_UMV_TBL_SPACE_S);
1345 if (!cfg->umv_space)
1346 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1347
1348 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349 HCLGE_CFG_PF_RSS_SIZE_M,
1350 HCLGE_CFG_PF_RSS_SIZE_S);
1351
1352 /* The HCLGE_CFG_PF_RSS_SIZE_M field stores the exponent of the
1353 * PF max rss size (a power of 2) rather than the size itself,
1354 * which leaves more room for future changes and expansions.
1355 * A PF field of 0 is not meaningful when the VF max rss size
1356 * field is valid, so in that case the PF falls back to the VF
1357 * max rss size and both share the same limit.
1358 */
1359 cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360 1U << cfg->pf_rss_size_max :
1361 cfg->vf_rss_size_max;
1362
1363 /* The unit of the tx spare buffer size queried from configuration
1364 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1365 * needed here.
1366 */
1367 cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1368 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1369 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1370 cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1371 }
1372
1373 /* hclge_get_cfg: query the static parameters from flash
1374 * @hdev: pointer to struct hclge_dev
1375 * @hcfg: the config structure to be filled
1376 */
1377 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1378 {
1379 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1380 struct hclge_cfg_param_cmd *req;
1381 unsigned int i;
1382 int ret;
1383
1384 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1385 u32 offset = 0;
1386
1387 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1388 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1389 true);
1390 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1391 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1392 /* the read length sent to hardware is expressed in units of 4 bytes */
1393 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1394 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1395 req->offset = cpu_to_le32(offset);
1396 }
1397
1398 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1399 if (ret) {
1400 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1401 return ret;
1402 }
1403
1404 hclge_parse_cfg(hcfg, desc);
1405
1406 return 0;
1407 }
1408
1409 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1410 {
1411 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1412
1413 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414
1415 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1416 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1418 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1419 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1420 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1421 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1422 }
1423
1424 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1425 struct hclge_desc *desc)
1426 {
1427 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1428 struct hclge_dev_specs_0_cmd *req0;
1429 struct hclge_dev_specs_1_cmd *req1;
1430
1431 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1432 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1433
1434 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1435 ae_dev->dev_specs.rss_ind_tbl_size =
1436 le16_to_cpu(req0->rss_ind_tbl_size);
1437 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1438 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1439 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1440 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1441 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1442 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1443 }
1444
1445 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1446 {
1447 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1448
1449 if (!dev_specs->max_non_tso_bd_num)
1450 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1451 if (!dev_specs->rss_ind_tbl_size)
1452 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1453 if (!dev_specs->rss_key_size)
1454 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1455 if (!dev_specs->max_tm_rate)
1456 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1457 if (!dev_specs->max_qset_num)
1458 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1459 if (!dev_specs->max_int_gl)
1460 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1461 if (!dev_specs->max_frm_size)
1462 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1463 }
1464
1465 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1466 {
1467 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1468 int ret;
1469 int i;
1470
1471 /* set default specifications as devices lower than version V3 do not
1472 * support querying specifications from firmware.
1473 */
1474 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1475 hclge_set_default_dev_specs(hdev);
1476 return 0;
1477 }
1478
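/* chain the query descriptors: every descriptor except the last one
 * has the NEXT flag set
 */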
1479 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1480 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1481 true);
1482 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1483 }
1484 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1485
1486 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1487 if (ret)
1488 return ret;
1489
1490 hclge_parse_dev_specs(hdev, desc);
1491 hclge_check_dev_specs(hdev);
1492
1493 return 0;
1494 }
1495
1496 static int hclge_get_cap(struct hclge_dev *hdev)
1497 {
1498 int ret;
1499
1500 ret = hclge_query_function_status(hdev);
1501 if (ret) {
1502 dev_err(&hdev->pdev->dev,
1503 "query function status error %d.\n", ret);
1504 return ret;
1505 }
1506
1507 /* get pf resource */
1508 return hclge_query_pf_resource(hdev);
1509 }
1510
1511 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1512 {
1513 #define HCLGE_MIN_TX_DESC 64
1514 #define HCLGE_MIN_RX_DESC 64
1515
1516 if (!is_kdump_kernel())
1517 return;
1518
1519 dev_info(&hdev->pdev->dev,
1520 "Running kdump kernel. Using minimal resources\n");
1521
1522 /* the minimal number of queue pairs equals the number of vports */
1523 hdev->num_tqps = hdev->num_req_vfs + 1;
1524 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1525 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1526 }
1527
1528 static int hclge_configure(struct hclge_dev *hdev)
1529 {
1530 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1531 const struct cpumask *cpumask = cpu_online_mask;
1532 struct hclge_cfg cfg;
1533 unsigned int i;
1534 int node, ret;
1535
1536 ret = hclge_get_cfg(hdev, &cfg);
1537 if (ret)
1538 return ret;
1539
1540 hdev->base_tqp_pid = 0;
1541 hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1542 hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1543 hdev->rx_buf_len = cfg.rx_buf_len;
1544 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1545 hdev->hw.mac.media_type = cfg.media_type;
1546 hdev->hw.mac.phy_addr = cfg.phy_addr;
1547 hdev->num_tx_desc = cfg.tqp_desc_num;
1548 hdev->num_rx_desc = cfg.tqp_desc_num;
1549 hdev->tm_info.num_pg = 1;
1550 hdev->tc_max = cfg.tc_num;
1551 hdev->tm_info.hw_pfc_map = 0;
1552 hdev->wanted_umv_size = cfg.umv_space;
1553 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1554 hdev->gro_en = true;
1555 if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1556 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1557
1558 if (hnae3_dev_fd_supported(hdev)) {
1559 hdev->fd_en = true;
1560 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1561 }
1562
1563 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1564 if (ret) {
1565 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1566 cfg.default_speed, ret);
1567 return ret;
1568 }
1569
1570 hclge_parse_link_mode(hdev, cfg.speed_ability);
1571
1572 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1573
1574 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1575 (hdev->tc_max < 1)) {
1576 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1577 hdev->tc_max);
1578 hdev->tc_max = 1;
1579 }
1580
1581 /* Dev does not support DCB */
1582 if (!hnae3_dev_dcb_supported(hdev)) {
1583 hdev->tc_max = 1;
1584 hdev->pfc_max = 0;
1585 } else {
1586 hdev->pfc_max = hdev->tc_max;
1587 }
1588
1589 hdev->tm_info.num_tc = 1;
1590
1591 /* non-contiguous TCs are currently not supported */
1592 for (i = 0; i < hdev->tm_info.num_tc; i++)
1593 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1594
1595 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1596
1597 hclge_init_kdump_kernel_config(hdev);
1598
1599 /* Set the affinity based on numa node */
1600 node = dev_to_node(&hdev->pdev->dev);
1601 if (node != NUMA_NO_NODE)
1602 cpumask = cpumask_of_node(node);
1603
1604 cpumask_copy(&hdev->affinity_mask, cpumask);
1605
1606 return ret;
1607 }
1608
1609 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1610 u16 tso_mss_max)
1611 {
1612 struct hclge_cfg_tso_status_cmd *req;
1613 struct hclge_desc desc;
1614
1615 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1616
1617 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1618 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1619 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1620
1621 return hclge_cmd_send(&hdev->hw, &desc, 1);
1622 }
1623
1624 static int hclge_config_gro(struct hclge_dev *hdev)
1625 {
1626 struct hclge_cfg_gro_status_cmd *req;
1627 struct hclge_desc desc;
1628 int ret;
1629
1630 if (!hnae3_dev_gro_supported(hdev))
1631 return 0;
1632
1633 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1634 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1635
1636 req->gro_en = hdev->gro_en ? 1 : 0;
1637
1638 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1639 if (ret)
1640 dev_err(&hdev->pdev->dev,
1641 "GRO hardware config cmd failed, ret = %d\n", ret);
1642
1643 return ret;
1644 }
1645
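/* Allocate the hclge_tqp array and set up each queue's register base.
 * Queues at index >= HCLGE_TQP_MAX_SIZE_DEV_V2 are addressed through the
 * extended register region (HCLGE_TQP_EXT_REG_OFFSET).
 */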
1646 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1647 {
1648 struct hclge_tqp *tqp;
1649 int i;
1650
1651 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1652 sizeof(struct hclge_tqp), GFP_KERNEL);
1653 if (!hdev->htqp)
1654 return -ENOMEM;
1655
1656 tqp = hdev->htqp;
1657
1658 for (i = 0; i < hdev->num_tqps; i++) {
1659 tqp->dev = &hdev->pdev->dev;
1660 tqp->index = i;
1661
1662 tqp->q.ae_algo = &ae_algo;
1663 tqp->q.buf_size = hdev->rx_buf_len;
1664 tqp->q.tx_desc_num = hdev->num_tx_desc;
1665 tqp->q.rx_desc_num = hdev->num_rx_desc;
1666
1667 /* need an extended offset to configure queues >=
1668 * HCLGE_TQP_MAX_SIZE_DEV_V2
1669 */
1670 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1671 tqp->q.io_base = hdev->hw.io_base +
1672 HCLGE_TQP_REG_OFFSET +
1673 i * HCLGE_TQP_REG_SIZE;
1674 else
1675 tqp->q.io_base = hdev->hw.io_base +
1676 HCLGE_TQP_REG_OFFSET +
1677 HCLGE_TQP_EXT_REG_OFFSET +
1678 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1679 HCLGE_TQP_REG_SIZE;
1680
1681 tqp++;
1682 }
1683
1684 return 0;
1685 }
1686
1687 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1688 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1689 {
1690 struct hclge_tqp_map_cmd *req;
1691 struct hclge_desc desc;
1692 int ret;
1693
1694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1695
1696 req = (struct hclge_tqp_map_cmd *)desc.data;
1697 req->tqp_id = cpu_to_le16(tqp_pid);
1698 req->tqp_vf = func_id;
1699 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1700 if (!is_pf)
1701 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1702 req->tqp_vid = cpu_to_le16(tqp_vid);
1703
1704 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1705 if (ret)
1706 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1707
1708 return ret;
1709 }
1710
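/* Hand out up to @num_tqps unallocated queues from the global TQP array to
 * this vport, then cap kinfo->rss_size by pf_rss_size_max and by the number
 * of available NIC MSI vectors so each queue can keep its own interrupt.
 */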
1711 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1712 {
1713 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1714 struct hclge_dev *hdev = vport->back;
1715 int i, alloced;
1716
1717 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1718 alloced < num_tqps; i++) {
1719 if (!hdev->htqp[i].alloced) {
1720 hdev->htqp[i].q.handle = &vport->nic;
1721 hdev->htqp[i].q.tqp_index = alloced;
1722 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1723 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1724 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1725 hdev->htqp[i].alloced = true;
1726 alloced++;
1727 }
1728 }
1729 vport->alloc_tqps = alloced;
1730 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1731 vport->alloc_tqps / hdev->tm_info.num_tc);
1732
1733 	/* ensure a one-to-one mapping between irq and queue by default */
1734 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1735 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1736
1737 return 0;
1738 }
1739
1740 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1741 u16 num_tx_desc, u16 num_rx_desc)
1742
1743 {
1744 struct hnae3_handle *nic = &vport->nic;
1745 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1746 struct hclge_dev *hdev = vport->back;
1747 int ret;
1748
1749 kinfo->num_tx_desc = num_tx_desc;
1750 kinfo->num_rx_desc = num_rx_desc;
1751
1752 kinfo->rx_buf_len = hdev->rx_buf_len;
1753 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1754
1755 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1756 sizeof(struct hnae3_queue *), GFP_KERNEL);
1757 if (!kinfo->tqp)
1758 return -ENOMEM;
1759
1760 ret = hclge_assign_tqp(vport, num_tqps);
1761 if (ret)
1762 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1763
1764 return ret;
1765 }
1766
1767 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1768 struct hclge_vport *vport)
1769 {
1770 struct hnae3_handle *nic = &vport->nic;
1771 struct hnae3_knic_private_info *kinfo;
1772 u16 i;
1773
1774 kinfo = &nic->kinfo;
1775 for (i = 0; i < vport->alloc_tqps; i++) {
1776 struct hclge_tqp *q =
1777 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1778 bool is_pf;
1779 int ret;
1780
1781 is_pf = !(vport->vport_id);
1782 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1783 i, is_pf);
1784 if (ret)
1785 return ret;
1786 }
1787
1788 return 0;
1789 }
1790
1791 static int hclge_map_tqp(struct hclge_dev *hdev)
1792 {
1793 struct hclge_vport *vport = hdev->vport;
1794 u16 i, num_vport;
1795
1796 num_vport = hdev->num_req_vfs + 1;
1797 for (i = 0; i < num_vport; i++) {
1798 int ret;
1799
1800 ret = hclge_map_tqp_to_vport(hdev, vport);
1801 if (ret)
1802 return ret;
1803
1804 vport++;
1805 }
1806
1807 return 0;
1808 }
1809
1810 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1811 {
1812 struct hnae3_handle *nic = &vport->nic;
1813 struct hclge_dev *hdev = vport->back;
1814 int ret;
1815
1816 nic->pdev = hdev->pdev;
1817 nic->ae_algo = &ae_algo;
1818 nic->numa_node_mask = hdev->numa_node_mask;
1819 nic->kinfo.io_base = hdev->hw.io_base;
1820
1821 ret = hclge_knic_setup(vport, num_tqps,
1822 hdev->num_tx_desc, hdev->num_rx_desc);
1823 if (ret)
1824 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1825
1826 return ret;
1827 }
1828
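/* Allocate one vport for the PF itself plus one per requested VF. TQPs are
 * split evenly between the vports and the remainder goes to the PF's main
 * vport (vport 0).
 */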
1829 static int hclge_alloc_vport(struct hclge_dev *hdev)
1830 {
1831 struct pci_dev *pdev = hdev->pdev;
1832 struct hclge_vport *vport;
1833 u32 tqp_main_vport;
1834 u32 tqp_per_vport;
1835 int num_vport, i;
1836 int ret;
1837
1838 	/* We need to alloc a vport for the main NIC of the PF */
1839 num_vport = hdev->num_req_vfs + 1;
1840
1841 if (hdev->num_tqps < num_vport) {
1842 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1843 hdev->num_tqps, num_vport);
1844 return -EINVAL;
1845 }
1846
1847 /* Alloc the same number of TQPs for every vport */
1848 tqp_per_vport = hdev->num_tqps / num_vport;
1849 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1850
1851 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1852 GFP_KERNEL);
1853 if (!vport)
1854 return -ENOMEM;
1855
1856 hdev->vport = vport;
1857 hdev->num_alloc_vport = num_vport;
1858
1859 if (IS_ENABLED(CONFIG_PCI_IOV))
1860 hdev->num_alloc_vfs = hdev->num_req_vfs;
1861
1862 for (i = 0; i < num_vport; i++) {
1863 vport->back = hdev;
1864 vport->vport_id = i;
1865 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1866 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1867 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1868 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1869 vport->req_vlan_fltr_en = true;
1870 INIT_LIST_HEAD(&vport->vlan_list);
1871 INIT_LIST_HEAD(&vport->uc_mac_list);
1872 INIT_LIST_HEAD(&vport->mc_mac_list);
1873 spin_lock_init(&vport->mac_list_lock);
1874
1875 if (i == 0)
1876 ret = hclge_vport_setup(vport, tqp_main_vport);
1877 else
1878 ret = hclge_vport_setup(vport, tqp_per_vport);
1879 if (ret) {
1880 dev_err(&pdev->dev,
1881 "vport setup failed for vport %d, %d\n",
1882 i, ret);
1883 return ret;
1884 }
1885
1886 vport++;
1887 }
1888
1889 return 0;
1890 }
1891
1892 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1893 struct hclge_pkt_buf_alloc *buf_alloc)
1894 {
1895 /* TX buffer size is in units of 128 bytes */
1896 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1897 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1898 struct hclge_tx_buff_alloc_cmd *req;
1899 struct hclge_desc desc;
1900 int ret;
1901 u8 i;
1902
1903 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1904
1905 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1906 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1907 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1908
1909 req->tx_pkt_buff[i] =
1910 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1911 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1912 }
1913
1914 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1915 if (ret)
1916 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1917 ret);
1918
1919 return ret;
1920 }
1921
1922 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1923 struct hclge_pkt_buf_alloc *buf_alloc)
1924 {
1925 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1926
1927 if (ret)
1928 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1929
1930 return ret;
1931 }
1932
1933 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1934 {
1935 unsigned int i;
1936 u32 cnt = 0;
1937
1938 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1939 if (hdev->hw_tc_map & BIT(i))
1940 cnt++;
1941 return cnt;
1942 }
1943
1944 /* Get the number of PFC-enabled TCs that have a private buffer */
1945 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1946 struct hclge_pkt_buf_alloc *buf_alloc)
1947 {
1948 struct hclge_priv_buf *priv;
1949 unsigned int i;
1950 int cnt = 0;
1951
1952 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1953 priv = &buf_alloc->priv_buf[i];
1954 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1955 priv->enable)
1956 cnt++;
1957 }
1958
1959 return cnt;
1960 }
1961
1962 /* Get the number of PFC-disabled TCs that have a private buffer */
1963 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1964 struct hclge_pkt_buf_alloc *buf_alloc)
1965 {
1966 struct hclge_priv_buf *priv;
1967 unsigned int i;
1968 int cnt = 0;
1969
1970 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1971 priv = &buf_alloc->priv_buf[i];
1972 if (hdev->hw_tc_map & BIT(i) &&
1973 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1974 priv->enable)
1975 cnt++;
1976 }
1977
1978 return cnt;
1979 }
1980
1981 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1982 {
1983 struct hclge_priv_buf *priv;
1984 u32 rx_priv = 0;
1985 int i;
1986
1987 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1988 priv = &buf_alloc->priv_buf[i];
1989 if (priv->enable)
1990 rx_priv += priv->buf_size;
1991 }
1992 return rx_priv;
1993 }
1994
1995 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1996 {
1997 u32 i, total_tx_size = 0;
1998
1999 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2000 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2001
2002 return total_tx_size;
2003 }
2004
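/* Check whether the remaining RX packet buffer (@rx_all) can hold both the
 * private buffers already assigned and the required shared buffer. On
 * success the shared buffer size and the per-TC high/low flow control
 * thresholds are filled into @buf_alloc.
 */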
2005 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2006 struct hclge_pkt_buf_alloc *buf_alloc,
2007 u32 rx_all)
2008 {
2009 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2010 u32 tc_num = hclge_get_tc_num(hdev);
2011 u32 shared_buf, aligned_mps;
2012 u32 rx_priv;
2013 int i;
2014
2015 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2016
2017 if (hnae3_dev_dcb_supported(hdev))
2018 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2019 hdev->dv_buf_size;
2020 else
2021 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2022 + hdev->dv_buf_size;
2023
2024 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2025 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2026 HCLGE_BUF_SIZE_UNIT);
2027
2028 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2029 if (rx_all < rx_priv + shared_std)
2030 return false;
2031
2032 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2033 buf_alloc->s_buf.buf_size = shared_buf;
2034 if (hnae3_dev_dcb_supported(hdev)) {
2035 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2036 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2037 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2038 HCLGE_BUF_SIZE_UNIT);
2039 } else {
2040 buf_alloc->s_buf.self.high = aligned_mps +
2041 HCLGE_NON_DCB_ADDITIONAL_BUF;
2042 buf_alloc->s_buf.self.low = aligned_mps;
2043 }
2044
2045 if (hnae3_dev_dcb_supported(hdev)) {
2046 hi_thrd = shared_buf - hdev->dv_buf_size;
2047
2048 if (tc_num <= NEED_RESERVE_TC_NUM)
2049 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2050 / BUF_MAX_PERCENT;
2051
2052 if (tc_num)
2053 hi_thrd = hi_thrd / tc_num;
2054
2055 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2056 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2057 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2058 } else {
2059 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2060 lo_thrd = aligned_mps;
2061 }
2062
2063 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2065 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2066 }
2067
2068 return true;
2069 }
2070
2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2072 struct hclge_pkt_buf_alloc *buf_alloc)
2073 {
2074 u32 i, total_size;
2075
2076 total_size = hdev->pkt_buf_size;
2077
2078 /* alloc tx buffer for all enabled tc */
2079 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2080 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2081
2082 if (hdev->hw_tc_map & BIT(i)) {
2083 if (total_size < hdev->tx_buf_size)
2084 return -ENOMEM;
2085
2086 priv->tx_buf_size = hdev->tx_buf_size;
2087 } else {
2088 priv->tx_buf_size = 0;
2089 }
2090
2091 total_size -= priv->tx_buf_size;
2092 }
2093
2094 return 0;
2095 }
2096
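/* Assign each enabled TC a private RX buffer with watermarks derived from
 * the MPS rounded up to HCLGE_BUF_SIZE_UNIT; @max selects the larger
 * watermark scheme. Returns whether the leftover space still fits the
 * shared buffer.
 */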
2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2098 struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2101 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2102 unsigned int i;
2103
2104 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2105 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2106
2107 priv->enable = 0;
2108 priv->wl.low = 0;
2109 priv->wl.high = 0;
2110 priv->buf_size = 0;
2111
2112 if (!(hdev->hw_tc_map & BIT(i)))
2113 continue;
2114
2115 priv->enable = 1;
2116
2117 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2118 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2119 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2120 HCLGE_BUF_SIZE_UNIT);
2121 } else {
2122 priv->wl.low = 0;
2123 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2124 aligned_mps;
2125 }
2126
2127 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2128 }
2129
2130 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2131 }
2132
2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2134 struct hclge_pkt_buf_alloc *buf_alloc)
2135 {
2136 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2137 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2138 int i;
2139
2140 	/* clear from the last TC first */
2141 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2142 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2143 unsigned int mask = BIT((unsigned int)i);
2144
2145 if (hdev->hw_tc_map & mask &&
2146 !(hdev->tm_info.hw_pfc_map & mask)) {
2147 			/* Clear the private buffer of TCs without PFC */
2148 priv->wl.low = 0;
2149 priv->wl.high = 0;
2150 priv->buf_size = 0;
2151 priv->enable = 0;
2152 no_pfc_priv_num--;
2153 }
2154
2155 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2156 no_pfc_priv_num == 0)
2157 break;
2158 }
2159
2160 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2161 }
2162
2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2164 struct hclge_pkt_buf_alloc *buf_alloc)
2165 {
2166 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2167 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2168 int i;
2169
2170 	/* clear from the last TC first */
2171 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2172 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2173 unsigned int mask = BIT((unsigned int)i);
2174
2175 if (hdev->hw_tc_map & mask &&
2176 hdev->tm_info.hw_pfc_map & mask) {
2177 			/* Reduce the number of PFC-enabled TCs with a private buffer */
2178 priv->wl.low = 0;
2179 priv->enable = 0;
2180 priv->wl.high = 0;
2181 priv->buf_size = 0;
2182 pfc_priv_num--;
2183 }
2184
2185 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2186 pfc_priv_num == 0)
2187 break;
2188 }
2189
2190 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2191 }
2192
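/* Split the whole remaining RX buffer into per-TC private buffers and leave
 * no shared buffer. Fails when the per-TC share would drop below the
 * minimum of dv_buf_size plus the compensation space.
 */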
2193 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2194 struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196 #define COMPENSATE_BUFFER 0x3C00
2197 #define COMPENSATE_HALF_MPS_NUM 5
2198 #define PRIV_WL_GAP 0x1800
2199
2200 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2201 u32 tc_num = hclge_get_tc_num(hdev);
2202 u32 half_mps = hdev->mps >> 1;
2203 u32 min_rx_priv;
2204 unsigned int i;
2205
2206 if (tc_num)
2207 rx_priv = rx_priv / tc_num;
2208
2209 if (tc_num <= NEED_RESERVE_TC_NUM)
2210 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2211
2212 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2213 COMPENSATE_HALF_MPS_NUM * half_mps;
2214 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2215 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2216 if (rx_priv < min_rx_priv)
2217 return false;
2218
2219 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2220 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2221
2222 priv->enable = 0;
2223 priv->wl.low = 0;
2224 priv->wl.high = 0;
2225 priv->buf_size = 0;
2226
2227 if (!(hdev->hw_tc_map & BIT(i)))
2228 continue;
2229
2230 priv->enable = 1;
2231 priv->buf_size = rx_priv;
2232 priv->wl.high = rx_priv - hdev->dv_buf_size;
2233 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2234 }
2235
2236 buf_alloc->s_buf.buf_size = 0;
2237
2238 return true;
2239 }
2240
2241 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2242 * @hdev: pointer to struct hclge_dev
2243 * @buf_alloc: pointer to buffer calculation data
2244  * @return: 0 on success, negative errno on failure
2245 */
2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2247 struct hclge_pkt_buf_alloc *buf_alloc)
2248 {
2249 /* When DCB is not supported, rx private buffer is not allocated. */
2250 if (!hnae3_dev_dcb_supported(hdev)) {
2251 u32 rx_all = hdev->pkt_buf_size;
2252
2253 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2254 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2255 return -ENOMEM;
2256
2257 return 0;
2258 }
2259
2260 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2261 return 0;
2262
2263 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2264 return 0;
2265
2266 /* try to decrease the buffer size */
2267 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2268 return 0;
2269
2270 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2271 return 0;
2272
2273 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2274 return 0;
2275
2276 return -ENOMEM;
2277 }
2278
2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2280 struct hclge_pkt_buf_alloc *buf_alloc)
2281 {
2282 struct hclge_rx_priv_buff_cmd *req;
2283 struct hclge_desc desc;
2284 int ret;
2285 int i;
2286
2287 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2288 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2289
2290 	/* Allocate the private buffer for each TC */
2291 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2292 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2293
2294 req->buf_num[i] =
2295 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2296 req->buf_num[i] |=
2297 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2298 }
2299
2300 req->shared_buf =
2301 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2302 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2303
2304 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2305 if (ret)
2306 dev_err(&hdev->pdev->dev,
2307 "rx private buffer alloc cmd failed %d\n", ret);
2308
2309 return ret;
2310 }
2311
2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2313 struct hclge_pkt_buf_alloc *buf_alloc)
2314 {
2315 struct hclge_rx_priv_wl_buf *req;
2316 struct hclge_priv_buf *priv;
2317 struct hclge_desc desc[2];
2318 int i, j;
2319 int ret;
2320
2321 for (i = 0; i < 2; i++) {
2322 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2323 false);
2324 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2325
2326 		/* The first descriptor sets the NEXT bit to 1 */
2327 if (i == 0)
2328 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2329 else
2330 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2331
2332 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2333 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2334
2335 priv = &buf_alloc->priv_buf[idx];
2336 req->tc_wl[j].high =
2337 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2338 req->tc_wl[j].high |=
2339 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340 req->tc_wl[j].low =
2341 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2342 req->tc_wl[j].low |=
2343 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2344 }
2345 }
2346
2347 	/* Send 2 descriptors at one time */
2348 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349 if (ret)
2350 dev_err(&hdev->pdev->dev,
2351 "rx private waterline config cmd failed %d\n",
2352 ret);
2353 return ret;
2354 }
2355
2356 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2357 struct hclge_pkt_buf_alloc *buf_alloc)
2358 {
2359 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2360 struct hclge_rx_com_thrd *req;
2361 struct hclge_desc desc[2];
2362 struct hclge_tc_thrd *tc;
2363 int i, j;
2364 int ret;
2365
2366 for (i = 0; i < 2; i++) {
2367 hclge_cmd_setup_basic_desc(&desc[i],
2368 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2369 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2370
2371 		/* The first descriptor sets the NEXT bit to 1 */
2372 if (i == 0)
2373 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2374 else
2375 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2376
2377 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2378 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2379
2380 req->com_thrd[j].high =
2381 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2382 req->com_thrd[j].high |=
2383 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2384 req->com_thrd[j].low =
2385 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2386 req->com_thrd[j].low |=
2387 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388 }
2389 }
2390
2391 /* Send 2 descriptors at one time */
2392 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2393 if (ret)
2394 dev_err(&hdev->pdev->dev,
2395 "common threshold config cmd failed %d\n", ret);
2396 return ret;
2397 }
2398
2399 static int hclge_common_wl_config(struct hclge_dev *hdev,
2400 struct hclge_pkt_buf_alloc *buf_alloc)
2401 {
2402 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2403 struct hclge_rx_com_wl *req;
2404 struct hclge_desc desc;
2405 int ret;
2406
2407 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2408
2409 req = (struct hclge_rx_com_wl *)desc.data;
2410 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2411 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2412
2413 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2414 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2415
2416 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2417 if (ret)
2418 dev_err(&hdev->pdev->dev,
2419 "common waterline config cmd failed %d\n", ret);
2420
2421 return ret;
2422 }
2423
2424 int hclge_buffer_alloc(struct hclge_dev *hdev)
2425 {
2426 struct hclge_pkt_buf_alloc *pkt_buf;
2427 int ret;
2428
2429 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2430 if (!pkt_buf)
2431 return -ENOMEM;
2432
2433 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2434 if (ret) {
2435 dev_err(&hdev->pdev->dev,
2436 "could not calc tx buffer size for all TCs %d\n", ret);
2437 goto out;
2438 }
2439
2440 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2441 if (ret) {
2442 dev_err(&hdev->pdev->dev,
2443 "could not alloc tx buffers %d\n", ret);
2444 goto out;
2445 }
2446
2447 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2448 if (ret) {
2449 dev_err(&hdev->pdev->dev,
2450 "could not calc rx priv buffer size for all TCs %d\n",
2451 ret);
2452 goto out;
2453 }
2454
2455 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2456 if (ret) {
2457 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2458 ret);
2459 goto out;
2460 }
2461
2462 if (hnae3_dev_dcb_supported(hdev)) {
2463 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2464 if (ret) {
2465 dev_err(&hdev->pdev->dev,
2466 "could not configure rx private waterline %d\n",
2467 ret);
2468 goto out;
2469 }
2470
2471 ret = hclge_common_thrd_config(hdev, pkt_buf);
2472 if (ret) {
2473 dev_err(&hdev->pdev->dev,
2474 "could not configure common threshold %d\n",
2475 ret);
2476 goto out;
2477 }
2478 }
2479
2480 ret = hclge_common_wl_config(hdev, pkt_buf);
2481 if (ret)
2482 dev_err(&hdev->pdev->dev,
2483 "could not configure common waterline %d\n", ret);
2484
2485 out:
2486 kfree(pkt_buf);
2487 return ret;
2488 }
2489
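/* Copy the RoCE vector and I/O base information from the PF into the RoCE
 * handle. Fails when the MSI budget cannot cover both the NIC and RoCE
 * vectors.
 */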
2490 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2491 {
2492 struct hnae3_handle *roce = &vport->roce;
2493 struct hnae3_handle *nic = &vport->nic;
2494 struct hclge_dev *hdev = vport->back;
2495
2496 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2497
2498 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2499 return -EINVAL;
2500
2501 roce->rinfo.base_vector = hdev->roce_base_vector;
2502
2503 roce->rinfo.netdev = nic->kinfo.netdev;
2504 roce->rinfo.roce_io_base = hdev->hw.io_base;
2505 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2506
2507 roce->pdev = nic->pdev;
2508 roce->ae_algo = nic->ae_algo;
2509 roce->numa_node_mask = nic->numa_node_mask;
2510
2511 return 0;
2512 }
2513
2514 static int hclge_init_msi(struct hclge_dev *hdev)
2515 {
2516 struct pci_dev *pdev = hdev->pdev;
2517 int vectors;
2518 int i;
2519
2520 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2521 hdev->num_msi,
2522 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2523 if (vectors < 0) {
2524 dev_err(&pdev->dev,
2525 "failed(%d) to allocate MSI/MSI-X vectors\n",
2526 vectors);
2527 return vectors;
2528 }
2529 if (vectors < hdev->num_msi)
2530 dev_warn(&hdev->pdev->dev,
2531 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2532 hdev->num_msi, vectors);
2533
2534 hdev->num_msi = vectors;
2535 hdev->num_msi_left = vectors;
2536
2537 hdev->base_msi_vector = pdev->irq;
2538 hdev->roce_base_vector = hdev->base_msi_vector +
2539 hdev->num_nic_msi;
2540
2541 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2542 sizeof(u16), GFP_KERNEL);
2543 if (!hdev->vector_status) {
2544 pci_free_irq_vectors(pdev);
2545 return -ENOMEM;
2546 }
2547
2548 for (i = 0; i < hdev->num_msi; i++)
2549 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2550
2551 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2552 sizeof(int), GFP_KERNEL);
2553 if (!hdev->vector_irq) {
2554 pci_free_irq_vectors(pdev);
2555 return -ENOMEM;
2556 }
2557
2558 return 0;
2559 }
2560
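/* Only 10M and 100M support half duplex; force full duplex for any other
 * speed.
 */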
2561 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2562 {
2563 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2564 duplex = HCLGE_MAC_FULL;
2565
2566 return duplex;
2567 }
2568
2569 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2570 u8 duplex)
2571 {
2572 struct hclge_config_mac_speed_dup_cmd *req;
2573 struct hclge_desc desc;
2574 int ret;
2575
2576 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2577
2578 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2579
2580 if (duplex)
2581 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2582
2583 switch (speed) {
2584 case HCLGE_MAC_SPEED_10M:
2585 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2587 break;
2588 case HCLGE_MAC_SPEED_100M:
2589 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2591 break;
2592 case HCLGE_MAC_SPEED_1G:
2593 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2595 break;
2596 case HCLGE_MAC_SPEED_10G:
2597 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2599 break;
2600 case HCLGE_MAC_SPEED_25G:
2601 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2603 break;
2604 case HCLGE_MAC_SPEED_40G:
2605 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2606 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2607 break;
2608 case HCLGE_MAC_SPEED_50G:
2609 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2610 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2611 break;
2612 case HCLGE_MAC_SPEED_100G:
2613 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2614 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2615 break;
2616 case HCLGE_MAC_SPEED_200G:
2617 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2618 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2619 break;
2620 default:
2621 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2622 return -EINVAL;
2623 }
2624
2625 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2626 1);
2627
2628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2629 if (ret) {
2630 dev_err(&hdev->pdev->dev,
2631 "mac speed/duplex config cmd failed %d.\n", ret);
2632 return ret;
2633 }
2634
2635 return 0;
2636 }
2637
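/* Wrapper around hclge_cfg_mac_speed_dup_hw() that skips the firmware
 * command when autoneg is not supported and the requested speed/duplex
 * already match the cached values, and updates the cache on success.
 */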
2638 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2639 {
2640 struct hclge_mac *mac = &hdev->hw.mac;
2641 int ret;
2642
2643 duplex = hclge_check_speed_dup(duplex, speed);
2644 if (!mac->support_autoneg && mac->speed == speed &&
2645 mac->duplex == duplex)
2646 return 0;
2647
2648 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2649 if (ret)
2650 return ret;
2651
2652 hdev->hw.mac.speed = speed;
2653 hdev->hw.mac.duplex = duplex;
2654
2655 return 0;
2656 }
2657
2658 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2659 u8 duplex)
2660 {
2661 struct hclge_vport *vport = hclge_get_vport(handle);
2662 struct hclge_dev *hdev = vport->back;
2663
2664 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2665 }
2666
2667 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2668 {
2669 struct hclge_config_auto_neg_cmd *req;
2670 struct hclge_desc desc;
2671 u32 flag = 0;
2672 int ret;
2673
2674 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2675
2676 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2677 if (enable)
2678 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2679 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2680
2681 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2682 if (ret)
2683 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2684 ret);
2685
2686 return ret;
2687 }
2688
2689 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2690 {
2691 struct hclge_vport *vport = hclge_get_vport(handle);
2692 struct hclge_dev *hdev = vport->back;
2693
2694 if (!hdev->hw.mac.support_autoneg) {
2695 if (enable) {
2696 dev_err(&hdev->pdev->dev,
2697 "autoneg is not supported by current port\n");
2698 return -EOPNOTSUPP;
2699 } else {
2700 return 0;
2701 }
2702 }
2703
2704 return hclge_set_autoneg_en(hdev, enable);
2705 }
2706
2707 static int hclge_get_autoneg(struct hnae3_handle *handle)
2708 {
2709 struct hclge_vport *vport = hclge_get_vport(handle);
2710 struct hclge_dev *hdev = vport->back;
2711 struct phy_device *phydev = hdev->hw.mac.phydev;
2712
2713 if (phydev)
2714 return phydev->autoneg;
2715
2716 return hdev->hw.mac.autoneg;
2717 }
2718
2719 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2720 {
2721 struct hclge_vport *vport = hclge_get_vport(handle);
2722 struct hclge_dev *hdev = vport->back;
2723 int ret;
2724
2725 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2726
2727 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2728 if (ret)
2729 return ret;
2730 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2731 }
2732
2733 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2734 {
2735 struct hclge_vport *vport = hclge_get_vport(handle);
2736 struct hclge_dev *hdev = vport->back;
2737
2738 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2739 return hclge_set_autoneg_en(hdev, !halt);
2740
2741 return 0;
2742 }
2743
2744 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2745 {
2746 struct hclge_config_fec_cmd *req;
2747 struct hclge_desc desc;
2748 int ret;
2749
2750 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2751
2752 req = (struct hclge_config_fec_cmd *)desc.data;
2753 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2754 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2755 if (fec_mode & BIT(HNAE3_FEC_RS))
2756 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2757 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2758 if (fec_mode & BIT(HNAE3_FEC_BASER))
2759 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2760 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2761
2762 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2763 if (ret)
2764 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2765
2766 return ret;
2767 }
2768
2769 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2770 {
2771 struct hclge_vport *vport = hclge_get_vport(handle);
2772 struct hclge_dev *hdev = vport->back;
2773 struct hclge_mac *mac = &hdev->hw.mac;
2774 int ret;
2775
2776 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2777 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2778 return -EINVAL;
2779 }
2780
2781 ret = hclge_set_fec_hw(hdev, fec_mode);
2782 if (ret)
2783 return ret;
2784
2785 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2786 return 0;
2787 }
2788
2789 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2790 u8 *fec_mode)
2791 {
2792 struct hclge_vport *vport = hclge_get_vport(handle);
2793 struct hclge_dev *hdev = vport->back;
2794 struct hclge_mac *mac = &hdev->hw.mac;
2795
2796 if (fec_ability)
2797 *fec_ability = mac->fec_ability;
2798 if (fec_mode)
2799 *fec_mode = mac->fec_mode;
2800 }
2801
2802 static int hclge_mac_init(struct hclge_dev *hdev)
2803 {
2804 struct hclge_mac *mac = &hdev->hw.mac;
2805 int ret;
2806
2807 hdev->support_sfp_query = true;
2808 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2809 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2810 hdev->hw.mac.duplex);
2811 if (ret)
2812 return ret;
2813
2814 if (hdev->hw.mac.support_autoneg) {
2815 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2816 if (ret)
2817 return ret;
2818 }
2819
2820 mac->link = 0;
2821
2822 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2823 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2824 if (ret)
2825 return ret;
2826 }
2827
2828 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2829 if (ret) {
2830 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2831 return ret;
2832 }
2833
2834 ret = hclge_set_default_loopback(hdev);
2835 if (ret)
2836 return ret;
2837
2838 ret = hclge_buffer_alloc(hdev);
2839 if (ret)
2840 dev_err(&hdev->pdev->dev,
2841 "allocate buffer fail, ret=%d\n", ret);
2842
2843 return ret;
2844 }
2845
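/* The following helpers schedule the shared PF service task on hclge_wq
 * with zero delay for mailbox, reset and error handling work; each uses a
 * dedicated state bit so redundant scheduling for the same reason is
 * avoided.
 */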
2846 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2847 {
2848 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2850 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2851 }
2852
2853 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2854 {
2855 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2856 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
2857 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2858 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2859 }
2860
2861 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2862 {
2863 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2864 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2865 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2866 }
2867
2868 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2869 {
2870 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2871 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2872 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
2873 }
2874
2875 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2876 {
2877 struct hclge_link_status_cmd *req;
2878 struct hclge_desc desc;
2879 int ret;
2880
2881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2882 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2883 if (ret) {
2884 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2885 ret);
2886 return ret;
2887 }
2888
2889 req = (struct hclge_link_status_cmd *)desc.data;
2890 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2891 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2892
2893 return 0;
2894 }
2895
2896 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2897 {
2898 struct phy_device *phydev = hdev->hw.mac.phydev;
2899
2900 *link_status = HCLGE_LINK_STATUS_DOWN;
2901
2902 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2903 return 0;
2904
2905 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2906 return 0;
2907
2908 return hclge_get_mac_link_status(hdev, link_status);
2909 }
2910
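/* Push the current PF link state to every alive VF whose link state is set
 * to IFLA_VF_LINK_STATE_AUTO.
 */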
2911 static void hclge_push_link_status(struct hclge_dev *hdev)
2912 {
2913 struct hclge_vport *vport;
2914 int ret;
2915 u16 i;
2916
2917 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2918 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2919
2920 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2921 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2922 continue;
2923
2924 ret = hclge_push_vf_link_status(vport);
2925 if (ret) {
2926 dev_err(&hdev->pdev->dev,
2927 "failed to push link status to vf%u, ret = %d\n",
2928 i, ret);
2929 }
2930 }
2931 }
2932
2933 static void hclge_update_link_status(struct hclge_dev *hdev)
2934 {
2935 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2936 struct hnae3_handle *handle = &hdev->vport[0].nic;
2937 struct hnae3_client *rclient = hdev->roce_client;
2938 struct hnae3_client *client = hdev->nic_client;
2939 int state;
2940 int ret;
2941
2942 if (!client)
2943 return;
2944
2945 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2946 return;
2947
2948 ret = hclge_get_mac_phy_link(hdev, &state);
2949 if (ret) {
2950 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2951 return;
2952 }
2953
2954 if (state != hdev->hw.mac.link) {
2955 hdev->hw.mac.link = state;
2956 client->ops->link_status_change(handle, state);
2957 hclge_config_mac_tnl_int(hdev, state);
2958 if (rclient && rclient->ops->link_status_change)
2959 rclient->ops->link_status_change(rhandle, state);
2960
2961 hclge_push_link_status(hdev);
2962 }
2963
2964 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2965 }
2966
2967 static void hclge_update_port_capability(struct hclge_dev *hdev,
2968 struct hclge_mac *mac)
2969 {
2970 if (hnae3_dev_fec_supported(hdev))
2971 /* update fec ability by speed */
2972 hclge_convert_setting_fec(mac);
2973
2974 	/* firmware cannot identify the backplane type, so the media type
2975 	 * read from the configuration helps to handle it
2976 	 */
2977 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2978 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2979 mac->module_type = HNAE3_MODULE_TYPE_KR;
2980 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2981 mac->module_type = HNAE3_MODULE_TYPE_TP;
2982
2983 if (mac->support_autoneg) {
2984 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2985 linkmode_copy(mac->advertising, mac->supported);
2986 } else {
2987 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2988 mac->supported);
2989 linkmode_zero(mac->advertising);
2990 }
2991 }
2992
2993 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2994 {
2995 struct hclge_sfp_info_cmd *resp;
2996 struct hclge_desc desc;
2997 int ret;
2998
2999 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3000 resp = (struct hclge_sfp_info_cmd *)desc.data;
3001 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3002 if (ret == -EOPNOTSUPP) {
3003 dev_warn(&hdev->pdev->dev,
3004 			 "IMP does not support getting SFP speed %d\n", ret);
3005 return ret;
3006 } else if (ret) {
3007 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3008 return ret;
3009 }
3010
3011 *speed = le32_to_cpu(resp->speed);
3012
3013 return 0;
3014 }
3015
3016 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3017 {
3018 struct hclge_sfp_info_cmd *resp;
3019 struct hclge_desc desc;
3020 int ret;
3021
3022 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3023 resp = (struct hclge_sfp_info_cmd *)desc.data;
3024
3025 resp->query_type = QUERY_ACTIVE_SPEED;
3026
3027 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3028 if (ret == -EOPNOTSUPP) {
3029 dev_warn(&hdev->pdev->dev,
3030 			 "IMP does not support getting SFP info %d\n", ret);
3031 return ret;
3032 } else if (ret) {
3033 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3034 return ret;
3035 }
3036
3037 	/* In some cases, the MAC speed obtained from the IMP may be 0; it
3038 	 * should not be assigned to mac->speed.
3039 	 */
3040 if (!le32_to_cpu(resp->speed))
3041 return 0;
3042
3043 mac->speed = le32_to_cpu(resp->speed);
3044 	/* if resp->speed_ability is 0, the firmware is an old version,
3045 	 * so do not update these parameters
3046 	 */
3047 if (resp->speed_ability) {
3048 mac->module_type = le32_to_cpu(resp->module_type);
3049 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3050 mac->autoneg = resp->autoneg;
3051 mac->support_autoneg = resp->autoneg_ability;
3052 mac->speed_type = QUERY_ACTIVE_SPEED;
3053 if (!resp->active_fec)
3054 mac->fec_mode = 0;
3055 else
3056 mac->fec_mode = BIT(resp->active_fec);
3057 } else {
3058 mac->speed_type = QUERY_SFP_SPEED;
3059 }
3060
3061 return 0;
3062 }
3063
3064 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3065 struct ethtool_link_ksettings *cmd)
3066 {
3067 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3068 struct hclge_vport *vport = hclge_get_vport(handle);
3069 struct hclge_phy_link_ksetting_0_cmd *req0;
3070 struct hclge_phy_link_ksetting_1_cmd *req1;
3071 u32 supported, advertising, lp_advertising;
3072 struct hclge_dev *hdev = vport->back;
3073 int ret;
3074
3075 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3076 true);
3077 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3078 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3079 true);
3080
3081 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3082 if (ret) {
3083 dev_err(&hdev->pdev->dev,
3084 "failed to get phy link ksetting, ret = %d.\n", ret);
3085 return ret;
3086 }
3087
3088 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3089 cmd->base.autoneg = req0->autoneg;
3090 cmd->base.speed = le32_to_cpu(req0->speed);
3091 cmd->base.duplex = req0->duplex;
3092 cmd->base.port = req0->port;
3093 cmd->base.transceiver = req0->transceiver;
3094 cmd->base.phy_address = req0->phy_address;
3095 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3096 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3097 supported = le32_to_cpu(req0->supported);
3098 advertising = le32_to_cpu(req0->advertising);
3099 lp_advertising = le32_to_cpu(req0->lp_advertising);
3100 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3101 supported);
3102 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3103 advertising);
3104 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3105 lp_advertising);
3106
3107 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3108 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3109 cmd->base.master_slave_state = req1->master_slave_state;
3110
3111 return 0;
3112 }
3113
3114 static int
3115 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3116 const struct ethtool_link_ksettings *cmd)
3117 {
3118 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3119 struct hclge_vport *vport = hclge_get_vport(handle);
3120 struct hclge_phy_link_ksetting_0_cmd *req0;
3121 struct hclge_phy_link_ksetting_1_cmd *req1;
3122 struct hclge_dev *hdev = vport->back;
3123 u32 advertising;
3124 int ret;
3125
3126 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3127 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3128 (cmd->base.duplex != DUPLEX_HALF &&
3129 cmd->base.duplex != DUPLEX_FULL)))
3130 return -EINVAL;
3131
3132 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3133 false);
3134 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3135 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3136 false);
3137
3138 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3139 req0->autoneg = cmd->base.autoneg;
3140 req0->speed = cpu_to_le32(cmd->base.speed);
3141 req0->duplex = cmd->base.duplex;
3142 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3143 cmd->link_modes.advertising);
3144 req0->advertising = cpu_to_le32(advertising);
3145 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3146
3147 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3148 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3149
3150 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3151 if (ret) {
3152 dev_err(&hdev->pdev->dev,
3153 "failed to set phy link ksettings, ret = %d.\n", ret);
3154 return ret;
3155 }
3156
3157 hdev->hw.mac.autoneg = cmd->base.autoneg;
3158 hdev->hw.mac.speed = cmd->base.speed;
3159 hdev->hw.mac.duplex = cmd->base.duplex;
3160 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3161
3162 return 0;
3163 }
3164
3165 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3166 {
3167 struct ethtool_link_ksettings cmd;
3168 int ret;
3169
3170 if (!hnae3_dev_phy_imp_supported(hdev))
3171 return 0;
3172
3173 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3174 if (ret)
3175 return ret;
3176
3177 hdev->hw.mac.autoneg = cmd.base.autoneg;
3178 hdev->hw.mac.speed = cmd.base.speed;
3179 hdev->hw.mac.duplex = cmd.base.duplex;
3180
3181 return 0;
3182 }
3183
3184 static int hclge_tp_port_init(struct hclge_dev *hdev)
3185 {
3186 struct ethtool_link_ksettings cmd;
3187
3188 if (!hnae3_dev_phy_imp_supported(hdev))
3189 return 0;
3190
3191 cmd.base.autoneg = hdev->hw.mac.autoneg;
3192 cmd.base.speed = hdev->hw.mac.speed;
3193 cmd.base.duplex = hdev->hw.mac.duplex;
3194 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3195
3196 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3197 }
3198
3199 static int hclge_update_port_info(struct hclge_dev *hdev)
3200 {
3201 struct hclge_mac *mac = &hdev->hw.mac;
3202 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3203 int ret;
3204
3205 /* get the port info from SFP cmd if not copper port */
3206 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3207 return hclge_update_tp_port_info(hdev);
3208
3209 	/* if the IMP does not support getting SFP/qSFP info, return directly */
3210 if (!hdev->support_sfp_query)
3211 return 0;
3212
3213 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3214 ret = hclge_get_sfp_info(hdev, mac);
3215 else
3216 ret = hclge_get_sfp_speed(hdev, &speed);
3217
3218 if (ret == -EOPNOTSUPP) {
3219 hdev->support_sfp_query = false;
3220 return ret;
3221 } else if (ret) {
3222 return ret;
3223 }
3224
3225 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3226 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3227 hclge_update_port_capability(hdev, mac);
3228 return 0;
3229 }
3230 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3231 HCLGE_MAC_FULL);
3232 } else {
3233 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3234 return 0; /* do nothing if no SFP */
3235
3236 		/* must configure full duplex for SFP */
3237 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3238 }
3239 }
3240
3241 static int hclge_get_status(struct hnae3_handle *handle)
3242 {
3243 struct hclge_vport *vport = hclge_get_vport(handle);
3244 struct hclge_dev *hdev = vport->back;
3245
3246 hclge_update_link_status(hdev);
3247
3248 return hdev->hw.mac.link;
3249 }
3250
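/* Map a zero-based VF id to its vport entry (the PF occupies vport 0),
 * validating that SR-IOV is enabled and the id is within range.
 */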
3251 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3252 {
3253 if (!pci_num_vf(hdev->pdev)) {
3254 dev_err(&hdev->pdev->dev,
3255 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3256 return NULL;
3257 }
3258
3259 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3260 dev_err(&hdev->pdev->dev,
3261 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3262 vf, pci_num_vf(hdev->pdev));
3263 return NULL;
3264 }
3265
3266 	/* VFs start from 1 in the vport array */
3267 vf += HCLGE_VF_VPORT_START_NUM;
3268 return &hdev->vport[vf];
3269 }
3270
3271 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3272 struct ifla_vf_info *ivf)
3273 {
3274 struct hclge_vport *vport = hclge_get_vport(handle);
3275 struct hclge_dev *hdev = vport->back;
3276
3277 vport = hclge_get_vf_vport(hdev, vf);
3278 if (!vport)
3279 return -EINVAL;
3280
3281 ivf->vf = vf;
3282 ivf->linkstate = vport->vf_info.link_state;
3283 ivf->spoofchk = vport->vf_info.spoofchk;
3284 ivf->trusted = vport->vf_info.trusted;
3285 ivf->min_tx_rate = 0;
3286 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3287 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3288 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3289 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3290 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3291
3292 return 0;
3293 }
3294
3295 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3296 int link_state)
3297 {
3298 struct hclge_vport *vport = hclge_get_vport(handle);
3299 struct hclge_dev *hdev = vport->back;
3300 int link_state_old;
3301 int ret;
3302
3303 vport = hclge_get_vf_vport(hdev, vf);
3304 if (!vport)
3305 return -EINVAL;
3306
3307 link_state_old = vport->vf_info.link_state;
3308 vport->vf_info.link_state = link_state;
3309
3310 ret = hclge_push_vf_link_status(vport);
3311 if (ret) {
3312 vport->vf_info.link_state = link_state_old;
3313 dev_err(&hdev->pdev->dev,
3314 "failed to push vf%d link status, ret = %d\n", vf, ret);
3315 }
3316
3317 return ret;
3318 }
3319
3320 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3321 {
3322 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3323
3324 /* fetch the events from their corresponding regs */
3325 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3326 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3327 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3328 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3329
3330 	/* Assumption: if reset and mailbox events happen to be reported
3331 	 * together, only the reset event is processed in this pass and the
3332 	 * mailbox events are deferred. Since the RX CMDQ event has not been
3333 	 * cleared this time, the hardware will raise another interrupt just
3334 	 * for the mailbox.
3335 	 *
3336 	 * check for vector0 reset event sources
3337 	 */
3338 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3339 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3340 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3341 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3342 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3343 hdev->rst_stats.imp_rst_cnt++;
3344 return HCLGE_VECTOR0_EVENT_RST;
3345 }
3346
3347 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3348 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3349 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3350 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3351 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3352 hdev->rst_stats.global_rst_cnt++;
3353 return HCLGE_VECTOR0_EVENT_RST;
3354 }
3355
3356 /* check for vector0 msix event and hardware error event source */
3357 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3358 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3359 return HCLGE_VECTOR0_EVENT_ERR;
3360
3361 /* check for vector0 ptp event source */
3362 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3363 *clearval = msix_src_reg;
3364 return HCLGE_VECTOR0_EVENT_PTP;
3365 }
3366
3367 /* check for vector0 mailbox(=CMDQ RX) event source */
3368 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3369 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3370 *clearval = cmdq_src_reg;
3371 return HCLGE_VECTOR0_EVENT_MBX;
3372 }
3373
3374 /* print other vector0 event source */
3375 dev_info(&hdev->pdev->dev,
3376 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3377 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3378
3379 return HCLGE_VECTOR0_EVENT_OTHER;
3380 }
3381
3382 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3383 u32 regclr)
3384 {
3385 switch (event_type) {
3386 case HCLGE_VECTOR0_EVENT_PTP:
3387 case HCLGE_VECTOR0_EVENT_RST:
3388 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3389 break;
3390 case HCLGE_VECTOR0_EVENT_MBX:
3391 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3392 break;
3393 default:
3394 break;
3395 }
3396 }
3397
3398 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3399 {
3400 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3401 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3402 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3403 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3404 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3405 }
3406
3407 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3408 {
3409 writel(enable ? 1 : 0, vector->addr);
3410 }
3411
3412 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3413 {
3414 struct hclge_dev *hdev = data;
3415 unsigned long flags;
3416 u32 clearval = 0;
3417 u32 event_cause;
3418
3419 hclge_enable_vector(&hdev->misc_vector, false);
3420 event_cause = hclge_check_event_cause(hdev, &clearval);
3421
3422 /* vector 0 interrupt is shared with reset and mailbox source events. */
3423 switch (event_cause) {
3424 case HCLGE_VECTOR0_EVENT_ERR:
3425 hclge_errhand_task_schedule(hdev);
3426 break;
3427 case HCLGE_VECTOR0_EVENT_RST:
3428 hclge_reset_task_schedule(hdev);
3429 break;
3430 case HCLGE_VECTOR0_EVENT_PTP:
3431 spin_lock_irqsave(&hdev->ptp->lock, flags);
3432 hclge_ptp_clean_tx_hwts(hdev);
3433 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3434 break;
3435 case HCLGE_VECTOR0_EVENT_MBX:
3436 /* If we are here then,
3437 * 1. Either we are not handling any mbx task and we are not
3438 * scheduled as well
3439 * OR
3440 * 2. We could be handling an mbx task but nothing more is
3441 * scheduled.
3442 * In both cases, we should schedule mbx task as there are more
3443 * mbx messages reported by this interrupt.
3444 */
3445 hclge_mbx_task_schedule(hdev);
3446 break;
3447 default:
3448 dev_warn(&hdev->pdev->dev,
3449 "received unknown or unhandled event of vector0\n");
3450 break;
3451 }
3452
3453 hclge_clear_event_cause(hdev, event_cause, clearval);
3454
3455 /* Enable interrupt if it is not caused by reset event or error event */
3456 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3457 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3458 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3459 hclge_enable_vector(&hdev->misc_vector, true);
3460
3461 return IRQ_HANDLED;
3462 }
3463
3464 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3465 {
3466 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3467 dev_warn(&hdev->pdev->dev,
3468 "vector(vector_id %d) has been freed.\n", vector_id);
3469 return;
3470 }
3471
3472 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3473 hdev->num_msi_left += 1;
3474 hdev->num_msi_used -= 1;
3475 }
3476
3477 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3478 {
3479 struct hclge_misc_vector *vector = &hdev->misc_vector;
3480
3481 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3482
3483 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3484 hdev->vector_status[0] = 0;
3485
3486 hdev->num_msi_left -= 1;
3487 hdev->num_msi_used += 1;
3488 }
3489
3490 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3491 {
3492 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3493 &hdev->affinity_mask);
3494 }
3495
3496 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3497 {
3498 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3499 }
3500
3501 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3502 {
3503 int ret;
3504
3505 hclge_get_misc_vector(hdev);
3506
3507 /* this irq is explicitly freed in hclge_misc_irq_uninit() */
3508 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3509 HCLGE_NAME, pci_name(hdev->pdev));
3510 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3511 0, hdev->misc_vector.name, hdev);
3512 if (ret) {
3513 hclge_free_vector(hdev, 0);
3514 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3515 hdev->misc_vector.vector_irq);
3516 }
3517
3518 return ret;
3519 }
3520
3521 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3522 {
3523 free_irq(hdev->misc_vector.vector_irq, hdev);
3524 hclge_free_vector(hdev, 0);
3525 }
3526
3527 int hclge_notify_client(struct hclge_dev *hdev,
3528 enum hnae3_reset_notify_type type)
3529 {
3530 struct hnae3_handle *handle = &hdev->vport[0].nic;
3531 struct hnae3_client *client = hdev->nic_client;
3532 int ret;
3533
3534 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3535 return 0;
3536
3537 if (!client->ops->reset_notify)
3538 return -EOPNOTSUPP;
3539
3540 ret = client->ops->reset_notify(handle, type);
3541 if (ret)
3542 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3543 type, ret);
3544
3545 return ret;
3546 }
3547
3548 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3549 enum hnae3_reset_notify_type type)
3550 {
3551 struct hnae3_handle *handle = &hdev->vport[0].roce;
3552 struct hnae3_client *client = hdev->roce_client;
3553 int ret;
3554
3555 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3556 return 0;
3557
3558 if (!client->ops->reset_notify)
3559 return -EOPNOTSUPP;
3560
3561 ret = client->ops->reset_notify(handle, type);
3562 if (ret)
3563 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3564 type, ret);
3565
3566 return ret;
3567 }
3568
3569 static int hclge_reset_wait(struct hclge_dev *hdev)
3570 {
3571 #define HCLGE_RESET_WATI_MS 100
3572 #define HCLGE_RESET_WAIT_CNT 350
3573
3574 u32 val, reg, reg_bit;
3575 u32 cnt = 0;
3576
3577 switch (hdev->reset_type) {
3578 case HNAE3_IMP_RESET:
3579 reg = HCLGE_GLOBAL_RESET_REG;
3580 reg_bit = HCLGE_IMP_RESET_BIT;
3581 break;
3582 case HNAE3_GLOBAL_RESET:
3583 reg = HCLGE_GLOBAL_RESET_REG;
3584 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3585 break;
3586 case HNAE3_FUNC_RESET:
3587 reg = HCLGE_FUN_RST_ING;
3588 reg_bit = HCLGE_FUN_RST_ING_B;
3589 break;
3590 default:
3591 dev_err(&hdev->pdev->dev,
3592 "Wait for unsupported reset type: %d\n",
3593 hdev->reset_type);
3594 return -EINVAL;
3595 }
3596
3597 val = hclge_read_dev(&hdev->hw, reg);
3598 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3599 msleep(HCLGE_RESET_WATI_MS);
3600 val = hclge_read_dev(&hdev->hw, reg);
3601 cnt++;
3602 }
3603
3604 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3605 dev_warn(&hdev->pdev->dev,
3606 "Wait for reset timeout: %d\n", hdev->reset_type);
3607 return -EBUSY;
3608 }
3609
3610 return 0;
3611 }
3612
3613 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3614 {
3615 struct hclge_vf_rst_cmd *req;
3616 struct hclge_desc desc;
3617
3618 req = (struct hclge_vf_rst_cmd *)desc.data;
3619 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3620 req->dest_vfid = func_id;
3621
3622 if (reset)
3623 req->vf_rst = 0x1;
3624
3625 return hclge_cmd_send(&hdev->hw, &desc, 1);
3626 }
3627
3628 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3629 {
3630 int i;
3631
3632 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3633 struct hclge_vport *vport = &hdev->vport[i];
3634 int ret;
3635
3636 /* Send cmd to set/clear VF's FUNC_RST_ING */
3637 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3638 if (ret) {
3639 dev_err(&hdev->pdev->dev,
3640 "set vf(%u) rst failed %d!\n",
3641 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3642 ret);
3643 return ret;
3644 }
3645
3646 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3647 continue;
3648
3649 /* Inform VF to process the reset.
3650 * hclge_inform_reset_assert_to_vf may fail if VF
3651 * driver is not loaded.
3652 */
3653 ret = hclge_inform_reset_assert_to_vf(vport);
3654 if (ret)
3655 dev_warn(&hdev->pdev->dev,
3656 "inform reset to vf(%u) failed %d!\n",
3657 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3658 ret);
3659 }
3660
3661 return 0;
3662 }
3663
3664 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3665 {
3666 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3667 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3668 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3669 return;
3670
3671 hclge_mbx_handler(hdev);
3672
3673 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3674 }
3675
3676 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3677 {
3678 struct hclge_pf_rst_sync_cmd *req;
3679 struct hclge_desc desc;
3680 int cnt = 0;
3681 int ret;
3682
3683 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3684 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3685
3686 do {
3687 /* the VF needs to bring down its netdev via mbx during PF or FLR reset */
3688 hclge_mailbox_service_task(hdev);
3689
3690 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3691 /* for compatibility with old firmware, wait
3692 * 100 ms for the VF to stop IO
3693 */
3694 if (ret == -EOPNOTSUPP) {
3695 msleep(HCLGE_RESET_SYNC_TIME);
3696 return;
3697 } else if (ret) {
3698 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3699 ret);
3700 return;
3701 } else if (req->all_vf_ready) {
3702 return;
3703 }
3704 msleep(HCLGE_PF_RESET_SYNC_TIME);
3705 hclge_cmd_reuse_desc(&desc, true);
3706 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3707
3708 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3709 }
3710
3711 void hclge_report_hw_error(struct hclge_dev *hdev,
3712 enum hnae3_hw_error_type type)
3713 {
3714 struct hnae3_client *client = hdev->nic_client;
3715
3716 if (!client || !client->ops->process_hw_error ||
3717 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3718 return;
3719
3720 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3721 }
3722
3723 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3724 {
3725 u32 reg_val;
3726
3727 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3728 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3729 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3730 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3731 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3732 }
3733
3734 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3735 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3736 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3737 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3738 }
3739 }
3740
3741 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3742 {
3743 struct hclge_desc desc;
3744 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3745 int ret;
3746
3747 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3748 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3749 req->fun_reset_vfid = func_id;
3750
3751 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3752 if (ret)
3753 dev_err(&hdev->pdev->dev,
3754 "send function reset cmd fail, status =%d\n", ret);
3755
3756 return ret;
3757 }
3758
3759 static void hclge_do_reset(struct hclge_dev *hdev)
3760 {
3761 struct hnae3_handle *handle = &hdev->vport[0].nic;
3762 struct pci_dev *pdev = hdev->pdev;
3763 u32 val;
3764
3765 if (hclge_get_hw_reset_stat(handle)) {
3766 dev_info(&pdev->dev, "hardware reset not finish\n");
3767 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3768 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3769 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3770 return;
3771 }
3772
3773 switch (hdev->reset_type) {
3774 case HNAE3_IMP_RESET:
3775 dev_info(&pdev->dev, "IMP reset requested\n");
3776 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3777 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3778 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3779 break;
3780 case HNAE3_GLOBAL_RESET:
3781 dev_info(&pdev->dev, "global reset requested\n");
3782 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3783 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3784 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3785 break;
3786 case HNAE3_FUNC_RESET:
3787 dev_info(&pdev->dev, "PF reset requested\n");
3788 /* schedule again to check later */
3789 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3790 hclge_reset_task_schedule(hdev);
3791 break;
3792 default:
3793 dev_warn(&pdev->dev,
3794 "unsupported reset type: %d\n", hdev->reset_type);
3795 break;
3796 }
3797 }
3798
3799 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3800 unsigned long *addr)
3801 {
3802 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3803 struct hclge_dev *hdev = ae_dev->priv;
3804
3805 /* return the highest priority reset level amongst all */
3806 if (test_bit(HNAE3_IMP_RESET, addr)) {
3807 rst_level = HNAE3_IMP_RESET;
3808 clear_bit(HNAE3_IMP_RESET, addr);
3809 clear_bit(HNAE3_GLOBAL_RESET, addr);
3810 clear_bit(HNAE3_FUNC_RESET, addr);
3811 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3812 rst_level = HNAE3_GLOBAL_RESET;
3813 clear_bit(HNAE3_GLOBAL_RESET, addr);
3814 clear_bit(HNAE3_FUNC_RESET, addr);
3815 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3816 rst_level = HNAE3_FUNC_RESET;
3817 clear_bit(HNAE3_FUNC_RESET, addr);
3818 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3819 rst_level = HNAE3_FLR_RESET;
3820 clear_bit(HNAE3_FLR_RESET, addr);
3821 }
3822
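/* Do not report a lower-priority reset while a higher-priority
 * reset is already being handled.
 */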
3823 if (hdev->reset_type != HNAE3_NONE_RESET &&
3824 rst_level < hdev->reset_type)
3825 return HNAE3_NONE_RESET;
3826
3827 return rst_level;
3828 }
3829
3830 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3831 {
3832 u32 clearval = 0;
3833
3834 switch (hdev->reset_type) {
3835 case HNAE3_IMP_RESET:
3836 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3837 break;
3838 case HNAE3_GLOBAL_RESET:
3839 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3840 break;
3841 default:
3842 break;
3843 }
3844
3845 if (!clearval)
3846 return;
3847
3848 /* For revision 0x20, the reset interrupt source
3849 * can only be cleared after the hardware reset is done
3850 */
3851 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3852 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3853 clearval);
3854
3855 hclge_enable_vector(&hdev->misc_vector, true);
3856 }
3857
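/* Toggle the HCLGE_NIC_SW_RST_RDY bit in the CSQ depth register; the
 * driver uses this bit to hand-shake its reset readiness with the firmware.
 */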
3858 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3859 {
3860 u32 reg_val;
3861
3862 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3863 if (enable)
3864 reg_val |= HCLGE_NIC_SW_RST_RDY;
3865 else
3866 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3867
3868 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3869 }
3870
3871 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3872 {
3873 int ret;
3874
3875 ret = hclge_set_all_vf_rst(hdev, true);
3876 if (ret)
3877 return ret;
3878
3879 hclge_func_reset_sync_vf(hdev);
3880
3881 return 0;
3882 }
3883
3884 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3885 {
3886 u32 reg_val;
3887 int ret = 0;
3888
3889 switch (hdev->reset_type) {
3890 case HNAE3_FUNC_RESET:
3891 ret = hclge_func_reset_notify_vf(hdev);
3892 if (ret)
3893 return ret;
3894
3895 ret = hclge_func_reset_cmd(hdev, 0);
3896 if (ret) {
3897 dev_err(&hdev->pdev->dev,
3898 "asserting function reset fail %d!\n", ret);
3899 return ret;
3900 }
3901
3902 /* After performing PF reset, it is not necessary to do the
3903 * mailbox handling or send any command to firmware, because
3904 * any mailbox handling or command to firmware is only valid
3905 * after hclge_cmd_init is called.
3906 */
3907 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3908 hdev->rst_stats.pf_rst_cnt++;
3909 break;
3910 case HNAE3_FLR_RESET:
3911 ret = hclge_func_reset_notify_vf(hdev);
3912 if (ret)
3913 return ret;
3914 break;
3915 case HNAE3_IMP_RESET:
3916 hclge_handle_imp_error(hdev);
3917 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3918 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3919 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3920 break;
3921 default:
3922 break;
3923 }
3924
3925 /* inform hardware that preparatory work is done */
3926 msleep(HCLGE_RESET_SYNC_TIME);
3927 hclge_reset_handshake(hdev, true);
3928 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3929
3930 return ret;
3931 }
3932
3933 static void hclge_show_rst_info(struct hclge_dev *hdev)
3934 {
3935 char *buf;
3936
3937 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3938 if (!buf)
3939 return;
3940
3941 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3942
3943 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3944
3945 kfree(buf);
3946 }
3947
3948 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3949 {
3950 #define MAX_RESET_FAIL_CNT 5
3951
3952 if (hdev->reset_pending) {
3953 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3954 hdev->reset_pending);
3955 return true;
3956 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3957 HCLGE_RESET_INT_M) {
3958 dev_info(&hdev->pdev->dev,
3959 "reset failed because new reset interrupt\n");
3960 hclge_clear_reset_cause(hdev);
3961 return false;
3962 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3963 hdev->rst_stats.reset_fail_cnt++;
3964 set_bit(hdev->reset_type, &hdev->reset_pending);
3965 dev_info(&hdev->pdev->dev,
3966 "re-schedule reset task(%u)\n",
3967 hdev->rst_stats.reset_fail_cnt);
3968 return true;
3969 }
3970
3971 hclge_clear_reset_cause(hdev);
3972
3973 /* recover the handshake status when reset fail */
3974 hclge_reset_handshake(hdev, true);
3975
3976 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3977
3978 hclge_show_rst_info(hdev);
3979
3980 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3981
3982 return false;
3983 }
3984
3985 static void hclge_update_reset_level(struct hclge_dev *hdev)
3986 {
3987 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3988 enum hnae3_reset_type reset_level;
3989
3990 /* reset request will not be set during reset, so clear
3991 * pending reset request to avoid unnecessary reset
3992 * caused by the same reason.
3993 */
3994 hclge_get_reset_level(ae_dev, &hdev->reset_request);
3995
3996 /* if default_reset_request has a higher level reset request,
3997 * it should be handled as soon as possible, since some errors
3998 * can only be fixed by this kind of reset.
3999 */
4000 reset_level = hclge_get_reset_level(ae_dev,
4001 &hdev->default_reset_request);
4002 if (reset_level != HNAE3_NONE_RESET)
4003 set_bit(reset_level, &hdev->reset_request);
4004 }
4005
4006 static int hclge_set_rst_done(struct hclge_dev *hdev)
4007 {
4008 struct hclge_pf_rst_done_cmd *req;
4009 struct hclge_desc desc;
4010 int ret;
4011
4012 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4013 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4014 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4015
4016 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4017 /* To be compatible with the old firmware, which does not support
4018 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4019 * return success
4020 */
4021 if (ret == -EOPNOTSUPP) {
4022 dev_warn(&hdev->pdev->dev,
4023 "current firmware does not support command(0x%x)!\n",
4024 HCLGE_OPC_PF_RST_DONE);
4025 return 0;
4026 } else if (ret) {
4027 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4028 ret);
4029 }
4030
4031 return ret;
4032 }
4033
4034 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4035 {
4036 int ret = 0;
4037
4038 switch (hdev->reset_type) {
4039 case HNAE3_FUNC_RESET:
4040 case HNAE3_FLR_RESET:
4041 ret = hclge_set_all_vf_rst(hdev, false);
4042 break;
4043 case HNAE3_GLOBAL_RESET:
4044 case HNAE3_IMP_RESET:
4045 ret = hclge_set_rst_done(hdev);
4046 break;
4047 default:
4048 break;
4049 }
4050
4051 /* clear the handshake status after re-initialization is done */
4052 hclge_reset_handshake(hdev, false);
4053
4054 return ret;
4055 }
4056
4057 static int hclge_reset_stack(struct hclge_dev *hdev)
4058 {
4059 int ret;
4060
4061 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4062 if (ret)
4063 return ret;
4064
4065 ret = hclge_reset_ae_dev(hdev->ae_dev);
4066 if (ret)
4067 return ret;
4068
4069 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4070 }
4071
4072 static int hclge_reset_prepare(struct hclge_dev *hdev)
4073 {
4074 int ret;
4075
4076 hdev->rst_stats.reset_cnt++;
4077 /* perform reset of the stack & ae device for a client */
4078 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4079 if (ret)
4080 return ret;
4081
4082 rtnl_lock();
4083 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4084 rtnl_unlock();
4085 if (ret)
4086 return ret;
4087
4088 return hclge_reset_prepare_wait(hdev);
4089 }
4090
4091 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4092 {
4093 int ret;
4094
4095 hdev->rst_stats.hw_reset_done_cnt++;
4096
4097 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4098 if (ret)
4099 return ret;
4100
4101 rtnl_lock();
4102 ret = hclge_reset_stack(hdev);
4103 rtnl_unlock();
4104 if (ret)
4105 return ret;
4106
4107 hclge_clear_reset_cause(hdev);
4108
4109 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4110 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4111 * times
4112 */
4113 if (ret &&
4114 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4115 return ret;
4116
4117 ret = hclge_reset_prepare_up(hdev);
4118 if (ret)
4119 return ret;
4120
4121 rtnl_lock();
4122 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4123 rtnl_unlock();
4124 if (ret)
4125 return ret;
4126
4127 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4128 if (ret)
4129 return ret;
4130
4131 hdev->last_reset_time = jiffies;
4132 hdev->rst_stats.reset_fail_cnt = 0;
4133 hdev->rst_stats.reset_done_cnt++;
4134 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4135
4136 hclge_update_reset_level(hdev);
4137
4138 return 0;
4139 }
4140
4141 static void hclge_reset(struct hclge_dev *hdev)
4142 {
4143 if (hclge_reset_prepare(hdev))
4144 goto err_reset;
4145
4146 if (hclge_reset_wait(hdev))
4147 goto err_reset;
4148
4149 if (hclge_reset_rebuild(hdev))
4150 goto err_reset;
4151
4152 return;
4153
4154 err_reset:
4155 if (hclge_reset_err_handle(hdev))
4156 hclge_reset_task_schedule(hdev);
4157 }
4158
4159 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4160 {
4161 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4162 struct hclge_dev *hdev = ae_dev->priv;
4163
4164 /* We might end up getting called broadly because of the 2 cases below:
4165 * 1. Recoverable error was conveyed through APEI and only way to bring
4166 * normalcy is to reset.
4167 * 2. A new reset request from the stack due to timeout
4168 *
4169 * check if this is a new reset request and we are not here just because
4170 * last reset attempt did not succeed and watchdog hit us again. We will
4171 * know this if the last reset request did not occur very recently (watchdog
4172 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
4173 * In case of new request we reset the "reset level" to PF reset.
4174 * And if it is a repeat reset request of the most recent one then we
4175 * want to make sure we throttle the reset request. Therefore, we will
4176 * not allow it again before 3*HZ times.
4177 */
4178
4179 if (time_before(jiffies, (hdev->last_reset_time +
4180 HCLGE_RESET_INTERVAL))) {
4181 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4182 return;
4183 }
4184
4185 if (hdev->default_reset_request) {
4186 hdev->reset_level =
4187 hclge_get_reset_level(ae_dev,
4188 &hdev->default_reset_request);
4189 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4190 hdev->reset_level = HNAE3_FUNC_RESET;
4191 }
4192
4193 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4194 hdev->reset_level);
4195
4196 /* request reset & schedule reset task */
4197 set_bit(hdev->reset_level, &hdev->reset_request);
4198 hclge_reset_task_schedule(hdev);
4199
4200 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4201 hdev->reset_level++;
4202 }
4203
4204 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4205 enum hnae3_reset_type rst_type)
4206 {
4207 struct hclge_dev *hdev = ae_dev->priv;
4208
4209 set_bit(rst_type, &hdev->default_reset_request);
4210 }
4211
4212 static void hclge_reset_timer(struct timer_list *t)
4213 {
4214 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4215
4216 /* if default_reset_request has no value, it means that this reset
4217 * request has already been handled, so just return here
4218 */
4219 if (!hdev->default_reset_request)
4220 return;
4221
4222 dev_info(&hdev->pdev->dev,
4223 "triggering reset in reset timer\n");
4224 hclge_reset_event(hdev->pdev, NULL);
4225 }
4226
4227 static void hclge_reset_subtask(struct hclge_dev *hdev)
4228 {
4229 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4230
4231 /* check if there is any ongoing reset in the hardware. This status can
4232 * be checked from reset_pending. If there is, then we need to wait for
4233 * hardware to complete reset.
4234 * a. If we are able to figure out in reasonable time that hardware
4235 * has fully reset, then we can proceed with the driver and client
4236 * reset.
4237 * b. else, we can come back later to check this status so re-sched
4238 * now.
4239 */
4240 hdev->last_reset_time = jiffies;
4241 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4242 if (hdev->reset_type != HNAE3_NONE_RESET)
4243 hclge_reset(hdev);
4244
4245 /* check if we got any *new* reset requests to be honored */
4246 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4247 if (hdev->reset_type != HNAE3_NONE_RESET)
4248 hclge_do_reset(hdev);
4249
4250 hdev->reset_type = HNAE3_NONE_RESET;
4251 }
4252
4253 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4254 {
4255 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4256 enum hnae3_reset_type reset_type;
4257
4258 if (ae_dev->hw_err_reset_req) {
4259 reset_type = hclge_get_reset_level(ae_dev,
4260 &ae_dev->hw_err_reset_req);
4261 hclge_set_def_reset_request(ae_dev, reset_type);
4262 }
4263
4264 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4265 ae_dev->ops->reset_event(hdev->pdev, NULL);
4266
4267 /* enable interrupt after error handling complete */
4268 hclge_enable_vector(&hdev->misc_vector, true);
4269 }
4270
4271 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4272 {
4273 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4274
4275 ae_dev->hw_err_reset_req = 0;
4276
4277 if (hclge_find_error_source(hdev)) {
4278 hclge_handle_error_info_log(ae_dev);
4279 hclge_handle_mac_tnl(hdev);
4280 }
4281
4282 hclge_handle_err_reset_request(hdev);
4283 }
4284
4285 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4286 {
4287 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4288 struct device *dev = &hdev->pdev->dev;
4289 u32 msix_sts_reg;
4290
4291 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4292 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4293 if (hclge_handle_hw_msix_error
4294 (hdev, &hdev->default_reset_request))
4295 dev_info(dev, "received msix interrupt 0x%x\n",
4296 msix_sts_reg);
4297 }
4298
4299 hclge_handle_hw_ras_error(ae_dev);
4300
4301 hclge_handle_err_reset_request(hdev);
4302 }
4303
4304 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4305 {
4306 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4307 return;
4308
4309 if (hnae3_dev_ras_imp_supported(hdev))
4310 hclge_handle_err_recovery(hdev);
4311 else
4312 hclge_misc_err_recovery(hdev);
4313 }
4314
4315 static void hclge_reset_service_task(struct hclge_dev *hdev)
4316 {
4317 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4318 return;
4319
4320 down(&hdev->reset_sem);
4321 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4322
4323 hclge_reset_subtask(hdev);
4324
4325 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4326 up(&hdev->reset_sem);
4327 }
4328
4329 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4330 {
4331 int i;
4332
4333 /* start from vport 1, since the PF (vport 0) is always alive */
4334 for (i = 1; i < hdev->num_alloc_vport; i++) {
4335 struct hclge_vport *vport = &hdev->vport[i];
4336
4337 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4338 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4339
4340 /* If vf is not alive, set to default value */
4341 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4342 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4343 }
4344 }
4345
4346 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4347 {
4348 unsigned long delta = round_jiffies_relative(HZ);
4349
4350 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4351 return;
4352
4353 /* Always handle the link updating to make sure link state is
4354 * updated when it is triggered by mbx.
4355 */
4356 hclge_update_link_status(hdev);
4357 hclge_sync_mac_table(hdev);
4358 hclge_sync_promisc_mode(hdev);
4359 hclge_sync_fd_table(hdev);
4360
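/* If less than a second has passed since the last round, skip this
 * one and reschedule for the remaining time, so the periodic work
 * runs roughly once per second.
 */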
4361 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4362 delta = jiffies - hdev->last_serv_processed;
4363
4364 if (delta < round_jiffies_relative(HZ)) {
4365 delta = round_jiffies_relative(HZ) - delta;
4366 goto out;
4367 }
4368 }
4369
4370 hdev->serv_processed_cnt++;
4371 hclge_update_vport_alive(hdev);
4372
4373 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4374 hdev->last_serv_processed = jiffies;
4375 goto out;
4376 }
4377
4378 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4379 hclge_update_stats_for_all(hdev);
4380
4381 hclge_update_port_info(hdev);
4382 hclge_sync_vlan_filter(hdev);
4383
4384 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4385 hclge_rfs_filter_expire(hdev);
4386
4387 hdev->last_serv_processed = jiffies;
4388
4389 out:
4390 hclge_task_schedule(hdev, delta);
4391 }
4392
4393 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4394 {
4395 unsigned long flags;
4396
4397 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4398 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4399 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4400 return;
4401
4402 /* prevent racing with the irq handler */
4403 spin_lock_irqsave(&hdev->ptp->lock, flags);
4404
4405 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4406 * handler may handle it just before spin_lock_irqsave().
4407 */
4408 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4409 hclge_ptp_clean_tx_hwts(hdev);
4410
4411 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4412 }
4413
4414 static void hclge_service_task(struct work_struct *work)
4415 {
4416 struct hclge_dev *hdev =
4417 container_of(work, struct hclge_dev, service_task.work);
4418
4419 hclge_errhand_service_task(hdev);
4420 hclge_reset_service_task(hdev);
4421 hclge_ptp_service_task(hdev);
4422 hclge_mailbox_service_task(hdev);
4423 hclge_periodic_service_task(hdev);
4424
4425 /* Handle error recovery, reset and mbx again in case the periodic task
4426 * delays the handling by calling hclge_task_schedule() in
4427 * hclge_periodic_service_task().
4428 */
4429 hclge_errhand_service_task(hdev);
4430 hclge_reset_service_task(hdev);
4431 hclge_mailbox_service_task(hdev);
4432 }
4433
4434 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4435 {
4436 /* VF handle has no client */
4437 if (!handle->client)
4438 return container_of(handle, struct hclge_vport, nic);
4439 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4440 return container_of(handle, struct hclge_vport, roce);
4441 else
4442 return container_of(handle, struct hclge_vport, nic);
4443 }
4444
4445 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4446 struct hnae3_vector_info *vector_info)
4447 {
4448 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4449
4450 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4451
4452 /* vectors beyond the first HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 need an extended register offset */
4453 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4454 vector_info->io_addr = hdev->hw.io_base +
4455 HCLGE_VECTOR_REG_BASE +
4456 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4457 else
4458 vector_info->io_addr = hdev->hw.io_base +
4459 HCLGE_VECTOR_EXT_REG_BASE +
4460 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4461 HCLGE_VECTOR_REG_OFFSET_H +
4462 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4463 HCLGE_VECTOR_REG_OFFSET;
4464
4465 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4466 hdev->vector_irq[idx] = vector_info->vector;
4467 }
4468
4469 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4470 struct hnae3_vector_info *vector_info)
4471 {
4472 struct hclge_vport *vport = hclge_get_vport(handle);
4473 struct hnae3_vector_info *vector = vector_info;
4474 struct hclge_dev *hdev = vport->back;
4475 int alloc = 0;
4476 u16 i = 0;
4477 u16 j;
4478
4479 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4480 vector_num = min(hdev->num_msi_left, vector_num);
4481
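/* scan the vector table for unused entries (HCLGE_INVALID_VPORT) and
 * hand out up to vector_num of them to the caller
 */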
4482 for (j = 0; j < vector_num; j++) {
4483 while (++i < hdev->num_nic_msi) {
4484 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4485 hclge_get_vector_info(hdev, i, vector);
4486 vector++;
4487 alloc++;
4488
4489 break;
4490 }
4491 }
4492 }
4493 hdev->num_msi_left -= alloc;
4494 hdev->num_msi_used += alloc;
4495
4496 return alloc;
4497 }
4498
4499 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4500 {
4501 int i;
4502
4503 for (i = 0; i < hdev->num_msi; i++)
4504 if (vector == hdev->vector_irq[i])
4505 return i;
4506
4507 return -EINVAL;
4508 }
4509
4510 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4511 {
4512 struct hclge_vport *vport = hclge_get_vport(handle);
4513 struct hclge_dev *hdev = vport->back;
4514 int vector_id;
4515
4516 vector_id = hclge_get_vector_index(hdev, vector);
4517 if (vector_id < 0) {
4518 dev_err(&hdev->pdev->dev,
4519 "Get vector index fail. vector = %d\n", vector);
4520 return vector_id;
4521 }
4522
4523 hclge_free_vector(hdev, vector_id);
4524
4525 return 0;
4526 }
4527
4528 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4529 {
4530 return HCLGE_RSS_KEY_SIZE;
4531 }
4532
4533 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4534 const u8 hfunc, const u8 *key)
4535 {
4536 struct hclge_rss_config_cmd *req;
4537 unsigned int key_offset = 0;
4538 struct hclge_desc desc;
4539 int key_counts;
4540 int key_size;
4541 int ret;
4542
4543 key_counts = HCLGE_RSS_KEY_SIZE;
4544 req = (struct hclge_rss_config_cmd *)desc.data;
4545
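/* the hash key is programmed in chunks of HCLGE_RSS_HASH_KEY_NUM
 * bytes per descriptor; key_offset selects the chunk carried by the
 * current command
 */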
4546 while (key_counts) {
4547 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4548 false);
4549
4550 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4551 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4552
4553 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4554 memcpy(req->hash_key,
4555 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4556
4557 key_counts -= key_size;
4558 key_offset++;
4559 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4560 if (ret) {
4561 dev_err(&hdev->pdev->dev,
4562 "Configure RSS config fail, status = %d\n",
4563 ret);
4564 return ret;
4565 }
4566 }
4567 return 0;
4568 }
4569
4570 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4571 {
4572 struct hclge_rss_indirection_table_cmd *req;
4573 struct hclge_desc desc;
4574 int rss_cfg_tbl_num;
4575 u8 rss_msb_oft;
4576 u8 rss_msb_val;
4577 int ret;
4578 u16 qid;
4579 int i;
4580 u32 j;
4581
4582 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4583 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4584 HCLGE_RSS_CFG_TBL_SIZE;
4585
4586 for (i = 0; i < rss_cfg_tbl_num; i++) {
4587 hclge_cmd_setup_basic_desc
4588 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4589
4590 req->start_table_index =
4591 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4592 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
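/* the low 8 bits of each queue id go into rss_qid_l[], and the
 * remaining MSB is packed into the rss_qid_h[] bitmap
 */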
4593 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4594 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4595 req->rss_qid_l[j] = qid & 0xff;
4596 rss_msb_oft =
4597 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4598 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4599 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4600 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4601 }
4602 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4603 if (ret) {
4604 dev_err(&hdev->pdev->dev,
4605 "Configure rss indir table fail,status = %d\n",
4606 ret);
4607 return ret;
4608 }
4609 }
4610 return 0;
4611 }
4612
4613 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4614 u16 *tc_size, u16 *tc_offset)
4615 {
4616 struct hclge_rss_tc_mode_cmd *req;
4617 struct hclge_desc desc;
4618 int ret;
4619 int i;
4620
4621 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4622 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4623
4624 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4625 u16 mode = 0;
4626
4627 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4628 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4629 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4630 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4631 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4632 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4633 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4634
4635 req->rss_tc_mode[i] = cpu_to_le16(mode);
4636 }
4637
4638 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4639 if (ret)
4640 dev_err(&hdev->pdev->dev,
4641 "Configure rss tc mode fail, status = %d\n", ret);
4642
4643 return ret;
4644 }
4645
4646 static void hclge_get_rss_type(struct hclge_vport *vport)
4647 {
4648 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4649 vport->rss_tuple_sets.ipv4_udp_en ||
4650 vport->rss_tuple_sets.ipv4_sctp_en ||
4651 vport->rss_tuple_sets.ipv6_tcp_en ||
4652 vport->rss_tuple_sets.ipv6_udp_en ||
4653 vport->rss_tuple_sets.ipv6_sctp_en)
4654 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4655 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4656 vport->rss_tuple_sets.ipv6_fragment_en)
4657 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4658 else
4659 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4660 }
4661
4662 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4663 {
4664 struct hclge_rss_input_tuple_cmd *req;
4665 struct hclge_desc desc;
4666 int ret;
4667
4668 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4669
4670 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4671
4672 /* Get the tuple cfg from pf */
4673 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4674 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4675 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4676 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4677 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4678 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4679 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4680 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4681 hclge_get_rss_type(&hdev->vport[0]);
4682 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4683 if (ret)
4684 dev_err(&hdev->pdev->dev,
4685 "Configure rss input fail, status = %d\n", ret);
4686 return ret;
4687 }
4688
4689 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4690 u8 *key, u8 *hfunc)
4691 {
4692 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4693 struct hclge_vport *vport = hclge_get_vport(handle);
4694 int i;
4695
4696 /* Get hash algorithm */
4697 if (hfunc) {
4698 switch (vport->rss_algo) {
4699 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4700 *hfunc = ETH_RSS_HASH_TOP;
4701 break;
4702 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4703 *hfunc = ETH_RSS_HASH_XOR;
4704 break;
4705 default:
4706 *hfunc = ETH_RSS_HASH_UNKNOWN;
4707 break;
4708 }
4709 }
4710
4711 /* Get the RSS Key required by the user */
4712 if (key)
4713 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4714
4715 /* Get indirect table */
4716 if (indir)
4717 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4718 indir[i] = vport->rss_indirection_tbl[i];
4719
4720 return 0;
4721 }
4722
4723 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4724 u8 *hash_algo)
4725 {
4726 switch (hfunc) {
4727 case ETH_RSS_HASH_TOP:
4728 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4729 return 0;
4730 case ETH_RSS_HASH_XOR:
4731 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4732 return 0;
4733 case ETH_RSS_HASH_NO_CHANGE:
4734 *hash_algo = vport->rss_algo;
4735 return 0;
4736 default:
4737 return -EINVAL;
4738 }
4739 }
4740
4741 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4742 const u8 *key, const u8 hfunc)
4743 {
4744 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4745 struct hclge_vport *vport = hclge_get_vport(handle);
4746 struct hclge_dev *hdev = vport->back;
4747 u8 hash_algo;
4748 int ret, i;
4749
4750 ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4751 if (ret) {
4752 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4753 return ret;
4754 }
4755
4756 /* Set the RSS Hash Key if specified by the user */
4757 if (key) {
4758 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4759 if (ret)
4760 return ret;
4761
4762 /* Update the shadow RSS key with the user specified key */
4763 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4764 } else {
4765 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4766 vport->rss_hash_key);
4767 if (ret)
4768 return ret;
4769 }
4770 vport->rss_algo = hash_algo;
4771
4772 /* Update the shadow RSS table with user specified qids */
4773 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4774 vport->rss_indirection_tbl[i] = indir[i];
4775
4776 /* Update the hardware */
4777 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4778 }
4779
4780 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4781 {
4782 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4783
4784 if (nfc->data & RXH_L4_B_2_3)
4785 hash_sets |= HCLGE_D_PORT_BIT;
4786 else
4787 hash_sets &= ~HCLGE_D_PORT_BIT;
4788
4789 if (nfc->data & RXH_IP_SRC)
4790 hash_sets |= HCLGE_S_IP_BIT;
4791 else
4792 hash_sets &= ~HCLGE_S_IP_BIT;
4793
4794 if (nfc->data & RXH_IP_DST)
4795 hash_sets |= HCLGE_D_IP_BIT;
4796 else
4797 hash_sets &= ~HCLGE_D_IP_BIT;
4798
4799 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4800 hash_sets |= HCLGE_V_TAG_BIT;
4801
4802 return hash_sets;
4803 }
4804
4805 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4806 struct ethtool_rxnfc *nfc,
4807 struct hclge_rss_input_tuple_cmd *req)
4808 {
4809 struct hclge_dev *hdev = vport->back;
4810 u8 tuple_sets;
4811
4812 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4813 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4814 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4815 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4816 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4817 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4818 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4819 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4820
4821 tuple_sets = hclge_get_rss_hash_bits(nfc);
4822 switch (nfc->flow_type) {
4823 case TCP_V4_FLOW:
4824 req->ipv4_tcp_en = tuple_sets;
4825 break;
4826 case TCP_V6_FLOW:
4827 req->ipv6_tcp_en = tuple_sets;
4828 break;
4829 case UDP_V4_FLOW:
4830 req->ipv4_udp_en = tuple_sets;
4831 break;
4832 case UDP_V6_FLOW:
4833 req->ipv6_udp_en = tuple_sets;
4834 break;
4835 case SCTP_V4_FLOW:
4836 req->ipv4_sctp_en = tuple_sets;
4837 break;
4838 case SCTP_V6_FLOW:
4839 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4840 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4841 return -EINVAL;
4842
4843 req->ipv6_sctp_en = tuple_sets;
4844 break;
4845 case IPV4_FLOW:
4846 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4847 break;
4848 case IPV6_FLOW:
4849 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4850 break;
4851 default:
4852 return -EINVAL;
4853 }
4854
4855 return 0;
4856 }
4857
4858 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4859 struct ethtool_rxnfc *nfc)
4860 {
4861 struct hclge_vport *vport = hclge_get_vport(handle);
4862 struct hclge_dev *hdev = vport->back;
4863 struct hclge_rss_input_tuple_cmd *req;
4864 struct hclge_desc desc;
4865 int ret;
4866
4867 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4868 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4869 return -EINVAL;
4870
4871 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4872 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4873
4874 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4875 if (ret) {
4876 dev_err(&hdev->pdev->dev,
4877 "failed to init rss tuple cmd, ret = %d\n", ret);
4878 return ret;
4879 }
4880
4881 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4882 if (ret) {
4883 dev_err(&hdev->pdev->dev,
4884 "Set rss tuple fail, status = %d\n", ret);
4885 return ret;
4886 }
4887
4888 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4889 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4890 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4891 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4892 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4893 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4894 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4895 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4896 hclge_get_rss_type(vport);
4897 return 0;
4898 }
4899
4900 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4901 u8 *tuple_sets)
4902 {
4903 switch (flow_type) {
4904 case TCP_V4_FLOW:
4905 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4906 break;
4907 case UDP_V4_FLOW:
4908 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4909 break;
4910 case TCP_V6_FLOW:
4911 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4912 break;
4913 case UDP_V6_FLOW:
4914 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4915 break;
4916 case SCTP_V4_FLOW:
4917 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4918 break;
4919 case SCTP_V6_FLOW:
4920 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4921 break;
4922 case IPV4_FLOW:
4923 case IPV6_FLOW:
4924 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4925 break;
4926 default:
4927 return -EINVAL;
4928 }
4929
4930 return 0;
4931 }
4932
4933 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4934 {
4935 u64 tuple_data = 0;
4936
4937 if (tuple_sets & HCLGE_D_PORT_BIT)
4938 tuple_data |= RXH_L4_B_2_3;
4939 if (tuple_sets & HCLGE_S_PORT_BIT)
4940 tuple_data |= RXH_L4_B_0_1;
4941 if (tuple_sets & HCLGE_D_IP_BIT)
4942 tuple_data |= RXH_IP_DST;
4943 if (tuple_sets & HCLGE_S_IP_BIT)
4944 tuple_data |= RXH_IP_SRC;
4945
4946 return tuple_data;
4947 }
4948
4949 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4950 struct ethtool_rxnfc *nfc)
4951 {
4952 struct hclge_vport *vport = hclge_get_vport(handle);
4953 u8 tuple_sets;
4954 int ret;
4955
4956 nfc->data = 0;
4957
4958 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4959 if (ret || !tuple_sets)
4960 return ret;
4961
4962 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4963
4964 return 0;
4965 }
4966
4967 static int hclge_get_tc_size(struct hnae3_handle *handle)
4968 {
4969 struct hclge_vport *vport = hclge_get_vport(handle);
4970 struct hclge_dev *hdev = vport->back;
4971
4972 return hdev->pf_rss_size_max;
4973 }
4974
4975 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4976 {
4977 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4978 struct hclge_vport *vport = hdev->vport;
4979 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4980 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4981 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4982 struct hnae3_tc_info *tc_info;
4983 u16 roundup_size;
4984 u16 rss_size;
4985 int i;
4986
4987 tc_info = &vport->nic.kinfo.tc_info;
4988 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4989 rss_size = tc_info->tqp_count[i];
4990 tc_valid[i] = 0;
4991
4992 if (!(hdev->hw_tc_map & BIT(i)))
4993 continue;
4994
4995 /* tc_size set to hardware is the log2 of roundup power of two
4996 * of rss_size; the actual queue size is limited by the indirection
4997 * table.
4998 */
4999 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5000 rss_size == 0) {
5001 dev_err(&hdev->pdev->dev,
5002 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5003 rss_size);
5004 return -EINVAL;
5005 }
5006
5007 roundup_size = roundup_pow_of_two(rss_size);
5008 roundup_size = ilog2(roundup_size);
5009
5010 tc_valid[i] = 1;
5011 tc_size[i] = roundup_size;
5012 tc_offset[i] = tc_info->tqp_offset[i];
5013 }
5014
5015 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5016 }
5017
5018 int hclge_rss_init_hw(struct hclge_dev *hdev)
5019 {
5020 struct hclge_vport *vport = hdev->vport;
5021 u16 *rss_indir = vport[0].rss_indirection_tbl;
5022 u8 *key = vport[0].rss_hash_key;
5023 u8 hfunc = vport[0].rss_algo;
5024 int ret;
5025
5026 ret = hclge_set_rss_indir_table(hdev, rss_indir);
5027 if (ret)
5028 return ret;
5029
5030 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5031 if (ret)
5032 return ret;
5033
5034 ret = hclge_set_rss_input_tuple(hdev);
5035 if (ret)
5036 return ret;
5037
5038 return hclge_init_rss_tc_mode(hdev);
5039 }
5040
5041 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5042 {
5043 struct hclge_vport *vport = &hdev->vport[0];
5044 int i;
5045
5046 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5047 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5048 }
5049
5050 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5051 {
5052 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5053 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5054 struct hclge_vport *vport = &hdev->vport[0];
5055 u16 *rss_ind_tbl;
5056
5057 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5058 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5059
5060 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5061 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5062 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5063 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5064 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5065 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5066 vport->rss_tuple_sets.ipv6_sctp_en =
5067 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5068 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5069 HCLGE_RSS_INPUT_TUPLE_SCTP;
5070 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5071
5072 vport->rss_algo = rss_algo;
5073
5074 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5075 sizeof(*rss_ind_tbl), GFP_KERNEL);
5076 if (!rss_ind_tbl)
5077 return -ENOMEM;
5078
5079 vport->rss_indirection_tbl = rss_ind_tbl;
5080 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5081
5082 hclge_rss_indir_init_cfg(hdev);
5083
5084 return 0;
5085 }
5086
5087 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5088 int vector_id, bool en,
5089 struct hnae3_ring_chain_node *ring_chain)
5090 {
5091 struct hclge_dev *hdev = vport->back;
5092 struct hnae3_ring_chain_node *node;
5093 struct hclge_desc desc;
5094 struct hclge_ctrl_vector_chain_cmd *req =
5095 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5096 enum hclge_cmd_status status;
5097 enum hclge_opcode_type op;
5098 u16 tqp_type_and_id;
5099 int i;
5100
5101 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5102 hclge_cmd_setup_basic_desc(&desc, op, false);
5103 req->int_vector_id_l = hnae3_get_field(vector_id,
5104 HCLGE_VECTOR_ID_L_M,
5105 HCLGE_VECTOR_ID_L_S);
5106 req->int_vector_id_h = hnae3_get_field(vector_id,
5107 HCLGE_VECTOR_ID_H_M,
5108 HCLGE_VECTOR_ID_H_S);
5109
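/* walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring entries per descriptor; a full descriptor is sent and a fresh
 * one is set up for the remaining rings
 */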
5110 i = 0;
5111 for (node = ring_chain; node; node = node->next) {
5112 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5113 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5114 HCLGE_INT_TYPE_S,
5115 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5116 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5117 HCLGE_TQP_ID_S, node->tqp_index);
5118 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5119 HCLGE_INT_GL_IDX_S,
5120 hnae3_get_field(node->int_gl_idx,
5121 HNAE3_RING_GL_IDX_M,
5122 HNAE3_RING_GL_IDX_S));
5123 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5124 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5125 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5126 req->vfid = vport->vport_id;
5127
5128 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5129 if (status) {
5130 dev_err(&hdev->pdev->dev,
5131 "Map TQP fail, status is %d.\n",
5132 status);
5133 return -EIO;
5134 }
5135 i = 0;
5136
5137 hclge_cmd_setup_basic_desc(&desc,
5138 op,
5139 false);
5140 req->int_vector_id_l =
5141 hnae3_get_field(vector_id,
5142 HCLGE_VECTOR_ID_L_M,
5143 HCLGE_VECTOR_ID_L_S);
5144 req->int_vector_id_h =
5145 hnae3_get_field(vector_id,
5146 HCLGE_VECTOR_ID_H_M,
5147 HCLGE_VECTOR_ID_H_S);
5148 }
5149 }
5150
5151 if (i > 0) {
5152 req->int_cause_num = i;
5153 req->vfid = vport->vport_id;
5154 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5155 if (status) {
5156 dev_err(&hdev->pdev->dev,
5157 "Map TQP fail, status is %d.\n", status);
5158 return -EIO;
5159 }
5160 }
5161
5162 return 0;
5163 }
5164
static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5166 struct hnae3_ring_chain_node *ring_chain)
5167 {
5168 struct hclge_vport *vport = hclge_get_vport(handle);
5169 struct hclge_dev *hdev = vport->back;
5170 int vector_id;
5171
5172 vector_id = hclge_get_vector_index(hdev, vector);
5173 if (vector_id < 0) {
5174 dev_err(&hdev->pdev->dev,
5175 "failed to get vector index. vector=%d\n", vector);
5176 return vector_id;
5177 }
5178
5179 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5180 }
5181
static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5183 struct hnae3_ring_chain_node *ring_chain)
5184 {
5185 struct hclge_vport *vport = hclge_get_vport(handle);
5186 struct hclge_dev *hdev = vport->back;
5187 int vector_id, ret;
5188
5189 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5190 return 0;
5191
5192 vector_id = hclge_get_vector_index(hdev, vector);
5193 if (vector_id < 0) {
5194 dev_err(&handle->pdev->dev,
5195 "Get vector index fail. ret =%d\n", vector_id);
5196 return vector_id;
5197 }
5198
5199 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5200 if (ret)
5201 dev_err(&handle->pdev->dev,
5202 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5203 vector_id, ret);
5204
5205 return ret;
5206 }
5207
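/* Configure promiscuous mode for vport @vf_id. The extend_promisc byte
 * carries separate RX/TX enables for unicast, multicast and broadcast
 * traffic (unicast TX promiscuity is suppressed when the limit-promisc
 * private flag is set), while the legacy promisc byte is kept for
 * compatibility with DEVICE_VERSION_V1/2.
 */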
static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5209 bool en_uc, bool en_mc, bool en_bc)
5210 {
5211 struct hclge_vport *vport = &hdev->vport[vf_id];
5212 struct hnae3_handle *handle = &vport->nic;
5213 struct hclge_promisc_cfg_cmd *req;
5214 struct hclge_desc desc;
5215 bool uc_tx_en = en_uc;
5216 u8 promisc_cfg = 0;
5217 int ret;
5218
5219 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5220
5221 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5222 req->vf_id = vf_id;
5223
5224 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5225 uc_tx_en = false;
5226
5227 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5228 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5229 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5230 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5231 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5232 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5233 req->extend_promisc = promisc_cfg;
5234
5235 /* to be compatible with DEVICE_VERSION_V1/2 */
5236 promisc_cfg = 0;
5237 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5238 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5239 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5240 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5241 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5242 req->promisc = promisc_cfg;
5243
5244 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5245 if (ret)
5246 dev_err(&hdev->pdev->dev,
5247 "failed to set vport %u promisc mode, ret = %d.\n",
5248 vf_id, ret);
5249
5250 return ret;
5251 }
5252
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5254 bool en_mc_pmc, bool en_bc_pmc)
5255 {
5256 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5257 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5258 }
5259
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5261 bool en_mc_pmc)
5262 {
5263 struct hclge_vport *vport = hclge_get_vport(handle);
5264 struct hclge_dev *hdev = vport->back;
5265 bool en_bc_pmc = true;
5266
	/* For devices whose version is below V2, the vlan filter is always
	 * bypassed when broadcast promiscuous mode is enabled. So broadcast
	 * promiscuous mode should stay disabled until the user enables
	 * promisc mode.
	 */
5271 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5272 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5273
5274 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5275 en_bc_pmc);
5276 }
5277
static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5279 {
5280 struct hclge_vport *vport = hclge_get_vport(handle);
5281
5282 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5283 }
5284
static void hclge_sync_fd_state(struct hclge_dev *hdev)
5286 {
5287 if (hlist_empty(&hdev->fd_rule_list))
5288 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5289 }
5290
static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5292 {
5293 if (!test_bit(location, hdev->fd_bmap)) {
5294 set_bit(location, hdev->fd_bmap);
5295 hdev->hclge_fd_rule_num++;
5296 }
5297 }
5298
static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5300 {
5301 if (test_bit(location, hdev->fd_bmap)) {
5302 clear_bit(location, hdev->fd_bmap);
5303 hdev->hclge_fd_rule_num--;
5304 }
5305 }
5306
static void hclge_fd_free_node(struct hclge_dev *hdev,
5308 struct hclge_fd_rule *rule)
5309 {
5310 hlist_del(&rule->rule_node);
5311 kfree(rule);
5312 hclge_sync_fd_state(hdev);
5313 }
5314
static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5316 struct hclge_fd_rule *old_rule,
5317 struct hclge_fd_rule *new_rule,
5318 enum HCLGE_FD_NODE_STATE state)
5319 {
5320 switch (state) {
5321 case HCLGE_FD_TO_ADD:
5322 case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * at the same location, no matter what its state is, because
		 * the new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, the new rule has already
		 * been configured to the hardware, so just replace the old
		 * rule node at the same location.
		 * 3) neither case adds a new node to the list, so there is
		 * no need to update the rule number or fd_bmap.
		 */
5332 new_rule->rule_node.next = old_rule->rule_node.next;
5333 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5334 memcpy(old_rule, new_rule, sizeof(*old_rule));
5335 kfree(new_rule);
5336 break;
5337 case HCLGE_FD_DELETED:
5338 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5339 hclge_fd_free_node(hdev, old_rule);
5340 break;
5341 case HCLGE_FD_TO_DEL:
		/* if the new request is TO_DEL and the old rule exists:
		 * 1) if the old rule's state is TO_DEL, do nothing, because
		 * rules are deleted by location and the rest of the rule
		 * content is irrelevant.
		 * 2) if the old rule's state is ACTIVE, change it to TO_DEL,
		 * so the rule will be deleted when the periodic task is
		 * scheduled.
		 * 3) if the old rule's state is TO_ADD, the rule hasn't been
		 * added to the hardware yet, so just delete the rule node
		 * from fd_rule_list directly.
		 */
5353 if (old_rule->state == HCLGE_FD_TO_ADD) {
5354 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5355 hclge_fd_free_node(hdev, old_rule);
5356 return;
5357 }
5358 old_rule->state = HCLGE_FD_TO_DEL;
5359 break;
5360 }
5361 }
5362
static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5364 u16 location,
5365 struct hclge_fd_rule **parent)
5366 {
5367 struct hclge_fd_rule *rule;
5368 struct hlist_node *node;
5369
5370 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5371 if (rule->location == location)
5372 return rule;
5373 else if (rule->location > location)
5374 return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
5378 *parent = rule;
5379 }
5380
5381 return NULL;
5382 }
5383
/* insert fd rule node in ascending order according to rule->location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5386 struct hclge_fd_rule *rule,
5387 struct hclge_fd_rule *parent)
5388 {
5389 INIT_HLIST_NODE(&rule->rule_node);
5390
5391 if (parent)
5392 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5393 else
5394 hlist_add_head(&rule->rule_node, hlist);
5395 }
5396
static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5398 struct hclge_fd_user_def_cfg *cfg)
5399 {
5400 struct hclge_fd_user_def_cfg_cmd *req;
5401 struct hclge_desc desc;
5402 u16 data = 0;
5403 int ret;
5404
5405 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5406
5407 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5408
5409 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5410 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5411 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5412 req->ol2_cfg = cpu_to_le16(data);
5413
5414 data = 0;
5415 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5416 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5417 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5418 req->ol3_cfg = cpu_to_le16(data);
5419
5420 data = 0;
5421 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5422 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5423 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5424 req->ol4_cfg = cpu_to_le16(data);
5425
5426 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5427 if (ret)
5428 dev_err(&hdev->pdev->dev,
5429 "failed to set fd user def data, ret= %d\n", ret);
5430 return ret;
5431 }
5432
static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5434 {
5435 int ret;
5436
5437 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5438 return;
5439
5440 if (!locked)
5441 spin_lock_bh(&hdev->fd_rule_lock);
5442
5443 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5444 if (ret)
5445 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5446
5447 if (!locked)
5448 spin_unlock_bh(&hdev->fd_rule_lock);
5449 }
5450
static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5452 struct hclge_fd_rule *rule)
5453 {
5454 struct hlist_head *hlist = &hdev->fd_rule_list;
5455 struct hclge_fd_rule *fd_rule, *parent = NULL;
5456 struct hclge_fd_user_def_info *info, *old_info;
5457 struct hclge_fd_user_def_cfg *cfg;
5458
5459 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5460 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5461 return 0;
5462
	/* valid layers start from 1, so subtract 1 to get the cfg entry */
5464 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5465 info = &rule->ep.user_def;
5466
5467 if (!cfg->ref_cnt || cfg->offset == info->offset)
5468 return 0;
5469
5470 if (cfg->ref_cnt > 1)
5471 goto error;
5472
5473 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5474 if (fd_rule) {
5475 old_info = &fd_rule->ep.user_def;
5476 if (info->layer == old_info->layer)
5477 return 0;
5478 }
5479
5480 error:
5481 dev_err(&hdev->pdev->dev,
		"No available offset for layer %d fd rule, each layer only supports one user-def offset.\n",
5483 info->layer + 1);
5484 return -ENOSPC;
5485 }
5486
static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5488 struct hclge_fd_rule *rule)
5489 {
5490 struct hclge_fd_user_def_cfg *cfg;
5491
5492 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5493 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5494 return;
5495
5496 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5497 if (!cfg->ref_cnt) {
5498 cfg->offset = rule->ep.user_def.offset;
5499 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5500 }
5501 cfg->ref_cnt++;
5502 }
5503
static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5505 struct hclge_fd_rule *rule)
5506 {
5507 struct hclge_fd_user_def_cfg *cfg;
5508
5509 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5510 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5511 return;
5512
5513 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5514 if (!cfg->ref_cnt)
5515 return;
5516
5517 cfg->ref_cnt--;
5518 if (!cfg->ref_cnt) {
5519 cfg->offset = 0;
5520 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5521 }
5522 }
5523
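/* Update the fd rule list for @location: if a rule already exists there,
 * drop the old rule's user-def reference (taking a new one only when the
 * new state is ACTIVE) and let hclge_update_fd_rule_node() handle the state
 * transition; otherwise insert the new rule in ascending order of location
 * and, for the TO_ADD state, schedule the periodic task to push it to the
 * hardware.
 */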
static void hclge_update_fd_list(struct hclge_dev *hdev,
5525 enum HCLGE_FD_NODE_STATE state, u16 location,
5526 struct hclge_fd_rule *new_rule)
5527 {
5528 struct hlist_head *hlist = &hdev->fd_rule_list;
5529 struct hclge_fd_rule *fd_rule, *parent = NULL;
5530
5531 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5532 if (fd_rule) {
5533 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5534 if (state == HCLGE_FD_ACTIVE)
5535 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5536 hclge_sync_fd_user_def_cfg(hdev, true);
5537
5538 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5539 return;
5540 }
5541
	/* It's unlikely to fail here, because the existence of the rule
	 * has already been checked by the caller.
	 */
5545 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5546 dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it does not exist\n",
5548 location);
5549 return;
5550 }
5551
5552 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5553 hclge_sync_fd_user_def_cfg(hdev, true);
5554
5555 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5556 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5557
5558 if (state == HCLGE_FD_TO_ADD) {
5559 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5560 hclge_task_schedule(hdev, 0);
5561 }
5562 }
5563
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5565 {
5566 struct hclge_get_fd_mode_cmd *req;
5567 struct hclge_desc desc;
5568 int ret;
5569
5570 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5571
5572 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5573
5574 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5575 if (ret) {
5576 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5577 return ret;
5578 }
5579
5580 *fd_mode = req->mode;
5581
5582 return ret;
5583 }
5584
static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5586 u32 *stage1_entry_num,
5587 u32 *stage2_entry_num,
5588 u16 *stage1_counter_num,
5589 u16 *stage2_counter_num)
5590 {
5591 struct hclge_get_fd_allocation_cmd *req;
5592 struct hclge_desc desc;
5593 int ret;
5594
5595 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5596
5597 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5598
5599 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5600 if (ret) {
5601 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5602 ret);
5603 return ret;
5604 }
5605
5606 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5607 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5608 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5609 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5610
5611 return ret;
5612 }
5613
static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5615 enum HCLGE_FD_STAGE stage_num)
5616 {
5617 struct hclge_set_fd_key_config_cmd *req;
5618 struct hclge_fd_key_cfg *stage;
5619 struct hclge_desc desc;
5620 int ret;
5621
5622 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5623
5624 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5625 stage = &hdev->fd_cfg.key_cfg[stage_num];
5626 req->stage = stage_num;
5627 req->key_select = stage->key_sel;
5628 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5629 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5630 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5631 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5632 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5633 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5634
5635 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5636 if (ret)
5637 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5638
5639 return ret;
5640 }
5641
static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5643 {
5644 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5645
5646 spin_lock_bh(&hdev->fd_rule_lock);
5647 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5648 spin_unlock_bh(&hdev->fd_rule_lock);
5649
5650 hclge_fd_set_user_def_cmd(hdev, cfg);
5651 }
5652
static int hclge_init_fd_config(struct hclge_dev *hdev)
5654 {
5655 #define LOW_2_WORDS 0x03
5656 struct hclge_fd_key_cfg *key_cfg;
5657 int ret;
5658
5659 if (!hnae3_dev_fd_supported(hdev))
5660 return 0;
5661
5662 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5663 if (ret)
5664 return ret;
5665
5666 switch (hdev->fd_cfg.fd_mode) {
5667 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5668 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5669 break;
5670 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5671 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5672 break;
5673 default:
5674 dev_err(&hdev->pdev->dev,
5675 "Unsupported flow director mode %u\n",
5676 hdev->fd_cfg.fd_mode);
5677 return -EOPNOTSUPP;
5678 }
5679
5680 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5681 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5682 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5683 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5684 key_cfg->outer_sipv6_word_en = 0;
5685 key_cfg->outer_dipv6_word_en = 0;
5686
5687 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5688 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5689 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5690 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5691
	/* When using the max 400-bit key, the src/dst MAC tuples can also be supported */
5693 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5694 key_cfg->tuple_active |=
5695 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5696 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5697 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5698 }
5699
	/* roce_type is used to filter RoCE frames
	 * dst_vport is used to specify the vport the rule applies to
	 */
5703 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5704
5705 ret = hclge_get_fd_allocation(hdev,
5706 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5707 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5708 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5709 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5710 if (ret)
5711 return ret;
5712
5713 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5714 }
5715
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5717 int loc, u8 *key, bool is_add)
5718 {
5719 struct hclge_fd_tcam_config_1_cmd *req1;
5720 struct hclge_fd_tcam_config_2_cmd *req2;
5721 struct hclge_fd_tcam_config_3_cmd *req3;
5722 struct hclge_desc desc[3];
5723 int ret;
5724
5725 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5726 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5727 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5728 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5729 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5730
5731 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5732 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5733 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5734
5735 req1->stage = stage;
5736 req1->xy_sel = sel_x ? 1 : 0;
5737 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5738 req1->index = cpu_to_le32(loc);
5739 req1->entry_vld = sel_x ? is_add : 0;
5740
5741 if (key) {
5742 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5743 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5744 sizeof(req2->tcam_data));
5745 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5746 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5747 }
5748
5749 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5750 if (ret)
5751 dev_err(&hdev->pdev->dev,
5752 "config tcam key fail, ret=%d\n",
5753 ret);
5754
5755 return ret;
5756 }
5757
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5759 struct hclge_fd_ad_data *action)
5760 {
5761 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5762 struct hclge_fd_ad_config_cmd *req;
5763 struct hclge_desc desc;
5764 u64 ad_data = 0;
5765 int ret;
5766
5767 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5768
5769 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5770 req->index = cpu_to_le32(loc);
5771 req->stage = stage;
5772
5773 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5774 action->write_rule_id_to_bd);
5775 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5776 action->rule_id);
5777 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5778 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5779 action->override_tc);
5780 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5781 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5782 }
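	/* The rule id and TC override fields written above land in the upper
	 * 32 bits of ad_data once it is shifted below; the remaining action
	 * fields fill the lower 32 bits.
	 */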
5783 ad_data <<= 32;
5784 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5785 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5786 action->forward_to_direct_queue);
5787 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5788 action->queue_id);
5789 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5790 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5791 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5792 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5793 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5794 action->counter_id);
5795
5796 req->ad_data = cpu_to_le64(ad_data);
5797 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5798 if (ret)
5799 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5800
5801 return ret;
5802 }
5803
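/* Convert one tuple of @rule into a TCAM x/y key pair at @key_x/@key_y using
 * the calc_x()/calc_y() helpers on the tuple value and its mask. Returns
 * false only for an unknown tuple type; tuples marked unused are skipped and
 * also return true.
 */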
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5805 struct hclge_fd_rule *rule)
5806 {
5807 int offset, moffset, ip_offset;
5808 enum HCLGE_FD_KEY_OPT key_opt;
5809 u16 tmp_x_s, tmp_y_s;
5810 u32 tmp_x_l, tmp_y_l;
5811 u8 *p = (u8 *)rule;
5812 int i;
5813
5814 if (rule->unused_tuple & BIT(tuple_bit))
5815 return true;
5816
5817 key_opt = tuple_key_info[tuple_bit].key_opt;
5818 offset = tuple_key_info[tuple_bit].offset;
5819 moffset = tuple_key_info[tuple_bit].moffset;
5820
5821 switch (key_opt) {
5822 case KEY_OPT_U8:
5823 calc_x(*key_x, p[offset], p[moffset]);
5824 calc_y(*key_y, p[offset], p[moffset]);
5825
5826 return true;
5827 case KEY_OPT_LE16:
5828 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5829 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5830 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5831 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5832
5833 return true;
5834 case KEY_OPT_LE32:
5835 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5836 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5837 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5838 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5839
5840 return true;
5841 case KEY_OPT_MAC:
5842 for (i = 0; i < ETH_ALEN; i++) {
5843 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5844 p[moffset + i]);
5845 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5846 p[moffset + i]);
5847 }
5848
5849 return true;
5850 case KEY_OPT_IP:
5851 ip_offset = IPV4_INDEX * sizeof(u32);
5852 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5853 *(u32 *)(&p[moffset + ip_offset]));
5854 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5855 *(u32 *)(&p[moffset + ip_offset]));
5856 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5857 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5858
5859 return true;
5860 default:
5861 return false;
5862 }
5863 }
5864
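/* Compose the meta data "port number": for HOST_PORT it packs the pf id and
 * vf id, for NETWORK_PORT it packs the physical port id, with the port type
 * recorded in HCLGE_PORT_TYPE_B.
 */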
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5866 u8 vf_id, u8 network_port_id)
5867 {
5868 u32 port_number = 0;
5869
5870 if (port_type == HOST_PORT) {
5871 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5872 pf_id);
5873 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5874 vf_id);
5875 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5876 } else {
5877 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5878 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5879 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5880 }
5881
5882 return port_number;
5883 }
5884
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5886 __le32 *key_x, __le32 *key_y,
5887 struct hclge_fd_rule *rule)
5888 {
5889 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5890 u8 cur_pos = 0, tuple_size, shift_bits;
5891 unsigned int i;
5892
5893 for (i = 0; i < MAX_META_DATA; i++) {
5894 tuple_size = meta_data_key_info[i].key_length;
5895 tuple_bit = key_cfg->meta_data_active & BIT(i);
5896
5897 switch (tuple_bit) {
5898 case BIT(ROCE_TYPE):
5899 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5900 cur_pos += tuple_size;
5901 break;
5902 case BIT(DST_VPORT):
5903 port_number = hclge_get_port_number(HOST_PORT, 0,
5904 rule->vf_id, 0);
5905 hnae3_set_field(meta_data,
5906 GENMASK(cur_pos + tuple_size, cur_pos),
5907 cur_pos, port_number);
5908 cur_pos += tuple_size;
5909 break;
5910 default:
5911 break;
5912 }
5913 }
5914
5915 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5916 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5917 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5918
5919 *key_x = cpu_to_le32(tmp_x << shift_bits);
5920 *key_y = cpu_to_le32(tmp_y << shift_bits);
5921 }
5922
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region and the tuple key in the
 * LSB region; unused bits are filled with 0.
 */
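/* Illustrative layout derived from hclge_config_key() below (a sketch, not
 * taken from hardware documentation): bytes [0, meta_data_region) of
 * key_x/key_y hold the tuple key and bytes
 * [meta_data_region, max_key_length / 8) hold the meta data key, where
 * meta_data_region = max_key_length / 8 - MAX_META_DATA_LENGTH / 8.
 */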
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5928 struct hclge_fd_rule *rule)
5929 {
5930 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5931 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5932 u8 *cur_key_x, *cur_key_y;
5933 u8 meta_data_region;
5934 u8 tuple_size;
5935 int ret;
5936 u32 i;
5937
5938 memset(key_x, 0, sizeof(key_x));
5939 memset(key_y, 0, sizeof(key_y));
5940 cur_key_x = key_x;
5941 cur_key_y = key_y;
5942
5943 for (i = 0; i < MAX_TUPLE; i++) {
5944 bool tuple_valid;
5945
5946 tuple_size = tuple_key_info[i].key_length / 8;
5947 if (!(key_cfg->tuple_active & BIT(i)))
5948 continue;
5949
5950 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5951 cur_key_y, rule);
5952 if (tuple_valid) {
5953 cur_key_x += tuple_size;
5954 cur_key_y += tuple_size;
5955 }
5956 }
5957
5958 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5959 MAX_META_DATA_LENGTH / 8;
5960
5961 hclge_fd_convert_meta_data(key_cfg,
5962 (__le32 *)(key_x + meta_data_region),
5963 (__le32 *)(key_y + meta_data_region),
5964 rule);
5965
5966 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5967 true);
5968 if (ret) {
5969 dev_err(&hdev->pdev->dev,
5970 "fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5972 return ret;
5973 }
5974
5975 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5976 true);
5977 if (ret)
5978 dev_err(&hdev->pdev->dev,
5979 "fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5981 return ret;
5982 }
5983
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5985 struct hclge_fd_rule *rule)
5986 {
5987 struct hclge_vport *vport = hdev->vport;
5988 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5989 struct hclge_fd_ad_data ad_data;
5990
5991 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5992 ad_data.ad_id = rule->location;
5993
5994 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5995 ad_data.drop_packet = true;
5996 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5997 ad_data.override_tc = true;
5998 ad_data.queue_id =
5999 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6000 ad_data.tc_size =
6001 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6002 } else {
6003 ad_data.forward_to_direct_queue = true;
6004 ad_data.queue_id = rule->queue_id;
6005 }
6006
6007 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6008 ad_data.use_counter = true;
6009 ad_data.counter_id = rule->vf_id %
6010 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6011 } else {
6012 ad_data.use_counter = false;
6013 ad_data.counter_id = 0;
6014 }
6015
6016 ad_data.use_next_stage = false;
6017 ad_data.next_input_key = 0;
6018
6019 ad_data.write_rule_id_to_bd = true;
6020 ad_data.rule_id = rule->location;
6021
6022 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6023 }
6024
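/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and mark every field the user left as zero as unused in @unused_tuple, so
 * that hclge_fd_convert_tuple() can skip it when building the TCAM key.
 */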
static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6026 u32 *unused_tuple)
6027 {
6028 if (!spec || !unused_tuple)
6029 return -EINVAL;
6030
6031 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6032
6033 if (!spec->ip4src)
6034 *unused_tuple |= BIT(INNER_SRC_IP);
6035
6036 if (!spec->ip4dst)
6037 *unused_tuple |= BIT(INNER_DST_IP);
6038
6039 if (!spec->psrc)
6040 *unused_tuple |= BIT(INNER_SRC_PORT);
6041
6042 if (!spec->pdst)
6043 *unused_tuple |= BIT(INNER_DST_PORT);
6044
6045 if (!spec->tos)
6046 *unused_tuple |= BIT(INNER_IP_TOS);
6047
6048 return 0;
6049 }
6050
static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6052 u32 *unused_tuple)
6053 {
6054 if (!spec || !unused_tuple)
6055 return -EINVAL;
6056
6057 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6058 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6059
6060 if (!spec->ip4src)
6061 *unused_tuple |= BIT(INNER_SRC_IP);
6062
6063 if (!spec->ip4dst)
6064 *unused_tuple |= BIT(INNER_DST_IP);
6065
6066 if (!spec->tos)
6067 *unused_tuple |= BIT(INNER_IP_TOS);
6068
6069 if (!spec->proto)
6070 *unused_tuple |= BIT(INNER_IP_PROTO);
6071
6072 if (spec->l4_4_bytes)
6073 return -EOPNOTSUPP;
6074
6075 if (spec->ip_ver != ETH_RX_NFC_IP4)
6076 return -EOPNOTSUPP;
6077
6078 return 0;
6079 }
6080
static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6082 u32 *unused_tuple)
6083 {
6084 if (!spec || !unused_tuple)
6085 return -EINVAL;
6086
6087 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6088
6089 /* check whether src/dst ip address used */
6090 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6091 *unused_tuple |= BIT(INNER_SRC_IP);
6092
6093 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6094 *unused_tuple |= BIT(INNER_DST_IP);
6095
6096 if (!spec->psrc)
6097 *unused_tuple |= BIT(INNER_SRC_PORT);
6098
6099 if (!spec->pdst)
6100 *unused_tuple |= BIT(INNER_DST_PORT);
6101
6102 if (!spec->tclass)
6103 *unused_tuple |= BIT(INNER_IP_TOS);
6104
6105 return 0;
6106 }
6107
static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6109 u32 *unused_tuple)
6110 {
6111 if (!spec || !unused_tuple)
6112 return -EINVAL;
6113
6114 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6115 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6116
6117 /* check whether src/dst ip address used */
6118 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6119 *unused_tuple |= BIT(INNER_SRC_IP);
6120
6121 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6122 *unused_tuple |= BIT(INNER_DST_IP);
6123
6124 if (!spec->l4_proto)
6125 *unused_tuple |= BIT(INNER_IP_PROTO);
6126
6127 if (!spec->tclass)
6128 *unused_tuple |= BIT(INNER_IP_TOS);
6129
6130 if (spec->l4_4_bytes)
6131 return -EOPNOTSUPP;
6132
6133 return 0;
6134 }
6135
static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6137 {
6138 if (!spec || !unused_tuple)
6139 return -EINVAL;
6140
6141 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6142 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6143 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6144
6145 if (is_zero_ether_addr(spec->h_source))
6146 *unused_tuple |= BIT(INNER_SRC_MAC);
6147
6148 if (is_zero_ether_addr(spec->h_dest))
6149 *unused_tuple |= BIT(INNER_DST_MAC);
6150
6151 if (!spec->h_proto)
6152 *unused_tuple |= BIT(INNER_ETH_TYPE);
6153
6154 return 0;
6155 }
6156
static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6158 struct ethtool_rx_flow_spec *fs,
6159 u32 *unused_tuple)
6160 {
6161 if (fs->flow_type & FLOW_EXT) {
6162 if (fs->h_ext.vlan_etype) {
6163 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6164 return -EOPNOTSUPP;
6165 }
6166
6167 if (!fs->h_ext.vlan_tci)
6168 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6169
6170 if (fs->m_ext.vlan_tci &&
6171 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6172 dev_err(&hdev->pdev->dev,
6173 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6174 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6175 return -EINVAL;
6176 }
6177 } else {
6178 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6179 }
6180
6181 if (fs->flow_type & FLOW_MAC_EXT) {
6182 if (hdev->fd_cfg.fd_mode !=
6183 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6184 dev_err(&hdev->pdev->dev,
6185 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6186 return -EOPNOTSUPP;
6187 }
6188
6189 if (is_zero_ether_addr(fs->h_ext.h_dest))
6190 *unused_tuple |= BIT(INNER_DST_MAC);
6191 else
6192 *unused_tuple &= ~BIT(INNER_DST_MAC);
6193 }
6194
6195 return 0;
6196 }
6197
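/* Map an ethtool flow type to the layer its user-def bytes match on:
 * ETHER_FLOW -> L2, IP(V6)_USER_FLOW -> L3, TCP/UDP over IPv4/IPv6 -> L4.
 * The corresponding reserved tuple (INNER_L2/L3/L4_RSV) is cleared from
 * @unused_tuple so that it takes part in the key.
 */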
static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6199 struct hclge_fd_user_def_info *info)
6200 {
6201 switch (flow_type) {
6202 case ETHER_FLOW:
6203 info->layer = HCLGE_FD_USER_DEF_L2;
6204 *unused_tuple &= ~BIT(INNER_L2_RSV);
6205 break;
6206 case IP_USER_FLOW:
6207 case IPV6_USER_FLOW:
6208 info->layer = HCLGE_FD_USER_DEF_L3;
6209 *unused_tuple &= ~BIT(INNER_L3_RSV);
6210 break;
6211 case TCP_V4_FLOW:
6212 case UDP_V4_FLOW:
6213 case TCP_V6_FLOW:
6214 case UDP_V6_FLOW:
6215 info->layer = HCLGE_FD_USER_DEF_L4;
6216 *unused_tuple &= ~BIT(INNER_L4_RSV);
6217 break;
6218 default:
6219 return -EOPNOTSUPP;
6220 }
6221
6222 return 0;
6223 }
6224
static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6226 {
6227 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6228 }
6229
static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6231 struct ethtool_rx_flow_spec *fs,
6232 u32 *unused_tuple,
6233 struct hclge_fd_user_def_info *info)
6234 {
6235 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6236 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6237 u16 data, offset, data_mask, offset_mask;
6238 int ret;
6239
6240 info->layer = HCLGE_FD_USER_DEF_NONE;
6241 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6242
6243 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6244 return 0;
6245
	/* The user-def field from ethtool is a 64-bit value: bits 0~15 carry
	 * the match data and bits 32~47 carry the offset.
	 */
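	/* For example (illustrative values only): a user-def value of
	 * 0x0000000a0000abcd supplies offset 0xa via data[0] and the 16-bit
	 * match data 0xabcd via data[1].
	 */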
6249 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6250 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6251 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6252 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6253
6254 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6255 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6256 return -EOPNOTSUPP;
6257 }
6258
6259 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6260 dev_err(&hdev->pdev->dev,
6261 "user-def offset[%u] should be no more than %u\n",
6262 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6263 return -EINVAL;
6264 }
6265
6266 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6267 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6268 return -EINVAL;
6269 }
6270
6271 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6272 if (ret) {
6273 dev_err(&hdev->pdev->dev,
6274 "unsupported flow type for user-def bytes, ret = %d\n",
6275 ret);
6276 return ret;
6277 }
6278
6279 info->data = data;
6280 info->data_mask = data_mask;
6281 info->offset = offset;
6282
6283 return 0;
6284 }
6285
static int hclge_fd_check_spec(struct hclge_dev *hdev,
6287 struct ethtool_rx_flow_spec *fs,
6288 u32 *unused_tuple,
6289 struct hclge_fd_user_def_info *info)
6290 {
6291 u32 flow_type;
6292 int ret;
6293
6294 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6295 dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u\n",
6297 fs->location,
6298 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6299 return -EINVAL;
6300 }
6301
6302 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6303 if (ret)
6304 return ret;
6305
6306 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6307 switch (flow_type) {
6308 case SCTP_V4_FLOW:
6309 case TCP_V4_FLOW:
6310 case UDP_V4_FLOW:
6311 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6312 unused_tuple);
6313 break;
6314 case IP_USER_FLOW:
6315 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6316 unused_tuple);
6317 break;
6318 case SCTP_V6_FLOW:
6319 case TCP_V6_FLOW:
6320 case UDP_V6_FLOW:
6321 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6322 unused_tuple);
6323 break;
6324 case IPV6_USER_FLOW:
6325 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6326 unused_tuple);
6327 break;
6328 case ETHER_FLOW:
6329 if (hdev->fd_cfg.fd_mode !=
6330 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6331 dev_err(&hdev->pdev->dev,
6332 "ETHER_FLOW is not supported in current fd mode!\n");
6333 return -EOPNOTSUPP;
6334 }
6335
6336 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6337 unused_tuple);
6338 break;
6339 default:
6340 dev_err(&hdev->pdev->dev,
6341 "unsupported protocol type, protocol type = %#x\n",
6342 flow_type);
6343 return -EOPNOTSUPP;
6344 }
6345
6346 if (ret) {
6347 dev_err(&hdev->pdev->dev,
6348 "failed to check flow union tuple, ret = %d\n",
6349 ret);
6350 return ret;
6351 }
6352
6353 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6354 }
6355
static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6357 struct ethtool_rx_flow_spec *fs,
6358 struct hclge_fd_rule *rule, u8 ip_proto)
6359 {
6360 rule->tuples.src_ip[IPV4_INDEX] =
6361 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6362 rule->tuples_mask.src_ip[IPV4_INDEX] =
6363 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6364
6365 rule->tuples.dst_ip[IPV4_INDEX] =
6366 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6367 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6368 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6369
6370 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6371 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6372
6373 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6374 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6375
6376 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6377 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6378
6379 rule->tuples.ether_proto = ETH_P_IP;
6380 rule->tuples_mask.ether_proto = 0xFFFF;
6381
6382 rule->tuples.ip_proto = ip_proto;
6383 rule->tuples_mask.ip_proto = 0xFF;
6384 }
6385
static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6387 struct ethtool_rx_flow_spec *fs,
6388 struct hclge_fd_rule *rule)
6389 {
6390 rule->tuples.src_ip[IPV4_INDEX] =
6391 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6392 rule->tuples_mask.src_ip[IPV4_INDEX] =
6393 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6394
6395 rule->tuples.dst_ip[IPV4_INDEX] =
6396 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6397 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6398 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6399
6400 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6401 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6402
6403 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6404 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6405
6406 rule->tuples.ether_proto = ETH_P_IP;
6407 rule->tuples_mask.ether_proto = 0xFFFF;
6408 }
6409
static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6411 struct ethtool_rx_flow_spec *fs,
6412 struct hclge_fd_rule *rule, u8 ip_proto)
6413 {
6414 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6415 IPV6_SIZE);
6416 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6417 IPV6_SIZE);
6418
6419 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6420 IPV6_SIZE);
6421 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6422 IPV6_SIZE);
6423
6424 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6425 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6426
6427 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6428 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6429
6430 rule->tuples.ether_proto = ETH_P_IPV6;
6431 rule->tuples_mask.ether_proto = 0xFFFF;
6432
6433 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6434 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6435
6436 rule->tuples.ip_proto = ip_proto;
6437 rule->tuples_mask.ip_proto = 0xFF;
6438 }
6439
static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6441 struct ethtool_rx_flow_spec *fs,
6442 struct hclge_fd_rule *rule)
6443 {
6444 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6445 IPV6_SIZE);
6446 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6447 IPV6_SIZE);
6448
6449 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6450 IPV6_SIZE);
6451 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6452 IPV6_SIZE);
6453
6454 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6455 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6456
	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6459
6460 rule->tuples.ether_proto = ETH_P_IPV6;
6461 rule->tuples_mask.ether_proto = 0xFFFF;
6462 }
6463
static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6465 struct ethtool_rx_flow_spec *fs,
6466 struct hclge_fd_rule *rule)
6467 {
6468 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6469 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6470
6471 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6472 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6473
6474 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6475 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6476 }
6477
static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6479 struct hclge_fd_rule *rule)
6480 {
6481 switch (info->layer) {
6482 case HCLGE_FD_USER_DEF_L2:
6483 rule->tuples.l2_user_def = info->data;
6484 rule->tuples_mask.l2_user_def = info->data_mask;
6485 break;
6486 case HCLGE_FD_USER_DEF_L3:
6487 rule->tuples.l3_user_def = info->data;
6488 rule->tuples_mask.l3_user_def = info->data_mask;
6489 break;
6490 case HCLGE_FD_USER_DEF_L4:
6491 rule->tuples.l4_user_def = (u32)info->data << 16;
6492 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6493 break;
6494 default:
6495 break;
6496 }
6497
6498 rule->ep.user_def = *info;
6499 }
6500
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6502 struct ethtool_rx_flow_spec *fs,
6503 struct hclge_fd_rule *rule,
6504 struct hclge_fd_user_def_info *info)
6505 {
6506 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6507
6508 switch (flow_type) {
6509 case SCTP_V4_FLOW:
6510 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6511 break;
6512 case TCP_V4_FLOW:
6513 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6514 break;
6515 case UDP_V4_FLOW:
6516 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6517 break;
6518 case IP_USER_FLOW:
6519 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6520 break;
6521 case SCTP_V6_FLOW:
6522 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6523 break;
6524 case TCP_V6_FLOW:
6525 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6526 break;
6527 case UDP_V6_FLOW:
6528 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6529 break;
6530 case IPV6_USER_FLOW:
6531 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6532 break;
6533 case ETHER_FLOW:
6534 hclge_fd_get_ether_tuple(hdev, fs, rule);
6535 break;
6536 default:
6537 return -EOPNOTSUPP;
6538 }
6539
6540 if (fs->flow_type & FLOW_EXT) {
6541 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6542 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6543 hclge_fd_get_user_def_tuple(info, rule);
6544 }
6545
6546 if (fs->flow_type & FLOW_MAC_EXT) {
6547 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6548 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6549 }
6550
6551 return 0;
6552 }
6553
static int hclge_fd_config_rule(struct hclge_dev *hdev,
6555 struct hclge_fd_rule *rule)
6556 {
6557 int ret;
6558
6559 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6560 if (ret)
6561 return ret;
6562
6563 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6564 }
6565
static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6567 struct hclge_fd_rule *rule)
6568 {
6569 int ret;
6570
6571 spin_lock_bh(&hdev->fd_rule_lock);
6572
6573 if (hdev->fd_active_type != rule->rule_type &&
6574 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6575 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6576 dev_err(&hdev->pdev->dev,
			"mode conflict (new type %d, active type %d), please delete existing rules first\n",
6578 rule->rule_type, hdev->fd_active_type);
6579 spin_unlock_bh(&hdev->fd_rule_lock);
6580 return -EINVAL;
6581 }
6582
6583 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6584 if (ret)
6585 goto out;
6586
6587 ret = hclge_clear_arfs_rules(hdev);
6588 if (ret)
6589 goto out;
6590
6591 ret = hclge_fd_config_rule(hdev, rule);
6592 if (ret)
6593 goto out;
6594
6595 rule->state = HCLGE_FD_ACTIVE;
6596 hdev->fd_active_type = rule->rule_type;
6597 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6598
6599 out:
6600 spin_unlock_bh(&hdev->fd_rule_lock);
6601 return ret;
6602 }
6603
static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6605 {
6606 struct hclge_vport *vport = hclge_get_vport(handle);
6607 struct hclge_dev *hdev = vport->back;
6608
6609 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6610 }
6611
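/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC selects the drop action;
 * otherwise the cookie encodes an optional vf index (0 for the PF, vf id + 1
 * for a VF) and a queue index, which are translated into @vport_id, @action
 * and @queue_id.
 */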
static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6613 u16 *vport_id, u8 *action, u16 *queue_id)
6614 {
6615 struct hclge_vport *vport = hdev->vport;
6616
6617 if (ring_cookie == RX_CLS_FLOW_DISC) {
6618 *action = HCLGE_FD_ACTION_DROP_PACKET;
6619 } else {
6620 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6621 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6622 u16 tqps;
6623
		/* To stay consistent with the user's configuration, subtract 1
		 * when printing 'vf', because the vf id from ethtool is the
		 * actual vf id plus 1.
		 */
6627 if (vf > hdev->num_req_vfs) {
6628 dev_err(&hdev->pdev->dev,
6629 "Error: vf id (%u) should be less than %u\n",
6630 vf - 1, hdev->num_req_vfs);
6631 return -EINVAL;
6632 }
6633
6634 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6635 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6636
6637 if (ring >= tqps) {
6638 dev_err(&hdev->pdev->dev,
6639 "Error: queue id (%u) > max tqp num (%u)\n",
6640 ring, tqps - 1);
6641 return -EINVAL;
6642 }
6643
6644 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6645 *queue_id = ring;
6646 }
6647
6648 return 0;
6649 }
6650
static int hclge_add_fd_entry(struct hnae3_handle *handle,
6652 struct ethtool_rxnfc *cmd)
6653 {
6654 struct hclge_vport *vport = hclge_get_vport(handle);
6655 struct hclge_dev *hdev = vport->back;
6656 struct hclge_fd_user_def_info info;
6657 u16 dst_vport_id = 0, q_index = 0;
6658 struct ethtool_rx_flow_spec *fs;
6659 struct hclge_fd_rule *rule;
6660 u32 unused = 0;
6661 u8 action;
6662 int ret;
6663
6664 if (!hnae3_dev_fd_supported(hdev)) {
6665 dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
6667 return -EOPNOTSUPP;
6668 }
6669
6670 if (!hdev->fd_en) {
6671 dev_err(&hdev->pdev->dev,
6672 "please enable flow director first\n");
6673 return -EOPNOTSUPP;
6674 }
6675
6676 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6677
6678 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6679 if (ret)
6680 return ret;
6681
6682 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6683 &action, &q_index);
6684 if (ret)
6685 return ret;
6686
6687 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6688 if (!rule)
6689 return -ENOMEM;
6690
6691 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6692 if (ret) {
6693 kfree(rule);
6694 return ret;
6695 }
6696
6697 rule->flow_type = fs->flow_type;
6698 rule->location = fs->location;
6699 rule->unused_tuple = unused;
6700 rule->vf_id = dst_vport_id;
6701 rule->queue_id = q_index;
6702 rule->action = action;
6703 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6704
6705 ret = hclge_add_fd_entry_common(hdev, rule);
6706 if (ret)
6707 kfree(rule);
6708
6709 return ret;
6710 }
6711
static int hclge_del_fd_entry(struct hnae3_handle *handle,
6713 struct ethtool_rxnfc *cmd)
6714 {
6715 struct hclge_vport *vport = hclge_get_vport(handle);
6716 struct hclge_dev *hdev = vport->back;
6717 struct ethtool_rx_flow_spec *fs;
6718 int ret;
6719
6720 if (!hnae3_dev_fd_supported(hdev))
6721 return -EOPNOTSUPP;
6722
6723 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6724
6725 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6726 return -EINVAL;
6727
6728 spin_lock_bh(&hdev->fd_rule_lock);
6729 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6730 !test_bit(fs->location, hdev->fd_bmap)) {
6731 dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
6733 spin_unlock_bh(&hdev->fd_rule_lock);
6734 return -ENOENT;
6735 }
6736
6737 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6738 NULL, false);
6739 if (ret)
6740 goto out;
6741
6742 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6743
6744 out:
6745 spin_unlock_bh(&hdev->fd_rule_lock);
6746 return ret;
6747 }
6748
static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6750 bool clear_list)
6751 {
6752 struct hclge_fd_rule *rule;
6753 struct hlist_node *node;
6754 u16 location;
6755
6756 if (!hnae3_dev_fd_supported(hdev))
6757 return;
6758
6759 spin_lock_bh(&hdev->fd_rule_lock);
6760
6761 for_each_set_bit(location, hdev->fd_bmap,
6762 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6763 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6764 NULL, false);
6765
6766 if (clear_list) {
6767 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6768 rule_node) {
6769 hlist_del(&rule->rule_node);
6770 kfree(rule);
6771 }
6772 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6773 hdev->hclge_fd_rule_num = 0;
6774 bitmap_zero(hdev->fd_bmap,
6775 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6776 }
6777
6778 spin_unlock_bh(&hdev->fd_rule_lock);
6779 }
6780
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6782 {
6783 hclge_clear_fd_rules_in_list(hdev, true);
6784 hclge_fd_disable_user_def(hdev);
6785 }
6786
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6788 {
6789 struct hclge_vport *vport = hclge_get_vport(handle);
6790 struct hclge_dev *hdev = vport->back;
6791 struct hclge_fd_rule *rule;
6792 struct hlist_node *node;
6793
6794 /* Return ok here, because reset error handling will check this
6795 * return value. If error is returned here, the reset process will
6796 * fail.
6797 */
6798 if (!hnae3_dev_fd_supported(hdev))
6799 return 0;
6800
	/* if fd is disabled, the rules should not be restored during reset */
6802 if (!hdev->fd_en)
6803 return 0;
6804
6805 spin_lock_bh(&hdev->fd_rule_lock);
6806 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6807 if (rule->state == HCLGE_FD_ACTIVE)
6808 rule->state = HCLGE_FD_TO_ADD;
6809 }
6810 spin_unlock_bh(&hdev->fd_rule_lock);
6811 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6812
6813 return 0;
6814 }
6815
6816 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6817 struct ethtool_rxnfc *cmd)
6818 {
6819 struct hclge_vport *vport = hclge_get_vport(handle);
6820 struct hclge_dev *hdev = vport->back;
6821
6822 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6823 return -EOPNOTSUPP;
6824
6825 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6826 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6827
6828 return 0;
6829 }
6830
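/* The hclge_fd_get_*_info() helpers below translate an internal
 * hclge_fd_rule back into the matching ethtool spec/mask layout. For
 * every tuple flagged in rule->unused_tuple the corresponding mask
 * field is cleared, so user space sees it as "don't care".
 */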
6831 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6832 struct ethtool_tcpip4_spec *spec,
6833 struct ethtool_tcpip4_spec *spec_mask)
6834 {
6835 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6836 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6837 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6838
6839 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6840 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6841 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6842
6843 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6844 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6845 0 : cpu_to_be16(rule->tuples_mask.src_port);
6846
6847 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6848 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6849 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6850
6851 spec->tos = rule->tuples.ip_tos;
6852 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6853 0 : rule->tuples_mask.ip_tos;
6854 }
6855
6856 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6857 struct ethtool_usrip4_spec *spec,
6858 struct ethtool_usrip4_spec *spec_mask)
6859 {
6860 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6861 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6862 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6863
6864 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6865 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6866 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6867
6868 spec->tos = rule->tuples.ip_tos;
6869 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6870 0 : rule->tuples_mask.ip_tos;
6871
6872 spec->proto = rule->tuples.ip_proto;
6873 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6874 0 : rule->tuples_mask.ip_proto;
6875
6876 spec->ip_ver = ETH_RX_NFC_IP4;
6877 }
6878
6879 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6880 struct ethtool_tcpip6_spec *spec,
6881 struct ethtool_tcpip6_spec *spec_mask)
6882 {
6883 cpu_to_be32_array(spec->ip6src,
6884 rule->tuples.src_ip, IPV6_SIZE);
6885 cpu_to_be32_array(spec->ip6dst,
6886 rule->tuples.dst_ip, IPV6_SIZE);
6887 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6888 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6889 else
6890 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6891 IPV6_SIZE);
6892
6893 if (rule->unused_tuple & BIT(INNER_DST_IP))
6894 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6895 else
6896 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6897 IPV6_SIZE);
6898
6899 spec->tclass = rule->tuples.ip_tos;
6900 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6901 0 : rule->tuples_mask.ip_tos;
6902
6903 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6904 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6905 0 : cpu_to_be16(rule->tuples_mask.src_port);
6906
6907 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6908 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6909 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6910 }
6911
6912 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6913 struct ethtool_usrip6_spec *spec,
6914 struct ethtool_usrip6_spec *spec_mask)
6915 {
6916 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6917 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6918 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6919 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6920 else
6921 cpu_to_be32_array(spec_mask->ip6src,
6922 rule->tuples_mask.src_ip, IPV6_SIZE);
6923
6924 if (rule->unused_tuple & BIT(INNER_DST_IP))
6925 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6926 else
6927 cpu_to_be32_array(spec_mask->ip6dst,
6928 rule->tuples_mask.dst_ip, IPV6_SIZE);
6929
6930 spec->tclass = rule->tuples.ip_tos;
6931 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6932 0 : rule->tuples_mask.ip_tos;
6933
6934 spec->l4_proto = rule->tuples.ip_proto;
6935 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6936 0 : rule->tuples_mask.ip_proto;
6937 }
6938
6939 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6940 struct ethhdr *spec,
6941 struct ethhdr *spec_mask)
6942 {
6943 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6944 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6945
6946 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6947 eth_zero_addr(spec_mask->h_source);
6948 else
6949 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6950
6951 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6952 eth_zero_addr(spec_mask->h_dest);
6953 else
6954 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6955
6956 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6957 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6958 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6959 }
6960
6961 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6962 struct hclge_fd_rule *rule)
6963 {
6964 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6965 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6966 fs->h_ext.data[0] = 0;
6967 fs->h_ext.data[1] = 0;
6968 fs->m_ext.data[0] = 0;
6969 fs->m_ext.data[1] = 0;
6970 } else {
6971 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6972 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6973 fs->m_ext.data[0] =
6974 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6975 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6976 }
6977 }
6978
6979 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6980 struct hclge_fd_rule *rule)
6981 {
6982 if (fs->flow_type & FLOW_EXT) {
6983 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6984 fs->m_ext.vlan_tci =
6985 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6986 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6987
6988 hclge_fd_get_user_def_info(fs, rule);
6989 }
6990
6991 if (fs->flow_type & FLOW_MAC_EXT) {
6992 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6993 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6994 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6995 else
6996 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6997 rule->tuples_mask.dst_mac);
6998 }
6999 }
7000
7001 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7002 struct ethtool_rxnfc *cmd)
7003 {
7004 struct hclge_vport *vport = hclge_get_vport(handle);
7005 struct hclge_fd_rule *rule = NULL;
7006 struct hclge_dev *hdev = vport->back;
7007 struct ethtool_rx_flow_spec *fs;
7008 struct hlist_node *node2;
7009
7010 if (!hnae3_dev_fd_supported(hdev))
7011 return -EOPNOTSUPP;
7012
7013 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7014
7015 spin_lock_bh(&hdev->fd_rule_lock);
7016
7017 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7018 if (rule->location >= fs->location)
7019 break;
7020 }
7021
7022 if (!rule || fs->location != rule->location) {
7023 spin_unlock_bh(&hdev->fd_rule_lock);
7024
7025 return -ENOENT;
7026 }
7027
7028 fs->flow_type = rule->flow_type;
7029 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7030 case SCTP_V4_FLOW:
7031 case TCP_V4_FLOW:
7032 case UDP_V4_FLOW:
7033 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7034 &fs->m_u.tcp_ip4_spec);
7035 break;
7036 case IP_USER_FLOW:
7037 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7038 &fs->m_u.usr_ip4_spec);
7039 break;
7040 case SCTP_V6_FLOW:
7041 case TCP_V6_FLOW:
7042 case UDP_V6_FLOW:
7043 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7044 &fs->m_u.tcp_ip6_spec);
7045 break;
7046 case IPV6_USER_FLOW:
7047 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7048 &fs->m_u.usr_ip6_spec);
7049 break;
7050 /* The flow type of the fd rule has been checked before adding it to the rule
7051 * list. As other flow types have been handled, it must be ETHER_FLOW
7052 * for the default case
7053 */
7054 default:
7055 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7056 &fs->m_u.ether_spec);
7057 break;
7058 }
7059
7060 hclge_fd_get_ext_info(fs, rule);
7061
7062 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7063 fs->ring_cookie = RX_CLS_FLOW_DISC;
7064 } else {
7065 u64 vf_id;
7066
7067 fs->ring_cookie = rule->queue_id;
7068 vf_id = rule->vf_id;
7069 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7070 fs->ring_cookie |= vf_id;
7071 }
7072
7073 spin_unlock_bh(&hdev->fd_rule_lock);
7074
7075 return 0;
7076 }
7077
7078 static int hclge_get_all_rules(struct hnae3_handle *handle,
7079 struct ethtool_rxnfc *cmd, u32 *rule_locs)
7080 {
7081 struct hclge_vport *vport = hclge_get_vport(handle);
7082 struct hclge_dev *hdev = vport->back;
7083 struct hclge_fd_rule *rule;
7084 struct hlist_node *node2;
7085 int cnt = 0;
7086
7087 if (!hnae3_dev_fd_supported(hdev))
7088 return -EOPNOTSUPP;
7089
7090 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7091
7092 spin_lock_bh(&hdev->fd_rule_lock);
7093 hlist_for_each_entry_safe(rule, node2,
7094 &hdev->fd_rule_list, rule_node) {
7095 if (cnt == cmd->rule_cnt) {
7096 spin_unlock_bh(&hdev->fd_rule_lock);
7097 return -EMSGSIZE;
7098 }
7099
7100 if (rule->state == HCLGE_FD_TO_DEL)
7101 continue;
7102
7103 rule_locs[cnt] = rule->location;
7104 cnt++;
7105 }
7106
7107 spin_unlock_bh(&hdev->fd_rule_lock);
7108
7109 cmd->rule_cnt = cnt;
7110
7111 return 0;
7112 }
7113
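/* Copy the protocol, destination port and IPv4/IPv6 addresses from the
 * dissected flow keys into an hclge_fd_rule_tuples structure (in host
 * byte order), so the flow can be compared against existing aRFS rules.
 */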
7114 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7115 struct hclge_fd_rule_tuples *tuples)
7116 {
7117 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7118 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7119
7120 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7121 tuples->ip_proto = fkeys->basic.ip_proto;
7122 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7123
7124 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7125 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7126 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7127 } else {
7128 int i;
7129
7130 for (i = 0; i < IPV6_SIZE; i++) {
7131 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7132 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7133 }
7134 }
7135 }
7136
7137 /* traverse all rules, check whether an existing rule has the same tuples */
7138 static struct hclge_fd_rule *
7139 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7140 const struct hclge_fd_rule_tuples *tuples)
7141 {
7142 struct hclge_fd_rule *rule = NULL;
7143 struct hlist_node *node;
7144
7145 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7146 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7147 return rule;
7148 }
7149
7150 return NULL;
7151 }
7152
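/* Build an aRFS rule from the extracted tuples: MAC addresses, VLAN tag,
 * IP TOS and source port are marked as unused, so the rule matches on
 * protocol, source/destination IP and destination port only.
 */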
7153 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7154 struct hclge_fd_rule *rule)
7155 {
7156 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7157 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7158 BIT(INNER_SRC_PORT);
7159 rule->action = 0;
7160 rule->vf_id = 0;
7161 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7162 rule->state = HCLGE_FD_TO_ADD;
7163 if (tuples->ether_proto == ETH_P_IP) {
7164 if (tuples->ip_proto == IPPROTO_TCP)
7165 rule->flow_type = TCP_V4_FLOW;
7166 else
7167 rule->flow_type = UDP_V4_FLOW;
7168 } else {
7169 if (tuples->ip_proto == IPPROTO_TCP)
7170 rule->flow_type = TCP_V6_FLOW;
7171 else
7172 rule->flow_type = UDP_V6_FLOW;
7173 }
7174 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7175 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7176 }
7177
7178 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7179 u16 flow_id, struct flow_keys *fkeys)
7180 {
7181 struct hclge_vport *vport = hclge_get_vport(handle);
7182 struct hclge_fd_rule_tuples new_tuples = {};
7183 struct hclge_dev *hdev = vport->back;
7184 struct hclge_fd_rule *rule;
7185 u16 bit_id;
7186
7187 if (!hnae3_dev_fd_supported(hdev))
7188 return -EOPNOTSUPP;
7189
7190 /* if a flow director rule added by the user already exists,
7191 * arfs should not work
7192 */
7193 spin_lock_bh(&hdev->fd_rule_lock);
7194 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7195 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7196 spin_unlock_bh(&hdev->fd_rule_lock);
7197 return -EOPNOTSUPP;
7198 }
7199
7200 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7201
7202 /* check whether a flow director filter already exists for this flow:
7203 * if not, create a new filter for it;
7204 * if a filter exists with a different queue id, modify the filter;
7205 * if a filter exists with the same queue id, do nothing
7206 */
7207 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7208 if (!rule) {
7209 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7210 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7211 spin_unlock_bh(&hdev->fd_rule_lock);
7212 return -ENOSPC;
7213 }
7214
7215 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7216 if (!rule) {
7217 spin_unlock_bh(&hdev->fd_rule_lock);
7218 return -ENOMEM;
7219 }
7220
7221 rule->location = bit_id;
7222 rule->arfs.flow_id = flow_id;
7223 rule->queue_id = queue_id;
7224 hclge_fd_build_arfs_rule(&new_tuples, rule);
7225 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7226 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7227 } else if (rule->queue_id != queue_id) {
7228 rule->queue_id = queue_id;
7229 rule->state = HCLGE_FD_TO_ADD;
7230 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7231 hclge_task_schedule(hdev, 0);
7232 }
7233 spin_unlock_bh(&hdev->fd_rule_lock);
7234 return rule->location;
7235 }
7236
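/* Walk the aRFS rules and mark any flow that the RPS core reports as
 * expired with HCLGE_FD_TO_DEL; the actual TCAM removal happens later
 * when the flow director table is synchronized.
 */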
7237 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7238 {
7239 #ifdef CONFIG_RFS_ACCEL
7240 struct hnae3_handle *handle = &hdev->vport[0].nic;
7241 struct hclge_fd_rule *rule;
7242 struct hlist_node *node;
7243
7244 spin_lock_bh(&hdev->fd_rule_lock);
7245 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7246 spin_unlock_bh(&hdev->fd_rule_lock);
7247 return;
7248 }
7249 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7250 if (rule->state != HCLGE_FD_ACTIVE)
7251 continue;
7252 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7253 rule->arfs.flow_id, rule->location)) {
7254 rule->state = HCLGE_FD_TO_DEL;
7255 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7256 }
7257 }
7258 spin_unlock_bh(&hdev->fd_rule_lock);
7259 #endif
7260 }
7261
7262 /* the caller must hold fd_rule_lock */
7263 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7264 {
7265 #ifdef CONFIG_RFS_ACCEL
7266 struct hclge_fd_rule *rule;
7267 struct hlist_node *node;
7268 int ret;
7269
7270 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7271 return 0;
7272
7273 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7274 switch (rule->state) {
7275 case HCLGE_FD_TO_DEL:
7276 case HCLGE_FD_ACTIVE:
7277 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7278 rule->location, NULL, false);
7279 if (ret)
7280 return ret;
7281 fallthrough;
7282 case HCLGE_FD_TO_ADD:
7283 hclge_fd_dec_rule_cnt(hdev, rule->location);
7284 hlist_del(&rule->rule_node);
7285 kfree(rule);
7286 break;
7287 default:
7288 break;
7289 }
7290 }
7291 hclge_sync_fd_state(hdev);
7292
7293 #endif
7294 return 0;
7295 }
7296
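/* The hclge_get_cls_key_*() helpers copy the matched keys of a tc
 * flower offload request into the fd rule tuples; keys that are not
 * part of the match are recorded in rule->unused_tuple instead.
 */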
7297 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7298 struct hclge_fd_rule *rule)
7299 {
7300 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7301 struct flow_match_basic match;
7302 u16 ethtype_key, ethtype_mask;
7303
7304 flow_rule_match_basic(flow, &match);
7305 ethtype_key = ntohs(match.key->n_proto);
7306 ethtype_mask = ntohs(match.mask->n_proto);
7307
7308 if (ethtype_key == ETH_P_ALL) {
7309 ethtype_key = 0;
7310 ethtype_mask = 0;
7311 }
7312 rule->tuples.ether_proto = ethtype_key;
7313 rule->tuples_mask.ether_proto = ethtype_mask;
7314 rule->tuples.ip_proto = match.key->ip_proto;
7315 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7316 } else {
7317 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7318 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7319 }
7320 }
7321
7322 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7323 struct hclge_fd_rule *rule)
7324 {
7325 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7326 struct flow_match_eth_addrs match;
7327
7328 flow_rule_match_eth_addrs(flow, &match);
7329 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7330 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7331 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7332 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7333 } else {
7334 rule->unused_tuple |= BIT(INNER_DST_MAC);
7335 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7336 }
7337 }
7338
7339 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7340 struct hclge_fd_rule *rule)
7341 {
7342 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7343 struct flow_match_vlan match;
7344
7345 flow_rule_match_vlan(flow, &match);
7346 rule->tuples.vlan_tag1 = match.key->vlan_id |
7347 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7348 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7349 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7350 } else {
7351 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7352 }
7353 }
7354
7355 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7356 struct hclge_fd_rule *rule)
7357 {
7358 u16 addr_type = 0;
7359
7360 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7361 struct flow_match_control match;
7362
7363 flow_rule_match_control(flow, &match);
7364 addr_type = match.key->addr_type;
7365 }
7366
7367 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7368 struct flow_match_ipv4_addrs match;
7369
7370 flow_rule_match_ipv4_addrs(flow, &match);
7371 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7372 rule->tuples_mask.src_ip[IPV4_INDEX] =
7373 be32_to_cpu(match.mask->src);
7374 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7375 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7376 be32_to_cpu(match.mask->dst);
7377 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7378 struct flow_match_ipv6_addrs match;
7379
7380 flow_rule_match_ipv6_addrs(flow, &match);
7381 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7382 IPV6_SIZE);
7383 be32_to_cpu_array(rule->tuples_mask.src_ip,
7384 match.mask->src.s6_addr32, IPV6_SIZE);
7385 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7386 IPV6_SIZE);
7387 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7388 match.mask->dst.s6_addr32, IPV6_SIZE);
7389 } else {
7390 rule->unused_tuple |= BIT(INNER_SRC_IP);
7391 rule->unused_tuple |= BIT(INNER_DST_IP);
7392 }
7393 }
7394
7395 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7396 struct hclge_fd_rule *rule)
7397 {
7398 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7399 struct flow_match_ports match;
7400
7401 flow_rule_match_ports(flow, &match);
7402
7403 rule->tuples.src_port = be16_to_cpu(match.key->src);
7404 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7405 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7406 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7407 } else {
7408 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7409 rule->unused_tuple |= BIT(INNER_DST_PORT);
7410 }
7411 }
7412
7413 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7414 struct flow_cls_offload *cls_flower,
7415 struct hclge_fd_rule *rule)
7416 {
7417 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7418 struct flow_dissector *dissector = flow->match.dissector;
7419
7420 if (dissector->used_keys &
7421 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7422 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7423 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7424 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7425 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7426 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7427 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7428 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7429 dissector->used_keys);
7430 return -EOPNOTSUPP;
7431 }
7432
7433 hclge_get_cls_key_basic(flow, rule);
7434 hclge_get_cls_key_mac(flow, rule);
7435 hclge_get_cls_key_vlan(flow, rule);
7436 hclge_get_cls_key_ip(flow, rule);
7437 hclge_get_cls_key_port(flow, rule);
7438
7439 return 0;
7440 }
7441
7442 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7443 struct flow_cls_offload *cls_flower, int tc)
7444 {
7445 u32 prio = cls_flower->common.prio;
7446
7447 if (tc < 0 || tc > hdev->tc_max) {
7448 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7449 return -EINVAL;
7450 }
7451
7452 if (prio == 0 ||
7453 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7454 dev_err(&hdev->pdev->dev,
7455 "prio %u should be in range[1, %u]\n",
7456 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7457 return -EINVAL;
7458 }
7459
7460 if (test_bit(prio - 1, hdev->fd_bmap)) {
7461 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7462 return -EINVAL;
7463 }
7464 return 0;
7465 }
7466
7467 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7468 struct flow_cls_offload *cls_flower,
7469 int tc)
7470 {
7471 struct hclge_vport *vport = hclge_get_vport(handle);
7472 struct hclge_dev *hdev = vport->back;
7473 struct hclge_fd_rule *rule;
7474 int ret;
7475
7476 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7477 if (ret) {
7478 dev_err(&hdev->pdev->dev,
7479 "failed to check cls flower params, ret = %d\n", ret);
7480 return ret;
7481 }
7482
7483 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7484 if (!rule)
7485 return -ENOMEM;
7486
7487 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7488 if (ret) {
7489 kfree(rule);
7490 return ret;
7491 }
7492
7493 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7494 rule->cls_flower.tc = tc;
7495 rule->location = cls_flower->common.prio - 1;
7496 rule->vf_id = 0;
7497 rule->cls_flower.cookie = cls_flower->cookie;
7498 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7499
7500 ret = hclge_add_fd_entry_common(hdev, rule);
7501 if (ret)
7502 kfree(rule);
7503
7504 return ret;
7505 }
7506
7507 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7508 unsigned long cookie)
7509 {
7510 struct hclge_fd_rule *rule;
7511 struct hlist_node *node;
7512
7513 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7514 if (rule->cls_flower.cookie == cookie)
7515 return rule;
7516 }
7517
7518 return NULL;
7519 }
7520
7521 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7522 struct flow_cls_offload *cls_flower)
7523 {
7524 struct hclge_vport *vport = hclge_get_vport(handle);
7525 struct hclge_dev *hdev = vport->back;
7526 struct hclge_fd_rule *rule;
7527 int ret;
7528
7529 spin_lock_bh(&hdev->fd_rule_lock);
7530
7531 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7532 if (!rule) {
7533 spin_unlock_bh(&hdev->fd_rule_lock);
7534 return -EINVAL;
7535 }
7536
7537 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7538 NULL, false);
7539 if (ret) {
7540 spin_unlock_bh(&hdev->fd_rule_lock);
7541 return ret;
7542 }
7543
7544 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7545 spin_unlock_bh(&hdev->fd_rule_lock);
7546
7547 return 0;
7548 }
7549
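/* Push pending flow director changes to hardware: rules in TO_ADD state
 * are written to the TCAM and become ACTIVE, rules in TO_DEL state are
 * removed from the TCAM and freed. On failure the CHANGED flag is set
 * again so the remaining work is retried later.
 */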
7550 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7551 {
7552 struct hclge_fd_rule *rule;
7553 struct hlist_node *node;
7554 int ret = 0;
7555
7556 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7557 return;
7558
7559 spin_lock_bh(&hdev->fd_rule_lock);
7560
7561 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7562 switch (rule->state) {
7563 case HCLGE_FD_TO_ADD:
7564 ret = hclge_fd_config_rule(hdev, rule);
7565 if (ret)
7566 goto out;
7567 rule->state = HCLGE_FD_ACTIVE;
7568 break;
7569 case HCLGE_FD_TO_DEL:
7570 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7571 rule->location, NULL, false);
7572 if (ret)
7573 goto out;
7574 hclge_fd_dec_rule_cnt(hdev, rule->location);
7575 hclge_fd_free_node(hdev, rule);
7576 break;
7577 default:
7578 break;
7579 }
7580 }
7581
7582 out:
7583 if (ret)
7584 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7585
7586 spin_unlock_bh(&hdev->fd_rule_lock);
7587 }
7588
7589 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7590 {
7591 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7592 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7593
7594 hclge_clear_fd_rules_in_list(hdev, clear_list);
7595 }
7596
7597 hclge_sync_fd_user_def_cfg(hdev, false);
7598
7599 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7600 }
7601
7602 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7603 {
7604 struct hclge_vport *vport = hclge_get_vport(handle);
7605 struct hclge_dev *hdev = vport->back;
7606
7607 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7608 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7609 }
7610
7611 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7612 {
7613 struct hclge_vport *vport = hclge_get_vport(handle);
7614 struct hclge_dev *hdev = vport->back;
7615
7616 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7617 }
7618
7619 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7620 {
7621 struct hclge_vport *vport = hclge_get_vport(handle);
7622 struct hclge_dev *hdev = vport->back;
7623
7624 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7625 }
7626
7627 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7628 {
7629 struct hclge_vport *vport = hclge_get_vport(handle);
7630 struct hclge_dev *hdev = vport->back;
7631
7632 return hdev->rst_stats.hw_reset_done_cnt;
7633 }
7634
7635 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7636 {
7637 struct hclge_vport *vport = hclge_get_vport(handle);
7638 struct hclge_dev *hdev = vport->back;
7639
7640 hdev->fd_en = enable;
7641
7642 if (!enable)
7643 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7644 else
7645 hclge_restore_fd_entries(handle);
7646
7647 hclge_task_schedule(hdev, 0);
7648 }
7649
7650 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7651 {
7652 struct hclge_desc desc;
7653 struct hclge_config_mac_mode_cmd *req =
7654 (struct hclge_config_mac_mode_cmd *)desc.data;
7655 u32 loop_en = 0;
7656 int ret;
7657
7658 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7659
7660 if (enable) {
7661 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7662 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7663 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7664 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7665 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7666 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7667 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7668 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7669 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7670 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7671 }
7672
7673 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7674
7675 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7676 if (ret)
7677 dev_err(&hdev->pdev->dev,
7678 "mac enable fail, ret =%d.\n", ret);
7679 }
7680
7681 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7682 u8 switch_param, u8 param_mask)
7683 {
7684 struct hclge_mac_vlan_switch_cmd *req;
7685 struct hclge_desc desc;
7686 u32 func_id;
7687 int ret;
7688
7689 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7690 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7691
7692 /* read current config parameter */
7693 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7694 true);
7695 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7696 req->func_id = cpu_to_le32(func_id);
7697
7698 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7699 if (ret) {
7700 dev_err(&hdev->pdev->dev,
7701 "read mac vlan switch parameter fail, ret = %d\n", ret);
7702 return ret;
7703 }
7704
7705 /* modify and write new config parameter */
7706 hclge_cmd_reuse_desc(&desc, false);
7707 req->switch_param = (req->switch_param & param_mask) | switch_param;
7708 req->param_mask = param_mask;
7709
7710 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7711 if (ret)
7712 dev_err(&hdev->pdev->dev,
7713 "set mac vlan switch parameter fail, ret = %d\n", ret);
7714 return ret;
7715 }
7716
7717 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7718 int link_ret)
7719 {
7720 #define HCLGE_PHY_LINK_STATUS_NUM 200
7721
7722 struct phy_device *phydev = hdev->hw.mac.phydev;
7723 int i = 0;
7724 int ret;
7725
7726 do {
7727 ret = phy_read_status(phydev);
7728 if (ret) {
7729 dev_err(&hdev->pdev->dev,
7730 "phy update link status fail, ret = %d\n", ret);
7731 return;
7732 }
7733
7734 if (phydev->link == link_ret)
7735 break;
7736
7737 msleep(HCLGE_LINK_STATUS_MS);
7738 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7739 }
7740
7741 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7742 {
7743 #define HCLGE_MAC_LINK_STATUS_NUM 100
7744
7745 int link_status;
7746 int i = 0;
7747 int ret;
7748
7749 do {
7750 ret = hclge_get_mac_link_status(hdev, &link_status);
7751 if (ret)
7752 return ret;
7753 if (link_status == link_ret)
7754 return 0;
7755
7756 msleep(HCLGE_LINK_STATUS_MS);
7757 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7758 return -EBUSY;
7759 }
7760
7761 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7762 bool is_phy)
7763 {
7764 int link_ret;
7765
7766 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7767
7768 if (is_phy)
7769 hclge_phy_link_status_wait(hdev, link_ret);
7770
7771 return hclge_mac_link_status_wait(hdev, link_ret);
7772 }
7773
7774 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7775 {
7776 struct hclge_config_mac_mode_cmd *req;
7777 struct hclge_desc desc;
7778 u32 loop_en;
7779 int ret;
7780
7781 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7782 /* 1 Read out the MAC mode config first */
7783 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7784 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7785 if (ret) {
7786 dev_err(&hdev->pdev->dev,
7787 "mac loopback get fail, ret =%d.\n", ret);
7788 return ret;
7789 }
7790
7791 /* 2 Then setup the loopback flag */
7792 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7793 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7794
7795 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7796
7797 /* 3 Config mac work mode with loopback flag
7798 * and its original configuration parameters
7799 */
7800 hclge_cmd_reuse_desc(&desc, false);
7801 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7802 if (ret)
7803 dev_err(&hdev->pdev->dev,
7804 "mac loopback set fail, ret =%d.\n", ret);
7805 return ret;
7806 }
7807
7808 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7809 enum hnae3_loop loop_mode)
7810 {
7811 #define HCLGE_COMMON_LB_RETRY_MS 10
7812 #define HCLGE_COMMON_LB_RETRY_NUM 100
7813
7814 struct hclge_common_lb_cmd *req;
7815 struct hclge_desc desc;
7816 int ret, i = 0;
7817 u8 loop_mode_b;
7818
7819 req = (struct hclge_common_lb_cmd *)desc.data;
7820 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7821
7822 switch (loop_mode) {
7823 case HNAE3_LOOP_SERIAL_SERDES:
7824 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7825 break;
7826 case HNAE3_LOOP_PARALLEL_SERDES:
7827 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7828 break;
7829 case HNAE3_LOOP_PHY:
7830 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7831 break;
7832 default:
7833 dev_err(&hdev->pdev->dev,
7834 "unsupported common loopback mode %d\n", loop_mode);
7835 return -ENOTSUPP;
7836 }
7837
7838 if (en) {
7839 req->enable = loop_mode_b;
7840 req->mask = loop_mode_b;
7841 } else {
7842 req->mask = loop_mode_b;
7843 }
7844
7845 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7846 if (ret) {
7847 dev_err(&hdev->pdev->dev,
7848 "common loopback set fail, ret = %d\n", ret);
7849 return ret;
7850 }
7851
7852 do {
7853 msleep(HCLGE_COMMON_LB_RETRY_MS);
7854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7855 true);
7856 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7857 if (ret) {
7858 dev_err(&hdev->pdev->dev,
7859 "common loopback get, ret = %d\n", ret);
7860 return ret;
7861 }
7862 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7863 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7864
7865 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7866 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7867 return -EBUSY;
7868 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7869 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7870 return -EIO;
7871 }
7872 return ret;
7873 }
7874
7875 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7876 enum hnae3_loop loop_mode)
7877 {
7878 int ret;
7879
7880 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7881 if (ret)
7882 return ret;
7883
7884 hclge_cfg_mac_mode(hdev, en);
7885
7886 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7887 if (ret)
7888 dev_err(&hdev->pdev->dev,
7889 "serdes loopback config mac mode timeout\n");
7890
7891 return ret;
7892 }
7893
7894 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7895 struct phy_device *phydev)
7896 {
7897 int ret;
7898
7899 if (!phydev->suspended) {
7900 ret = phy_suspend(phydev);
7901 if (ret)
7902 return ret;
7903 }
7904
7905 ret = phy_resume(phydev);
7906 if (ret)
7907 return ret;
7908
7909 return phy_loopback(phydev, true);
7910 }
7911
7912 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7913 struct phy_device *phydev)
7914 {
7915 int ret;
7916
7917 ret = phy_loopback(phydev, false);
7918 if (ret)
7919 return ret;
7920
7921 return phy_suspend(phydev);
7922 }
7923
7924 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7925 {
7926 struct phy_device *phydev = hdev->hw.mac.phydev;
7927 int ret;
7928
7929 if (!phydev) {
7930 if (hnae3_dev_phy_imp_supported(hdev))
7931 return hclge_set_common_loopback(hdev, en,
7932 HNAE3_LOOP_PHY);
7933 return -ENOTSUPP;
7934 }
7935
7936 if (en)
7937 ret = hclge_enable_phy_loopback(hdev, phydev);
7938 else
7939 ret = hclge_disable_phy_loopback(hdev, phydev);
7940 if (ret) {
7941 dev_err(&hdev->pdev->dev,
7942 "set phy loopback fail, ret = %d\n", ret);
7943 return ret;
7944 }
7945
7946 hclge_cfg_mac_mode(hdev, en);
7947
7948 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7949 if (ret)
7950 dev_err(&hdev->pdev->dev,
7951 "phy loopback config mac mode timeout\n");
7952
7953 return ret;
7954 }
7955
7956 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7957 u16 stream_id, bool enable)
7958 {
7959 struct hclge_desc desc;
7960 struct hclge_cfg_com_tqp_queue_cmd *req =
7961 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7962
7963 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7964 req->tqp_id = cpu_to_le16(tqp_id);
7965 req->stream_id = cpu_to_le16(stream_id);
7966 if (enable)
7967 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7968
7969 return hclge_cmd_send(&hdev->hw, &desc, 1);
7970 }
7971
7972 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7973 {
7974 struct hclge_vport *vport = hclge_get_vport(handle);
7975 struct hclge_dev *hdev = vport->back;
7976 int ret;
7977 u16 i;
7978
7979 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7980 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7981 if (ret)
7982 return ret;
7983 }
7984 return 0;
7985 }
7986
7987 static int hclge_set_loopback(struct hnae3_handle *handle,
7988 enum hnae3_loop loop_mode, bool en)
7989 {
7990 struct hclge_vport *vport = hclge_get_vport(handle);
7991 struct hclge_dev *hdev = vport->back;
7992 int ret;
7993
7994 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7995 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7996 * the same, the packets are looped back in the SSU. If SSU loopback
7997 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7998 */
7999 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8000 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8001
8002 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8003 HCLGE_SWITCH_ALW_LPBK_MASK);
8004 if (ret)
8005 return ret;
8006 }
8007
8008 switch (loop_mode) {
8009 case HNAE3_LOOP_APP:
8010 ret = hclge_set_app_loopback(hdev, en);
8011 break;
8012 case HNAE3_LOOP_SERIAL_SERDES:
8013 case HNAE3_LOOP_PARALLEL_SERDES:
8014 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8015 break;
8016 case HNAE3_LOOP_PHY:
8017 ret = hclge_set_phy_loopback(hdev, en);
8018 break;
8019 default:
8020 ret = -ENOTSUPP;
8021 dev_err(&hdev->pdev->dev,
8022 "loop_mode %d is not supported\n", loop_mode);
8023 break;
8024 }
8025
8026 if (ret)
8027 return ret;
8028
8029 ret = hclge_tqp_enable(handle, en);
8030 if (ret)
8031 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8032 en ? "enable" : "disable", ret);
8033
8034 return ret;
8035 }
8036
8037 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8038 {
8039 int ret;
8040
8041 ret = hclge_set_app_loopback(hdev, false);
8042 if (ret)
8043 return ret;
8044
8045 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8046 if (ret)
8047 return ret;
8048
8049 return hclge_cfg_common_loopback(hdev, false,
8050 HNAE3_LOOP_PARALLEL_SERDES);
8051 }
8052
8053 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8054 {
8055 struct hclge_vport *vport = hclge_get_vport(handle);
8056 struct hnae3_knic_private_info *kinfo;
8057 struct hnae3_queue *queue;
8058 struct hclge_tqp *tqp;
8059 int i;
8060
8061 kinfo = &vport->nic.kinfo;
8062 for (i = 0; i < kinfo->num_tqps; i++) {
8063 queue = handle->kinfo.tqp[i];
8064 tqp = container_of(queue, struct hclge_tqp, q);
8065 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8066 }
8067 }
8068
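/* Busy-wait (bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations) for an
 * in-flight link update in the service task to finish, i.e. until
 * HCLGE_STATE_LINK_UPDATING is cleared or serv_processed_cnt advances.
 */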
8069 static void hclge_flush_link_update(struct hclge_dev *hdev)
8070 {
8071 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8072
8073 unsigned long last = hdev->serv_processed_cnt;
8074 int i = 0;
8075
8076 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8077 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8078 last == hdev->serv_processed_cnt)
8079 usleep_range(1, 1);
8080 }
8081
8082 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8083 {
8084 struct hclge_vport *vport = hclge_get_vport(handle);
8085 struct hclge_dev *hdev = vport->back;
8086
8087 if (enable) {
8088 hclge_task_schedule(hdev, 0);
8089 } else {
8090 /* Set the DOWN flag here to disable link updating */
8091 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8092
8093 /* flush memory to make sure DOWN is seen by service task */
8094 smp_mb__before_atomic();
8095 hclge_flush_link_update(hdev);
8096 }
8097 }
8098
8099 static int hclge_ae_start(struct hnae3_handle *handle)
8100 {
8101 struct hclge_vport *vport = hclge_get_vport(handle);
8102 struct hclge_dev *hdev = vport->back;
8103
8104 /* mac enable */
8105 hclge_cfg_mac_mode(hdev, true);
8106 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8107 hdev->hw.mac.link = 0;
8108
8109 /* reset tqp stats */
8110 hclge_reset_tqp_stats(handle);
8111
8112 hclge_mac_start_phy(hdev);
8113
8114 return 0;
8115 }
8116
8117 static void hclge_ae_stop(struct hnae3_handle *handle)
8118 {
8119 struct hclge_vport *vport = hclge_get_vport(handle);
8120 struct hclge_dev *hdev = vport->back;
8121
8122 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8123 spin_lock_bh(&hdev->fd_rule_lock);
8124 hclge_clear_arfs_rules(hdev);
8125 spin_unlock_bh(&hdev->fd_rule_lock);
8126
8127 /* If it is not PF reset or FLR, the firmware will disable the MAC,
8128 * so only the phy needs to be stopped here.
8129 */
8130 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8131 hdev->reset_type != HNAE3_FUNC_RESET &&
8132 hdev->reset_type != HNAE3_FLR_RESET) {
8133 hclge_mac_stop_phy(hdev);
8134 hclge_update_link_status(hdev);
8135 return;
8136 }
8137
8138 hclge_reset_tqp(handle);
8139
8140 hclge_config_mac_tnl_int(hdev, false);
8141
8142 /* Mac disable */
8143 hclge_cfg_mac_mode(hdev, false);
8144
8145 hclge_mac_stop_phy(hdev);
8146
8147 /* reset tqp stats */
8148 hclge_reset_tqp_stats(handle);
8149 hclge_update_link_status(hdev);
8150 }
8151
8152 int hclge_vport_start(struct hclge_vport *vport)
8153 {
8154 struct hclge_dev *hdev = vport->back;
8155
8156 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8157 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8158 vport->last_active_jiffies = jiffies;
8159
8160 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8161 if (vport->vport_id) {
8162 hclge_restore_mac_table_common(vport);
8163 hclge_restore_vport_vlan_table(vport);
8164 } else {
8165 hclge_restore_hw_table(hdev);
8166 }
8167 }
8168
8169 clear_bit(vport->vport_id, hdev->vport_config_block);
8170
8171 return 0;
8172 }
8173
8174 void hclge_vport_stop(struct hclge_vport *vport)
8175 {
8176 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8177 }
8178
8179 static int hclge_client_start(struct hnae3_handle *handle)
8180 {
8181 struct hclge_vport *vport = hclge_get_vport(handle);
8182
8183 return hclge_vport_start(vport);
8184 }
8185
8186 static void hclge_client_stop(struct hnae3_handle *handle)
8187 {
8188 struct hclge_vport *vport = hclge_get_vport(handle);
8189
8190 hclge_vport_stop(vport);
8191 }
8192
8193 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8194 u16 cmdq_resp, u8 resp_code,
8195 enum hclge_mac_vlan_tbl_opcode op)
8196 {
8197 struct hclge_dev *hdev = vport->back;
8198
8199 if (cmdq_resp) {
8200 dev_err(&hdev->pdev->dev,
8201 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8202 cmdq_resp);
8203 return -EIO;
8204 }
8205
8206 if (op == HCLGE_MAC_VLAN_ADD) {
8207 if (!resp_code || resp_code == 1)
8208 return 0;
8209 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8210 resp_code == HCLGE_ADD_MC_OVERFLOW)
8211 return -ENOSPC;
8212
8213 dev_err(&hdev->pdev->dev,
8214 "add mac addr failed for undefined, code=%u.\n",
8215 resp_code);
8216 return -EIO;
8217 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8218 if (!resp_code) {
8219 return 0;
8220 } else if (resp_code == 1) {
8221 dev_dbg(&hdev->pdev->dev,
8222 "remove mac addr failed for miss.\n");
8223 return -ENOENT;
8224 }
8225
8226 dev_err(&hdev->pdev->dev,
8227 "remove mac addr failed for undefined, code=%u.\n",
8228 resp_code);
8229 return -EIO;
8230 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8231 if (!resp_code) {
8232 return 0;
8233 } else if (resp_code == 1) {
8234 dev_dbg(&hdev->pdev->dev,
8235 "lookup mac addr failed for miss.\n");
8236 return -ENOENT;
8237 }
8238
8239 dev_err(&hdev->pdev->dev,
8240 "lookup mac addr failed for undefined, code=%u.\n",
8241 resp_code);
8242 return -EIO;
8243 }
8244
8245 dev_err(&hdev->pdev->dev,
8246 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8247
8248 return -EINVAL;
8249 }
8250
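/* Set or clear the bit for vfid in the function bitmap carried by a
 * multi-descriptor MAC/VLAN command: the first 192 function ids live in
 * desc[1], the rest in desc[2], packed 32 ids per 32-bit data word.
 */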
8251 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8252 {
8253 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8254
8255 unsigned int word_num;
8256 unsigned int bit_num;
8257
8258 if (vfid > 255 || vfid < 0)
8259 return -EIO;
8260
8261 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8262 word_num = vfid / 32;
8263 bit_num = vfid % 32;
8264 if (clr)
8265 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8266 else
8267 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8268 } else {
8269 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8270 bit_num = vfid % 32;
8271 if (clr)
8272 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8273 else
8274 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8275 }
8276
8277 return 0;
8278 }
8279
8280 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8281 {
8282 #define HCLGE_DESC_NUMBER 3
8283 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8284 int i, j;
8285
8286 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8287 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8288 if (desc[i].data[j])
8289 return false;
8290
8291 return true;
8292 }
8293
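/* Pack a 6-byte MAC address into the mac_addr_hi32/mac_addr_lo16 fields
 * of a MAC-VLAN table entry (bytes 0-3 in the high word, bytes 4-5 in
 * the low half-word). For multicast addresses the multicast entry bits
 * are set as well.
 */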
8294 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8295 const u8 *addr, bool is_mc)
8296 {
8297 const unsigned char *mac_addr = addr;
8298 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8299 (mac_addr[0]) | (mac_addr[1] << 8);
8300 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8301
8302 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8303 if (is_mc) {
8304 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8305 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8306 }
8307
8308 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8309 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8310 }
8311
8312 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8313 struct hclge_mac_vlan_tbl_entry_cmd *req)
8314 {
8315 struct hclge_dev *hdev = vport->back;
8316 struct hclge_desc desc;
8317 u8 resp_code;
8318 u16 retval;
8319 int ret;
8320
8321 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8322
8323 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8324
8325 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8326 if (ret) {
8327 dev_err(&hdev->pdev->dev,
8328 "del mac addr failed for cmd_send, ret =%d.\n",
8329 ret);
8330 return ret;
8331 }
8332 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8333 retval = le16_to_cpu(desc.retval);
8334
8335 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8336 HCLGE_MAC_VLAN_REMOVE);
8337 }
8338
8339 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8340 struct hclge_mac_vlan_tbl_entry_cmd *req,
8341 struct hclge_desc *desc,
8342 bool is_mc)
8343 {
8344 struct hclge_dev *hdev = vport->back;
8345 u8 resp_code;
8346 u16 retval;
8347 int ret;
8348
8349 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8350 if (is_mc) {
8351 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8352 memcpy(desc[0].data,
8353 req,
8354 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8355 hclge_cmd_setup_basic_desc(&desc[1],
8356 HCLGE_OPC_MAC_VLAN_ADD,
8357 true);
8358 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8359 hclge_cmd_setup_basic_desc(&desc[2],
8360 HCLGE_OPC_MAC_VLAN_ADD,
8361 true);
8362 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8363 } else {
8364 memcpy(desc[0].data,
8365 req,
8366 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8367 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8368 }
8369 if (ret) {
8370 dev_err(&hdev->pdev->dev,
8371 "lookup mac addr failed for cmd_send, ret =%d.\n",
8372 ret);
8373 return ret;
8374 }
8375 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8376 retval = le16_to_cpu(desc[0].retval);
8377
8378 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8379 HCLGE_MAC_VLAN_LKUP);
8380 }
8381
8382 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8383 struct hclge_mac_vlan_tbl_entry_cmd *req,
8384 struct hclge_desc *mc_desc)
8385 {
8386 struct hclge_dev *hdev = vport->back;
8387 int cfg_status;
8388 u8 resp_code;
8389 u16 retval;
8390 int ret;
8391
8392 if (!mc_desc) {
8393 struct hclge_desc desc;
8394
8395 hclge_cmd_setup_basic_desc(&desc,
8396 HCLGE_OPC_MAC_VLAN_ADD,
8397 false);
8398 memcpy(desc.data, req,
8399 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8400 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8401 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8402 retval = le16_to_cpu(desc.retval);
8403
8404 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8405 resp_code,
8406 HCLGE_MAC_VLAN_ADD);
8407 } else {
8408 hclge_cmd_reuse_desc(&mc_desc[0], false);
8409 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8410 hclge_cmd_reuse_desc(&mc_desc[1], false);
8411 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8412 hclge_cmd_reuse_desc(&mc_desc[2], false);
8413 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8414 memcpy(mc_desc[0].data, req,
8415 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8416 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8417 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8418 retval = le16_to_cpu(mc_desc[0].retval);
8419
8420 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8421 resp_code,
8422 HCLGE_MAC_VLAN_ADD);
8423 }
8424
8425 if (ret) {
8426 dev_err(&hdev->pdev->dev,
8427 "add mac addr failed for cmd_send, ret =%d.\n",
8428 ret);
8429 return ret;
8430 }
8431
8432 return cfg_status;
8433 }
8434
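/* Request space_size unicast MAC-VLAN (UMV) table entries from the
 * firmware and report how many entries were actually granted through
 * allocated_size.
 */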
8435 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8436 u16 *allocated_size)
8437 {
8438 struct hclge_umv_spc_alc_cmd *req;
8439 struct hclge_desc desc;
8440 int ret;
8441
8442 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8443 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8444
8445 req->space_size = cpu_to_le32(space_size);
8446
8447 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8448 if (ret) {
8449 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8450 ret);
8451 return ret;
8452 }
8453
8454 *allocated_size = le32_to_cpu(desc.data[1]);
8455
8456 return 0;
8457 }
8458
8459 static int hclge_init_umv_space(struct hclge_dev *hdev)
8460 {
8461 u16 allocated_size = 0;
8462 int ret;
8463
8464 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8465 if (ret)
8466 return ret;
8467
8468 if (allocated_size < hdev->wanted_umv_size)
8469 dev_warn(&hdev->pdev->dev,
8470 "failed to alloc umv space, want %u, get %u\n",
8471 hdev->wanted_umv_size, allocated_size);
8472
8473 hdev->max_umv_size = allocated_size;
8474 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8475 hdev->share_umv_size = hdev->priv_umv_size +
8476 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8477
8478 return 0;
8479 }
8480
8481 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8482 {
8483 struct hclge_vport *vport;
8484 int i;
8485
8486 for (i = 0; i < hdev->num_alloc_vport; i++) {
8487 vport = &hdev->vport[i];
8488 vport->used_umv_num = 0;
8489 }
8490
8491 mutex_lock(&hdev->vport_lock);
8492 hdev->share_umv_size = hdev->priv_umv_size +
8493 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8494 mutex_unlock(&hdev->vport_lock);
8495 }
8496
8497 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8498 {
8499 struct hclge_dev *hdev = vport->back;
8500 bool is_full;
8501
8502 if (need_lock)
8503 mutex_lock(&hdev->vport_lock);
8504
8505 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8506 hdev->share_umv_size == 0);
8507
8508 if (need_lock)
8509 mutex_unlock(&hdev->vport_lock);
8510
8511 return is_full;
8512 }
8513
8514 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8515 {
8516 struct hclge_dev *hdev = vport->back;
8517
8518 if (is_free) {
8519 if (vport->used_umv_num > hdev->priv_umv_size)
8520 hdev->share_umv_size++;
8521
8522 if (vport->used_umv_num > 0)
8523 vport->used_umv_num--;
8524 } else {
8525 if (vport->used_umv_num >= hdev->priv_umv_size &&
8526 hdev->share_umv_size > 0)
8527 hdev->share_umv_size--;
8528 vport->used_umv_num++;
8529 }
8530 }
8531
8532 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8533 const u8 *mac_addr)
8534 {
8535 struct hclge_mac_node *mac_node, *tmp;
8536
8537 list_for_each_entry_safe(mac_node, tmp, list, node)
8538 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8539 return mac_node;
8540
8541 return NULL;
8542 }
8543
8544 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8545 enum HCLGE_MAC_NODE_STATE state)
8546 {
8547 switch (state) {
8548 /* from set_rx_mode or tmp_add_list */
8549 case HCLGE_MAC_TO_ADD:
8550 if (mac_node->state == HCLGE_MAC_TO_DEL)
8551 mac_node->state = HCLGE_MAC_ACTIVE;
8552 break;
8553 /* only from set_rx_mode */
8554 case HCLGE_MAC_TO_DEL:
8555 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8556 list_del(&mac_node->node);
8557 kfree(mac_node);
8558 } else {
8559 mac_node->state = HCLGE_MAC_TO_DEL;
8560 }
8561 break;
8562 /* only from tmp_add_list, the mac_node->state won't be
8563 * ACTIVE.
8564 */
8565 case HCLGE_MAC_ACTIVE:
8566 if (mac_node->state == HCLGE_MAC_TO_ADD)
8567 mac_node->state = HCLGE_MAC_ACTIVE;
8568
8569 break;
8570 }
8571 }
8572
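/* Record a MAC address add/delete request in the vport's software list; the
 * hardware MAC table itself is updated later by the periodic sync task.
 */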
8573 int hclge_update_mac_list(struct hclge_vport *vport,
8574 enum HCLGE_MAC_NODE_STATE state,
8575 enum HCLGE_MAC_ADDR_TYPE mac_type,
8576 const unsigned char *addr)
8577 {
8578 struct hclge_dev *hdev = vport->back;
8579 struct hclge_mac_node *mac_node;
8580 struct list_head *list;
8581
8582 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8583 &vport->uc_mac_list : &vport->mc_mac_list;
8584
8585 spin_lock_bh(&vport->mac_list_lock);
8586
8587 /* If the mac addr is already in the mac list, there is no need to add
8588 * a new node; just update the existing node's state, remove it, or do
8589 * nothing, depending on the requested state.
8590 */
8591 mac_node = hclge_find_mac_node(list, addr);
8592 if (mac_node) {
8593 hclge_update_mac_node(mac_node, state);
8594 spin_unlock_bh(&vport->mac_list_lock);
8595 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8596 return 0;
8597 }
8598
8599 /* if this address has never been added, there is nothing to delete */
8600 if (state == HCLGE_MAC_TO_DEL) {
8601 spin_unlock_bh(&vport->mac_list_lock);
8602 dev_err(&hdev->pdev->dev,
8603 "failed to delete address %pM from mac list\n",
8604 addr);
8605 return -ENOENT;
8606 }
8607
8608 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8609 if (!mac_node) {
8610 spin_unlock_bh(&vport->mac_list_lock);
8611 return -ENOMEM;
8612 }
8613
8614 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8615
8616 mac_node->state = state;
8617 ether_addr_copy(mac_node->mac_addr, addr);
8618 list_add_tail(&mac_node->node, list);
8619
8620 spin_unlock_bh(&vport->mac_list_lock);
8621
8622 return 0;
8623 }
8624
8625 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8626 const unsigned char *addr)
8627 {
8628 struct hclge_vport *vport = hclge_get_vport(handle);
8629
8630 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8631 addr);
8632 }
8633
8634 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8635 const unsigned char *addr)
8636 {
8637 struct hclge_dev *hdev = vport->back;
8638 struct hclge_mac_vlan_tbl_entry_cmd req;
8639 struct hclge_desc desc;
8640 u16 egress_port = 0;
8641 int ret;
8642
8643 /* mac addr check */
8644 if (is_zero_ether_addr(addr) ||
8645 is_broadcast_ether_addr(addr) ||
8646 is_multicast_ether_addr(addr)) {
8647 dev_err(&hdev->pdev->dev,
8648 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8649 addr, is_zero_ether_addr(addr),
8650 is_broadcast_ether_addr(addr),
8651 is_multicast_ether_addr(addr));
8652 return -EINVAL;
8653 }
8654
8655 memset(&req, 0, sizeof(req));
8656
8657 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8658 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8659
8660 req.egress_port = cpu_to_le16(egress_port);
8661
8662 hclge_prepare_mac_addr(&req, addr, false);
8663
8664 /* Look up the mac address in the mac_vlan table, and add
8665 * it if the entry does not exist. Duplicate unicast entries
8666 * are not allowed in the mac vlan table.
8667 */
8668 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8669 if (ret == -ENOENT) {
8670 mutex_lock(&hdev->vport_lock);
8671 if (!hclge_is_umv_space_full(vport, false)) {
8672 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8673 if (!ret)
8674 hclge_update_umv_space(vport, false);
8675 mutex_unlock(&hdev->vport_lock);
8676 return ret;
8677 }
8678 mutex_unlock(&hdev->vport_lock);
8679
8680 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8681 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8682 hdev->priv_umv_size);
8683
8684 return -ENOSPC;
8685 }
8686
8687 /* check if we just hit the duplicate */
8688 if (!ret)
8689 return -EEXIST;
8690
8691 return ret;
8692 }
8693
8694 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8695 const unsigned char *addr)
8696 {
8697 struct hclge_vport *vport = hclge_get_vport(handle);
8698
8699 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8700 addr);
8701 }
8702
8703 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8704 const unsigned char *addr)
8705 {
8706 struct hclge_dev *hdev = vport->back;
8707 struct hclge_mac_vlan_tbl_entry_cmd req;
8708 int ret;
8709
8710 /* mac addr check */
8711 if (is_zero_ether_addr(addr) ||
8712 is_broadcast_ether_addr(addr) ||
8713 is_multicast_ether_addr(addr)) {
8714 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8715 addr);
8716 return -EINVAL;
8717 }
8718
8719 memset(&req, 0, sizeof(req));
8720 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8721 hclge_prepare_mac_addr(&req, addr, false);
8722 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8723 if (!ret) {
8724 mutex_lock(&hdev->vport_lock);
8725 hclge_update_umv_space(vport, true);
8726 mutex_unlock(&hdev->vport_lock);
8727 } else if (ret == -ENOENT) {
8728 ret = 0;
8729 }
8730
8731 return ret;
8732 }
8733
8734 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8735 const unsigned char *addr)
8736 {
8737 struct hclge_vport *vport = hclge_get_vport(handle);
8738
8739 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8740 addr);
8741 }
8742
8743 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8744 const unsigned char *addr)
8745 {
8746 struct hclge_dev *hdev = vport->back;
8747 struct hclge_mac_vlan_tbl_entry_cmd req;
8748 struct hclge_desc desc[3];
8749 int status;
8750
8751 /* mac addr check */
8752 if (!is_multicast_ether_addr(addr)) {
8753 dev_err(&hdev->pdev->dev,
8754 "Add mc mac err! invalid mac:%pM.\n",
8755 addr);
8756 return -EINVAL;
8757 }
8758 memset(&req, 0, sizeof(req));
8759 hclge_prepare_mac_addr(&req, addr, true);
8760 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8761 if (status) {
8762 /* This mac addr does not exist, add a new entry for it */
8763 memset(desc[0].data, 0, sizeof(desc[0].data));
8764 memset(desc[1].data, 0, sizeof(desc[0].data));
8765 memset(desc[2].data, 0, sizeof(desc[0].data));
8766 }
8767 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8768 if (status)
8769 return status;
8770 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8771 /* if the table has already overflowed, do not print every time */
8772 if (status == -ENOSPC &&
8773 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8774 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8775
8776 return status;
8777 }
8778
8779 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8780 const unsigned char *addr)
8781 {
8782 struct hclge_vport *vport = hclge_get_vport(handle);
8783
8784 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8785 addr);
8786 }
8787
8788 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8789 const unsigned char *addr)
8790 {
8791 struct hclge_dev *hdev = vport->back;
8792 struct hclge_mac_vlan_tbl_entry_cmd req;
8793 enum hclge_cmd_status status;
8794 struct hclge_desc desc[3];
8795
8796 /* mac addr check */
8797 if (!is_multicast_ether_addr(addr)) {
8798 dev_dbg(&hdev->pdev->dev,
8799 "Remove mc mac err! invalid mac:%pM.\n",
8800 addr);
8801 return -EINVAL;
8802 }
8803
8804 memset(&req, 0, sizeof(req));
8805 hclge_prepare_mac_addr(&req, addr, true);
8806 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8807 if (!status) {
8808 /* This mac addr exists, remove this handle's VFID from it */
8809 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8810 if (status)
8811 return status;
8812
8813 if (hclge_is_all_function_id_zero(desc))
8814 /* All the vfids are zero, so delete this entry */
8815 status = hclge_remove_mac_vlan_tbl(vport, &req);
8816 else
8817 /* Not all the vfids are zero, so just update the vfids */
8818 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8819 } else if (status == -ENOENT) {
8820 status = 0;
8821 }
8822
8823 return status;
8824 }
8825
8826 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8827 struct list_head *list,
8828 int (*sync)(struct hclge_vport *,
8829 const unsigned char *))
8830 {
8831 struct hclge_mac_node *mac_node, *tmp;
8832 int ret;
8833
8834 list_for_each_entry_safe(mac_node, tmp, list, node) {
8835 ret = sync(vport, mac_node->mac_addr);
8836 if (!ret) {
8837 mac_node->state = HCLGE_MAC_ACTIVE;
8838 } else {
8839 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8840 &vport->state);
8841
8842 /* If one unicast mac address already exists in hardware,
8843 * keep trying whether the other unicast mac addresses are
8844 * new addresses that can still be added.
8845 */
8846 if (ret != -EEXIST)
8847 break;
8848 }
8849 }
8850 }
8851
8852 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8853 struct list_head *list,
8854 int (*unsync)(struct hclge_vport *,
8855 const unsigned char *))
8856 {
8857 struct hclge_mac_node *mac_node, *tmp;
8858 int ret;
8859
8860 list_for_each_entry_safe(mac_node, tmp, list, node) {
8861 ret = unsync(vport, mac_node->mac_addr);
8862 if (!ret || ret == -ENOENT) {
8863 list_del(&mac_node->node);
8864 kfree(mac_node);
8865 } else {
8866 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8867 &vport->state);
8868 break;
8869 }
8870 }
8871 }
8872
8873 static bool hclge_sync_from_add_list(struct list_head *add_list,
8874 struct list_head *mac_list)
8875 {
8876 struct hclge_mac_node *mac_node, *tmp, *new_node;
8877 bool all_added = true;
8878
8879 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8880 if (mac_node->state == HCLGE_MAC_TO_ADD)
8881 all_added = false;
8882
8883 /* If the mac address from tmp_add_list is not in the
8884 * uc/mc_mac_list, a TO_DEL request was received during the
8885 * time window while the address was being added to the mac
8886 * table. If the mac_node state is ACTIVE, change it to TO_DEL
8887 * so it gets removed next time; otherwise it must be TO_ADD,
8888 * meaning the address was never added to the mac table,
8889 * so just remove the mac node.
8890 */
8891 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8892 if (new_node) {
8893 hclge_update_mac_node(new_node, mac_node->state);
8894 list_del(&mac_node->node);
8895 kfree(mac_node);
8896 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8897 mac_node->state = HCLGE_MAC_TO_DEL;
8898 list_move_tail(&mac_node->node, mac_list);
8899 } else {
8900 list_del(&mac_node->node);
8901 kfree(mac_node);
8902 }
8903 }
8904
8905 return all_added;
8906 }
8907
8908 static void hclge_sync_from_del_list(struct list_head *del_list,
8909 struct list_head *mac_list)
8910 {
8911 struct hclge_mac_node *mac_node, *tmp, *new_node;
8912
8913 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8914 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8915 if (new_node) {
8916 /* If the mac addr exists in the mac list, it means a new
8917 * TO_ADD request was received during the time window of
8918 * configuring the mac address. The mac node state is
8919 * TO_ADD and the address is still present in the hardware
8920 * (because the delete failed), so we just need to change
8921 * the mac node state to ACTIVE.
8922 */
8923 new_node->state = HCLGE_MAC_ACTIVE;
8924 list_del(&mac_node->node);
8925 kfree(mac_node);
8926 } else {
8927 list_move_tail(&mac_node->node, mac_list);
8928 }
8929 }
8930 }
8931
8932 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8933 enum HCLGE_MAC_ADDR_TYPE mac_type,
8934 bool is_all_added)
8935 {
8936 if (mac_type == HCLGE_MAC_ADDR_UC) {
8937 if (is_all_added)
8938 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8939 else
8940 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8941 } else {
8942 if (is_all_added)
8943 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8944 else
8945 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8946 }
8947 }
8948
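/* Sync one vport's software MAC list to hardware in three steps: snapshot the
 * pending TO_ADD/TO_DEL nodes into temporary lists under the spinlock, issue
 * the hardware commands with the lock released, then merge the results back
 * into the main list and update the overflow promisc flags.
 */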
8949 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8950 enum HCLGE_MAC_ADDR_TYPE mac_type)
8951 {
8952 struct hclge_mac_node *mac_node, *tmp, *new_node;
8953 struct list_head tmp_add_list, tmp_del_list;
8954 struct list_head *list;
8955 bool all_added;
8956
8957 INIT_LIST_HEAD(&tmp_add_list);
8958 INIT_LIST_HEAD(&tmp_del_list);
8959
8960 /* move the mac addrs to the tmp_add_list and tmp_del_list, so
8961 * these mac addrs can be added/deleted outside the spin lock
8962 */
8963 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8964 &vport->uc_mac_list : &vport->mc_mac_list;
8965
8966 spin_lock_bh(&vport->mac_list_lock);
8967
8968 list_for_each_entry_safe(mac_node, tmp, list, node) {
8969 switch (mac_node->state) {
8970 case HCLGE_MAC_TO_DEL:
8971 list_move_tail(&mac_node->node, &tmp_del_list);
8972 break;
8973 case HCLGE_MAC_TO_ADD:
8974 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8975 if (!new_node)
8976 goto stop_traverse;
8977 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8978 new_node->state = mac_node->state;
8979 list_add_tail(&new_node->node, &tmp_add_list);
8980 break;
8981 default:
8982 break;
8983 }
8984 }
8985
8986 stop_traverse:
8987 spin_unlock_bh(&vport->mac_list_lock);
8988
8989 /* delete first, in order to get max mac table space for adding */
8990 if (mac_type == HCLGE_MAC_ADDR_UC) {
8991 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8992 hclge_rm_uc_addr_common);
8993 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8994 hclge_add_uc_addr_common);
8995 } else {
8996 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8997 hclge_rm_mc_addr_common);
8998 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8999 hclge_add_mc_addr_common);
9000 }
9001
9002 /* if adding/deleting some mac addresses failed, move them back to
9003 * the mac_list and retry next time.
9004 */
9005 spin_lock_bh(&vport->mac_list_lock);
9006
9007 hclge_sync_from_del_list(&tmp_del_list, list);
9008 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9009
9010 spin_unlock_bh(&vport->mac_list_lock);
9011
9012 hclge_update_overflow_flags(vport, mac_type, all_added);
9013 }
9014
9015 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9016 {
9017 struct hclge_dev *hdev = vport->back;
9018
9019 if (test_bit(vport->vport_id, hdev->vport_config_block))
9020 return false;
9021
9022 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9023 return true;
9024
9025 return false;
9026 }
9027
9028 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9029 {
9030 int i;
9031
9032 for (i = 0; i < hdev->num_alloc_vport; i++) {
9033 struct hclge_vport *vport = &hdev->vport[i];
9034
9035 if (!hclge_need_sync_mac_table(vport))
9036 continue;
9037
9038 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9039 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9040 }
9041 }
9042
9043 static void hclge_build_del_list(struct list_head *list,
9044 bool is_del_list,
9045 struct list_head *tmp_del_list)
9046 {
9047 struct hclge_mac_node *mac_cfg, *tmp;
9048
9049 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9050 switch (mac_cfg->state) {
9051 case HCLGE_MAC_TO_DEL:
9052 case HCLGE_MAC_ACTIVE:
9053 list_move_tail(&mac_cfg->node, tmp_del_list);
9054 break;
9055 case HCLGE_MAC_TO_ADD:
9056 if (is_del_list) {
9057 list_del(&mac_cfg->node);
9058 kfree(mac_cfg);
9059 }
9060 break;
9061 }
9062 }
9063 }
9064
9065 static void hclge_unsync_del_list(struct hclge_vport *vport,
9066 int (*unsync)(struct hclge_vport *vport,
9067 const unsigned char *addr),
9068 bool is_del_list,
9069 struct list_head *tmp_del_list)
9070 {
9071 struct hclge_mac_node *mac_cfg, *tmp;
9072 int ret;
9073
9074 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9075 ret = unsync(vport, mac_cfg->mac_addr);
9076 if (!ret || ret == -ENOENT) {
9077 /* clear all mac addrs from hardware, but keep them in
9078 * the mac list so they can be restored after the vf
9079 * reset has finished.
9080 */
9081 if (!is_del_list &&
9082 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9083 mac_cfg->state = HCLGE_MAC_TO_ADD;
9084 } else {
9085 list_del(&mac_cfg->node);
9086 kfree(mac_cfg);
9087 }
9088 } else if (is_del_list) {
9089 mac_cfg->state = HCLGE_MAC_TO_DEL;
9090 }
9091 }
9092 }
9093
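/* Remove a vport's MAC addresses from hardware. With is_del_list the software
 * list entries are dropped as well; otherwise active entries are kept and
 * marked TO_ADD so they can be restored after the VF reset.
 */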
9094 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9095 enum HCLGE_MAC_ADDR_TYPE mac_type)
9096 {
9097 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9098 struct hclge_dev *hdev = vport->back;
9099 struct list_head tmp_del_list, *list;
9100
9101 if (mac_type == HCLGE_MAC_ADDR_UC) {
9102 list = &vport->uc_mac_list;
9103 unsync = hclge_rm_uc_addr_common;
9104 } else {
9105 list = &vport->mc_mac_list;
9106 unsync = hclge_rm_mc_addr_common;
9107 }
9108
9109 INIT_LIST_HEAD(&tmp_del_list);
9110
9111 if (!is_del_list)
9112 set_bit(vport->vport_id, hdev->vport_config_block);
9113
9114 spin_lock_bh(&vport->mac_list_lock);
9115
9116 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9117
9118 spin_unlock_bh(&vport->mac_list_lock);
9119
9120 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9121
9122 spin_lock_bh(&vport->mac_list_lock);
9123
9124 hclge_sync_from_del_list(&tmp_del_list, list);
9125
9126 spin_unlock_bh(&vport->mac_list_lock);
9127 }
9128
9129 /* remove all mac addresses when uninitializing */
9130 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9131 enum HCLGE_MAC_ADDR_TYPE mac_type)
9132 {
9133 struct hclge_mac_node *mac_node, *tmp;
9134 struct hclge_dev *hdev = vport->back;
9135 struct list_head tmp_del_list, *list;
9136
9137 INIT_LIST_HEAD(&tmp_del_list);
9138
9139 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9140 &vport->uc_mac_list : &vport->mc_mac_list;
9141
9142 spin_lock_bh(&vport->mac_list_lock);
9143
9144 list_for_each_entry_safe(mac_node, tmp, list, node) {
9145 switch (mac_node->state) {
9146 case HCLGE_MAC_TO_DEL:
9147 case HCLGE_MAC_ACTIVE:
9148 list_move_tail(&mac_node->node, &tmp_del_list);
9149 break;
9150 case HCLGE_MAC_TO_ADD:
9151 list_del(&mac_node->node);
9152 kfree(mac_node);
9153 break;
9154 }
9155 }
9156
9157 spin_unlock_bh(&vport->mac_list_lock);
9158
9159 if (mac_type == HCLGE_MAC_ADDR_UC)
9160 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9161 hclge_rm_uc_addr_common);
9162 else
9163 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9164 hclge_rm_mc_addr_common);
9165
9166 if (!list_empty(&tmp_del_list))
9167 dev_warn(&hdev->pdev->dev,
9168 "uninit %s mac list for vport %u not completely.\n",
9169 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9170 vport->vport_id);
9171
9172 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9173 list_del(&mac_node->node);
9174 kfree(mac_node);
9175 }
9176 }
9177
9178 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9179 {
9180 struct hclge_vport *vport;
9181 int i;
9182
9183 for (i = 0; i < hdev->num_alloc_vport; i++) {
9184 vport = &hdev->vport[i];
9185 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9186 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9187 }
9188 }
9189
9190 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9191 u16 cmdq_resp, u8 resp_code)
9192 {
9193 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9194 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9195 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9196 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9197
9198 int return_status;
9199
9200 if (cmdq_resp) {
9201 dev_err(&hdev->pdev->dev,
9202 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9203 cmdq_resp);
9204 return -EIO;
9205 }
9206
9207 switch (resp_code) {
9208 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9209 case HCLGE_ETHERTYPE_ALREADY_ADD:
9210 return_status = 0;
9211 break;
9212 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9213 dev_err(&hdev->pdev->dev,
9214 "add mac ethertype failed for manager table overflow.\n");
9215 return_status = -EIO;
9216 break;
9217 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9218 dev_err(&hdev->pdev->dev,
9219 "add mac ethertype failed for key conflict.\n");
9220 return_status = -EIO;
9221 break;
9222 default:
9223 dev_err(&hdev->pdev->dev,
9224 "add mac ethertype failed for undefined, code=%u.\n",
9225 resp_code);
9226 return_status = -EIO;
9227 }
9228
9229 return return_status;
9230 }
9231
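/* Check whether the requested VF MAC is already in use, either as an entry in
 * the hardware MAC VLAN table or as the configured MAC of another VF.
 */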
9232 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9233 u8 *mac_addr)
9234 {
9235 struct hclge_mac_vlan_tbl_entry_cmd req;
9236 struct hclge_dev *hdev = vport->back;
9237 struct hclge_desc desc;
9238 u16 egress_port = 0;
9239 int i;
9240
9241 if (is_zero_ether_addr(mac_addr))
9242 return false;
9243
9244 memset(&req, 0, sizeof(req));
9245 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9246 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9247 req.egress_port = cpu_to_le16(egress_port);
9248 hclge_prepare_mac_addr(&req, mac_addr, false);
9249
9250 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9251 return true;
9252
9253 vf_idx += HCLGE_VF_VPORT_START_NUM;
9254 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9255 if (i != vf_idx &&
9256 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9257 return true;
9258
9259 return false;
9260 }
9261
9262 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9263 u8 *mac_addr)
9264 {
9265 struct hclge_vport *vport = hclge_get_vport(handle);
9266 struct hclge_dev *hdev = vport->back;
9267
9268 vport = hclge_get_vf_vport(hdev, vf);
9269 if (!vport)
9270 return -EINVAL;
9271
9272 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9273 dev_info(&hdev->pdev->dev,
9274 "Specified MAC(=%pM) is same as before, no change committed!\n",
9275 mac_addr);
9276 return 0;
9277 }
9278
9279 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9280 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9281 mac_addr);
9282 return -EEXIST;
9283 }
9284
9285 ether_addr_copy(vport->vf_info.mac, mac_addr);
9286
9287 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9288 dev_info(&hdev->pdev->dev,
9289 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9290 vf, mac_addr);
9291 return hclge_inform_reset_assert_to_vf(vport);
9292 }
9293
9294 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9295 vf, mac_addr);
9296 return 0;
9297 }
9298
9299 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9300 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9301 {
9302 struct hclge_desc desc;
9303 u8 resp_code;
9304 u16 retval;
9305 int ret;
9306
9307 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9308 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9309
9310 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9311 if (ret) {
9312 dev_err(&hdev->pdev->dev,
9313 "add mac ethertype failed for cmd_send, ret =%d.\n",
9314 ret);
9315 return ret;
9316 }
9317
9318 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9319 retval = le16_to_cpu(desc.retval);
9320
9321 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9322 }
9323
9324 static int init_mgr_tbl(struct hclge_dev *hdev)
9325 {
9326 int ret;
9327 int i;
9328
9329 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9330 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9331 if (ret) {
9332 dev_err(&hdev->pdev->dev,
9333 "add mac ethertype failed, ret =%d.\n",
9334 ret);
9335 return ret;
9336 }
9337 }
9338
9339 return 0;
9340 }
9341
9342 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9343 {
9344 struct hclge_vport *vport = hclge_get_vport(handle);
9345 struct hclge_dev *hdev = vport->back;
9346
9347 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9348 }
9349
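/* Update the software MAC list when the device's own MAC address changes:
 * queue the new address for adding (kept at the list head) and mark the old
 * address for deletion if it differs from the new one.
 */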
9350 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9351 const u8 *old_addr, const u8 *new_addr)
9352 {
9353 struct list_head *list = &vport->uc_mac_list;
9354 struct hclge_mac_node *old_node, *new_node;
9355
9356 new_node = hclge_find_mac_node(list, new_addr);
9357 if (!new_node) {
9358 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9359 if (!new_node)
9360 return -ENOMEM;
9361
9362 new_node->state = HCLGE_MAC_TO_ADD;
9363 ether_addr_copy(new_node->mac_addr, new_addr);
9364 list_add(&new_node->node, list);
9365 } else {
9366 if (new_node->state == HCLGE_MAC_TO_DEL)
9367 new_node->state = HCLGE_MAC_ACTIVE;
9368
9369 /* Make sure the new addr is at the head of the list, so the
9370 * device addr is not skipped (due to the umv space limitation)
9371 * when the mac table is re-populated after a global/imp reset,
9372 * which clears the hardware mac table.
9373 */
9374 list_move(&new_node->node, list);
9375 }
9376
9377 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9378 old_node = hclge_find_mac_node(list, old_addr);
9379 if (old_node) {
9380 if (old_node->state == HCLGE_MAC_TO_ADD) {
9381 list_del(&old_node->node);
9382 kfree(old_node);
9383 } else {
9384 old_node->state = HCLGE_MAC_TO_DEL;
9385 }
9386 }
9387 }
9388
9389 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9390
9391 return 0;
9392 }
9393
9394 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9395 bool is_first)
9396 {
9397 const unsigned char *new_addr = (const unsigned char *)p;
9398 struct hclge_vport *vport = hclge_get_vport(handle);
9399 struct hclge_dev *hdev = vport->back;
9400 unsigned char *old_addr = NULL;
9401 int ret;
9402
9403 /* mac addr check */
9404 if (is_zero_ether_addr(new_addr) ||
9405 is_broadcast_ether_addr(new_addr) ||
9406 is_multicast_ether_addr(new_addr)) {
9407 dev_err(&hdev->pdev->dev,
9408 "change uc mac err! invalid mac: %pM.\n",
9409 new_addr);
9410 return -EINVAL;
9411 }
9412
9413 ret = hclge_pause_addr_cfg(hdev, new_addr);
9414 if (ret) {
9415 dev_err(&hdev->pdev->dev,
9416 "failed to configure mac pause address, ret = %d\n",
9417 ret);
9418 return ret;
9419 }
9420
9421 if (!is_first)
9422 old_addr = hdev->hw.mac.mac_addr;
9423
9424 spin_lock_bh(&vport->mac_list_lock);
9425 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9426 if (ret) {
9427 dev_err(&hdev->pdev->dev,
9428 "failed to change the mac addr:%pM, ret = %d\n",
9429 new_addr, ret);
9430 spin_unlock_bh(&vport->mac_list_lock);
9431
9432 if (!is_first)
9433 hclge_pause_addr_cfg(hdev, old_addr);
9434
9435 return ret;
9436 }
9437 /* the dev addr must be updated while holding the spin lock, to
9438 * prevent it from being removed by the set_rx_mode path.
9439 */
9440 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9441 spin_unlock_bh(&vport->mac_list_lock);
9442
9443 hclge_task_schedule(hdev, 0);
9444
9445 return 0;
9446 }
9447
9448 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9449 {
9450 struct mii_ioctl_data *data = if_mii(ifr);
9451
9452 if (!hnae3_dev_phy_imp_supported(hdev))
9453 return -EOPNOTSUPP;
9454
9455 switch (cmd) {
9456 case SIOCGMIIPHY:
9457 data->phy_id = hdev->hw.mac.phy_addr;
9458 /* this command reads phy id and register at the same time */
9459 fallthrough;
9460 case SIOCGMIIREG:
9461 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9462 return 0;
9463
9464 case SIOCSMIIREG:
9465 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9466 default:
9467 return -EOPNOTSUPP;
9468 }
9469 }
9470
9471 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9472 int cmd)
9473 {
9474 struct hclge_vport *vport = hclge_get_vport(handle);
9475 struct hclge_dev *hdev = vport->back;
9476
9477 switch (cmd) {
9478 case SIOCGHWTSTAMP:
9479 return hclge_ptp_get_cfg(hdev, ifr);
9480 case SIOCSHWTSTAMP:
9481 return hclge_ptp_set_cfg(hdev, ifr);
9482 default:
9483 if (!hdev->hw.mac.phydev)
9484 return hclge_mii_ioctl(hdev, ifr, cmd);
9485 }
9486
9487 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9488 }
9489
9490 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9491 bool bypass_en)
9492 {
9493 struct hclge_port_vlan_filter_bypass_cmd *req;
9494 struct hclge_desc desc;
9495 int ret;
9496
9497 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9498 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9499 req->vf_id = vf_id;
9500 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9501 bypass_en ? 1 : 0);
9502
9503 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9504 if (ret)
9505 dev_err(&hdev->pdev->dev,
9506 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9507 vf_id, ret);
9508
9509 return ret;
9510 }
9511
9512 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9513 u8 fe_type, bool filter_en, u8 vf_id)
9514 {
9515 struct hclge_vlan_filter_ctrl_cmd *req;
9516 struct hclge_desc desc;
9517 int ret;
9518
9519 /* read current vlan filter parameter */
9520 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9521 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9522 req->vlan_type = vlan_type;
9523 req->vf_id = vf_id;
9524
9525 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9526 if (ret) {
9527 dev_err(&hdev->pdev->dev,
9528 "failed to get vlan filter config, ret = %d.\n", ret);
9529 return ret;
9530 }
9531
9532 /* modify and write new config parameter */
9533 hclge_cmd_reuse_desc(&desc, false);
9534 req->vlan_fe = filter_en ?
9535 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9536
9537 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9538 if (ret)
9539 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9540 ret);
9541
9542 return ret;
9543 }
9544
9545 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9546 {
9547 struct hclge_dev *hdev = vport->back;
9548 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9549 int ret;
9550
9551 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9552 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9553 HCLGE_FILTER_FE_EGRESS_V1_B,
9554 enable, vport->vport_id);
9555
9556 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9557 HCLGE_FILTER_FE_EGRESS, enable,
9558 vport->vport_id);
9559 if (ret)
9560 return ret;
9561
9562 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9563 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9564 !enable);
9565 } else if (!vport->vport_id) {
9566 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9567 enable = false;
9568
9569 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9570 HCLGE_FILTER_FE_INGRESS,
9571 enable, 0);
9572 }
9573
9574 return ret;
9575 }
9576
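/* Decide whether the vport's VLAN filter should actually be enabled, taking
 * into account the port based VLAN state, trusted VF promiscuous requests,
 * the user promiscuous flag on the PF, and whether any non-zero VLAN is in
 * use.
 */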
9577 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9578 {
9579 struct hnae3_handle *handle = &vport->nic;
9580 struct hclge_vport_vlan_cfg *vlan, *tmp;
9581 struct hclge_dev *hdev = vport->back;
9582
9583 if (vport->vport_id) {
9584 if (vport->port_base_vlan_cfg.state !=
9585 HNAE3_PORT_BASE_VLAN_DISABLE)
9586 return true;
9587
9588 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9589 return false;
9590 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9591 return false;
9592 }
9593
9594 if (!vport->req_vlan_fltr_en)
9595 return false;
9596
9597 /* for compatibility with older devices, always enable the vlan filter */
9598 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9599 return true;
9600
9601 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9602 if (vlan->vlan_id != 0)
9603 return true;
9604
9605 return false;
9606 }
9607
9608 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9609 {
9610 struct hclge_dev *hdev = vport->back;
9611 bool need_en;
9612 int ret;
9613
9614 mutex_lock(&hdev->vport_lock);
9615
9616 vport->req_vlan_fltr_en = request_en;
9617
9618 need_en = hclge_need_enable_vport_vlan_filter(vport);
9619 if (need_en == vport->cur_vlan_fltr_en) {
9620 mutex_unlock(&hdev->vport_lock);
9621 return 0;
9622 }
9623
9624 ret = hclge_set_vport_vlan_filter(vport, need_en);
9625 if (ret) {
9626 mutex_unlock(&hdev->vport_lock);
9627 return ret;
9628 }
9629
9630 vport->cur_vlan_fltr_en = need_en;
9631
9632 mutex_unlock(&hdev->vport_lock);
9633
9634 return 0;
9635 }
9636
9637 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9638 {
9639 struct hclge_vport *vport = hclge_get_vport(handle);
9640
9641 return hclge_enable_vport_vlan_filter(vport, enable);
9642 }
9643
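/* Program the per-VF VLAN filter. The VF bitmap spans two descriptors, so
 * the target VF's bit is set in either the first or the second descriptor
 * depending on its index.
 */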
9644 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9645 bool is_kill, u16 vlan,
9646 struct hclge_desc *desc)
9647 {
9648 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9649 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9650 u8 vf_byte_val;
9651 u8 vf_byte_off;
9652 int ret;
9653
9654 hclge_cmd_setup_basic_desc(&desc[0],
9655 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9656 hclge_cmd_setup_basic_desc(&desc[1],
9657 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9658
9659 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9660
9661 vf_byte_off = vfid / 8;
9662 vf_byte_val = 1 << (vfid % 8);
9663
9664 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9665 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9666
9667 req0->vlan_id = cpu_to_le16(vlan);
9668 req0->vlan_cfg = is_kill;
9669
9670 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9671 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9672 else
9673 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9674
9675 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9676 if (ret) {
9677 dev_err(&hdev->pdev->dev,
9678 "Send vf vlan command fail, ret =%d.\n",
9679 ret);
9680 return ret;
9681 }
9682
9683 return 0;
9684 }
9685
9686 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9687 bool is_kill, struct hclge_desc *desc)
9688 {
9689 struct hclge_vlan_filter_vf_cfg_cmd *req;
9690
9691 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9692
9693 if (!is_kill) {
9694 #define HCLGE_VF_VLAN_NO_ENTRY 2
9695 if (!req->resp_code || req->resp_code == 1)
9696 return 0;
9697
9698 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9699 set_bit(vfid, hdev->vf_vlan_full);
9700 dev_warn(&hdev->pdev->dev,
9701 "vf vlan table is full, vf vlan filter is disabled\n");
9702 return 0;
9703 }
9704
9705 dev_err(&hdev->pdev->dev,
9706 "Add vf vlan filter fail, ret =%u.\n",
9707 req->resp_code);
9708 } else {
9709 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9710 if (!req->resp_code)
9711 return 0;
9712
9713 /* The vf vlan filter is disabled when the vf vlan table is
9714 * full, so new vlan ids are not added to the vf vlan table.
9715 * Just return 0 without a warning, to avoid massive verbose
9716 * logs on unload.
9717 */
9718 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9719 return 0;
9720
9721 dev_err(&hdev->pdev->dev,
9722 "Kill vf vlan filter fail, ret =%u.\n",
9723 req->resp_code);
9724 }
9725
9726 return -EIO;
9727 }
9728
9729 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9730 bool is_kill, u16 vlan)
9731 {
9732 struct hclge_vport *vport = &hdev->vport[vfid];
9733 struct hclge_desc desc[2];
9734 int ret;
9735
9736 /* If the vf vlan table is full, firmware disables the vf vlan filter,
9737 * so it is neither possible nor necessary to add a new vlan id there.
9738 * If spoof check is enabled and the vf vlan table is full, no new vlan
9739 * should be added, because tx packets with that vlan id would be dropped.
9740 */
9741 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9742 if (vport->vf_info.spoofchk && vlan) {
9743 dev_err(&hdev->pdev->dev,
9744 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9745 return -EPERM;
9746 }
9747 return 0;
9748 }
9749
9750 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9751 if (ret)
9752 return ret;
9753
9754 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9755 }
9756
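/* Update the port VLAN filter bitmap: the vlan_id selects a block of
 * HCLGE_VLAN_ID_OFFSET_STEP entries (vlan_offset), then a byte and a bit
 * within that block.
 */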
9757 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9758 u16 vlan_id, bool is_kill)
9759 {
9760 struct hclge_vlan_filter_pf_cfg_cmd *req;
9761 struct hclge_desc desc;
9762 u8 vlan_offset_byte_val;
9763 u8 vlan_offset_byte;
9764 u8 vlan_offset_160;
9765 int ret;
9766
9767 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9768
9769 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9770 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9771 HCLGE_VLAN_BYTE_SIZE;
9772 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9773
9774 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9775 req->vlan_offset = vlan_offset_160;
9776 req->vlan_cfg = is_kill;
9777 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9778
9779 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9780 if (ret)
9781 dev_err(&hdev->pdev->dev,
9782 "port vlan command, send fail, ret =%d.\n", ret);
9783 return ret;
9784 }
9785
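/* Apply a VLAN add/remove to hardware: update the per-vport VF VLAN filter
 * first, then track vport membership in hdev->vlan_table so the shared port
 * VLAN filter is only touched when the first vport joins or the last one
 * leaves the VLAN.
 */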
9786 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9787 u16 vport_id, u16 vlan_id,
9788 bool is_kill)
9789 {
9790 u16 vport_idx, vport_num = 0;
9791 int ret;
9792
9793 if (is_kill && !vlan_id)
9794 return 0;
9795
9796 if (vlan_id >= VLAN_N_VID)
9797 return -EINVAL;
9798
9799 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9800 if (ret) {
9801 dev_err(&hdev->pdev->dev,
9802 "Set %u vport vlan filter config fail, ret =%d.\n",
9803 vport_id, ret);
9804 return ret;
9805 }
9806
9807 /* vlan 0 may be added twice when 8021q module is enabled */
9808 if (!is_kill && !vlan_id &&
9809 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9810 return 0;
9811
9812 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9813 dev_err(&hdev->pdev->dev,
9814 "Add port vlan failed, vport %u is already in vlan %u\n",
9815 vport_id, vlan_id);
9816 return -EINVAL;
9817 }
9818
9819 if (is_kill &&
9820 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9821 dev_err(&hdev->pdev->dev,
9822 "Delete port vlan failed, vport %u is not in vlan %u\n",
9823 vport_id, vlan_id);
9824 return -EINVAL;
9825 }
9826
9827 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9828 vport_num++;
9829
9830 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9831 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9832 is_kill);
9833
9834 return ret;
9835 }
9836
9837 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9838 {
9839 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9840 struct hclge_vport_vtag_tx_cfg_cmd *req;
9841 struct hclge_dev *hdev = vport->back;
9842 struct hclge_desc desc;
9843 u16 bmap_index;
9844 int status;
9845
9846 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9847
9848 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9849 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9850 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9851 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9852 vcfg->accept_tag1 ? 1 : 0);
9853 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9854 vcfg->accept_untag1 ? 1 : 0);
9855 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9856 vcfg->accept_tag2 ? 1 : 0);
9857 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9858 vcfg->accept_untag2 ? 1 : 0);
9859 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9860 vcfg->insert_tag1_en ? 1 : 0);
9861 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9862 vcfg->insert_tag2_en ? 1 : 0);
9863 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9864 vcfg->tag_shift_mode_en ? 1 : 0);
9865 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9866
9867 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9868 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9869 HCLGE_VF_NUM_PER_BYTE;
9870 req->vf_bitmap[bmap_index] =
9871 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9872
9873 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9874 if (status)
9875 dev_err(&hdev->pdev->dev,
9876 "Send port txvlan cfg command fail, ret =%d\n",
9877 status);
9878
9879 return status;
9880 }
9881
9882 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9883 {
9884 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9885 struct hclge_vport_vtag_rx_cfg_cmd *req;
9886 struct hclge_dev *hdev = vport->back;
9887 struct hclge_desc desc;
9888 u16 bmap_index;
9889 int status;
9890
9891 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9892
9893 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9894 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9895 vcfg->strip_tag1_en ? 1 : 0);
9896 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9897 vcfg->strip_tag2_en ? 1 : 0);
9898 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9899 vcfg->vlan1_vlan_prionly ? 1 : 0);
9900 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9901 vcfg->vlan2_vlan_prionly ? 1 : 0);
9902 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9903 vcfg->strip_tag1_discard_en ? 1 : 0);
9904 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9905 vcfg->strip_tag2_discard_en ? 1 : 0);
9906
9907 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9908 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9909 HCLGE_VF_NUM_PER_BYTE;
9910 req->vf_bitmap[bmap_index] =
9911 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9912
9913 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9914 if (status)
9915 dev_err(&hdev->pdev->dev,
9916 "Send port rxvlan cfg command fail, ret =%d\n",
9917 status);
9918
9919 return status;
9920 }
9921
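/* Configure TX VLAN tag insertion and RX tag stripping for a vport based on
 * its port based VLAN state: when a port based VLAN is enabled, that VLAN tag
 * (including its qos bits) is inserted on transmit, and the RX strip/discard
 * settings are adjusted so the port tag is handled in hardware.
 */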
9922 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9923 u16 port_base_vlan_state,
9924 u16 vlan_tag, u8 qos)
9925 {
9926 int ret;
9927
9928 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9929 vport->txvlan_cfg.accept_tag1 = true;
9930 vport->txvlan_cfg.insert_tag1_en = false;
9931 vport->txvlan_cfg.default_tag1 = 0;
9932 } else {
9933 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9934
9935 vport->txvlan_cfg.accept_tag1 =
9936 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9937 vport->txvlan_cfg.insert_tag1_en = true;
9938 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9939 vlan_tag;
9940 }
9941
9942 vport->txvlan_cfg.accept_untag1 = true;
9943
9944 /* accept_tag2 and accept_untag2 are not supported on
9945 * pdev revision(0x20); newer revisions support them, but
9946 * these two fields cannot be configured by the user.
9947 */
9948 vport->txvlan_cfg.accept_tag2 = true;
9949 vport->txvlan_cfg.accept_untag2 = true;
9950 vport->txvlan_cfg.insert_tag2_en = false;
9951 vport->txvlan_cfg.default_tag2 = 0;
9952 vport->txvlan_cfg.tag_shift_mode_en = true;
9953
9954 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9955 vport->rxvlan_cfg.strip_tag1_en = false;
9956 vport->rxvlan_cfg.strip_tag2_en =
9957 vport->rxvlan_cfg.rx_vlan_offload_en;
9958 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9959 } else {
9960 vport->rxvlan_cfg.strip_tag1_en =
9961 vport->rxvlan_cfg.rx_vlan_offload_en;
9962 vport->rxvlan_cfg.strip_tag2_en = true;
9963 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9964 }
9965
9966 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9967 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9968 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9969
9970 ret = hclge_set_vlan_tx_offload_cfg(vport);
9971 if (ret)
9972 return ret;
9973
9974 return hclge_set_vlan_rx_offload_cfg(vport);
9975 }
9976
9977 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9978 {
9979 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9980 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9981 struct hclge_desc desc;
9982 int status;
9983
9984 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9985 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9986 rx_req->ot_fst_vlan_type =
9987 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9988 rx_req->ot_sec_vlan_type =
9989 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9990 rx_req->in_fst_vlan_type =
9991 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9992 rx_req->in_sec_vlan_type =
9993 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9994
9995 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9996 if (status) {
9997 dev_err(&hdev->pdev->dev,
9998 "Send rxvlan protocol type command fail, ret =%d\n",
9999 status);
10000 return status;
10001 }
10002
10003 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10004
10005 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10006 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10007 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10008
10009 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10010 if (status)
10011 dev_err(&hdev->pdev->dev,
10012 "Send txvlan protocol type command fail, ret =%d\n",
10013 status);
10014
10015 return status;
10016 }
10017
10018 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10019 {
10020 #define HCLGE_DEF_VLAN_TYPE 0x8100
10021
10022 struct hnae3_handle *handle = &hdev->vport[0].nic;
10023 struct hclge_vport *vport;
10024 int ret;
10025 int i;
10026
10027 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10028 /* for revision 0x21, vf vlan filter is per function */
10029 for (i = 0; i < hdev->num_alloc_vport; i++) {
10030 vport = &hdev->vport[i];
10031 ret = hclge_set_vlan_filter_ctrl(hdev,
10032 HCLGE_FILTER_TYPE_VF,
10033 HCLGE_FILTER_FE_EGRESS,
10034 true,
10035 vport->vport_id);
10036 if (ret)
10037 return ret;
10038 vport->cur_vlan_fltr_en = true;
10039 }
10040
10041 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10042 HCLGE_FILTER_FE_INGRESS, true,
10043 0);
10044 if (ret)
10045 return ret;
10046 } else {
10047 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10048 HCLGE_FILTER_FE_EGRESS_V1_B,
10049 true, 0);
10050 if (ret)
10051 return ret;
10052 }
10053
10054 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10055 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10056 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10057 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10058 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10059 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10060
10061 ret = hclge_set_vlan_protocol_type(hdev);
10062 if (ret)
10063 return ret;
10064
10065 for (i = 0; i < hdev->num_alloc_vport; i++) {
10066 u16 vlan_tag;
10067 u8 qos;
10068
10069 vport = &hdev->vport[i];
10070 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10071 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10072
10073 ret = hclge_vlan_offload_cfg(vport,
10074 vport->port_base_vlan_cfg.state,
10075 vlan_tag, qos);
10076 if (ret)
10077 return ret;
10078 }
10079
10080 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10081 }
10082
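/* Track a VLAN id in the vport's software vlan_list (if not already present)
 * so it can be restored to hardware after a reset; hd_tbl_status records
 * whether the id has been written to the hardware table yet.
 */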
10083 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10084 bool writen_to_tbl)
10085 {
10086 struct hclge_vport_vlan_cfg *vlan, *tmp;
10087
10088 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10089 if (vlan->vlan_id == vlan_id)
10090 return;
10091
10092 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10093 if (!vlan)
10094 return;
10095
10096 vlan->hd_tbl_status = writen_to_tbl;
10097 vlan->vlan_id = vlan_id;
10098
10099 list_add_tail(&vlan->node, &vport->vlan_list);
10100 }
10101
10102 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10103 {
10104 struct hclge_vport_vlan_cfg *vlan, *tmp;
10105 struct hclge_dev *hdev = vport->back;
10106 int ret;
10107
10108 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10109 if (!vlan->hd_tbl_status) {
10110 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10111 vport->vport_id,
10112 vlan->vlan_id, false);
10113 if (ret) {
10114 dev_err(&hdev->pdev->dev,
10115 "restore vport vlan list failed, ret=%d\n",
10116 ret);
10117 return ret;
10118 }
10119 }
10120 vlan->hd_tbl_status = true;
10121 }
10122
10123 return 0;
10124 }
10125
10126 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10127 bool is_write_tbl)
10128 {
10129 struct hclge_vport_vlan_cfg *vlan, *tmp;
10130 struct hclge_dev *hdev = vport->back;
10131
10132 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10133 if (vlan->vlan_id == vlan_id) {
10134 if (is_write_tbl && vlan->hd_tbl_status)
10135 hclge_set_vlan_filter_hw(hdev,
10136 htons(ETH_P_8021Q),
10137 vport->vport_id,
10138 vlan_id,
10139 true);
10140
10141 list_del(&vlan->node);
10142 kfree(vlan);
10143 break;
10144 }
10145 }
10146 }
10147
10148 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10149 {
10150 struct hclge_vport_vlan_cfg *vlan, *tmp;
10151 struct hclge_dev *hdev = vport->back;
10152
10153 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10154 if (vlan->hd_tbl_status)
10155 hclge_set_vlan_filter_hw(hdev,
10156 htons(ETH_P_8021Q),
10157 vport->vport_id,
10158 vlan->vlan_id,
10159 true);
10160
10161 vlan->hd_tbl_status = false;
10162 if (is_del_list) {
10163 list_del(&vlan->node);
10164 kfree(vlan);
10165 }
10166 }
10167 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10168 }
10169
10170 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10171 {
10172 struct hclge_vport_vlan_cfg *vlan, *tmp;
10173 struct hclge_vport *vport;
10174 int i;
10175
10176 for (i = 0; i < hdev->num_alloc_vport; i++) {
10177 vport = &hdev->vport[i];
10178 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10179 list_del(&vlan->node);
10180 kfree(vlan);
10181 }
10182 }
10183 }
10184
10185 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10186 {
10187 struct hclge_vport_vlan_cfg *vlan, *tmp;
10188 struct hclge_dev *hdev = vport->back;
10189 u16 vlan_proto;
10190 u16 vlan_id;
10191 u16 state;
10192 int ret;
10193
10194 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10195 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10196 state = vport->port_base_vlan_cfg.state;
10197
10198 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10199 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10200 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10201 vport->vport_id, vlan_id,
10202 false);
10203 return;
10204 }
10205
10206 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10207 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10208 vport->vport_id,
10209 vlan->vlan_id, false);
10210 if (ret)
10211 break;
10212 vlan->hd_tbl_status = true;
10213 }
10214 }
10215
10216 /* For global reset and imp reset, hardware will clear the mac table,
10217 * so we change the mac address state from ACTIVE to TO_ADD; they can
10218 * then be restored in the service task after the reset completes.
10219 * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
10220 * to be restored after reset, so just remove these mac nodes from mac_list.
10221 */
10222 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10223 {
10224 struct hclge_mac_node *mac_node, *tmp;
10225
10226 list_for_each_entry_safe(mac_node, tmp, list, node) {
10227 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10228 mac_node->state = HCLGE_MAC_TO_ADD;
10229 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10230 list_del(&mac_node->node);
10231 kfree(mac_node);
10232 }
10233 }
10234 }
10235
10236 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10237 {
10238 spin_lock_bh(&vport->mac_list_lock);
10239
10240 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10241 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10242 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10243
10244 spin_unlock_bh(&vport->mac_list_lock);
10245 }
10246
10247 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10248 {
10249 struct hclge_vport *vport = &hdev->vport[0];
10250 struct hnae3_handle *handle = &vport->nic;
10251
10252 hclge_restore_mac_table_common(vport);
10253 hclge_restore_vport_vlan_table(vport);
10254 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10255 hclge_restore_fd_entries(handle);
10256 }
10257
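/* Enable or disable hardware stripping of the received vlan tag. Which
 * of the two tags is stripped, and whether the stripped tag is
 * discarded, depends on the vport's port base vlan state.
 */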
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10259 {
10260 struct hclge_vport *vport = hclge_get_vport(handle);
10261
10262 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10263 vport->rxvlan_cfg.strip_tag1_en = false;
10264 vport->rxvlan_cfg.strip_tag2_en = enable;
10265 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10266 } else {
10267 vport->rxvlan_cfg.strip_tag1_en = enable;
10268 vport->rxvlan_cfg.strip_tag2_en = true;
10269 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10270 }
10271
10272 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10273 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10274 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10275 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10276
10277 return hclge_set_vlan_rx_offload_cfg(vport);
10278 }
10279
static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10281 {
10282 struct hclge_dev *hdev = vport->back;
10283
10284 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10285 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10286 }
10287
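/* Rewrite the hardware vlan filter when the port base vlan state
 * changes: when it becomes enabled, remove the entries of the vport
 * vlan list from hardware and install the new port base vlan; when it
 * becomes disabled, remove the old port base vlan and write the vport
 * vlan list back to hardware.
 */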
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
10292 {
10293 struct hclge_dev *hdev = vport->back;
10294 int ret;
10295
10296 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10297 hclge_rm_vport_all_vlan_table(vport, false);
10298 /* force clear VLAN 0 */
10299 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10300 if (ret)
10301 return ret;
10302 return hclge_set_vlan_filter_hw(hdev,
10303 htons(new_info->vlan_proto),
10304 vport->vport_id,
10305 new_info->vlan_tag,
10306 false);
10307 }
10308
10309 /* force add VLAN 0 */
10310 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10311 if (ret)
10312 return ret;
10313
10314 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10315 vport->vport_id, old_info->vlan_tag,
10316 true);
10317 if (ret)
10318 return ret;
10319
10320 return hclge_add_vport_all_vlan_table(vport);
10321 }
10322
static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
					  const struct hclge_vlan_info *old_cfg)
10325 {
10326 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10327 return true;
10328
10329 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10330 return true;
10331
10332 return false;
10333 }
10334
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
10337 {
10338 struct hnae3_handle *nic = &vport->nic;
10339 struct hclge_vlan_info *old_vlan_info;
10340 struct hclge_dev *hdev = vport->back;
10341 int ret;
10342
10343 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10344
10345 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10346 vlan_info->qos);
10347 if (ret)
10348 return ret;
10349
10350 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10351 goto out;
10352
10353 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10354 /* add new VLAN tag */
10355 ret = hclge_set_vlan_filter_hw(hdev,
10356 htons(vlan_info->vlan_proto),
10357 vport->vport_id,
10358 vlan_info->vlan_tag,
10359 false);
10360 if (ret)
10361 return ret;
10362
10363 /* remove old VLAN tag */
10364 if (old_vlan_info->vlan_tag == 0)
10365 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10366 true, 0);
10367 else
10368 ret = hclge_set_vlan_filter_hw(hdev,
10369 htons(ETH_P_8021Q),
10370 vport->vport_id,
10371 old_vlan_info->vlan_tag,
10372 true);
10373 if (ret) {
10374 dev_err(&hdev->pdev->dev,
10375 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10376 vport->vport_id, old_vlan_info->vlan_tag, ret);
10377 return ret;
10378 }
10379
10380 goto out;
10381 }
10382
10383 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10384 old_vlan_info);
10385 if (ret)
10386 return ret;
10387
10388 out:
10389 vport->port_base_vlan_cfg.state = state;
10390 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10391 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10392 else
10393 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10394
10395 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10396 hclge_set_vport_vlan_fltr_change(vport);
10397
10398 return 0;
10399 }
10400
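/* Work out the port base vlan state transition for a VF vlan request:
 * a non-zero vlan or qos enables (or modifies) the port base vlan,
 * vlan 0 with qos 0 disables it, and NOCHANGE is returned when the
 * requested vlan/qos already matches the current configuration.
 */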
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan, u8 qos)
10404 {
10405 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10406 if (!vlan && !qos)
10407 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10408
10409 return HNAE3_PORT_BASE_VLAN_ENABLE;
10410 }
10411
10412 if (!vlan && !qos)
10413 return HNAE3_PORT_BASE_VLAN_DISABLE;
10414
10415 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10416 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10417 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10418
10419 return HNAE3_PORT_BASE_VLAN_MODIFY;
10420 }
10421
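/* Set the port base vlan of a VF on behalf of the host. The request is
 * validated (vlan id below VLAN_N_VID, qos no bigger than 7, 802.1Q
 * only), applied through hclge_update_port_base_vlan_cfg() and, on
 * devices older than DEVICE_VERSION_V3, forwarded to an alive VF via
 * hclge_push_vf_port_base_vlan_info().
 */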
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
10424 {
10425 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10426 struct hclge_vport *vport = hclge_get_vport(handle);
10427 struct hclge_dev *hdev = vport->back;
10428 struct hclge_vlan_info vlan_info;
10429 u16 state;
10430 int ret;
10431
10432 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10433 return -EOPNOTSUPP;
10434
10435 vport = hclge_get_vf_vport(hdev, vfid);
10436 if (!vport)
10437 return -EINVAL;
10438
/* qos is a 3-bit value, so it cannot be bigger than 7 */
10440 if (vlan > VLAN_N_VID - 1 || qos > 7)
10441 return -EINVAL;
10442 if (proto != htons(ETH_P_8021Q))
10443 return -EPROTONOSUPPORT;
10444
10445 state = hclge_get_port_base_vlan_state(vport,
10446 vport->port_base_vlan_cfg.state,
10447 vlan, qos);
10448 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10449 return 0;
10450
10451 vlan_info.vlan_tag = vlan;
10452 vlan_info.qos = qos;
10453 vlan_info.vlan_proto = ntohs(proto);
10454
10455 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10456 if (ret) {
10457 dev_err(&hdev->pdev->dev,
10458 "failed to update port base vlan for vf %d, ret = %d\n",
10459 vfid, ret);
10460 return ret;
10461 }
10462
10463 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10464 * VLAN state.
10465 */
10466 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10467 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10468 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10469 vport->vport_id, state,
10470 &vlan_info);
10471
10472 return 0;
10473 }
10474
static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10476 {
10477 struct hclge_vlan_info *vlan_info;
10478 struct hclge_vport *vport;
10479 int ret;
10480 int vf;
10481
10482 /* clear port base vlan for all vf */
10483 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10484 vport = &hdev->vport[vf];
10485 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10486
10487 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10488 vport->vport_id,
10489 vlan_info->vlan_tag, true);
10490 if (ret)
10491 dev_err(&hdev->pdev->dev,
10492 "failed to clear vf vlan for vf%d, ret = %d\n",
10493 vf - HCLGE_VF_VPORT_START_NUM, ret);
10494 }
10495 }
10496
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
10499 {
10500 struct hclge_vport *vport = hclge_get_vport(handle);
10501 struct hclge_dev *hdev = vport->back;
10502 bool writen_to_tbl = false;
10503 int ret = 0;
10504
/* When the device is resetting or the reset has failed, the firmware
 * is unable to handle the mailbox. Just record the vlan id, and remove
 * it after the reset has finished.
 */
10509 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10510 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10511 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10512 return -EBUSY;
10513 }
10514
/* When port base vlan is enabled, we use the port base vlan as the
 * vlan filter entry. In this case, we don't update the vlan filter
 * table when the user adds a new vlan or removes an existing vlan,
 * we just update the vport vlan list. The vlan ids in the vlan list
 * are written to the vlan filter table only once port base vlan is
 * disabled.
 */
10521 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10522 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10523 vlan_id, is_kill);
10524 writen_to_tbl = true;
10525 }
10526
10527 if (!ret) {
10528 if (is_kill)
10529 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10530 else
10531 hclge_add_vport_vlan_table(vport, vlan_id,
10532 writen_to_tbl);
10533 } else if (is_kill) {
/* When removing the hw vlan filter fails, record the vlan id
 * and try to remove it from hw later, to stay consistent with
 * the stack.
 */
10538 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10539 }
10540
10541 hclge_set_vport_vlan_fltr_change(vport);
10542
10543 return ret;
10544 }
10545
static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10547 {
10548 struct hclge_vport *vport;
10549 int ret;
10550 u16 i;
10551
10552 for (i = 0; i < hdev->num_alloc_vport; i++) {
10553 vport = &hdev->vport[i];
10554 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10555 &vport->state))
10556 continue;
10557
10558 ret = hclge_enable_vport_vlan_filter(vport,
10559 vport->req_vlan_fltr_en);
10560 if (ret) {
10561 dev_err(&hdev->pdev->dev,
10562 "failed to sync vlan filter state for vport%u, ret = %d\n",
10563 vport->vport_id, ret);
10564 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10565 &vport->state);
10566 return;
10567 }
10568 }
10569 }
10570
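/* Periodically retry the vlan deletions that previously failed (for
 * example while a reset was in progress). Failed vlan ids are taken
 * from each vport's vlan_del_fail_bmap; at most HCLGE_MAX_SYNC_COUNT
 * entries are handled per invocation to bound the time spent here.
 */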
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10572 {
10573 #define HCLGE_MAX_SYNC_COUNT 60
10574
10575 int i, ret, sync_cnt = 0;
10576 u16 vlan_id;
10577
10578 /* start from vport 1 for PF is always alive */
10579 for (i = 0; i < hdev->num_alloc_vport; i++) {
10580 struct hclge_vport *vport = &hdev->vport[i];
10581
10582 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10583 VLAN_N_VID);
10584 while (vlan_id != VLAN_N_VID) {
10585 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10586 vport->vport_id, vlan_id,
10587 true);
10588 if (ret && ret != -EINVAL)
10589 return;
10590
10591 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10592 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10593 hclge_set_vport_vlan_fltr_change(vport);
10594
10595 sync_cnt++;
10596 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10597 return;
10598
10599 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10600 VLAN_N_VID);
10601 }
10602 }
10603
10604 hclge_sync_vlan_fltr_state(hdev);
10605 }
10606
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10608 {
10609 struct hclge_config_max_frm_size_cmd *req;
10610 struct hclge_desc desc;
10611
10612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10613
10614 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10615 req->max_frm_size = cpu_to_le16(new_mps);
10616 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10617
10618 return hclge_cmd_send(&hdev->hw, &desc, 1);
10619 }
10620
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10622 {
10623 struct hclge_vport *vport = hclge_get_vport(handle);
10624
10625 return hclge_set_vport_mtu(vport, new_mtu);
10626 }
10627
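/* Convert the requested MTU into a maximum frame size and program it.
 * The frame size adds the Ethernet header, FCS and room for two vlan
 * tags, e.g. an MTU of 1500 gives 1500 + 14 + 4 + 2 * 4 = 1526 bytes,
 * then clamped to at least HCLGE_MAC_DEFAULT_FRAME. A VF only records
 * its mps (it must fit within the PF's), while the PF reprograms the
 * MAC and reallocates the packet buffers.
 */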
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10629 {
10630 struct hclge_dev *hdev = vport->back;
10631 int i, max_frm_size, ret;
10632
/* HW supports 2 layers of vlan */
10634 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10635 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10636 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10637 return -EINVAL;
10638
10639 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10640 mutex_lock(&hdev->vport_lock);
10641 /* VF's mps must fit within hdev->mps */
10642 if (vport->vport_id && max_frm_size > hdev->mps) {
10643 mutex_unlock(&hdev->vport_lock);
10644 return -EINVAL;
10645 } else if (vport->vport_id) {
10646 vport->mps = max_frm_size;
10647 mutex_unlock(&hdev->vport_lock);
10648 return 0;
10649 }
10650
/* PF's mps must be greater than VF's mps */
10652 for (i = 1; i < hdev->num_alloc_vport; i++)
10653 if (max_frm_size < hdev->vport[i].mps) {
10654 mutex_unlock(&hdev->vport_lock);
10655 return -EINVAL;
10656 }
10657
10658 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10659
10660 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10661 if (ret) {
10662 dev_err(&hdev->pdev->dev,
10663 "Change mtu fail, ret =%d\n", ret);
10664 goto out;
10665 }
10666
10667 hdev->mps = max_frm_size;
10668 vport->mps = max_frm_size;
10669
10670 ret = hclge_buffer_alloc(hdev);
10671 if (ret)
10672 dev_err(&hdev->pdev->dev,
10673 "Allocate buffer fail, ret =%d\n", ret);
10674
10675 out:
10676 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10677 mutex_unlock(&hdev->vport_lock);
10678 return ret;
10679 }
10680
static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
10683 {
10684 struct hclge_reset_tqp_queue_cmd *req;
10685 struct hclge_desc desc;
10686 int ret;
10687
10688 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10689
10690 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10691 req->tqp_id = cpu_to_le16(queue_id);
10692 if (enable)
10693 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10694
10695 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10696 if (ret) {
10697 dev_err(&hdev->pdev->dev,
10698 "Send tqp reset cmd error, status =%d\n", ret);
10699 return ret;
10700 }
10701
10702 return 0;
10703 }
10704
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
				  u8 *reset_status)
10707 {
10708 struct hclge_reset_tqp_queue_cmd *req;
10709 struct hclge_desc desc;
10710 int ret;
10711
10712 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10713
10714 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10715 req->tqp_id = cpu_to_le16(queue_id);
10716
10717 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10718 if (ret) {
10719 dev_err(&hdev->pdev->dev,
10720 "Get reset status error, status =%d\n", ret);
10721 return ret;
10722 }
10723
10724 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10725
10726 return 0;
10727 }
10728
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10730 {
10731 struct hnae3_queue *queue;
10732 struct hclge_tqp *tqp;
10733
10734 queue = handle->kinfo.tqp[queue_id];
10735 tqp = container_of(queue, struct hclge_tqp, q);
10736
10737 return tqp->index;
10738 }
10739
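/* Reset every tqp of the handle one by one: assert the reset through
 * the firmware, poll the ready flag for up to HCLGE_TQP_RESET_TRY_TIMES
 * iterations (roughly 1 ms apart), then deassert the reset again.
 */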
static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10741 {
10742 struct hclge_vport *vport = hclge_get_vport(handle);
10743 struct hclge_dev *hdev = vport->back;
10744 u16 reset_try_times = 0;
10745 u8 reset_status;
10746 u16 queue_gid;
10747 int ret;
10748 u16 i;
10749
10750 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10751 queue_gid = hclge_covert_handle_qid_global(handle, i);
10752 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10753 if (ret) {
10754 dev_err(&hdev->pdev->dev,
10755 "failed to send reset tqp cmd, ret = %d\n",
10756 ret);
10757 return ret;
10758 }
10759
10760 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10761 ret = hclge_get_reset_status(hdev, queue_gid,
10762 &reset_status);
10763 if (ret)
10764 return ret;
10765
10766 if (reset_status)
10767 break;
10768
10769 /* Wait for tqp hw reset */
10770 usleep_range(1000, 1200);
10771 }
10772
10773 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10774 dev_err(&hdev->pdev->dev,
10775 "wait for tqp hw reset timeout\n");
10776 return -ETIME;
10777 }
10778
10779 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10780 if (ret) {
10781 dev_err(&hdev->pdev->dev,
10782 "failed to deassert soft reset, ret = %d\n",
10783 ret);
10784 return ret;
10785 }
10786 reset_try_times = 0;
10787 }
10788 return 0;
10789 }
10790
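/* Reset the RCB of all the handle's queues with a single firmware
 * command. Firmware that does not support this command reports
 * HCLGE_RESET_RCB_NOT_SUPPORT, in which case we fall back to resetting
 * the tqps one by one.
 */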
static int hclge_reset_rcb(struct hnae3_handle *handle)
10792 {
10793 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10794 #define HCLGE_RESET_RCB_SUCCESS 1U
10795
10796 struct hclge_vport *vport = hclge_get_vport(handle);
10797 struct hclge_dev *hdev = vport->back;
10798 struct hclge_reset_cmd *req;
10799 struct hclge_desc desc;
10800 u8 return_status;
10801 u16 queue_gid;
10802 int ret;
10803
10804 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10805
10806 req = (struct hclge_reset_cmd *)desc.data;
10807 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10808 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10809 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10810 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10811
10812 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10813 if (ret) {
10814 dev_err(&hdev->pdev->dev,
10815 "failed to send rcb reset cmd, ret = %d\n", ret);
10816 return ret;
10817 }
10818
10819 return_status = req->fun_reset_rcb_return_status;
10820 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10821 return 0;
10822
10823 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10824 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10825 return_status);
10826 return -EIO;
10827 }
10828
/* If the reset rcb cmd is unsupported, fall back to sending the
 * reset tqp cmd to reset all tqps one by one.
 */
10832 return hclge_reset_tqp_cmd(handle);
10833 }
10834
int hclge_reset_tqp(struct hnae3_handle *handle)
10836 {
10837 struct hclge_vport *vport = hclge_get_vport(handle);
10838 struct hclge_dev *hdev = vport->back;
10839 int ret;
10840
10841 /* only need to disable PF's tqp */
10842 if (!vport->vport_id) {
10843 ret = hclge_tqp_enable(handle, false);
10844 if (ret) {
10845 dev_err(&hdev->pdev->dev,
10846 "failed to disable tqp, ret = %d\n", ret);
10847 return ret;
10848 }
10849 }
10850
10851 return hclge_reset_rcb(handle);
10852 }
10853
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10855 {
10856 struct hclge_vport *vport = hclge_get_vport(handle);
10857 struct hclge_dev *hdev = vport->back;
10858
10859 return hdev->fw_version;
10860 }
10861
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10863 {
10864 struct phy_device *phydev = hdev->hw.mac.phydev;
10865
10866 if (!phydev)
10867 return;
10868
10869 phy_set_asym_pause(phydev, rx_en, tx_en);
10870 }
10871
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10873 {
10874 int ret;
10875
10876 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10877 return 0;
10878
10879 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10880 if (ret)
10881 dev_err(&hdev->pdev->dev,
10882 "configure pauseparam error, ret = %d.\n", ret);
10883
10884 return ret;
10885 }
10886
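/* Re-resolve MAC pause settings after PHY autonegotiation: combine the
 * local advertisement with the link partner's pause bits using
 * mii_resolve_flowctrl_fdx(), force pause off on half-duplex links and
 * program the result into the MAC.
 */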
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10888 {
10889 struct phy_device *phydev = hdev->hw.mac.phydev;
10890 u16 remote_advertising = 0;
10891 u16 local_advertising;
10892 u32 rx_pause, tx_pause;
10893 u8 flowctl;
10894
10895 if (!phydev->link || !phydev->autoneg)
10896 return 0;
10897
10898 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10899
10900 if (phydev->pause)
10901 remote_advertising = LPA_PAUSE_CAP;
10902
10903 if (phydev->asym_pause)
10904 remote_advertising |= LPA_PAUSE_ASYM;
10905
10906 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10907 remote_advertising);
10908 tx_pause = flowctl & FLOW_CTRL_TX;
10909 rx_pause = flowctl & FLOW_CTRL_RX;
10910
10911 if (phydev->duplex == HCLGE_MAC_HALF) {
10912 tx_pause = 0;
10913 rx_pause = 0;
10914 }
10915
10916 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10917 }
10918
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
10921 {
10922 struct hclge_vport *vport = hclge_get_vport(handle);
10923 struct hclge_dev *hdev = vport->back;
10924 u8 media_type = hdev->hw.mac.media_type;
10925
10926 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10927 hclge_get_autoneg(handle) : 0;
10928
10929 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10930 *rx_en = 0;
10931 *tx_en = 0;
10932 return;
10933 }
10934
10935 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10936 *rx_en = 1;
10937 *tx_en = 0;
10938 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10939 *tx_en = 1;
10940 *rx_en = 0;
10941 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10942 *rx_en = 1;
10943 *tx_en = 1;
10944 } else {
10945 *rx_en = 0;
10946 *tx_en = 0;
10947 }
10948 }
10949
static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
10952 {
10953 if (rx_en && tx_en)
10954 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10955 else if (rx_en && !tx_en)
10956 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10957 else if (!rx_en && tx_en)
10958 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10959 else
10960 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10961
10962 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10963 }
10964
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
10967 {
10968 struct hclge_vport *vport = hclge_get_vport(handle);
10969 struct hclge_dev *hdev = vport->back;
10970 struct phy_device *phydev = hdev->hw.mac.phydev;
10971 u32 fc_autoneg;
10972
10973 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10974 fc_autoneg = hclge_get_autoneg(handle);
10975 if (auto_neg != fc_autoneg) {
10976 dev_info(&hdev->pdev->dev,
10977 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10978 return -EOPNOTSUPP;
10979 }
10980 }
10981
10982 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10983 dev_info(&hdev->pdev->dev,
10984 "Priority flow control enabled. Cannot set link flow control.\n");
10985 return -EOPNOTSUPP;
10986 }
10987
10988 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10989
10990 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10991
10992 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10993 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10994
10995 if (phydev)
10996 return phy_start_aneg(phydev);
10997
10998 return -EOPNOTSUPP;
10999 }
11000
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
11003 {
11004 struct hclge_vport *vport = hclge_get_vport(handle);
11005 struct hclge_dev *hdev = vport->back;
11006
11007 if (speed)
11008 *speed = hdev->hw.mac.speed;
11009 if (duplex)
11010 *duplex = hdev->hw.mac.duplex;
11011 if (auto_neg)
11012 *auto_neg = hdev->hw.mac.autoneg;
11013 }
11014
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
11017 {
11018 struct hclge_vport *vport = hclge_get_vport(handle);
11019 struct hclge_dev *hdev = vport->back;
11020
/* When the nic is down, the service task is not running and does not
 * update the port information every second. Query the port information
 * before returning the media type, to ensure the media information is
 * correct.
 */
11025 hclge_update_port_info(hdev);
11026
11027 if (media_type)
11028 *media_type = hdev->hw.mac.media_type;
11029
11030 if (module_type)
11031 *module_type = hdev->hw.mac.module_type;
11032 }
11033
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11036 {
11037 struct hclge_vport *vport = hclge_get_vport(handle);
11038 struct hclge_dev *hdev = vport->back;
11039 struct phy_device *phydev = hdev->hw.mac.phydev;
11040 int mdix_ctrl, mdix, is_resolved;
11041 unsigned int retval;
11042
11043 if (!phydev) {
11044 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11045 *tp_mdix = ETH_TP_MDI_INVALID;
11046 return;
11047 }
11048
11049 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11050
11051 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11052 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11053 HCLGE_PHY_MDIX_CTRL_S);
11054
11055 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11056 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11057 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11058
11059 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11060
11061 switch (mdix_ctrl) {
11062 case 0x0:
11063 *tp_mdix_ctrl = ETH_TP_MDI;
11064 break;
11065 case 0x1:
11066 *tp_mdix_ctrl = ETH_TP_MDI_X;
11067 break;
11068 case 0x3:
11069 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11070 break;
11071 default:
11072 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11073 break;
11074 }
11075
11076 if (!is_resolved)
11077 *tp_mdix = ETH_TP_MDI_INVALID;
11078 else if (mdix)
11079 *tp_mdix = ETH_TP_MDI_X;
11080 else
11081 *tp_mdix = ETH_TP_MDI;
11082 }
11083
static void hclge_info_show(struct hclge_dev *hdev)
11085 {
11086 struct device *dev = &hdev->pdev->dev;
11087
11088 dev_info(dev, "PF info begin:\n");
11089
11090 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11091 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11092 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11093 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11094 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11095 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11096 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11097 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11098 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11099 dev_info(dev, "This is %s PF\n",
11100 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11101 dev_info(dev, "DCB %s\n",
11102 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11103 dev_info(dev, "MQPRIO %s\n",
11104 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11105 dev_info(dev, "Default tx spare buffer size: %u\n",
11106 hdev->tx_spare_buf_size);
11107
11108 dev_info(dev, "PF info end.\n");
11109 }
11110
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
11113 {
11114 struct hnae3_client *client = vport->nic.client;
11115 struct hclge_dev *hdev = ae_dev->priv;
11116 int rst_cnt = hdev->rst_stats.reset_cnt;
11117 int ret;
11118
11119 ret = client->ops->init_instance(&vport->nic);
11120 if (ret)
11121 return ret;
11122
11123 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11124 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11125 rst_cnt != hdev->rst_stats.reset_cnt) {
11126 ret = -EBUSY;
11127 goto init_nic_err;
11128 }
11129
11130 /* Enable nic hw error interrupts */
11131 ret = hclge_config_nic_hw_error(hdev, true);
11132 if (ret) {
11133 dev_err(&ae_dev->pdev->dev,
11134 "fail(%d) to enable hw error interrupts\n", ret);
11135 goto init_nic_err;
11136 }
11137
11138 hnae3_set_client_init_flag(client, ae_dev, 1);
11139
11140 if (netif_msg_drv(&hdev->vport->nic))
11141 hclge_info_show(hdev);
11142
11143 return ret;
11144
11145 init_nic_err:
11146 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11147 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11148 msleep(HCLGE_WAIT_RESET_DONE);
11149
11150 client->ops->uninit_instance(&vport->nic, 0);
11151
11152 return ret;
11153 }
11154
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
11157 {
11158 struct hclge_dev *hdev = ae_dev->priv;
11159 struct hnae3_client *client;
11160 int rst_cnt;
11161 int ret;
11162
11163 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11164 !hdev->nic_client)
11165 return 0;
11166
11167 client = hdev->roce_client;
11168 ret = hclge_init_roce_base_info(vport);
11169 if (ret)
11170 return ret;
11171
11172 rst_cnt = hdev->rst_stats.reset_cnt;
11173 ret = client->ops->init_instance(&vport->roce);
11174 if (ret)
11175 return ret;
11176
11177 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11178 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11179 rst_cnt != hdev->rst_stats.reset_cnt) {
11180 ret = -EBUSY;
11181 goto init_roce_err;
11182 }
11183
11184 /* Enable roce ras interrupts */
11185 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11186 if (ret) {
11187 dev_err(&ae_dev->pdev->dev,
11188 "fail(%d) to enable roce ras interrupts\n", ret);
11189 goto init_roce_err;
11190 }
11191
11192 hnae3_set_client_init_flag(client, ae_dev, 1);
11193
11194 return 0;
11195
11196 init_roce_err:
11197 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11198 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11199 msleep(HCLGE_WAIT_RESET_DONE);
11200
11201 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11202
11203 return ret;
11204 }
11205
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
11208 {
11209 struct hclge_dev *hdev = ae_dev->priv;
11210 struct hclge_vport *vport = &hdev->vport[0];
11211 int ret;
11212
11213 switch (client->type) {
11214 case HNAE3_CLIENT_KNIC:
11215 hdev->nic_client = client;
11216 vport->nic.client = client;
11217 ret = hclge_init_nic_client_instance(ae_dev, vport);
11218 if (ret)
11219 goto clear_nic;
11220
11221 ret = hclge_init_roce_client_instance(ae_dev, vport);
11222 if (ret)
11223 goto clear_roce;
11224
11225 break;
11226 case HNAE3_CLIENT_ROCE:
11227 if (hnae3_dev_roce_supported(hdev)) {
11228 hdev->roce_client = client;
11229 vport->roce.client = client;
11230 }
11231
11232 ret = hclge_init_roce_client_instance(ae_dev, vport);
11233 if (ret)
11234 goto clear_roce;
11235
11236 break;
11237 default:
11238 return -EINVAL;
11239 }
11240
11241 return 0;
11242
11243 clear_nic:
11244 hdev->nic_client = NULL;
11245 vport->nic.client = NULL;
11246 return ret;
11247 clear_roce:
11248 hdev->roce_client = NULL;
11249 vport->roce.client = NULL;
11250 return ret;
11251 }
11252
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
11255 {
11256 struct hclge_dev *hdev = ae_dev->priv;
11257 struct hclge_vport *vport = &hdev->vport[0];
11258
11259 if (hdev->roce_client) {
11260 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11261 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11262 msleep(HCLGE_WAIT_RESET_DONE);
11263
11264 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11265 hdev->roce_client = NULL;
11266 vport->roce.client = NULL;
11267 }
11268 if (client->type == HNAE3_CLIENT_ROCE)
11269 return;
11270 if (hdev->nic_client && client->ops->uninit_instance) {
11271 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11272 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11273 msleep(HCLGE_WAIT_RESET_DONE);
11274
11275 client->ops->uninit_instance(&vport->nic, 0);
11276 hdev->nic_client = NULL;
11277 vport->nic.client = NULL;
11278 }
11279 }
11280
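/* Map the optional device memory BAR (BAR 4) with write combining.
 * Devices without this BAR simply skip the mapping.
 */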
static int hclge_dev_mem_map(struct hclge_dev *hdev)
11282 {
11283 #define HCLGE_MEM_BAR 4
11284
11285 struct pci_dev *pdev = hdev->pdev;
11286 struct hclge_hw *hw = &hdev->hw;
11287
/* for a device that does not have device memory, return directly */
11289 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11290 return 0;
11291
11292 hw->mem_base = devm_ioremap_wc(&pdev->dev,
11293 pci_resource_start(pdev, HCLGE_MEM_BAR),
11294 pci_resource_len(pdev, HCLGE_MEM_BAR));
11295 if (!hw->mem_base) {
11296 dev_err(&pdev->dev, "failed to map device memory\n");
11297 return -EFAULT;
11298 }
11299
11300 return 0;
11301 }
11302
static int hclge_pci_init(struct hclge_dev *hdev)
11304 {
11305 struct pci_dev *pdev = hdev->pdev;
11306 struct hclge_hw *hw;
11307 int ret;
11308
11309 ret = pci_enable_device(pdev);
11310 if (ret) {
11311 dev_err(&pdev->dev, "failed to enable PCI device\n");
11312 return ret;
11313 }
11314
11315 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11316 if (ret) {
11317 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11318 if (ret) {
11319 dev_err(&pdev->dev,
11320 "can't set consistent PCI DMA");
11321 goto err_disable_device;
11322 }
11323 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11324 }
11325
11326 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11327 if (ret) {
11328 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11329 goto err_disable_device;
11330 }
11331
11332 pci_set_master(pdev);
11333 hw = &hdev->hw;
11334 hw->io_base = pcim_iomap(pdev, 2, 0);
11335 if (!hw->io_base) {
11336 dev_err(&pdev->dev, "Can't map configuration register space\n");
11337 ret = -ENOMEM;
11338 goto err_clr_master;
11339 }
11340
11341 ret = hclge_dev_mem_map(hdev);
11342 if (ret)
11343 goto err_unmap_io_base;
11344
11345 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11346
11347 return 0;
11348
11349 err_unmap_io_base:
11350 pcim_iounmap(pdev, hdev->hw.io_base);
11351 err_clr_master:
11352 pci_clear_master(pdev);
11353 pci_release_regions(pdev);
11354 err_disable_device:
11355 pci_disable_device(pdev);
11356
11357 return ret;
11358 }
11359
static void hclge_pci_uninit(struct hclge_dev *hdev)
11361 {
11362 struct pci_dev *pdev = hdev->pdev;
11363
11364 if (hdev->hw.mem_base)
11365 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11366
11367 pcim_iounmap(pdev, hdev->hw.io_base);
11368 pci_free_irq_vectors(pdev);
11369 pci_clear_master(pdev);
11370 pci_release_mem_regions(pdev);
11371 pci_disable_device(pdev);
11372 }
11373
static void hclge_state_init(struct hclge_dev *hdev)
11375 {
11376 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11377 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11378 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11379 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11380 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11381 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11382 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11383 }
11384
static void hclge_state_uninit(struct hclge_dev *hdev)
11386 {
11387 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11388 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11389
11390 if (hdev->reset_timer.function)
11391 del_timer_sync(&hdev->reset_timer);
11392 if (hdev->service_task.work.func)
11393 cancel_delayed_work_sync(&hdev->service_task);
11394 }
11395
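/* Prepare the device for an externally triggered reset (e.g. FLR).
 * Preparation is retried up to HCLGE_RESET_RETRY_CNT times when it
 * fails or another reset is still pending, and the misc vector stays
 * disabled until hclge_reset_done() rebuilds the device.
 */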
static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
11398 {
11399 #define HCLGE_RESET_RETRY_WAIT_MS 500
11400 #define HCLGE_RESET_RETRY_CNT 5
11401
11402 struct hclge_dev *hdev = ae_dev->priv;
11403 int retry_cnt = 0;
11404 int ret;
11405
11406 retry:
11407 down(&hdev->reset_sem);
11408 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11409 hdev->reset_type = rst_type;
11410 ret = hclge_reset_prepare(hdev);
11411 if (ret || hdev->reset_pending) {
11412 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11413 ret);
11414 if (hdev->reset_pending ||
11415 retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11416 dev_err(&hdev->pdev->dev,
11417 "reset_pending:0x%lx, retry_cnt:%d\n",
11418 hdev->reset_pending, retry_cnt);
11419 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11420 up(&hdev->reset_sem);
11421 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11422 goto retry;
11423 }
11424 }
11425
/* disable misc vector before the reset is done */
11427 hclge_enable_vector(&hdev->misc_vector, false);
11428 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11429
11430 if (hdev->reset_type == HNAE3_FLR_RESET)
11431 hdev->rst_stats.flr_rst_cnt++;
11432 }
11433
static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11435 {
11436 struct hclge_dev *hdev = ae_dev->priv;
11437 int ret;
11438
11439 hclge_enable_vector(&hdev->misc_vector, true);
11440
11441 ret = hclge_reset_rebuild(hdev);
11442 if (ret)
11443 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11444
11445 hdev->reset_type = HNAE3_NONE_RESET;
11446 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11447 up(&hdev->reset_sem);
11448 }
11449
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11451 {
11452 u16 i;
11453
11454 for (i = 0; i < hdev->num_alloc_vport; i++) {
11455 struct hclge_vport *vport = &hdev->vport[i];
11456 int ret;
11457
11458 /* Send cmd to clear vport's FUNC_RST_ING */
11459 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11460 if (ret)
11461 dev_warn(&hdev->pdev->dev,
11462 "clear vport(%u) rst failed %d!\n",
11463 vport->vport_id, ret);
11464 }
11465 }
11466
static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11468 {
11469 struct hclge_desc desc;
11470 int ret;
11471
11472 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11473
11474 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* This new command is only supported by new firmware; it will
 * fail with older firmware. The error value -EOPNOTSUPP can only be
 * returned by older firmware running this command, so to keep the
 * code backward compatible we override this value and return
 * success.
 */
11481 if (ret && ret != -EOPNOTSUPP) {
11482 dev_err(&hdev->pdev->dev,
11483 "failed to clear hw resource, ret = %d\n", ret);
11484 return ret;
11485 }
11486 return 0;
11487 }
11488
static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11490 {
11491 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11492 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11493 }
11494
static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11496 {
11497 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11498 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11499 }
11500
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11502 {
11503 struct pci_dev *pdev = ae_dev->pdev;
11504 struct hclge_dev *hdev;
11505 int ret;
11506
11507 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11508 if (!hdev)
11509 return -ENOMEM;
11510
11511 hdev->pdev = pdev;
11512 hdev->ae_dev = ae_dev;
11513 hdev->reset_type = HNAE3_NONE_RESET;
11514 hdev->reset_level = HNAE3_FUNC_RESET;
11515 ae_dev->priv = hdev;
11516
/* HW supports 2 layers of vlan */
11518 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11519
11520 mutex_init(&hdev->vport_lock);
11521 spin_lock_init(&hdev->fd_rule_lock);
11522 sema_init(&hdev->reset_sem, 1);
11523
11524 ret = hclge_pci_init(hdev);
11525 if (ret)
11526 goto out;
11527
11528 ret = hclge_devlink_init(hdev);
11529 if (ret)
11530 goto err_pci_uninit;
11531
11532 /* Firmware command queue initialize */
11533 ret = hclge_cmd_queue_init(hdev);
11534 if (ret)
11535 goto err_devlink_uninit;
11536
11537 /* Firmware command initialize */
11538 ret = hclge_cmd_init(hdev);
11539 if (ret)
11540 goto err_cmd_uninit;
11541
11542 ret = hclge_clear_hw_resource(hdev);
11543 if (ret)
11544 goto err_cmd_uninit;
11545
11546 ret = hclge_get_cap(hdev);
11547 if (ret)
11548 goto err_cmd_uninit;
11549
11550 ret = hclge_query_dev_specs(hdev);
11551 if (ret) {
11552 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11553 ret);
11554 goto err_cmd_uninit;
11555 }
11556
11557 ret = hclge_configure(hdev);
11558 if (ret) {
11559 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11560 goto err_cmd_uninit;
11561 }
11562
11563 ret = hclge_init_msi(hdev);
11564 if (ret) {
11565 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11566 goto err_cmd_uninit;
11567 }
11568
11569 ret = hclge_misc_irq_init(hdev);
11570 if (ret)
11571 goto err_msi_uninit;
11572
11573 ret = hclge_alloc_tqps(hdev);
11574 if (ret) {
11575 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11576 goto err_msi_irq_uninit;
11577 }
11578
11579 ret = hclge_alloc_vport(hdev);
11580 if (ret)
11581 goto err_msi_irq_uninit;
11582
11583 ret = hclge_map_tqp(hdev);
11584 if (ret)
11585 goto err_msi_irq_uninit;
11586
11587 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11588 !hnae3_dev_phy_imp_supported(hdev)) {
11589 ret = hclge_mac_mdio_config(hdev);
11590 if (ret)
11591 goto err_msi_irq_uninit;
11592 }
11593
11594 ret = hclge_init_umv_space(hdev);
11595 if (ret)
11596 goto err_mdiobus_unreg;
11597
11598 ret = hclge_mac_init(hdev);
11599 if (ret) {
11600 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11601 goto err_mdiobus_unreg;
11602 }
11603
11604 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11605 if (ret) {
11606 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11607 goto err_mdiobus_unreg;
11608 }
11609
11610 ret = hclge_config_gro(hdev);
11611 if (ret)
11612 goto err_mdiobus_unreg;
11613
11614 ret = hclge_init_vlan_config(hdev);
11615 if (ret) {
11616 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11617 goto err_mdiobus_unreg;
11618 }
11619
11620 ret = hclge_tm_schd_init(hdev);
11621 if (ret) {
11622 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11623 goto err_mdiobus_unreg;
11624 }
11625
11626 ret = hclge_rss_init_cfg(hdev);
11627 if (ret) {
11628 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11629 goto err_mdiobus_unreg;
11630 }
11631
11632 ret = hclge_rss_init_hw(hdev);
11633 if (ret) {
11634 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11635 goto err_mdiobus_unreg;
11636 }
11637
11638 ret = init_mgr_tbl(hdev);
11639 if (ret) {
11640 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11641 goto err_mdiobus_unreg;
11642 }
11643
11644 ret = hclge_init_fd_config(hdev);
11645 if (ret) {
11646 dev_err(&pdev->dev,
11647 "fd table init fail, ret=%d\n", ret);
11648 goto err_mdiobus_unreg;
11649 }
11650
11651 ret = hclge_ptp_init(hdev);
11652 if (ret)
11653 goto err_mdiobus_unreg;
11654
11655 INIT_KFIFO(hdev->mac_tnl_log);
11656
11657 hclge_dcb_ops_set(hdev);
11658
11659 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11660 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11661
11662 /* Setup affinity after service timer setup because add_timer_on
11663 * is called in affinity notify.
11664 */
11665 hclge_misc_affinity_setup(hdev);
11666
11667 hclge_clear_all_event_cause(hdev);
11668 hclge_clear_resetting_state(hdev);
11669
/* Log and clear the hw errors that have already occurred */
11671 if (hnae3_dev_ras_imp_supported(hdev))
11672 hclge_handle_occurred_error(hdev);
11673 else
11674 hclge_handle_all_hns_hw_errors(ae_dev);
11675
/* Request a delayed reset for error recovery, because an immediate
 * global reset on a PF may affect the pending initialization of
 * other PFs.
 */
11679 if (ae_dev->hw_err_reset_req) {
11680 enum hnae3_reset_type reset_level;
11681
11682 reset_level = hclge_get_reset_level(ae_dev,
11683 &ae_dev->hw_err_reset_req);
11684 hclge_set_def_reset_request(ae_dev, reset_level);
11685 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11686 }
11687
11688 hclge_init_rxd_adv_layout(hdev);
11689
11690 /* Enable MISC vector(vector0) */
11691 hclge_enable_vector(&hdev->misc_vector, true);
11692
11693 hclge_state_init(hdev);
11694 hdev->last_reset_time = jiffies;
11695
11696 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11697 HCLGE_DRIVER_NAME);
11698
11699 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11700
11701 return 0;
11702
11703 err_mdiobus_unreg:
11704 if (hdev->hw.mac.phydev)
11705 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11706 err_msi_irq_uninit:
11707 hclge_misc_irq_uninit(hdev);
11708 err_msi_uninit:
11709 pci_free_irq_vectors(pdev);
11710 err_cmd_uninit:
11711 hclge_cmd_uninit(hdev);
11712 err_devlink_uninit:
11713 hclge_devlink_uninit(hdev);
11714 err_pci_uninit:
11715 pcim_iounmap(pdev, hdev->hw.io_base);
11716 pci_clear_master(pdev);
11717 pci_release_regions(pdev);
11718 pci_disable_device(pdev);
11719 out:
11720 mutex_destroy(&hdev->vport_lock);
11721 return ret;
11722 }
11723
static void hclge_stats_clear(struct hclge_dev *hdev)
11725 {
11726 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11727 }
11728
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11730 {
11731 return hclge_config_switch_param(hdev, vf, enable,
11732 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11733 }
11734
static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11736 {
11737 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11738 HCLGE_FILTER_FE_NIC_INGRESS_B,
11739 enable, vf);
11740 }
11741
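/* Apply the spoof check setting of a VF in hardware: enable or disable
 * both the MAC anti-spoofing switch parameter and the vlan filter on
 * the VF's NIC ingress path.
 */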
static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11743 {
11744 int ret;
11745
11746 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11747 if (ret) {
11748 dev_err(&hdev->pdev->dev,
11749 "Set vf %d mac spoof check %s failed, ret=%d\n",
11750 vf, enable ? "on" : "off", ret);
11751 return ret;
11752 }
11753
11754 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11755 if (ret)
11756 dev_err(&hdev->pdev->dev,
11757 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11758 vf, enable ? "on" : "off", ret);
11759
11760 return ret;
11761 }
11762
static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
11765 {
11766 struct hclge_vport *vport = hclge_get_vport(handle);
11767 struct hclge_dev *hdev = vport->back;
11768 u32 new_spoofchk = enable ? 1 : 0;
11769 int ret;
11770
11771 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11772 return -EOPNOTSUPP;
11773
11774 vport = hclge_get_vf_vport(hdev, vf);
11775 if (!vport)
11776 return -EINVAL;
11777
11778 if (vport->vf_info.spoofchk == new_spoofchk)
11779 return 0;
11780
11781 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11782 dev_warn(&hdev->pdev->dev,
11783 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11784 vf);
11785 else if (enable && hclge_is_umv_space_full(vport, true))
11786 dev_warn(&hdev->pdev->dev,
11787 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11788 vf);
11789
11790 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11791 if (ret)
11792 return ret;
11793
11794 vport->vf_info.spoofchk = new_spoofchk;
11795 return 0;
11796 }
11797
static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11799 {
11800 struct hclge_vport *vport = hdev->vport;
11801 int ret;
11802 int i;
11803
11804 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11805 return 0;
11806
11807 /* resume the vf spoof check state after reset */
11808 for (i = 0; i < hdev->num_alloc_vport; i++) {
11809 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11810 vport->vf_info.spoofchk);
11811 if (ret)
11812 return ret;
11813
11814 vport++;
11815 }
11816
11817 return 0;
11818 }
11819
static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11821 {
11822 struct hclge_vport *vport = hclge_get_vport(handle);
11823 struct hclge_dev *hdev = vport->back;
11824 u32 new_trusted = enable ? 1 : 0;
11825
11826 vport = hclge_get_vf_vport(hdev, vf);
11827 if (!vport)
11828 return -EINVAL;
11829
11830 if (vport->vf_info.trusted == new_trusted)
11831 return 0;
11832
11833 vport->vf_info.trusted = new_trusted;
11834 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11835 hclge_task_schedule(hdev, 0);
11836
11837 return 0;
11838 }
11839
static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11841 {
11842 int ret;
11843 int vf;
11844
11845 /* reset vf rate to default value */
11846 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11847 struct hclge_vport *vport = &hdev->vport[vf];
11848
11849 vport->vf_info.max_tx_rate = 0;
11850 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11851 if (ret)
11852 dev_err(&hdev->pdev->dev,
11853 "vf%d failed to reset to default, ret=%d\n",
11854 vf - HCLGE_VF_VPORT_START_NUM, ret);
11855 }
11856 }
11857
static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
				     int min_tx_rate, int max_tx_rate)
11860 {
11861 if (min_tx_rate != 0 ||
11862 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11863 dev_err(&hdev->pdev->dev,
11864 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11865 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11866 return -EINVAL;
11867 }
11868
11869 return 0;
11870 }
11871
static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
11874 {
11875 struct hclge_vport *vport = hclge_get_vport(handle);
11876 struct hclge_dev *hdev = vport->back;
11877 int ret;
11878
11879 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11880 if (ret)
11881 return ret;
11882
11883 vport = hclge_get_vf_vport(hdev, vf);
11884 if (!vport)
11885 return -EINVAL;
11886
11887 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11888 return 0;
11889
11890 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11891 if (ret)
11892 return ret;
11893
11894 vport->vf_info.max_tx_rate = max_tx_rate;
11895
11896 return 0;
11897 }
11898
static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11900 {
11901 struct hnae3_handle *handle = &hdev->vport->nic;
11902 struct hclge_vport *vport;
11903 int ret;
11904 int vf;
11905
11906 /* resume the vf max_tx_rate after reset */
11907 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11908 vport = hclge_get_vf_vport(hdev, vf);
11909 if (!vport)
11910 return -EINVAL;
11911
/* Zero means max rate; after reset, the firmware has already set it
 * to max rate, so just continue.
 */
11915 if (!vport->vf_info.max_tx_rate)
11916 continue;
11917
11918 ret = hclge_set_vf_rate(handle, vf, 0,
11919 vport->vf_info.max_tx_rate, true);
11920 if (ret) {
11921 dev_err(&hdev->pdev->dev,
11922 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11923 vf, vport->vf_info.max_tx_rate, ret);
11924 return ret;
11925 }
11926 }
11927
11928 return 0;
11929 }
11930
static void hclge_reset_vport_state(struct hclge_dev *hdev)
11932 {
11933 struct hclge_vport *vport = hdev->vport;
11934 int i;
11935
11936 for (i = 0; i < hdev->num_alloc_vport; i++) {
11937 hclge_vport_stop(vport);
11938 vport++;
11939 }
11940 }
11941
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11943 {
11944 struct hclge_dev *hdev = ae_dev->priv;
11945 struct pci_dev *pdev = ae_dev->pdev;
11946 int ret;
11947
11948 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11949
11950 hclge_stats_clear(hdev);
/* NOTE: a pf reset does not need to clear or restore pf and vf table
 * entries, so the tables in memory should not be cleaned here.
 */
11954 if (hdev->reset_type == HNAE3_IMP_RESET ||
11955 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11956 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11957 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11958 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11959 hclge_reset_umv_space(hdev);
11960 }
11961
11962 ret = hclge_cmd_init(hdev);
11963 if (ret) {
11964 dev_err(&pdev->dev, "Cmd queue init failed\n");
11965 return ret;
11966 }
11967
11968 ret = hclge_map_tqp(hdev);
11969 if (ret) {
11970 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11971 return ret;
11972 }
11973
11974 ret = hclge_mac_init(hdev);
11975 if (ret) {
11976 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11977 return ret;
11978 }
11979
11980 ret = hclge_tp_port_init(hdev);
11981 if (ret) {
11982 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11983 ret);
11984 return ret;
11985 }
11986
11987 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11988 if (ret) {
11989 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11990 return ret;
11991 }
11992
11993 ret = hclge_config_gro(hdev);
11994 if (ret)
11995 return ret;
11996
11997 ret = hclge_init_vlan_config(hdev);
11998 if (ret) {
11999 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12000 return ret;
12001 }
12002
12003 ret = hclge_tm_init_hw(hdev, true);
12004 if (ret) {
12005 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12006 return ret;
12007 }
12008
12009 ret = hclge_rss_init_hw(hdev);
12010 if (ret) {
12011 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12012 return ret;
12013 }
12014
12015 ret = init_mgr_tbl(hdev);
12016 if (ret) {
12017 dev_err(&pdev->dev,
12018 "failed to reinit manager table, ret = %d\n", ret);
12019 return ret;
12020 }
12021
12022 ret = hclge_init_fd_config(hdev);
12023 if (ret) {
12024 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12025 return ret;
12026 }
12027
12028 ret = hclge_ptp_init(hdev);
12029 if (ret)
12030 return ret;
12031
/* Log and clear the hw errors that have already occurred */
12033 if (hnae3_dev_ras_imp_supported(hdev))
12034 hclge_handle_occurred_error(hdev);
12035 else
12036 hclge_handle_all_hns_hw_errors(ae_dev);
12037
12038 /* Re-enable the hw error interrupts because
12039 * the interrupts get disabled on global reset.
12040 */
12041 ret = hclge_config_nic_hw_error(hdev, true);
12042 if (ret) {
12043 dev_err(&pdev->dev,
12044 "fail(%d) to re-enable NIC hw error interrupts\n",
12045 ret);
12046 return ret;
12047 }
12048
12049 if (hdev->roce_client) {
12050 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12051 if (ret) {
12052 dev_err(&pdev->dev,
12053 "fail(%d) to re-enable roce ras interrupts\n",
12054 ret);
12055 return ret;
12056 }
12057 }
12058
12059 hclge_reset_vport_state(hdev);
12060 ret = hclge_reset_vport_spoofchk(hdev);
12061 if (ret)
12062 return ret;
12063
12064 ret = hclge_resume_vf_rate(hdev);
12065 if (ret)
12066 return ret;
12067
12068 hclge_init_rxd_adv_layout(hdev);
12069
12070 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12071 HCLGE_DRIVER_NAME);
12072
12073 return 0;
12074 }
12075
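/* tear down the AE device: restore VF rate and VLAN settings, disable the
 * misc vector and hardware error interrupts, and release the resources
 * acquired during initialization.
 */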
12076 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12077 {
12078 struct hclge_dev *hdev = ae_dev->priv;
12079 struct hclge_mac *mac = &hdev->hw.mac;
12080
12081 hclge_reset_vf_rate(hdev);
12082 hclge_clear_vf_vlan(hdev);
12083 hclge_misc_affinity_teardown(hdev);
12084 hclge_state_uninit(hdev);
12085 hclge_ptp_uninit(hdev);
12086 hclge_uninit_rxd_adv_layout(hdev);
12087 hclge_uninit_mac_table(hdev);
12088 hclge_del_all_fd_entries(hdev);
12089
12090 if (mac->phydev)
12091 mdiobus_unregister(mac->mdio_bus);
12092
12093 /* Disable MISC vector(vector0) */
12094 hclge_enable_vector(&hdev->misc_vector, false);
12095 synchronize_irq(hdev->misc_vector.vector_irq);
12096
12097 /* Disable all hw interrupts */
12098 hclge_config_mac_tnl_int(hdev, false);
12099 hclge_config_nic_hw_error(hdev, false);
12100 hclge_config_rocee_ras_interrupt(hdev, false);
12101
12102 hclge_cmd_uninit(hdev);
12103 hclge_misc_irq_uninit(hdev);
12104 hclge_devlink_uninit(hdev);
12105 hclge_pci_uninit(hdev);
12106 mutex_destroy(&hdev->vport_lock);
12107 hclge_uninit_vport_vlan_table(hdev);
12108 ae_dev->priv = NULL;
12109 }
12110
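/* the maximum number of combined channels is bounded by both the PF RSS
 * size and the TQPs allocated to this vport
 */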
12111 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12112 {
12113 struct hclge_vport *vport = hclge_get_vport(handle);
12114 struct hclge_dev *hdev = vport->back;
12115
12116 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12117 }
12118
12119 static void hclge_get_channels(struct hnae3_handle *handle,
12120 struct ethtool_channels *ch)
12121 {
12122 ch->max_combined = hclge_get_max_channels(handle);
12123 ch->other_count = 1;
12124 ch->max_other = 1;
12125 ch->combined_count = handle->kinfo.rss_size;
12126 }
12127
12128 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12129 u16 *alloc_tqps, u16 *max_rss_size)
12130 {
12131 struct hclge_vport *vport = hclge_get_vport(handle);
12132 struct hclge_dev *hdev = vport->back;
12133
12134 *alloc_tqps = vport->alloc_tqps;
12135 *max_rss_size = hdev->pf_rss_size_max;
12136 }
12137
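/* change the number of TQPs used by the vport: update the TC/vport mapping
 * and the RSS TC mode, and rebuild the RSS indirection table unless it has
 * been configured by the user.
 */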
12138 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12139 bool rxfh_configured)
12140 {
12141 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12142 struct hclge_vport *vport = hclge_get_vport(handle);
12143 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12144 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12145 struct hclge_dev *hdev = vport->back;
12146 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12147 u16 cur_rss_size = kinfo->rss_size;
12148 u16 cur_tqps = kinfo->num_tqps;
12149 u16 tc_valid[HCLGE_MAX_TC_NUM];
12150 u16 roundup_size;
12151 u32 *rss_indir;
12152 unsigned int i;
12153 int ret;
12154
12155 kinfo->req_rss_size = new_tqps_num;
12156
12157 ret = hclge_tm_vport_map_update(hdev);
12158 if (ret) {
12159 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12160 return ret;
12161 }
12162
12163 roundup_size = roundup_pow_of_two(kinfo->rss_size);
12164 roundup_size = ilog2(roundup_size);
12165 /* Set the RSS TC mode according to the new RSS size */
12166 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12167 tc_valid[i] = 0;
12168
12169 if (!(hdev->hw_tc_map & BIT(i)))
12170 continue;
12171
12172 tc_valid[i] = 1;
12173 tc_size[i] = roundup_size;
12174 tc_offset[i] = kinfo->rss_size * i;
12175 }
12176 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12177 if (ret)
12178 return ret;
12179
12180 /* RSS indirection table has been configured by user */
12181 if (rxfh_configured)
12182 goto out;
12183
12184 /* Reinitializes the rss indirect table according to the new RSS size */
12185 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12186 GFP_KERNEL);
12187 if (!rss_indir)
12188 return -ENOMEM;
12189
12190 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12191 rss_indir[i] = i % kinfo->rss_size;
12192
12193 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12194 if (ret)
12195 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12196 ret);
12197
12198 kfree(rss_indir);
12199
12200 out:
12201 if (!ret)
12202 dev_info(&hdev->pdev->dev,
12203 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12204 cur_rss_size, kinfo->rss_size,
12205 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12206
12207 return ret;
12208 }
12209
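/* query the firmware for the number of 32 bit and 64 bit registers
 * available in the register dump
 */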
12210 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12211 u32 *regs_num_64_bit)
12212 {
12213 struct hclge_desc desc;
12214 u32 total_num;
12215 int ret;
12216
12217 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12219 if (ret) {
12220 dev_err(&hdev->pdev->dev,
12221 "Query register number cmd failed, ret = %d.\n", ret);
12222 return ret;
12223 }
12224
12225 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12226 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12227
12228 total_num = *regs_num_32_bit + *regs_num_64_bit;
12229 if (!total_num)
12230 return -EINVAL;
12231
12232 return 0;
12233 }
12234
12235 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12236 void *data)
12237 {
12238 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12239 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12240
12241 struct hclge_desc *desc;
12242 u32 *reg_val = data;
12243 __le32 *desc_data;
12244 int nodata_num;
12245 int cmd_num;
12246 int i, k, n;
12247 int ret;
12248
12249 if (regs_num == 0)
12250 return 0;
12251
12252 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12253 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12254 HCLGE_32_BIT_REG_RTN_DATANUM);
12255 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12256 if (!desc)
12257 return -ENOMEM;
12258
12259 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12260 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12261 if (ret) {
12262 dev_err(&hdev->pdev->dev,
12263 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12264 kfree(desc);
12265 return ret;
12266 }
12267
12268 for (i = 0; i < cmd_num; i++) {
12269 if (i == 0) {
12270 desc_data = (__le32 *)(&desc[i].data[0]);
12271 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12272 } else {
12273 desc_data = (__le32 *)(&desc[i]);
12274 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12275 }
12276 for (k = 0; k < n; k++) {
12277 *reg_val++ = le32_to_cpu(*desc_data++);
12278
12279 regs_num--;
12280 if (!regs_num)
12281 break;
12282 }
12283 }
12284
12285 kfree(desc);
12286 return 0;
12287 }
12288
12289 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12290 void *data)
12291 {
12292 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12293 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12294
12295 struct hclge_desc *desc;
12296 u64 *reg_val = data;
12297 __le64 *desc_data;
12298 int nodata_len;
12299 int cmd_num;
12300 int i, k, n;
12301 int ret;
12302
12303 if (regs_num == 0)
12304 return 0;
12305
12306 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12307 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12308 HCLGE_64_BIT_REG_RTN_DATANUM);
12309 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12310 if (!desc)
12311 return -ENOMEM;
12312
12313 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12314 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12315 if (ret) {
12316 dev_err(&hdev->pdev->dev,
12317 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12318 kfree(desc);
12319 return ret;
12320 }
12321
12322 for (i = 0; i < cmd_num; i++) {
12323 if (i == 0) {
12324 desc_data = (__le64 *)(&desc[i].data[0]);
12325 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12326 } else {
12327 desc_data = (__le64 *)(&desc[i]);
12328 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12329 }
12330 for (k = 0; k < n; k++) {
12331 *reg_val++ = le64_to_cpu(*desc_data++);
12332
12333 regs_num--;
12334 if (!regs_num)
12335 break;
12336 }
12337 }
12338
12339 kfree(desc);
12340 return 0;
12341 }
12342
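/* layout of the register dump: values are grouped into lines of
 * REG_NUM_PER_LINE u32 words and padded with SEPARATOR_VALUE markers
 */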
12343 #define MAX_SEPARATE_NUM 4
12344 #define SEPARATOR_VALUE 0xFDFCFBFA
12345 #define REG_NUM_PER_LINE 4
12346 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12347 #define REG_SEPARATOR_LINE 1
12348 #define REG_NUM_REMAIN_MASK 3
12349
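/* query how many command BDs each DFX register type needs */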
12350 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12351 {
12352 int i;
12353
12354 /* initialize command BD except the last one */
12355 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12356 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12357 true);
12358 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12359 }
12360
12361 /* initialize the last command BD */
12362 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12363
12364 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12365 }
12366
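/* parse the BD number of each DFX register type into bd_num_list */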
12367 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12368 int *bd_num_list,
12369 u32 type_num)
12370 {
12371 u32 entries_per_desc, desc_index, index, offset, i;
12372 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12373 int ret;
12374
12375 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12376 if (ret) {
12377 dev_err(&hdev->pdev->dev,
12378 "Get dfx bd num fail, status is %d.\n", ret);
12379 return ret;
12380 }
12381
12382 entries_per_desc = ARRAY_SIZE(desc[0].data);
12383 for (i = 0; i < type_num; i++) {
12384 offset = hclge_dfx_bd_offset_list[i];
12385 index = offset % entries_per_desc;
12386 desc_index = offset / entries_per_desc;
12387 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12388 }
12389
12390 return ret;
12391 }
12392
12393 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12394 struct hclge_desc *desc_src, int bd_num,
12395 enum hclge_opcode_type cmd)
12396 {
12397 struct hclge_desc *desc = desc_src;
12398 int i, ret;
12399
12400 hclge_cmd_setup_basic_desc(desc, cmd, true);
12401 for (i = 0; i < bd_num - 1; i++) {
12402 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12403 desc++;
12404 hclge_cmd_setup_basic_desc(desc, cmd, true);
12405 }
12406
12407 desc = desc_src;
12408 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12409 if (ret)
12410 dev_err(&hdev->pdev->dev,
12411 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12412 cmd, ret);
12413
12414 return ret;
12415 }
12416
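/* copy the register values from the descriptors into the dump buffer,
 * append separator padding and return the number of u32 words written
 */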
12417 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12418 void *data)
12419 {
12420 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12421 struct hclge_desc *desc = desc_src;
12422 u32 *reg = data;
12423
12424 entries_per_desc = ARRAY_SIZE(desc->data);
12425 reg_num = entries_per_desc * bd_num;
12426 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12427 for (i = 0; i < reg_num; i++) {
12428 index = i % entries_per_desc;
12429 desc_index = i / entries_per_desc;
12430 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12431 }
12432 for (i = 0; i < separator_num; i++)
12433 *reg++ = SEPARATOR_VALUE;
12434
12435 return reg_num + separator_num;
12436 }
12437
12438 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12439 {
12440 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12441 int data_len_per_desc, bd_num, i;
12442 int *bd_num_list;
12443 u32 data_len;
12444 int ret;
12445
12446 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12447 if (!bd_num_list)
12448 return -ENOMEM;
12449
12450 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12451 if (ret) {
12452 dev_err(&hdev->pdev->dev,
12453 "Get dfx reg bd num fail, status is %d.\n", ret);
12454 goto out;
12455 }
12456
12457 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12458 *len = 0;
12459 for (i = 0; i < dfx_reg_type_num; i++) {
12460 bd_num = bd_num_list[i];
12461 data_len = data_len_per_desc * bd_num;
12462 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12463 }
12464
12465 out:
12466 kfree(bd_num_list);
12467 return ret;
12468 }
12469
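/* dump all DFX register types into the output buffer, one command per type */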
12470 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12471 {
12472 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12473 int bd_num, bd_num_max, buf_len, i;
12474 struct hclge_desc *desc_src;
12475 int *bd_num_list;
12476 u32 *reg = data;
12477 int ret;
12478
12479 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12480 if (!bd_num_list)
12481 return -ENOMEM;
12482
12483 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12484 if (ret) {
12485 dev_err(&hdev->pdev->dev,
12486 "Get dfx reg bd num fail, status is %d.\n", ret);
12487 goto out;
12488 }
12489
12490 bd_num_max = bd_num_list[0];
12491 for (i = 1; i < dfx_reg_type_num; i++)
12492 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12493
12494 buf_len = sizeof(*desc_src) * bd_num_max;
12495 desc_src = kzalloc(buf_len, GFP_KERNEL);
12496 if (!desc_src) {
12497 ret = -ENOMEM;
12498 goto out;
12499 }
12500
12501 for (i = 0; i < dfx_reg_type_num; i++) {
12502 bd_num = bd_num_list[i];
12503 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12504 hclge_dfx_reg_opcode_list[i]);
12505 if (ret) {
12506 dev_err(&hdev->pdev->dev,
12507 "Get dfx reg fail, status is %d.\n", ret);
12508 break;
12509 }
12510
12511 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12512 }
12513
12514 kfree(desc_src);
12515 out:
12516 kfree(bd_num_list);
12517 return ret;
12518 }
12519
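/* read the cmdq, common, per-ring and per-vector interrupt registers
 * directly from the PF PCIe register space and return the number of u32
 * words written
 */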
12520 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12521 struct hnae3_knic_private_info *kinfo)
12522 {
12523 #define HCLGE_RING_REG_OFFSET 0x200
12524 #define HCLGE_RING_INT_REG_OFFSET 0x4
12525
12526 int i, j, reg_num, separator_num;
12527 int data_num_sum;
12528 u32 *reg = data;
12529
12530 /* fetch per-PF register values from the PF PCIe register space */
12531 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12532 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12533 for (i = 0; i < reg_num; i++)
12534 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12535 for (i = 0; i < separator_num; i++)
12536 *reg++ = SEPARATOR_VALUE;
12537 data_num_sum = reg_num + separator_num;
12538
12539 reg_num = ARRAY_SIZE(common_reg_addr_list);
12540 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12541 for (i = 0; i < reg_num; i++)
12542 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12543 for (i = 0; i < separator_num; i++)
12544 *reg++ = SEPARATOR_VALUE;
12545 data_num_sum += reg_num + separator_num;
12546
12547 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12548 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12549 for (j = 0; j < kinfo->num_tqps; j++) {
12550 for (i = 0; i < reg_num; i++)
12551 *reg++ = hclge_read_dev(&hdev->hw,
12552 ring_reg_addr_list[i] +
12553 HCLGE_RING_REG_OFFSET * j);
12554 for (i = 0; i < separator_num; i++)
12555 *reg++ = SEPARATOR_VALUE;
12556 }
12557 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12558
12559 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12560 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12561 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12562 for (i = 0; i < reg_num; i++)
12563 *reg++ = hclge_read_dev(&hdev->hw,
12564 tqp_intr_reg_addr_list[i] +
12565 HCLGE_RING_INT_REG_OFFSET * j);
12566 for (i = 0; i < separator_num; i++)
12567 *reg++ = SEPARATOR_VALUE;
12568 }
12569 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12570
12571 return data_num_sum;
12572 }
12573
12574 static int hclge_get_regs_len(struct hnae3_handle *handle)
12575 {
12576 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12577 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12578 struct hclge_vport *vport = hclge_get_vport(handle);
12579 struct hclge_dev *hdev = vport->back;
12580 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12581 int regs_lines_32_bit, regs_lines_64_bit;
12582 int ret;
12583
12584 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12585 if (ret) {
12586 dev_err(&hdev->pdev->dev,
12587 "Get register number failed, ret = %d.\n", ret);
12588 return ret;
12589 }
12590
12591 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12592 if (ret) {
12593 dev_err(&hdev->pdev->dev,
12594 "Get dfx reg len failed, ret = %d.\n", ret);
12595 return ret;
12596 }
12597
12598 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12599 REG_SEPARATOR_LINE;
12600 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12601 REG_SEPARATOR_LINE;
12602 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12603 REG_SEPARATOR_LINE;
12604 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12605 REG_SEPARATOR_LINE;
12606 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12607 REG_SEPARATOR_LINE;
12608 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12609 REG_SEPARATOR_LINE;
12610
12611 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12612 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12613 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12614 }
12615
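/* fill the ethtool register dump: directly read PF registers first, then
 * the firmware-queried 32 bit, 64 bit and DFX registers
 */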
12616 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12617 void *data)
12618 {
12619 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12620 struct hclge_vport *vport = hclge_get_vport(handle);
12621 struct hclge_dev *hdev = vport->back;
12622 u32 regs_num_32_bit, regs_num_64_bit;
12623 int i, reg_num, separator_num, ret;
12624 u32 *reg = data;
12625
12626 *version = hdev->fw_version;
12627
12628 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12629 if (ret) {
12630 dev_err(&hdev->pdev->dev,
12631 "Get register number failed, ret = %d.\n", ret);
12632 return;
12633 }
12634
12635 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12636
12637 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12638 if (ret) {
12639 dev_err(&hdev->pdev->dev,
12640 "Get 32 bit register failed, ret = %d.\n", ret);
12641 return;
12642 }
12643 reg_num = regs_num_32_bit;
12644 reg += reg_num;
12645 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12646 for (i = 0; i < separator_num; i++)
12647 *reg++ = SEPARATOR_VALUE;
12648
12649 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12650 if (ret) {
12651 dev_err(&hdev->pdev->dev,
12652 "Get 64 bit register failed, ret = %d.\n", ret);
12653 return;
12654 }
12655 reg_num = regs_num_64_bit * 2;
12656 reg += reg_num;
12657 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12658 for (i = 0; i < separator_num; i++)
12659 *reg++ = SEPARATOR_VALUE;
12660
12661 ret = hclge_get_dfx_reg(hdev, reg);
12662 if (ret)
12663 dev_err(&hdev->pdev->dev,
12664 "Get dfx register failed, ret = %d.\n", ret);
12665 }
12666
12667 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12668 {
12669 struct hclge_set_led_state_cmd *req;
12670 struct hclge_desc desc;
12671 int ret;
12672
12673 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12674
12675 req = (struct hclge_set_led_state_cmd *)desc.data;
12676 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12677 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12678
12679 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12680 if (ret)
12681 dev_err(&hdev->pdev->dev,
12682 "Send set led state cmd error, ret =%d\n", ret);
12683
12684 return ret;
12685 }
12686
12687 enum hclge_led_status {
12688 HCLGE_LED_OFF,
12689 HCLGE_LED_ON,
12690 HCLGE_LED_NO_CHANGE = 0xFF,
12691 };
12692
12693 static int hclge_set_led_id(struct hnae3_handle *handle,
12694 enum ethtool_phys_id_state status)
12695 {
12696 struct hclge_vport *vport = hclge_get_vport(handle);
12697 struct hclge_dev *hdev = vport->back;
12698
12699 switch (status) {
12700 case ETHTOOL_ID_ACTIVE:
12701 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12702 case ETHTOOL_ID_INACTIVE:
12703 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12704 default:
12705 return -EINVAL;
12706 }
12707 }
12708
12709 static void hclge_get_link_mode(struct hnae3_handle *handle,
12710 unsigned long *supported,
12711 unsigned long *advertising)
12712 {
12713 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12714 struct hclge_vport *vport = hclge_get_vport(handle);
12715 struct hclge_dev *hdev = vport->back;
12716 unsigned int idx = 0;
12717
12718 for (; idx < size; idx++) {
12719 supported[idx] = hdev->hw.mac.supported[idx];
12720 advertising[idx] = hdev->hw.mac.advertising[idx];
12721 }
12722 }
12723
12724 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12725 {
12726 struct hclge_vport *vport = hclge_get_vport(handle);
12727 struct hclge_dev *hdev = vport->back;
12728 bool gro_en_old = hdev->gro_en;
12729 int ret;
12730
12731 hdev->gro_en = enable;
12732 ret = hclge_config_gro(hdev);
12733 if (ret)
12734 hdev->gro_en = gro_en_old;
12735
12736 return ret;
12737 }
12738
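/* sync the requested promiscuous mode of the PF and all VFs to hardware;
 * unicast/multicast promiscuous mode is only honoured for trusted VFs
 */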
12739 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12740 {
12741 struct hclge_vport *vport = &hdev->vport[0];
12742 struct hnae3_handle *handle = &vport->nic;
12743 u8 tmp_flags;
12744 int ret;
12745 u16 i;
12746
12747 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12748 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12749 vport->last_promisc_flags = vport->overflow_promisc_flags;
12750 }
12751
12752 if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12753 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12754 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12755 tmp_flags & HNAE3_MPE);
12756 if (!ret) {
12757 clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12758 &vport->state);
12759 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12760 &vport->state);
12761 }
12762 }
12763
12764 for (i = 1; i < hdev->num_alloc_vport; i++) {
12765 bool uc_en = false;
12766 bool mc_en = false;
12767 bool bc_en;
12768
12769 vport = &hdev->vport[i];
12770
12771 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12772 &vport->state))
12773 continue;
12774
12775 if (vport->vf_info.trusted) {
12776 uc_en = vport->vf_info.request_uc_en > 0 ||
12777 vport->overflow_promisc_flags &
12778 HNAE3_OVERFLOW_UPE;
12779 mc_en = vport->vf_info.request_mc_en > 0 ||
12780 vport->overflow_promisc_flags &
12781 HNAE3_OVERFLOW_MPE;
12782 }
12783 bc_en = vport->vf_info.request_bc_en > 0;
12784
12785 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12786 mc_en, bc_en);
12787 if (ret) {
12788 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12789 &vport->state);
12790 return;
12791 }
12792 hclge_set_vport_vlan_fltr_change(vport);
12793 }
12794 }
12795
12796 static bool hclge_module_existed(struct hclge_dev *hdev)
12797 {
12798 struct hclge_desc desc;
12799 u32 existed;
12800 int ret;
12801
12802 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12803 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12804 if (ret) {
12805 dev_err(&hdev->pdev->dev,
12806 "failed to get SFP exist state, ret = %d\n", ret);
12807 return false;
12808 }
12809
12810 existed = le32_to_cpu(desc.data[0]);
12811
12812 return existed != 0;
12813 }
12814
12815 /* need 6 BDs (140 bytes in total) for one read;
12816  * return the number of bytes actually read, 0 means the read failed.
12817  */
12818 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12819 u32 len, u8 *data)
12820 {
12821 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12822 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12823 u16 read_len;
12824 u16 copy_len;
12825 int ret;
12826 int i;
12827
12828 /* setup all 6 bds to read module eeprom info. */
12829 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12830 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12831 true);
12832
12833 /* bd0~bd4 need next flag */
12834 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12835 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12836 }
12837
12838 /* setup bd0, this bd contains offset and read length. */
12839 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12840 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12841 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12842 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12843
12844 ret = hclge_cmd_send(&hdev->hw, desc, i);
12845 if (ret) {
12846 dev_err(&hdev->pdev->dev,
12847 "failed to get SFP eeprom info, ret = %d\n", ret);
12848 return 0;
12849 }
12850
12851 /* copy sfp info from bd0 to out buffer. */
12852 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12853 memcpy(data, sfp_info_bd0->data, copy_len);
12854 read_len = copy_len;
12855
12856 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12857 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12858 if (read_len >= len)
12859 return read_len;
12860
12861 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12862 memcpy(data + read_len, desc[i].data, copy_len);
12863 read_len += copy_len;
12864 }
12865
12866 return read_len;
12867 }
12868
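/* read module EEPROM content piecewise through the firmware; only
 * supported on fiber media
 */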
12869 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12870 u32 len, u8 *data)
12871 {
12872 struct hclge_vport *vport = hclge_get_vport(handle);
12873 struct hclge_dev *hdev = vport->back;
12874 u32 read_len = 0;
12875 u16 data_len;
12876
12877 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12878 return -EOPNOTSUPP;
12879
12880 if (!hclge_module_existed(hdev))
12881 return -ENXIO;
12882
12883 while (read_len < len) {
12884 data_len = hclge_get_sfp_eeprom_info(hdev,
12885 offset + read_len,
12886 len - read_len,
12887 data + read_len);
12888 if (!data_len)
12889 return -EIO;
12890
12891 read_len += data_len;
12892 }
12893
12894 return 0;
12895 }
12896
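/* query the firmware for the link diagnosis status code; only supported on
 * device versions newer than V2
 */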
12897 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12898 u32 *status_code)
12899 {
12900 struct hclge_vport *vport = hclge_get_vport(handle);
12901 struct hclge_dev *hdev = vport->back;
12902 struct hclge_desc desc;
12903 int ret;
12904
12905 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12906 return -EOPNOTSUPP;
12907
12908 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12909 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12910 if (ret) {
12911 dev_err(&hdev->pdev->dev,
12912 "failed to query link diagnosis info, ret = %d\n", ret);
12913 return ret;
12914 }
12915
12916 *status_code = le32_to_cpu(desc.data[0]);
12917 return 0;
12918 }
12919
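/* hnae3 ae_ops callbacks implemented by the hclge PF driver */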
12920 static const struct hnae3_ae_ops hclge_ops = {
12921 .init_ae_dev = hclge_init_ae_dev,
12922 .uninit_ae_dev = hclge_uninit_ae_dev,
12923 .reset_prepare = hclge_reset_prepare_general,
12924 .reset_done = hclge_reset_done,
12925 .init_client_instance = hclge_init_client_instance,
12926 .uninit_client_instance = hclge_uninit_client_instance,
12927 .map_ring_to_vector = hclge_map_ring_to_vector,
12928 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12929 .get_vector = hclge_get_vector,
12930 .put_vector = hclge_put_vector,
12931 .set_promisc_mode = hclge_set_promisc_mode,
12932 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12933 .set_loopback = hclge_set_loopback,
12934 .start = hclge_ae_start,
12935 .stop = hclge_ae_stop,
12936 .client_start = hclge_client_start,
12937 .client_stop = hclge_client_stop,
12938 .get_status = hclge_get_status,
12939 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12940 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12941 .get_media_type = hclge_get_media_type,
12942 .check_port_speed = hclge_check_port_speed,
12943 .get_fec = hclge_get_fec,
12944 .set_fec = hclge_set_fec,
12945 .get_rss_key_size = hclge_get_rss_key_size,
12946 .get_rss = hclge_get_rss,
12947 .set_rss = hclge_set_rss,
12948 .set_rss_tuple = hclge_set_rss_tuple,
12949 .get_rss_tuple = hclge_get_rss_tuple,
12950 .get_tc_size = hclge_get_tc_size,
12951 .get_mac_addr = hclge_get_mac_addr,
12952 .set_mac_addr = hclge_set_mac_addr,
12953 .do_ioctl = hclge_do_ioctl,
12954 .add_uc_addr = hclge_add_uc_addr,
12955 .rm_uc_addr = hclge_rm_uc_addr,
12956 .add_mc_addr = hclge_add_mc_addr,
12957 .rm_mc_addr = hclge_rm_mc_addr,
12958 .set_autoneg = hclge_set_autoneg,
12959 .get_autoneg = hclge_get_autoneg,
12960 .restart_autoneg = hclge_restart_autoneg,
12961 .halt_autoneg = hclge_halt_autoneg,
12962 .get_pauseparam = hclge_get_pauseparam,
12963 .set_pauseparam = hclge_set_pauseparam,
12964 .set_mtu = hclge_set_mtu,
12965 .reset_queue = hclge_reset_tqp,
12966 .get_stats = hclge_get_stats,
12967 .get_mac_stats = hclge_get_mac_stat,
12968 .update_stats = hclge_update_stats,
12969 .get_strings = hclge_get_strings,
12970 .get_sset_count = hclge_get_sset_count,
12971 .get_fw_version = hclge_get_fw_version,
12972 .get_mdix_mode = hclge_get_mdix_mode,
12973 .enable_vlan_filter = hclge_enable_vlan_filter,
12974 .set_vlan_filter = hclge_set_vlan_filter,
12975 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12976 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12977 .reset_event = hclge_reset_event,
12978 .get_reset_level = hclge_get_reset_level,
12979 .set_default_reset_request = hclge_set_def_reset_request,
12980 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12981 .set_channels = hclge_set_channels,
12982 .get_channels = hclge_get_channels,
12983 .get_regs_len = hclge_get_regs_len,
12984 .get_regs = hclge_get_regs,
12985 .set_led_id = hclge_set_led_id,
12986 .get_link_mode = hclge_get_link_mode,
12987 .add_fd_entry = hclge_add_fd_entry,
12988 .del_fd_entry = hclge_del_fd_entry,
12989 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12990 .get_fd_rule_info = hclge_get_fd_rule_info,
12991 .get_fd_all_rules = hclge_get_all_rules,
12992 .enable_fd = hclge_enable_fd,
12993 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12994 .dbg_read_cmd = hclge_dbg_read_cmd,
12995 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12996 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12997 .ae_dev_resetting = hclge_ae_dev_resetting,
12998 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12999 .set_gro_en = hclge_gro_en,
13000 .get_global_queue_id = hclge_covert_handle_qid_global,
13001 .set_timer_task = hclge_set_timer_task,
13002 .mac_connect_phy = hclge_mac_connect_phy,
13003 .mac_disconnect_phy = hclge_mac_disconnect_phy,
13004 .get_vf_config = hclge_get_vf_config,
13005 .set_vf_link_state = hclge_set_vf_link_state,
13006 .set_vf_spoofchk = hclge_set_vf_spoofchk,
13007 .set_vf_trust = hclge_set_vf_trust,
13008 .set_vf_rate = hclge_set_vf_rate,
13009 .set_vf_mac = hclge_set_vf_mac,
13010 .get_module_eeprom = hclge_get_module_eeprom,
13011 .get_cmdq_stat = hclge_get_cmdq_stat,
13012 .add_cls_flower = hclge_add_cls_flower,
13013 .del_cls_flower = hclge_del_cls_flower,
13014 .cls_flower_active = hclge_is_cls_flower_active,
13015 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13016 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13017 .set_tx_hwts_info = hclge_ptp_set_tx_info,
13018 .get_rx_hwts = hclge_ptp_get_rx_hwts,
13019 .get_ts_info = hclge_ptp_get_ts_info,
13020 .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13021 };
13022
13023 static struct hnae3_ae_algo ae_algo = {
13024 .ops = &hclge_ops,
13025 .pdev_id_table = ae_algo_pci_tbl,
13026 };
13027
13028 static int hclge_init(void)
13029 {
13030 pr_info("%s is initializing\n", HCLGE_NAME);
13031
13032 hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
13033 if (!hclge_wq) {
13034 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13035 return -ENOMEM;
13036 }
13037
13038 hnae3_register_ae_algo(&ae_algo);
13039
13040 return 0;
13041 }
13042
13043 static void hclge_exit(void)
13044 {
13045 hnae3_unregister_ae_algo_prepare(&ae_algo);
13046 hnae3_unregister_ae_algo(&ae_algo);
13047 destroy_workqueue(hclge_wq);
13048 }
13049 module_init(hclge_init);
13050 module_exit(hclge_exit);
13051
13052 MODULE_LICENSE("GPL");
13053 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13054 MODULE_DESCRIPTION("HCLGE Driver");
13055 MODULE_VERSION(HCLGE_MOD_VERSION);
13056