1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "ice_flow.h"
9 #include "ice_virtchnl_allowlist.h"
10 
11 #define FIELD_SELECTOR(proto_hdr_field) \
12 		BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
13 
14 struct ice_vc_hdr_match_type {
15 	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
16 	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
17 };
18 
19 static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
20 	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
21 	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
22 					ICE_FLOW_SEG_HDR_IPV_OTHER},
23 	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
24 					ICE_FLOW_SEG_HDR_IPV_OTHER},
25 	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
26 	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
27 	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
28 };
29 
30 static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
31 	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
32 	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
33 	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
34 	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
35 	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
36 					ICE_FLOW_SEG_HDR_IPV_OTHER},
37 	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
38 					ICE_FLOW_SEG_HDR_IPV_OTHER},
39 	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
40 	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
41 	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
42 	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
43 	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
44 	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
45 	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
46 					ICE_FLOW_SEG_HDR_GTPU_DWN},
47 	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
48 					ICE_FLOW_SEG_HDR_GTPU_UP},
49 	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
50 	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
51 	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
52 	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
53 };
54 
55 struct ice_vc_hash_field_match_type {
56 	u32 vc_hdr;		/* virtchnl headers
57 				 * (VIRTCHNL_PROTO_HDR_XXX)
58 				 */
59 	u32 vc_hash_field;	/* virtchnl hash fields selector
60 				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
61 				 */
62 	u64 ice_hash_field;	/* ice hash fields
63 				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
64 				 */
65 };
66 
67 static const struct
68 ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
69 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
70 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
71 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
72 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
73 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
74 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
75 		ICE_FLOW_HASH_IPV4},
76 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
77 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
78 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
79 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
80 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
81 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
82 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
83 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
84 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
85 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
86 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
87 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
88 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
89 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
90 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
91 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
92 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
93 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
94 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
95 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
96 		ICE_FLOW_HASH_IPV6},
97 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
98 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
99 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
100 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
101 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
102 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
103 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
104 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
105 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
106 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
107 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
108 		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
109 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
110 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
111 	{VIRTCHNL_PROTO_HDR_TCP,
112 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
113 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
114 	{VIRTCHNL_PROTO_HDR_TCP,
115 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
116 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
117 	{VIRTCHNL_PROTO_HDR_TCP,
118 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
119 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
120 		ICE_FLOW_HASH_TCP_PORT},
121 	{VIRTCHNL_PROTO_HDR_UDP,
122 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
123 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
124 	{VIRTCHNL_PROTO_HDR_UDP,
125 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
126 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
127 	{VIRTCHNL_PROTO_HDR_UDP,
128 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
129 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
130 		ICE_FLOW_HASH_UDP_PORT},
131 	{VIRTCHNL_PROTO_HDR_SCTP,
132 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
133 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
134 	{VIRTCHNL_PROTO_HDR_SCTP,
135 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
136 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
137 	{VIRTCHNL_PROTO_HDR_SCTP,
138 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
139 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
140 		ICE_FLOW_HASH_SCTP_PORT},
141 };
142 
143 static const struct
144 ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
145 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
146 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
147 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
148 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
149 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
150 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
151 		ICE_FLOW_HASH_ETH},
152 	{VIRTCHNL_PROTO_HDR_ETH,
153 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
154 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
155 	{VIRTCHNL_PROTO_HDR_S_VLAN,
156 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
157 		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
158 	{VIRTCHNL_PROTO_HDR_C_VLAN,
159 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
160 		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
161 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
162 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
163 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
164 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
165 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
166 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
167 		ICE_FLOW_HASH_IPV4},
168 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
169 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
170 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
171 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
172 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
173 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
174 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
175 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
176 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
177 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
178 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
179 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
180 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
181 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
182 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
183 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
184 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
185 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
186 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
187 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
188 		ICE_FLOW_HASH_IPV6},
189 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
190 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
191 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
192 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
193 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
194 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
195 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
196 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
197 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
198 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
199 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
200 		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
201 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
202 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
203 	{VIRTCHNL_PROTO_HDR_TCP,
204 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
205 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
206 	{VIRTCHNL_PROTO_HDR_TCP,
207 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
208 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
209 	{VIRTCHNL_PROTO_HDR_TCP,
210 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
211 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
212 		ICE_FLOW_HASH_TCP_PORT},
213 	{VIRTCHNL_PROTO_HDR_UDP,
214 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
215 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
216 	{VIRTCHNL_PROTO_HDR_UDP,
217 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
218 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
219 	{VIRTCHNL_PROTO_HDR_UDP,
220 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
221 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
222 		ICE_FLOW_HASH_UDP_PORT},
223 	{VIRTCHNL_PROTO_HDR_SCTP,
224 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
225 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
226 	{VIRTCHNL_PROTO_HDR_SCTP,
227 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
228 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
229 	{VIRTCHNL_PROTO_HDR_SCTP,
230 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
231 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
232 		ICE_FLOW_HASH_SCTP_PORT},
233 	{VIRTCHNL_PROTO_HDR_PPPOE,
234 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
235 		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
236 	{VIRTCHNL_PROTO_HDR_GTPU_IP,
237 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
238 		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
239 	{VIRTCHNL_PROTO_HDR_L2TPV3,
240 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
241 		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
242 	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
243 		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
244 	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
245 		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
246 	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
247 		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
248 };
249 
250 /**
251  * ice_get_vf_vsi - get VF's VSI based on the stored index
252  * @vf: VF used to get VSI
253  */
254 static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
255 {
256 	return vf->pf->vsi[vf->lan_vsi_idx];
257 }
258 
259 /**
260  * ice_validate_vf_id - helper to check if VF ID is valid
261  * @pf: pointer to the PF structure
262  * @vf_id: the ID of the VF to check
263  */
264 static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
265 {
266 	/* vf_id range is only valid for 0-255, and should always be unsigned */
267 	if (vf_id >= pf->num_alloc_vfs) {
268 		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
269 		return -EINVAL;
270 	}
271 	return 0;
272 }
273 
274 /**
275  * ice_check_vf_init - helper to check if VF init complete
276  * @pf: pointer to the PF structure
277  * @vf: the pointer to the VF to check
278  */
279 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
280 {
281 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
282 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
283 			vf->vf_id);
284 		return -EBUSY;
285 	}
286 	return 0;
287 }
288 
289 /**
290  * ice_err_to_virt_err - translate errors for VF return code
291  * @ice_err: error return code
292  */
293 static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
294 {
295 	switch (ice_err) {
296 	case ICE_SUCCESS:
297 		return VIRTCHNL_STATUS_SUCCESS;
298 	case ICE_ERR_BAD_PTR:
299 	case ICE_ERR_INVAL_SIZE:
300 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
301 	case ICE_ERR_PARAM:
302 	case ICE_ERR_CFG:
303 		return VIRTCHNL_STATUS_ERR_PARAM;
304 	case ICE_ERR_NO_MEMORY:
305 		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
306 	case ICE_ERR_NOT_READY:
307 	case ICE_ERR_RESET_FAILED:
308 	case ICE_ERR_FW_API_VER:
309 	case ICE_ERR_AQ_ERROR:
310 	case ICE_ERR_AQ_TIMEOUT:
311 	case ICE_ERR_AQ_FULL:
312 	case ICE_ERR_AQ_NO_WORK:
313 	case ICE_ERR_AQ_EMPTY:
314 		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
315 	default:
316 		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
317 	}
318 }
319 
320 /**
321  * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
322  * @pf: pointer to the PF structure
323  * @v_opcode: operation code
324  * @v_retval: return value
325  * @msg: pointer to the msg buffer
326  * @msglen: msg length
327  */
328 static void
329 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
330 		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
331 {
332 	struct ice_hw *hw = &pf->hw;
333 	unsigned int i;
334 
335 	ice_for_each_vf(pf, i) {
336 		struct ice_vf *vf = &pf->vf[i];
337 
338 		/* Not all VFs are enabled, so skip the ones that are not */
339 		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
340 		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
341 			continue;
342 
343 		/* Ignore return value on purpose - a given VF may fail, but
344 		 * we need to keep going and send to all of them
345 		 */
346 		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
347 				      msglen, NULL);
348 	}
349 }
350 
351 /**
352  * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
353  * @vf: pointer to the VF structure
354  * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
355  * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
356  * @link_up: true to report link up, false to report link down
357  */
358 static void
359 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
360 		 int ice_link_speed, bool link_up)
361 {
362 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
363 		pfe->event_data.link_event_adv.link_status = link_up;
364 		/* Speed in Mbps */
365 		pfe->event_data.link_event_adv.link_speed =
366 			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
367 	} else {
368 		pfe->event_data.link_event.link_status = link_up;
369 		/* Legacy method for virtchnl link speeds */
370 		pfe->event_data.link_event.link_speed =
371 			(enum virtchnl_link_speed)
372 			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
373 	}
374 }
375 
376 /**
377  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
378  * @vf: the VF to check
379  *
380  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
381  * otherwise
382  */
383 static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
384 {
385 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
386 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
387 }
388 
389 /**
390  * ice_is_vf_link_up - check if the VF's link is up
391  * @vf: VF to check if link is up
392  */
393 static bool ice_is_vf_link_up(struct ice_vf *vf)
394 {
395 	struct ice_pf *pf = vf->pf;
396 
397 	if (ice_check_vf_init(pf, vf))
398 		return false;
399 
400 	if (ice_vf_has_no_qs_ena(vf))
401 		return false;
402 	else if (vf->link_forced)
403 		return vf->link_up;
404 	else
405 		return pf->hw.port_info->phy.link_info.link_info &
406 			ICE_AQ_LINK_UP;
407 }
408 
409 /**
410  * ice_vc_notify_vf_link_state - Inform a VF of link status
411  * @vf: pointer to the VF structure
412  *
413  * send a link status message to a single VF
414  */
415 static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
416 {
417 	struct virtchnl_pf_event pfe = { 0 };
418 	struct ice_hw *hw = &vf->pf->hw;
419 
420 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
421 	pfe.severity = PF_EVENT_SEVERITY_INFO;
422 
423 	if (ice_is_vf_link_up(vf))
424 		ice_set_pfe_link(vf, &pfe,
425 				 hw->port_info->phy.link_info.link_speed, true);
426 	else
427 		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
428 
429 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
430 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
431 			      sizeof(pfe), NULL);
432 }
433 
434 /**
435  * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
436  * @vf: VF to remove access to VSI for
437  */
438 static void ice_vf_invalidate_vsi(struct ice_vf *vf)
439 {
440 	vf->lan_vsi_idx = ICE_NO_VSI;
441 	vf->lan_vsi_num = ICE_NO_VSI;
442 }
443 
444 /**
445  * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
446  * @vf: VF whose VSI will be freed and then invalidated
447  */
448 static void ice_vf_vsi_release(struct ice_vf *vf)
449 {
450 	ice_vsi_release(ice_get_vf_vsi(vf));
451 	ice_vf_invalidate_vsi(vf);
452 }
453 
454 /**
455  * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
456  * @vf: VF that control VSI is being invalidated on
457  */
458 static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
459 {
460 	vf->ctrl_vsi_idx = ICE_NO_VSI;
461 }
462 
463 /**
464  * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
465  * @vf: VF that control VSI is being released on
466  */
467 static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
468 {
469 	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
470 	ice_vf_ctrl_invalidate_vsi(vf);
471 }
472 
473 /**
474  * ice_free_vf_res - Free a VF's resources
475  * @vf: pointer to the VF info
476  */
477 static void ice_free_vf_res(struct ice_vf *vf)
478 {
479 	struct ice_pf *pf = vf->pf;
480 	int i, last_vector_idx;
481 
482 	/* First, disable VF's configuration API to prevent OS from
483 	 * accessing the VF's VSI after it's freed or invalidated.
484 	 */
485 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
486 	ice_vf_fdir_exit(vf);
487 	/* free VF control VSI */
488 	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
489 		ice_vf_ctrl_vsi_release(vf);
490 
491 	/* free VSI and disconnect it from the parent uplink */
492 	if (vf->lan_vsi_idx != ICE_NO_VSI) {
493 		ice_vf_vsi_release(vf);
494 		vf->num_mac = 0;
495 	}
496 
497 	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
498 
499 	/* clear VF MDD event information */
500 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
501 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
502 
503 	/* Disable interrupts so that VF starts in a known state */
504 	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
505 		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
506 		ice_flush(&pf->hw);
507 	}
508 	/* reset some of the state variables keeping track of the resources */
509 	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
510 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
511 }
512 
513 /**
514  * ice_dis_vf_mappings
515  * @vf: pointer to the VF structure
516  */
517 static void ice_dis_vf_mappings(struct ice_vf *vf)
518 {
519 	struct ice_pf *pf = vf->pf;
520 	struct ice_vsi *vsi;
521 	struct device *dev;
522 	int first, last, v;
523 	struct ice_hw *hw;
524 
525 	hw = &pf->hw;
526 	vsi = ice_get_vf_vsi(vf);
527 
528 	dev = ice_pf_to_dev(pf);
529 	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
530 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
531 
532 	first = vf->first_vector_idx;
533 	last = first + pf->num_msix_per_vf - 1;
534 	for (v = first; v <= last; v++) {
535 		u32 reg;
536 
537 		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
538 			GLINT_VECT2FUNC_IS_PF_M) |
539 		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
540 			GLINT_VECT2FUNC_PF_NUM_M));
541 		wr32(hw, GLINT_VECT2FUNC(v), reg);
542 	}
543 
544 	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
545 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
546 	else
547 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
548 
549 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
550 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
551 	else
552 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
553 }
554 
555 /**
556  * ice_sriov_free_msix_res - Reset/free any used MSIX resources
557  * @pf: pointer to the PF structure
558  *
559  * Since no MSIX entries are taken from the pf->irq_tracker, just clear
560  * the pf->sriov_base_vector.
561  *
562  * Returns 0 on success, and -EINVAL on error.
563  */
564 static int ice_sriov_free_msix_res(struct ice_pf *pf)
565 {
566 	struct ice_res_tracker *res;
567 
568 	if (!pf)
569 		return -EINVAL;
570 
571 	res = pf->irq_tracker;
572 	if (!res)
573 		return -EINVAL;
574 
575 	/* give back irq_tracker resources used */
576 	WARN_ON(pf->sriov_base_vector < res->num_entries);
577 
578 	pf->sriov_base_vector = 0;
579 
580 	return 0;
581 }
582 
583 /**
584  * ice_set_vf_state_qs_dis - Set VF queues state to disabled
585  * @vf: pointer to the VF structure
586  */
587 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
588 {
589 	/* Clear Rx/Tx enabled queues flag */
590 	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
591 	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
592 	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
593 }
594 
595 /**
596  * ice_dis_vf_qs - Disable the VF queues
597  * @vf: pointer to the VF structure
598  */
599 static void ice_dis_vf_qs(struct ice_vf *vf)
600 {
601 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
602 
603 	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
604 	ice_vsi_stop_all_rx_rings(vsi);
605 	ice_set_vf_state_qs_dis(vf);
606 }
607 
608 /**
609  * ice_free_vfs - Free all VFs
610  * @pf: pointer to the PF structure
611  */
612 void ice_free_vfs(struct ice_pf *pf)
613 {
614 	struct device *dev = ice_pf_to_dev(pf);
615 	struct ice_hw *hw = &pf->hw;
616 	unsigned int tmp, i;
617 
618 	set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
619 
620 	if (!pf->vf)
621 		return;
622 
623 	while (test_and_set_bit(ICE_VF_DIS, pf->state))
624 		usleep_range(1000, 2000);
625 
626 	/* Disable IOV before freeing resources. This lets any VF drivers
627 	 * running in the host get themselves cleaned up before we yank
628 	 * the carpet out from underneath their feet.
629 	 */
630 	if (!pci_vfs_assigned(pf->pdev))
631 		pci_disable_sriov(pf->pdev);
632 	else
633 		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
634 
635 	/* Avoid wait time by stopping all VFs at the same time */
636 	ice_for_each_vf(pf, i)
637 		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
638 			ice_dis_vf_qs(&pf->vf[i]);
639 
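	/* remember how many VFs were allocated so their resources can be
	 * freed below, then clear the counts so the rest of the driver
	 * stops treating them as active
	 */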
640 	tmp = pf->num_alloc_vfs;
641 	pf->num_qps_per_vf = 0;
642 	pf->num_alloc_vfs = 0;
643 	for (i = 0; i < tmp; i++) {
644 		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
645 			/* disable VF qp mappings and set VF disable state */
646 			ice_dis_vf_mappings(&pf->vf[i]);
647 			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
648 			ice_free_vf_res(&pf->vf[i]);
649 		}
650 	}
651 
652 	if (ice_sriov_free_msix_res(pf))
653 		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
654 
655 	devm_kfree(dev, pf->vf);
656 	pf->vf = NULL;
657 
658 	/* This check is for when the driver is unloaded while VFs are
659 	 * assigned. Setting the number of VFs to 0 through sysfs is caught
660 	 * before this function ever gets called.
661 	 */
662 	if (!pci_vfs_assigned(pf->pdev)) {
663 		unsigned int vf_id;
664 
665 		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
666 		 * work correctly when SR-IOV gets re-enabled.
667 		 */
668 		for (vf_id = 0; vf_id < tmp; vf_id++) {
669 			u32 reg_idx, bit_idx;
670 
671 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
672 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
673 			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
674 		}
675 	}
676 
677 	/* clear malicious info if the VFs are getting released */
678 	for (i = 0; i < tmp; i++)
679 		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
680 					ICE_MAX_VF_COUNT, i))
681 			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
682 				i);
683 
684 	clear_bit(ICE_VF_DIS, pf->state);
685 	clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
686 	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
687 }
688 
689 /**
690  * ice_trigger_vf_reset - Reset a VF on HW
691  * @vf: pointer to the VF structure
692  * @is_vflr: true if VFLR was issued, false if not
693  * @is_pfr: true if the reset was triggered due to a previous PFR
694  *
695  * Trigger hardware to start a reset for a particular VF. Expects the caller
696  * to wait the proper amount of time to allow hardware to reset the VF before
697  * it cleans up and restores VF functionality.
698  */
699 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
700 {
701 	struct ice_pf *pf = vf->pf;
702 	u32 reg, reg_idx, bit_idx;
703 	unsigned int vf_abs_id, i;
704 	struct device *dev;
705 	struct ice_hw *hw;
706 
707 	dev = ice_pf_to_dev(pf);
708 	hw = &pf->hw;
709 	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
710 
711 	/* Inform VF that it is no longer active, as a warning */
712 	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
713 
714 	/* Disable VF's configuration API during reset. The flag is re-enabled
715 	 * when it's safe again to access VF's VSI.
716 	 */
717 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
718 
719 	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
720 	 * needs to clear them in the case of VFR/VFLR. If this is done for
721 	 * PFR, it can mess up VF resets because the VF driver may already
722 	 * have started cleanup by the time we get here.
723 	 */
724 	if (!is_pfr) {
725 		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
726 		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
727 	}
728 
729 	/* In the case of a VFLR, the HW has already reset the VF and we
730 	 * just need to clean up, so don't hit the VFRTRIG register.
731 	 */
732 	if (!is_vflr) {
733 		/* reset VF using VPGEN_VFRTRIG reg */
734 		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
735 		reg |= VPGEN_VFRTRIG_VFSWR_M;
736 		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
737 	}
738 	/* clear the VFLR bit in GLGEN_VFLRSTAT */
739 	reg_idx = (vf_abs_id) / 32;
740 	bit_idx = (vf_abs_id) % 32;
741 	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
742 	ice_flush(hw);
743 
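	/* poll PCI config space until the VF reports no pending transactions */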
744 	wr32(hw, PF_PCI_CIAA,
745 	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
746 	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
747 		reg = rd32(hw, PF_PCI_CIAD);
748 		/* no transactions pending so stop polling */
749 		if ((reg & VF_TRANS_PENDING_M) == 0)
750 			break;
751 
752 		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
753 		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
754 	}
755 }
756 
757 /**
758  * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
759  * @vsi: the VSI to update
760  * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
761  * @enable: true to enable the PVID, false to disable it
762  */
763 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
764 {
765 	struct ice_hw *hw = &vsi->back->hw;
766 	struct ice_aqc_vsi_props *info;
767 	struct ice_vsi_ctx *ctxt;
768 	enum ice_status status;
769 	int ret = 0;
770 
771 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
772 	if (!ctxt)
773 		return -ENOMEM;
774 
775 	ctxt->info = vsi->info;
776 	info = &ctxt->info;
777 	if (enable) {
778 		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
779 			ICE_AQ_VSI_PVLAN_INSERT_PVID |
780 			ICE_AQ_VSI_VLAN_EMOD_STR;
781 		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
782 	} else {
783 		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
784 			ICE_AQ_VSI_VLAN_MODE_ALL;
785 		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
786 	}
787 
788 	info->pvid = cpu_to_le16(pvid_info);
789 	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
790 					   ICE_AQ_VSI_PROP_SW_VALID);
791 
792 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
793 	if (status) {
794 		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
795 			 ice_stat_str(status),
796 			 ice_aq_str(hw->adminq.sq_last_status));
797 		ret = -EIO;
798 		goto out;
799 	}
800 
801 	vsi->info.vlan_flags = info->vlan_flags;
802 	vsi->info.sw_flags2 = info->sw_flags2;
803 	vsi->info.pvid = info->pvid;
804 out:
805 	kfree(ctxt);
806 	return ret;
807 }
808 
809 /**
810  * ice_vf_get_port_info - Get the VF's port info structure
811  * @vf: VF used to get the port info structure for
812  */
813 static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
814 {
815 	return vf->pf->hw.port_info;
816 }
817 
818 /**
819  * ice_vf_vsi_setup - Set up a VF VSI
820  * @vf: VF to setup VSI for
821  *
822  * Returns pointer to the successfully allocated VSI struct on success,
823  * otherwise returns NULL on failure.
824  */
825 static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
826 {
827 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
828 	struct ice_pf *pf = vf->pf;
829 	struct ice_vsi *vsi;
830 
831 	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
832 
833 	if (!vsi) {
834 		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
835 		ice_vf_invalidate_vsi(vf);
836 		return NULL;
837 	}
838 
839 	vf->lan_vsi_idx = vsi->idx;
840 	vf->lan_vsi_num = vsi->vsi_num;
841 
842 	return vsi;
843 }
844 
845 /**
846  * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
847  * @vf: VF to setup control VSI for
848  *
849  * Returns pointer to the successfully allocated VSI struct on success,
850  * otherwise returns NULL on failure.
851  */
852 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
853 {
854 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
855 	struct ice_pf *pf = vf->pf;
856 	struct ice_vsi *vsi;
857 
858 	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
859 	if (!vsi) {
860 		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
861 		ice_vf_ctrl_invalidate_vsi(vf);
862 	}
863 
864 	return vsi;
865 }
866 
867 /**
868  * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
869  * @pf: pointer to PF structure
870  * @vf: pointer to VF that the first MSIX vector index is being calculated for
871  *
872  * This returns the first MSIX vector index in PF space that is used by this VF.
873  * This index is used when accessing PF relative registers such as
874  * GLINT_VECT2FUNC and GLINT_DYN_CTL.
875  * This will always be the OICR index in the AVF driver so any functionality
876  * using vf->first_vector_idx for queue configuration will have to increment by
877  * 1 to avoid meddling with the OICR index.
878  */
879 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
880 {
881 	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
882 }
883 
884 /**
885  * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
886  * @vf: VF to add VLAN filters for
887  *
888  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
889  * always re-adds either a VLAN 0 or port VLAN based filter after reset.
890  */
891 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
892 {
893 	struct device *dev = ice_pf_to_dev(vf->pf);
894 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
895 	u16 vlan_id = 0;
896 	int err;
897 
898 	if (vf->port_vlan_info) {
899 		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
900 		if (err) {
901 			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
902 				vf->vf_id, err);
903 			return err;
904 		}
905 
906 		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
907 	}
908 
909 	/* vlan_id will either be 0 or the port VLAN number */
910 	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
911 	if (err) {
912 		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
913 			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
914 			err);
915 		return err;
916 	}
917 
918 	return 0;
919 }
920 
921 /**
922  * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
923  * @vf: VF to add MAC filters for
924  *
925  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
926  * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
927  */
928 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
929 {
930 	struct device *dev = ice_pf_to_dev(vf->pf);
931 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
932 	enum ice_status status;
933 	u8 broadcast[ETH_ALEN];
934 
935 	eth_broadcast_addr(broadcast);
936 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
937 	if (status) {
938 		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
939 			vf->vf_id, ice_stat_str(status));
940 		return ice_status_to_errno(status);
941 	}
942 
943 	vf->num_mac++;
944 
945 	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
946 		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
947 					  ICE_FWD_TO_VSI);
948 		if (status) {
949 			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
950 				&vf->hw_lan_addr.addr[0], vf->vf_id,
951 				ice_stat_str(status));
952 			return ice_status_to_errno(status);
953 		}
954 		vf->num_mac++;
955 
956 		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
957 	}
958 
959 	return 0;
960 }
961 
962 /**
963  * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
964  * @vf: VF to configure trust setting for
965  */
966 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
967 {
968 	if (vf->trusted)
969 		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
970 	else
971 		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
972 }
973 
974 /**
975  * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
976  * @vf: VF to enable MSIX mappings for
977  *
978  * Some of the registers need to be indexed/configured using hardware global
979  * device values and other registers need 0-based values, which represent PF
980  * based values.
981  */
982 static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
983 {
984 	int device_based_first_msix, device_based_last_msix;
985 	int pf_based_first_msix, pf_based_last_msix, v;
986 	struct ice_pf *pf = vf->pf;
987 	int device_based_vf_id;
988 	struct ice_hw *hw;
989 	u32 reg;
990 
991 	hw = &pf->hw;
992 	pf_based_first_msix = vf->first_vector_idx;
993 	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
994 
995 	device_based_first_msix = pf_based_first_msix +
996 		pf->hw.func_caps.common_cap.msix_vector_first_id;
997 	device_based_last_msix =
998 		(device_based_first_msix + pf->num_msix_per_vf) - 1;
999 	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1000 
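	/* program the device-relative first/last MSI-X vectors owned by this VF */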
1001 	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
1002 		VPINT_ALLOC_FIRST_M) |
1003 	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
1004 		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
1005 	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
1006 
1007 	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
1008 		 & VPINT_ALLOC_PCI_FIRST_M) |
1009 	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
1010 		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
1011 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
1012 
1013 	/* map the interrupts to its functions */
1014 	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
1015 		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
1016 			GLINT_VECT2FUNC_VF_NUM_M) |
1017 		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
1018 			GLINT_VECT2FUNC_PF_NUM_M));
1019 		wr32(hw, GLINT_VECT2FUNC(v), reg);
1020 	}
1021 
1022 	/* Map mailbox interrupt to VF MSI-X vector 0 */
1023 	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
1024 }
1025 
1026 /**
1027  * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
1028  * @vf: VF to enable the mappings for
1029  * @max_txq: max Tx queues allowed on the VF's VSI
1030  * @max_rxq: max Rx queues allowed on the VF's VSI
1031  */
1032 static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
1033 {
1034 	struct device *dev = ice_pf_to_dev(vf->pf);
1035 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1036 	struct ice_hw *hw = &vf->pf->hw;
1037 	u32 reg;
1038 
1039 	/* set regardless of mapping mode */
1040 	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
1041 
1042 	/* VF Tx queues allocation */
1043 	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
1044 		/* set the VF PF Tx queue range
1045 		 * VFNUMQ value should be set to (number of queues - 1). A value
1046 		 * of 0 means 1 queue and a value of 255 means 256 queues
1047 		 */
1048 		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
1049 			VPLAN_TX_QBASE_VFFIRSTQ_M) |
1050 		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
1051 			VPLAN_TX_QBASE_VFNUMQ_M));
1052 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
1053 	} else {
1054 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
1055 	}
1056 
1057 	/* set regardless of mapping mode */
1058 	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
1059 
1060 	/* VF Rx queues allocation */
1061 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
1062 		/* set the VF PF Rx queue range
1063 		 * VFNUMQ value should be set to (number of queues - 1). A value
1064 		 * of 0 means 1 queue and a value of 255 means 256 queues
1065 		 */
1066 		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
1067 			VPLAN_RX_QBASE_VFFIRSTQ_M) |
1068 		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
1069 			VPLAN_RX_QBASE_VFNUMQ_M));
1070 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
1071 	} else {
1072 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
1073 	}
1074 }
1075 
1076 /**
1077  * ice_ena_vf_mappings - enable VF MSIX and queue mapping
1078  * @vf: pointer to the VF structure
1079  */
1080 static void ice_ena_vf_mappings(struct ice_vf *vf)
1081 {
1082 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1083 
1084 	ice_ena_vf_msix_mappings(vf);
1085 	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
1086 }
1087 
1088 /**
1089  * ice_determine_res
1090  * @pf: pointer to the PF structure
1091  * @avail_res: available resources in the PF structure
1092  * @max_res: maximum resources that can be given per VF
1093  * @min_res: minimum resources that can be given per VF
1094  *
1095  * Returns non-zero value if resources (queues/vectors) are available or
1096  * returns zero if the PF cannot accommodate all num_alloc_vfs.
1097  */
1098 static int
1099 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
1100 {
1101 	bool checked_min_res = false;
1102 	int res;
1103 
1104 	/* start by checking if PF can assign max number of resources for
1105 	 * all num_alloc_vfs.
1106 	 * if yes, return number per VF
1107 	 * If no, divide by 2 and roundup, check again
1108 	 * repeat the loop till we reach a point where even minimum resources
1109 	 * are not available, in that case return 0
1110 	 */
1111 	res = max_res;
1112 	while ((res >= min_res) && !checked_min_res) {
1113 		int num_all_res;
1114 
1115 		num_all_res = pf->num_alloc_vfs * res;
1116 		if (num_all_res <= avail_res)
1117 			return res;
1118 
1119 		if (res == min_res)
1120 			checked_min_res = true;
1121 
1122 		res = DIV_ROUND_UP(res, 2);
1123 	}
1124 	return 0;
1125 }
1126 
1127 /**
1128  * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
1129  * @vf: VF to calculate the register index for
1130  * @q_vector: a q_vector associated to the VF
1131  */
1132 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
1133 {
1134 	struct ice_pf *pf;
1135 
1136 	if (!vf || !q_vector)
1137 		return -EINVAL;
1138 
1139 	pf = vf->pf;
1140 
1141 	/* always add one to account for the OICR being the first MSIX */
1142 	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
1143 		q_vector->v_idx + 1;
1144 }
1145 
1146 /**
1147  * ice_get_max_valid_res_idx - Get the max valid resource index
1148  * @res: pointer to the resource to find the max valid index for
1149  *
1150  * Start from the end of the ice_res_tracker and return right when we find the
1151  * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
1152  * valid for SR-IOV because it is the only consumer that manipulates the
1153  * res->end and this is always called when res->end is set to res->num_entries.
1154  */
1155 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
1156 {
1157 	int i;
1158 
1159 	if (!res)
1160 		return -EINVAL;
1161 
1162 	for (i = res->num_entries - 1; i >= 0; i--)
1163 		if (res->list[i] & ICE_RES_VALID_BIT)
1164 			return i;
1165 
1166 	return 0;
1167 }
1168 
1169 /**
1170  * ice_sriov_set_msix_res - Set any used MSIX resources
1171  * @pf: pointer to PF structure
1172  * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
1173  *
1174  * This function allows SR-IOV resources to be taken from the end of the PF's
1175  * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
1176  * just set the pf->sriov_base_vector and return success.
1177  *
1178  * If there are not enough resources available, return an error. This should
1179  * always be caught by ice_set_per_vf_res().
1180  *
1181  * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
1182  * in the PF's space available for SR-IOV.
1183  */
1184 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
1185 {
1186 	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
1187 	int vectors_used = pf->irq_tracker->num_entries;
1188 	int sriov_base_vector;
1189 
1190 	sriov_base_vector = total_vectors - num_msix_needed;
1191 
1192 	/* make sure we only grab irq_tracker entries from the list end and
1193 	 * that we have enough available MSIX vectors
1194 	 */
1195 	if (sriov_base_vector < vectors_used)
1196 		return -EINVAL;
1197 
1198 	pf->sriov_base_vector = sriov_base_vector;
1199 
1200 	return 0;
1201 }
1202 
1203 /**
1204  * ice_set_per_vf_res - check if vectors and queues are available
1205  * @pf: pointer to the PF structure
1206  *
1207  * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
1208  * get more vectors and can enable more queues per VF. Note that this does not
1209  * grab any vectors from the SW pool already allocated. Also note, that all
1210  * vector counts include one for each VF's miscellaneous interrupt vector
1211  * (i.e. OICR).
1212  *
1213  * Minimum VFs - 2 vectors, 1 queue pair
1214  * Small VFs - 5 vectors, 4 queue pairs
1215  * Medium VFs - 17 vectors, 16 queue pairs
1216  *
1217  * Second, determine number of queue pairs per VF by starting with a pre-defined
1218  * maximum each VF supports. If this is not possible, then we adjust based on
1219  * queue pairs available on the device.
1220  *
1221  * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
1222  * by each VF during VF initialization and reset.
1223  */
1224 static int ice_set_per_vf_res(struct ice_pf *pf)
1225 {
1226 	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
1227 	int msix_avail_per_vf, msix_avail_for_sriov;
1228 	struct device *dev = ice_pf_to_dev(pf);
1229 	u16 num_msix_per_vf, num_txq, num_rxq;
1230 
1231 	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
1232 		return -EINVAL;
1233 
1234 	/* determine MSI-X resources per VF */
1235 	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
1236 		pf->irq_tracker->num_entries;
1237 	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
1238 	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
1239 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
1240 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
1241 		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
1242 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
1243 		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
1244 	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
1245 		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
1246 	} else {
1247 		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
1248 			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
1249 			pf->num_alloc_vfs);
1250 		return -EIO;
1251 	}
1252 
1253 	/* determine queue resources per VF */
1254 	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
1255 				    min_t(u16,
1256 					  num_msix_per_vf - ICE_NONQ_VECS_VF,
1257 					  ICE_MAX_RSS_QS_PER_VF),
1258 				    ICE_MIN_QS_PER_VF);
1259 
1260 	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
1261 				    min_t(u16,
1262 					  num_msix_per_vf - ICE_NONQ_VECS_VF,
1263 					  ICE_MAX_RSS_QS_PER_VF),
1264 				    ICE_MIN_QS_PER_VF);
1265 
1266 	if (!num_txq || !num_rxq) {
1267 		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
1268 			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
1269 		return -EIO;
1270 	}
1271 
1272 	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
1273 		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
1274 			pf->num_alloc_vfs);
1275 		return -EINVAL;
1276 	}
1277 
1278 	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
1279 	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
1280 	pf->num_msix_per_vf = num_msix_per_vf;
1281 	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
1282 		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
1283 
1284 	return 0;
1285 }
1286 
1287 /**
1288  * ice_clear_vf_reset_trigger - enable VF to access hardware
1289  * @vf: VF to enable hardware access for
1290  */
1291 static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
1292 {
1293 	struct ice_hw *hw = &vf->pf->hw;
1294 	u32 reg;
1295 
1296 	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
1297 	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
1298 	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
1299 	ice_flush(hw);
1300 }
1301 
1302 /**
1303  * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
1304  * @vf: pointer to the VF info
1305  * @vsi: the VSI being configured
1306  * @promisc_m: mask of promiscuous config bits
1307  * @rm_promisc: promisc flag request from the VF to remove or add filter
1308  *
1309  * This function configures VF VSI promiscuous mode, based on the VF requests,
1310  * for Unicast, Multicast and VLAN
1311  */
1312 static enum ice_status
1313 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1314 		       bool rm_promisc)
1315 {
1316 	struct ice_pf *pf = vf->pf;
1317 	enum ice_status status = 0;
1318 	struct ice_hw *hw;
1319 
1320 	hw = &pf->hw;
1321 	if (vsi->num_vlan) {
1322 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1323 						  rm_promisc);
1324 	} else if (vf->port_vlan_info) {
1325 		if (rm_promisc)
1326 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1327 						       vf->port_vlan_info);
1328 		else
1329 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1330 						     vf->port_vlan_info);
1331 	} else {
1332 		if (rm_promisc)
1333 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1334 						       0);
1335 		else
1336 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1337 						     0);
1338 	}
1339 
1340 	return status;
1341 }
1342 
1343 static void ice_vf_clear_counters(struct ice_vf *vf)
1344 {
1345 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1346 
1347 	vf->num_mac = 0;
1348 	vsi->num_vlan = 0;
1349 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1350 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1351 }
1352 
1353 /**
1354  * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
1355  * @vf: VF to perform pre VSI rebuild tasks
1356  *
1357  * These tasks are items that don't need to be amortized since they are most
1358  * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
1359  */
1360 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
1361 {
1362 	ice_vf_clear_counters(vf);
1363 	ice_clear_vf_reset_trigger(vf);
1364 }
1365 
1366 /**
1367  * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
1368  * @vsi: Pointer to VSI
1369  *
1370  * This function moves VSI into corresponding scheduler aggregator node
1371  * based on cached value of "aggregator node info" per VSI
1372  */
1373 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
1374 {
1375 	struct ice_pf *pf = vsi->back;
1376 	enum ice_status status;
1377 	struct device *dev;
1378 
1379 	if (!vsi->agg_node)
1380 		return;
1381 
1382 	dev = ice_pf_to_dev(pf);
1383 	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
1384 		dev_dbg(dev,
1385 			"agg_id %u already has reached max_num_vsis %u\n",
1386 			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
1387 		return;
1388 	}
1389 
1390 	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
1391 				     vsi->idx, vsi->tc_cfg.ena_tc);
1392 	if (status)
1393 		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node\n",
1394 			vsi->idx, vsi->agg_node->agg_id);
1395 	else
1396 		vsi->agg_node->num_vsis++;
1397 }
1398 
1399 /**
1400  * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
1401  * @vf: VF to rebuild host configuration on
1402  */
1403 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1404 {
1405 	struct device *dev = ice_pf_to_dev(vf->pf);
1406 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1407 
1408 	ice_vf_set_host_trust_cfg(vf);
1409 
1410 	if (ice_vf_rebuild_host_mac_cfg(vf))
1411 		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1412 			vf->vf_id);
1413 
1414 	if (ice_vf_rebuild_host_vlan_cfg(vf))
1415 		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1416 			vf->vf_id);
1417 	/* rebuild aggregator node config for main VF VSI */
1418 	ice_vf_rebuild_aggregator_node_cfg(vsi);
1419 }
1420 
1421 /**
1422  * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
1423  * @vf: VF to release and setup the VSI for
1424  *
1425  * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
1426  * configuration change, etc.).
1427  */
1428 static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1429 {
1430 	ice_vf_vsi_release(vf);
1431 	if (!ice_vf_vsi_setup(vf))
1432 		return -ENOMEM;
1433 
1434 	return 0;
1435 }
1436 
1437 /**
1438  * ice_vf_rebuild_vsi - rebuild the VF's VSI
1439  * @vf: VF to rebuild the VSI for
1440  *
1441  * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
1442  * host, PFR, CORER, etc.).
1443  */
1444 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1445 {
1446 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1447 	struct ice_pf *pf = vf->pf;
1448 
1449 	if (ice_vsi_rebuild(vsi, true)) {
1450 		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1451 			vf->vf_id);
1452 		return -EIO;
1453 	}
1454 	/* vsi->idx will remain the same in this case so don't update
1455 	 * vf->lan_vsi_idx
1456 	 */
1457 	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1458 	vf->lan_vsi_num = vsi->vsi_num;
1459 
1460 	return 0;
1461 }
1462 
1463 /**
1464  * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1465  * @vf: VF to set in initialized state
1466  *
1467  * After this function the VF will be ready to receive/handle the
1468  * VIRTCHNL_OP_GET_VF_RESOURCES message
1469  */
1470 static void ice_vf_set_initialized(struct ice_vf *vf)
1471 {
1472 	ice_set_vf_state_qs_dis(vf);
1473 	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1474 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1475 	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1476 	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1477 }
1478 
1479 /**
1480  * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
1481  * @vf: VF to perform tasks on
1482  */
1483 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1484 {
1485 	struct ice_pf *pf = vf->pf;
1486 	struct ice_hw *hw;
1487 
1488 	hw = &pf->hw;
1489 
1490 	ice_vf_rebuild_host_cfg(vf);
1491 
1492 	ice_vf_set_initialized(vf);
1493 	ice_ena_vf_mappings(vf);
1494 	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1495 }
1496 
1497 /**
1498  * ice_reset_all_vfs - reset all allocated VFs in one go
1499  * @pf: pointer to the PF structure
1500  * @is_vflr: true if VFLR was issued, false if not
1501  *
1502  * First, tell the hardware to reset each VF, then do all the waiting in one
1503  * chunk, and finally finish restoring each VF after the wait. This is useful
1504  * during PF routines which need to reset all VFs, as otherwise it must perform
1505  * these resets in a serialized fashion.
1506  *
1507  * Returns true if any VFs were reset, and false otherwise.
1508  */
1509 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1510 {
1511 	struct device *dev = ice_pf_to_dev(pf);
1512 	struct ice_hw *hw = &pf->hw;
1513 	struct ice_vf *vf;
1514 	int v, i;
1515 
1516 	/* If we don't have any VFs, then there is nothing to reset */
1517 	if (!pf->num_alloc_vfs)
1518 		return false;
1519 
1520 	/* clear all malicious info if the VFs are getting reset */
1521 	ice_for_each_vf(pf, i)
1522 		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
1523 			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
1524 
1525 	/* If VFs have been disabled, there is no need to reset */
1526 	if (test_and_set_bit(ICE_VF_DIS, pf->state))
1527 		return false;
1528 
1529 	/* Begin reset on all VFs at once */
1530 	ice_for_each_vf(pf, v)
1531 		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1532 
1533 	/* HW requires some time to make sure it can flush the FIFO for a VF
1534 	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1535 	 * sequence to make sure that it has completed. We'll keep track of
1536 	 * the VFs using a simple iterator that increments once that VF has
1537 	 * finished resetting.
1538 	 */
1539 	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1540 		/* Check each VF in sequence */
1541 		while (v < pf->num_alloc_vfs) {
1542 			u32 reg;
1543 
1544 			vf = &pf->vf[v];
1545 			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1546 			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1547 				/* only delay if the check failed */
1548 				usleep_range(10, 20);
1549 				break;
1550 			}
1551 
1552 			/* If the current VF has finished resetting, move on
1553 			 * to the next VF in sequence.
1554 			 */
1555 			v++;
1556 		}
1557 	}
1558 
1559 	/* Display a warning if at least one VF didn't manage to reset in
1560 	 * time, but continue on with the operation.
1561 	 */
1562 	if (v < pf->num_alloc_vfs)
1563 		dev_warn(dev, "VF reset check timeout\n");
1564 
1565 	/* free VF resources to begin resetting the VSI state */
1566 	ice_for_each_vf(pf, v) {
1567 		vf = &pf->vf[v];
1568 
1569 		vf->driver_caps = 0;
1570 		ice_vc_set_default_allowlist(vf);
1571 
1572 		ice_vf_fdir_exit(vf);
1573 		/* clean the VF control VSI when resetting VFs since it should
1574 		 * only be set up when the VF creates its first FDIR rule.
1575 		 */
1576 		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1577 			ice_vf_ctrl_invalidate_vsi(vf);
1578 
1579 		ice_vf_pre_vsi_rebuild(vf);
1580 		ice_vf_rebuild_vsi(vf);
1581 		ice_vf_post_vsi_rebuild(vf);
1582 	}
1583 
1584 	ice_flush(hw);
1585 	clear_bit(ICE_VF_DIS, pf->state);
1586 
1587 	return true;
1588 }
1589 
1590 /**
1591  * ice_is_vf_disabled
1592  * @vf: pointer to the VF info
1593  *
1594  * Returns true if the PF or VF is disabled, false otherwise.
1595  */
1596 static bool ice_is_vf_disabled(struct ice_vf *vf)
1597 {
1598 	struct ice_pf *pf = vf->pf;
1599 
1600 	/* If the PF has been disabled, there is no need to reset the VF until
1601 	 * the PF is active again. Similarly, if the VF has been disabled, this
1602 	 * means something else is resetting the VF, so we shouldn't continue.
1603 	 * Otherwise, set disable VF state bit for actual reset, and continue.
1604 	 */
1605 	return (test_bit(ICE_VF_DIS, pf->state) ||
1606 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1607 }
1608 
1609 /**
1610  * ice_reset_vf - Reset a particular VF
1611  * @vf: pointer to the VF structure
1612  * @is_vflr: true if VFLR was issued, false if not
1613  *
1614  * Returns true if the VF is currently in reset, resets successfully, or resets
1615  * are disabled and false otherwise.
1616  */
1617 bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1618 {
1619 	struct ice_pf *pf = vf->pf;
1620 	struct ice_vsi *vsi;
1621 	struct device *dev;
1622 	struct ice_hw *hw;
1623 	bool rsd = false;
1624 	u8 promisc_m;
1625 	u32 reg;
1626 	int i;
1627 
1628 	dev = ice_pf_to_dev(pf);
1629 
1630 	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
1631 		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1632 			vf->vf_id);
1633 		return true;
1634 	}
1635 
1636 	if (ice_is_vf_disabled(vf)) {
1637 		dev_dbg(dev, "VF %d is already disabled, there is no need to reset it; telling VM all is fine\n",
1638 			vf->vf_id);
1639 		return true;
1640 	}
1641 
1642 	/* Set VF disable bit state here, before triggering reset */
1643 	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1644 	ice_trigger_vf_reset(vf, is_vflr, false);
1645 
1646 	vsi = ice_get_vf_vsi(vf);
1647 
1648 	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1649 		ice_dis_vf_qs(vf);
1650 
1651 	/* Call Disable LAN Tx queue AQ whether or not queues are
1652 	 * enabled. This is needed for successful completion of VFR.
1653 	 */
1654 	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1655 			NULL, ICE_VF_RESET, vf->vf_id, NULL);
1656 
1657 	hw = &pf->hw;
1658 	/* poll VPGEN_VFRSTAT reg to make sure
1659 	 * that reset is complete
1660 	 */
1661 	for (i = 0; i < 10; i++) {
1662 		/* VF reset requires driver to first reset the VF and then
1663 		 * poll the status register to make sure that the reset
1664 		 * completed successfully.
1665 		 */
1666 		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1667 		if (reg & VPGEN_VFRSTAT_VFRD_M) {
1668 			rsd = true;
1669 			break;
1670 		}
1671 
1672 		/* only sleep if the reset is not done */
1673 		usleep_range(10, 20);
1674 	}
1675 
1676 	vf->driver_caps = 0;
1677 	ice_vc_set_default_allowlist(vf);
1678 
1679 	/* Display a warning if the VF didn't manage to reset in time, but
1680 	 * continue on with the operation.
1681 	 */
1682 	if (!rsd)
1683 		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1684 
1685 	/* disable promiscuous modes in case they were enabled;
1686 	 * ignore any error if the disabling process fails
1687 	 */
1688 	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1689 	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1690 		if (vf->port_vlan_info || vsi->num_vlan)
1691 			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1692 		else
1693 			promisc_m = ICE_UCAST_PROMISC_BITS;
1694 
1695 		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1696 			dev_err(dev, "disabling promiscuous mode failed\n");
1697 	}
1698 
1699 	ice_vf_fdir_exit(vf);
1700 	/* clean the VF control VSI when resetting the VF since it should
1701 	 * only be set up when the VF creates its first FDIR rule.
1702 	 */
1703 	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1704 		ice_vf_ctrl_vsi_release(vf);
1705 
1706 	ice_vf_pre_vsi_rebuild(vf);
1707 
1708 	if (ice_vf_rebuild_vsi_with_release(vf)) {
1709 		dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
1710 		return false;
1711 	}
1712 
1713 	ice_vf_post_vsi_rebuild(vf);
1714 
1715 	/* if the VF has been reset allow it to come up again */
1716 	if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
1717 		dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", vf->vf_id);
1718 
1719 	return true;
1720 }
1721 
1722 /**
1723  * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1724  * @pf: pointer to the PF structure
1725  */
1726 void ice_vc_notify_link_state(struct ice_pf *pf)
1727 {
1728 	int i;
1729 
1730 	ice_for_each_vf(pf, i)
1731 		ice_vc_notify_vf_link_state(&pf->vf[i]);
1732 }
1733 
1734 /**
1735  * ice_vc_notify_reset - Send pending reset message to all VFs
1736  * @pf: pointer to the PF structure
1737  *
1738  * indicate a pending reset to all VFs on a given PF
1739  */
1740 void ice_vc_notify_reset(struct ice_pf *pf)
1741 {
1742 	struct virtchnl_pf_event pfe;
1743 
1744 	if (!pf->num_alloc_vfs)
1745 		return;
1746 
1747 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1748 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1749 	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1750 			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1751 }
1752 
1753 /**
1754  * ice_vc_notify_vf_reset - Notify VF of a reset event
1755  * @vf: pointer to the VF structure
1756  */
1757 static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1758 {
1759 	struct virtchnl_pf_event pfe;
1760 	struct ice_pf *pf;
1761 
1762 	if (!vf)
1763 		return;
1764 
1765 	pf = vf->pf;
1766 	if (ice_validate_vf_id(pf, vf->vf_id))
1767 		return;
1768 
1769 	/* Bail out if the VF is disabled, or is neither in the initialized nor
1770 	 * the active state; otherwise proceed with the notification
1771 	 */
1772 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1773 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1774 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1775 		return;
1776 
1777 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1778 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1779 	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1780 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1781 			      NULL);
1782 }
1783 
1784 /**
1785  * ice_init_vf_vsi_res - initialize/setup VF VSI resources
1786  * @vf: VF to initialize/setup the VSI for
1787  *
1788  * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
1789  * the VF VSI's broadcast filter. It is only used during initial VF creation.
1790  */
1791 static int ice_init_vf_vsi_res(struct ice_vf *vf)
1792 {
1793 	struct ice_pf *pf = vf->pf;
1794 	u8 broadcast[ETH_ALEN];
1795 	enum ice_status status;
1796 	struct ice_vsi *vsi;
1797 	struct device *dev;
1798 	int err;
1799 
1800 	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1801 
1802 	dev = ice_pf_to_dev(pf);
1803 	vsi = ice_vf_vsi_setup(vf);
1804 	if (!vsi)
1805 		return -ENOMEM;
1806 
1807 	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1808 	if (err) {
1809 		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1810 			 vf->vf_id);
1811 		goto release_vsi;
1812 	}
1813 
1814 	eth_broadcast_addr(broadcast);
1815 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1816 	if (status) {
1817 		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1818 			vf->vf_id, ice_stat_str(status));
1819 		err = ice_status_to_errno(status);
1820 		goto release_vsi;
1821 	}
1822 
1823 	vf->num_mac = 1;
1824 
1825 	return 0;
1826 
1827 release_vsi:
1828 	ice_vf_vsi_release(vf);
1829 	return err;
1830 }
1831 
1832 /**
1833  * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
1834  * @pf: PF the VFs are associated with
1835  */
1836 static int ice_start_vfs(struct ice_pf *pf)
1837 {
1838 	struct ice_hw *hw = &pf->hw;
1839 	int retval, i;
1840 
1841 	ice_for_each_vf(pf, i) {
1842 		struct ice_vf *vf = &pf->vf[i];
1843 
1844 		ice_clear_vf_reset_trigger(vf);
1845 
1846 		retval = ice_init_vf_vsi_res(vf);
1847 		if (retval) {
1848 			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1849 				vf->vf_id, retval);
1850 			goto teardown;
1851 		}
1852 
1853 		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1854 		ice_ena_vf_mappings(vf);
1855 		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1856 	}
1857 
1858 	ice_flush(hw);
1859 	return 0;
1860 
1861 teardown:
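	/* Unwind only the VFs that were successfully initialized before the
	 * failure; the VF at index i is the one that failed.
	 */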
1862 	for (i = i - 1; i >= 0; i--) {
1863 		struct ice_vf *vf = &pf->vf[i];
1864 
1865 		ice_dis_vf_mappings(vf);
1866 		ice_vf_vsi_release(vf);
1867 	}
1868 
1869 	return retval;
1870 }
1871 
1872 /**
1873  * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1874  * @pf: PF holding reference to all VFs for default configuration
1875  */
1876 static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1877 {
1878 	int i;
1879 
1880 	ice_for_each_vf(pf, i) {
1881 		struct ice_vf *vf = &pf->vf[i];
1882 
1883 		vf->pf = pf;
1884 		vf->vf_id = i;
1885 		vf->vf_sw_id = pf->first_sw;
1886 		/* assign default capabilities */
1887 		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1888 		vf->spoofchk = true;
1889 		vf->num_vf_qs = pf->num_qps_per_vf;
1890 		ice_vc_set_default_allowlist(vf);
1891 
1892 		/* ctrl_vsi_idx will be set to a valid value only when VF
1893 		 * creates its first fdir rule.
1894 		 */
1895 		ice_vf_ctrl_invalidate_vsi(vf);
1896 		ice_vf_fdir_init(vf);
1897 	}
1898 }
1899 
1900 /**
1901  * ice_alloc_vfs - allocate num_vfs in the PF structure
1902  * @pf: PF to store the allocated VFs in
1903  * @num_vfs: number of VFs to allocate
1904  */
1905 static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1906 {
1907 	struct ice_vf *vfs;
1908 
1909 	vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1910 			   GFP_KERNEL);
1911 	if (!vfs)
1912 		return -ENOMEM;
1913 
1914 	pf->vf = vfs;
1915 	pf->num_alloc_vfs = num_vfs;
1916 
1917 	return 0;
1918 }
1919 
1920 /**
1921  * ice_ena_vfs - enable VFs so they are ready to be used
1922  * @pf: pointer to the PF structure
1923  * @num_vfs: number of VFs to enable
1924  */
1925 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1926 {
1927 	struct device *dev = ice_pf_to_dev(pf);
1928 	struct ice_hw *hw = &pf->hw;
1929 	int ret;
1930 
1931 	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
1932 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1933 	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1934 	set_bit(ICE_OICR_INTR_DIS, pf->state);
1935 	ice_flush(hw);
1936 
1937 	ret = pci_enable_sriov(pf->pdev, num_vfs);
1938 	if (ret) {
1939 		pf->num_alloc_vfs = 0;
1940 		goto err_unroll_intr;
1941 	}
1942 
1943 	ret = ice_alloc_vfs(pf, num_vfs);
1944 	if (ret)
1945 		goto err_pci_disable_sriov;
1946 
1947 	if (ice_set_per_vf_res(pf)) {
1948 		dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
1949 			num_vfs);
1950 		ret = -ENOSPC;
1951 		goto err_unroll_sriov;
1952 	}
1953 
1954 	ice_set_dflt_settings_vfs(pf);
1955 
1956 	if (ice_start_vfs(pf)) {
1957 		dev_err(dev, "Failed to start VF(s)\n");
1958 		ret = -EAGAIN;
1959 		goto err_unroll_sriov;
1960 	}
1961 
1962 	clear_bit(ICE_VF_DIS, pf->state);
1963 	return 0;
1964 
1965 err_unroll_sriov:
1966 	devm_kfree(dev, pf->vf);
1967 	pf->vf = NULL;
1968 	pf->num_alloc_vfs = 0;
1969 err_pci_disable_sriov:
1970 	pci_disable_sriov(pf->pdev);
1971 err_unroll_intr:
1972 	/* rearm interrupts here */
1973 	ice_irq_dynamic_ena(hw, NULL, NULL);
1974 	clear_bit(ICE_OICR_INTR_DIS, pf->state);
1975 	return ret;
1976 }
1977 
1978 /**
1979  * ice_pci_sriov_ena - Enable or change number of VFs
1980  * @pf: pointer to the PF structure
1981  * @num_vfs: number of VFs to allocate
1982  *
1983  * Returns 0 on success and negative on failure
1984  */
1985 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1986 {
1987 	int pre_existing_vfs = pci_num_vf(pf->pdev);
1988 	struct device *dev = ice_pf_to_dev(pf);
1989 	int err;
1990 
1991 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1992 		ice_free_vfs(pf);
1993 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1994 		return 0;
1995 
1996 	if (num_vfs > pf->num_vfs_supported) {
1997 		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1998 			num_vfs, pf->num_vfs_supported);
1999 		return -EOPNOTSUPP;
2000 	}
2001 
2002 	dev_info(dev, "Enabling %d VFs\n", num_vfs);
2003 	err = ice_ena_vfs(pf, num_vfs);
2004 	if (err) {
2005 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
2006 		return err;
2007 	}
2008 
2009 	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
2010 	return 0;
2011 }
2012 
2013 /**
2014  * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
2015  * @pf: PF to enable SR-IOV on
2016  */
2017 static int ice_check_sriov_allowed(struct ice_pf *pf)
2018 {
2019 	struct device *dev = ice_pf_to_dev(pf);
2020 
2021 	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
2022 		dev_err(dev, "This device is not capable of SR-IOV\n");
2023 		return -EOPNOTSUPP;
2024 	}
2025 
2026 	if (ice_is_safe_mode(pf)) {
2027 		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
2028 		return -EOPNOTSUPP;
2029 	}
2030 
2031 	if (!ice_pf_state_is_nominal(pf)) {
2032 		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
2033 		return -EBUSY;
2034 	}
2035 
2036 	return 0;
2037 }
2038 
2039 /**
2040  * ice_sriov_configure - Enable or change number of VFs via sysfs
2041  * @pdev: pointer to a pci_dev structure
2042  * @num_vfs: number of VFs to allocate or 0 to free VFs
2043  *
2044  * This function is called when the user updates the number of VFs in sysfs. On
2045  * success return whatever num_vfs was set to by the caller. Return negative on
2046  * failure.
2047  */
2048 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
2049 {
2050 	struct ice_pf *pf = pci_get_drvdata(pdev);
2051 	struct device *dev = ice_pf_to_dev(pf);
2052 	enum ice_status status;
2053 	int err;
2054 
2055 	err = ice_check_sriov_allowed(pf);
2056 	if (err)
2057 		return err;
2058 
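	/* A zero VF count from sysfs means SR-IOV is being disabled; only
	 * tear down if none of the existing VFs are still assigned to VMs.
	 */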
2059 	if (!num_vfs) {
2060 		if (!pci_vfs_assigned(pdev)) {
2061 			ice_mbx_deinit_snapshot(&pf->hw);
2062 			ice_free_vfs(pf);
2063 			if (pf->lag)
2064 				ice_enable_lag(pf->lag);
2065 			return 0;
2066 		}
2067 
2068 		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
2069 		return -EBUSY;
2070 	}
2071 
2072 	status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
2073 	if (status)
2074 		return ice_status_to_errno(status);
2075 
2076 	err = ice_pci_sriov_ena(pf, num_vfs);
2077 	if (err) {
2078 		ice_mbx_deinit_snapshot(&pf->hw);
2079 		return err;
2080 	}
2081 
2082 	if (pf->lag)
2083 		ice_disable_lag(pf->lag);
2084 	return num_vfs;
2085 }
2086 
2087 /**
2088  * ice_process_vflr_event - Free VF resources via IRQ calls
2089  * @pf: pointer to the PF structure
2090  *
2091  * called from the VFLR IRQ handler to
2092  * free up VF resources and state variables
2093  */
2094 void ice_process_vflr_event(struct ice_pf *pf)
2095 {
2096 	struct ice_hw *hw = &pf->hw;
2097 	unsigned int vf_id;
2098 	u32 reg;
2099 
2100 	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2101 	    !pf->num_alloc_vfs)
2102 		return;
2103 
2104 	ice_for_each_vf(pf, vf_id) {
2105 		struct ice_vf *vf = &pf->vf[vf_id];
2106 		u32 reg_idx, bit_idx;
2107 
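		/* GLGEN_VFLRSTAT is an array of 32-bit registers; locate this
		 * VF's register index and bit position within it based on its
		 * absolute (function-wide) VF ID.
		 */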
2108 		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2109 		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
2110 		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
2111 		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
2112 		if (reg & BIT(bit_idx))
2113 			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
2114 			ice_reset_vf(vf, true);
2115 	}
2116 }
2117 
2118 /**
2119  * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
2120  * @vf: pointer to the VF info
2121  */
2122 static void ice_vc_reset_vf(struct ice_vf *vf)
2123 {
2124 	ice_vc_notify_vf_reset(vf);
2125 	ice_reset_vf(vf, false);
2126 }
2127 
2128 /**
2129  * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
2130  * @pf: PF used to index all VFs
2131  * @pfq: queue index relative to the PF's function space
2132  *
2133  * If no VF is found who owns the pfq then return NULL, otherwise return a
2134  * pointer to the VF who owns the pfq
2135  */
2136 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
2137 {
2138 	unsigned int vf_id;
2139 
2140 	ice_for_each_vf(pf, vf_id) {
2141 		struct ice_vf *vf = &pf->vf[vf_id];
2142 		struct ice_vsi *vsi;
2143 		u16 rxq_idx;
2144 
2145 		vsi = ice_get_vf_vsi(vf);
2146 
2147 		ice_for_each_rxq(vsi, rxq_idx)
2148 			if (vsi->rxq_map[rxq_idx] == pfq)
2149 				return vf;
2150 	}
2151 
2152 	return NULL;
2153 }
2154 
2155 /**
2156  * ice_globalq_to_pfq - convert from global queue index to PF space queue index
2157  * @pf: PF used for conversion
2158  * @globalq: global queue index used to convert to PF space queue index
2159  */
2160 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
2161 {
2162 	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
2163 }
2164 
2165 /**
2166  * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
2167  * @pf: PF that the LAN overflow event happened on
2168  * @event: structure holding the event information for the LAN overflow event
2169  *
2170  * Determine if the LAN overflow event was caused by a VF queue. If it was not
2171  * caused by a VF, do nothing. If a VF caused this LAN overflow event,
2172  * trigger a reset on the offending VF.
2173  */
2174 void
2175 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
2176 {
2177 	u32 gldcb_rtctq, queue;
2178 	struct ice_vf *vf;
2179 
2180 	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
2181 	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
2182 
2183 	/* event returns device global Rx queue number */
2184 	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
2185 		GLDCB_RTCTQ_RXQNUM_S;
2186 
2187 	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
2188 	if (!vf)
2189 		return;
2190 
2191 	ice_vc_reset_vf(vf);
2192 }
2193 
2194 /**
2195  * ice_vc_send_msg_to_vf - Send message to VF
2196  * @vf: pointer to the VF info
2197  * @v_opcode: virtual channel opcode
2198  * @v_retval: virtual channel return value
2199  * @msg: pointer to the msg buffer
2200  * @msglen: msg length
2201  *
2202  * send msg to VF
2203  */
2204 int
2205 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
2206 		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
2207 {
2208 	enum ice_status aq_ret;
2209 	struct device *dev;
2210 	struct ice_pf *pf;
2211 
2212 	if (!vf)
2213 		return -EINVAL;
2214 
2215 	pf = vf->pf;
2216 	if (ice_validate_vf_id(pf, vf->vf_id))
2217 		return -EINVAL;
2218 
2219 	dev = ice_pf_to_dev(pf);
2220 
2221 	/* single place to detect unsuccessful return values */
2222 	if (v_retval) {
2223 		vf->num_inval_msgs++;
2224 		dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
2225 			 v_opcode, v_retval);
2226 		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
2227 			dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
2228 				vf->vf_id);
2229 			dev_err(dev, "Use PF Control I/F to enable the VF\n");
2230 			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
2231 			return -EIO;
2232 		}
2233 	} else {
2234 		vf->num_valid_msgs++;
2235 		/* reset the invalid counter if a valid message is received */
2236 		vf->num_inval_msgs = 0;
2237 	}
2238 
2239 	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
2240 				       msg, msglen, NULL);
2241 	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
2242 		dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
2243 			 vf->vf_id, ice_stat_str(aq_ret),
2244 			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
2245 		return -EIO;
2246 	}
2247 
2248 	return 0;
2249 }
2250 
2251 /**
2252  * ice_vc_get_ver_msg
2253  * @vf: pointer to the VF info
2254  * @msg: pointer to the msg buffer
2255  *
2256  * called from the VF to request the API version used by the PF
2257  */
2258 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
2259 {
2260 	struct virtchnl_version_info info = {
2261 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2262 	};
2263 
2264 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
2265 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2266 	if (VF_IS_V10(&vf->vf_ver))
2267 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2268 
2269 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2270 				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
2271 				     sizeof(struct virtchnl_version_info));
2272 }
2273 
2274 /**
2275  * ice_vc_get_max_frame_size - get max frame size allowed for VF
2276  * @vf: VF used to determine max frame size
2277  *
2278  * Max frame size is determined based on the current port's max frame size and
2279  * whether a port VLAN is configured on this VF. The VF is not aware whether
2280  * it's in a port VLAN, so the PF needs to account for this both when checking
2281  * the max frame size and when sending the max frame size to the VF.
2282  */
2283 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
2284 {
2285 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
2286 	u16 max_frame_size;
2287 
2288 	max_frame_size = pi->phy.link_info.max_frame_size;
2289 
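	/* When a port VLAN is configured the PF inserts the VLAN tag on the
	 * VF's behalf, so reserve room for it by shrinking the advertised
	 * max frame size.
	 */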
2290 	if (vf->port_vlan_info)
2291 		max_frame_size -= VLAN_HLEN;
2292 
2293 	return max_frame_size;
2294 }
2295 
2296 /**
2297  * ice_vc_get_vf_res_msg
2298  * @vf: pointer to the VF info
2299  * @msg: pointer to the msg buffer
2300  *
2301  * called from the VF to request its resources
2302  */
2303 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
2304 {
2305 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2306 	struct virtchnl_vf_resource *vfres = NULL;
2307 	struct ice_pf *pf = vf->pf;
2308 	struct ice_vsi *vsi;
2309 	int len = 0;
2310 	int ret;
2311 
2312 	if (ice_check_vf_init(pf, vf)) {
2313 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2314 		goto err;
2315 	}
2316 
2317 	len = sizeof(struct virtchnl_vf_resource);
2318 
2319 	vfres = kzalloc(len, GFP_KERNEL);
2320 	if (!vfres) {
2321 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2322 		len = 0;
2323 		goto err;
2324 	}
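	/* A VF negotiating virtchnl 1.1 or later sends its capability flags
	 * in the message payload; older (1.0) VFs get a fixed legacy set.
	 */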
2325 	if (VF_IS_V11(&vf->vf_ver))
2326 		vf->driver_caps = *(u32 *)msg;
2327 	else
2328 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2329 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2330 				  VIRTCHNL_VF_OFFLOAD_VLAN;
2331 
2332 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2333 	vsi = ice_get_vf_vsi(vf);
2334 	if (!vsi) {
2335 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2336 		goto err;
2337 	}
2338 
2339 	if (!vsi->info.pvid)
2340 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2341 
2342 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2343 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2344 	} else {
2345 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
2346 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2347 		else
2348 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2349 	}
2350 
2351 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
2352 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
2353 
2354 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2355 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2356 
2357 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2358 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2359 
2360 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
2361 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2362 
2363 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
2364 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2365 
2366 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2367 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2368 
2369 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2370 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2371 
2372 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
2373 		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2374 
2375 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
2376 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
2377 
2378 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
2379 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
2380 
2381 	vfres->num_vsis = 1;
2382 	/* Tx and Rx queue counts are equal for the VF */
2383 	vfres->num_queue_pairs = vsi->num_txq;
2384 	vfres->max_vectors = pf->num_msix_per_vf;
2385 	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
2386 	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
2387 	vfres->max_mtu = ice_vc_get_max_frame_size(vf);
2388 
2389 	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
2390 	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2391 	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
2392 	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2393 			vf->hw_lan_addr.addr);
2394 
2395 	/* match guest capabilities */
2396 	vf->driver_caps = vfres->vf_cap_flags;
2397 
2398 	ice_vc_set_caps_allowlist(vf);
2399 	ice_vc_set_working_allowlist(vf);
2400 
2401 	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
2402 
2403 err:
2404 	/* send the response back to the VF */
2405 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
2406 				    (u8 *)vfres, len);
2407 
2408 	kfree(vfres);
2409 	return ret;
2410 }
2411 
2412 /**
2413  * ice_vc_reset_vf_msg
2414  * @vf: pointer to the VF info
2415  *
2416  * called from the VF to reset itself; unlike other virtchnl messages,
2417  * the PF driver doesn't send a response back to the VF
2419  */
2420 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2421 {
2422 	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2423 		ice_reset_vf(vf, false);
2424 }
2425 
2426 /**
2427  * ice_find_vsi_from_id
2428  * @pf: the PF structure to search for the VSI
2429  * @id: ID of the VSI it is searching for
2430  *
2431  * searches for the VSI with the given ID
2432  */
2433 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2434 {
2435 	int i;
2436 
2437 	ice_for_each_vsi(pf, i)
2438 		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2439 			return pf->vsi[i];
2440 
2441 	return NULL;
2442 }
2443 
2444 /**
2445  * ice_vc_isvalid_vsi_id
2446  * @vf: pointer to the VF info
2447  * @vsi_id: VF relative VSI ID
2448  *
2449  * check for the valid VSI ID
2450  */
2451 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2452 {
2453 	struct ice_pf *pf = vf->pf;
2454 	struct ice_vsi *vsi;
2455 
2456 	vsi = ice_find_vsi_from_id(pf, vsi_id);
2457 
2458 	return (vsi && (vsi->vf_id == vf->vf_id));
2459 }
2460 
2461 /**
2462  * ice_vc_isvalid_q_id
2463  * @vf: pointer to the VF info
2464  * @vsi_id: VSI ID
2465  * @qid: VSI relative queue ID
2466  *
2467  * check for the valid queue ID
2468  */
2469 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2470 {
2471 	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2472 	/* allocated Tx and Rx queues should always be equal for a VF VSI */
2473 	return (vsi && (qid < vsi->alloc_txq));
2474 }
2475 
2476 /**
2477  * ice_vc_isvalid_ring_len
2478  * @ring_len: length of ring
2479  *
2480  * check that the ring length is either zero or a multiple of
2481  * ICE_REQ_DESC_MULTIPLE within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC]
2482  */
2483 static bool ice_vc_isvalid_ring_len(u16 ring_len)
2484 {
2485 	return ring_len == 0 ||
2486 	       (ring_len >= ICE_MIN_NUM_DESC &&
2487 		ring_len <= ICE_MAX_NUM_DESC &&
2488 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
2489 }
2490 
2491 /**
2492  * ice_vc_parse_rss_cfg - parses hash fields and headers from
2493  * a specific virtchnl RSS cfg
2494  * @hw: pointer to the hardware
2495  * @rss_cfg: pointer to the virtchnl RSS cfg
2496  * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
2497  * to configure
2498  * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
2499  *
2500  * Return true if all the protocol header and hash fields in the RSS cfg could
2501  * be parsed, else return false
2502  *
2503  * This function parses the virtchnl RSS cfg to be the intended
2504  * hash fields and the intended header for RSS configuration
2505  */
2506 static bool
2507 ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
2508 		     u32 *addl_hdrs, u64 *hash_flds)
2509 {
2510 	const struct ice_vc_hash_field_match_type *hf_list;
2511 	const struct ice_vc_hdr_match_type *hdr_list;
2512 	int i, hf_list_len, hdr_list_len;
2513 
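	/* The active DDP package determines which virtchnl headers and hash
	 * fields can be mapped; the COMMS package supports a larger set of
	 * protocols than the default OS package.
	 */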
2514 	if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
2515 		     sizeof(hw->active_pkg_name))) {
2516 		hf_list = ice_vc_hash_field_list_comms;
2517 		hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
2518 		hdr_list = ice_vc_hdr_list_comms;
2519 		hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
2520 	} else {
2521 		hf_list = ice_vc_hash_field_list_os;
2522 		hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
2523 		hdr_list = ice_vc_hdr_list_os;
2524 		hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
2525 	}
2526 
2527 	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
2528 		struct virtchnl_proto_hdr *proto_hdr =
2529 					&rss_cfg->proto_hdrs.proto_hdr[i];
2530 		bool hdr_found = false;
2531 		int j;
2532 
2533 		/* Find matched ice headers according to virtchnl headers. */
2534 		for (j = 0; j < hdr_list_len; j++) {
2535 			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
2536 
2537 			if (proto_hdr->type == hdr_map.vc_hdr) {
2538 				*addl_hdrs |= hdr_map.ice_hdr;
2539 				hdr_found = true;
2540 			}
2541 		}
2542 
2543 		if (!hdr_found)
2544 			return false;
2545 
2546 		/* Find matched ice hash fields according to
2547 		 * virtchnl hash fields.
2548 		 */
2549 		for (j = 0; j < hf_list_len; j++) {
2550 			struct ice_vc_hash_field_match_type hf_map = hf_list[j];
2551 
2552 			if (proto_hdr->type == hf_map.vc_hdr &&
2553 			    proto_hdr->field_selector == hf_map.vc_hash_field) {
2554 				*hash_flds |= hf_map.ice_hash_field;
2555 				break;
2556 			}
2557 		}
2558 	}
2559 
2560 	return true;
2561 }
2562 
2563 /**
2564  * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
2565  * RSS offloads
2566  * @caps: VF driver negotiated capabilities
2567  *
2568  * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
2569  * else return false
2570  */
2571 static bool ice_vf_adv_rss_offload_ena(u32 caps)
2572 {
2573 	return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
2574 }
2575 
2576 /**
2577  * ice_vc_handle_rss_cfg
2578  * @vf: pointer to the VF info
2579  * @msg: pointer to the message buffer
2580  * @add: add an RSS config if true, otherwise delete an RSS config
2581  *
2582  * This function adds/deletes an RSS config
2583  */
2584 static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
2585 {
2586 	u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
2587 	struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
2588 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2589 	struct device *dev = ice_pf_to_dev(vf->pf);
2590 	struct ice_hw *hw = &vf->pf->hw;
2591 	struct ice_vsi *vsi;
2592 
2593 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2594 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
2595 			vf->vf_id);
2596 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2597 		goto error_param;
2598 	}
2599 
2600 	if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
2601 		dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
2602 			vf->vf_id);
2603 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2604 		goto error_param;
2605 	}
2606 
2607 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2608 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2609 		goto error_param;
2610 	}
2611 
2612 	if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
2613 	    rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
2614 	    rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
2615 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
2616 			vf->vf_id);
2617 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2618 		goto error_param;
2619 	}
2620 
2621 	vsi = ice_get_vf_vsi(vf);
2622 	if (!vsi) {
2623 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2624 		goto error_param;
2625 	}
2626 
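	/* For the simple (XOR) algorithm, hashing is a per-VSI queueing
	 * option and is toggled through a VSI context update; all other
	 * algorithms are programmed as RSS flow profiles below.
	 */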
2627 	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
2628 		struct ice_vsi_ctx *ctx;
2629 		enum ice_status status;
2630 		u8 lut_type, hash_type;
2631 
2632 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
2633 		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
2634 				ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
2635 
2636 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2637 		if (!ctx) {
2638 			v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2639 			goto error_param;
2640 		}
2641 
2642 		ctx->info.q_opt_rss = ((lut_type <<
2643 					ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
2644 				       ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
2645 				       (hash_type &
2646 					ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
2647 
2648 		/* Preserve existing queueing option setting */
2649 		ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
2650 					  ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
2651 		ctx->info.q_opt_tc = vsi->info.q_opt_tc;
2652 		ctx->info.q_opt_flags = vsi->info.q_opt_rss;
2653 
2654 		ctx->info.valid_sections =
2655 				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
2656 
2657 		status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
2658 		if (status) {
2659 			dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
2660 				ice_stat_str(status),
2661 				ice_aq_str(hw->adminq.sq_last_status));
2662 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2663 		} else {
2664 			vsi->info.q_opt_rss = ctx->info.q_opt_rss;
2665 		}
2666 
2667 		kfree(ctx);
2668 	} else {
2669 		u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
2670 		u64 hash_flds = ICE_HASH_INVALID;
2671 
2672 		if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
2673 					  &hash_flds)) {
2674 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2675 			goto error_param;
2676 		}
2677 
2678 		if (add) {
2679 			if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
2680 					    addl_hdrs)) {
2681 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2682 				dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
2683 					vsi->vsi_num, v_ret);
2684 			}
2685 		} else {
2686 			enum ice_status status;
2687 
2688 			status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
2689 						 addl_hdrs);
2690 			/* We just ignore ICE_ERR_DOES_NOT_EXIST, because
2691 			 * if two configurations share the same profile, removing
2692 			 * one of them actually removes both, since the
2693 			 * profile is deleted.
2694 			 */
2695 			if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2696 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2697 				dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
2698 					vf->vf_id, ice_stat_str(status));
2699 			}
2700 		}
2701 	}
2702 
2703 error_param:
2704 	return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
2705 }
2706 
2707 /**
2708  * ice_vc_config_rss_key
2709  * @vf: pointer to the VF info
2710  * @msg: pointer to the msg buffer
2711  *
2712  * Configure the VF's RSS key
2713  */
2714 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2715 {
2716 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2717 	struct virtchnl_rss_key *vrk =
2718 		(struct virtchnl_rss_key *)msg;
2719 	struct ice_vsi *vsi;
2720 
2721 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2722 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2723 		goto error_param;
2724 	}
2725 
2726 	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2727 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2728 		goto error_param;
2729 	}
2730 
2731 	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2732 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2733 		goto error_param;
2734 	}
2735 
2736 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2737 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2738 		goto error_param;
2739 	}
2740 
2741 	vsi = ice_get_vf_vsi(vf);
2742 	if (!vsi) {
2743 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2744 		goto error_param;
2745 	}
2746 
2747 	if (ice_set_rss_key(vsi, vrk->key))
2748 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2749 error_param:
2750 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2751 				     NULL, 0);
2752 }
2753 
2754 /**
2755  * ice_vc_config_rss_lut
2756  * @vf: pointer to the VF info
2757  * @msg: pointer to the msg buffer
2758  *
2759  * Configure the VF's RSS LUT
2760  */
2761 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2762 {
2763 	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2764 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2765 	struct ice_vsi *vsi;
2766 
2767 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2768 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2769 		goto error_param;
2770 	}
2771 
2772 	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2773 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2774 		goto error_param;
2775 	}
2776 
2777 	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2778 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2779 		goto error_param;
2780 	}
2781 
2782 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2783 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2784 		goto error_param;
2785 	}
2786 
2787 	vsi = ice_get_vf_vsi(vf);
2788 	if (!vsi) {
2789 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2790 		goto error_param;
2791 	}
2792 
2793 	if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2794 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2795 error_param:
2796 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2797 				     NULL, 0);
2798 }
2799 
2800 /**
2801  * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2802  * @vf: the VF being reset
2803  *
2804  * The max poll time is ~800ms, which is about the maximum time it takes
2805  * for a VF to be reset and/or a VF driver to be removed.
2806  */
2807 static void ice_wait_on_vf_reset(struct ice_vf *vf)
2808 {
2809 	int i;
2810 
2811 	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2812 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2813 			break;
2814 		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2815 	}
2816 }
2817 
2818 /**
2819  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2820  * @vf: VF to check if it's ready to be configured/queried
2821  *
2822  * The purpose of this function is to make sure the VF is not in reset, not
2823  * disabled, and initialized so it can be configured and/or queried by a host
2824  * administrator.
2825  */
2826 static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2827 {
2828 	struct ice_pf *pf;
2829 
2830 	ice_wait_on_vf_reset(vf);
2831 
2832 	if (ice_is_vf_disabled(vf))
2833 		return -EINVAL;
2834 
2835 	pf = vf->pf;
2836 	if (ice_check_vf_init(pf, vf))
2837 		return -EBUSY;
2838 
2839 	return 0;
2840 }
2841 
2842 /**
2843  * ice_set_vf_spoofchk
2844  * @netdev: network interface device structure
2845  * @vf_id: VF identifier
2846  * @ena: flag to enable or disable feature
2847  *
2848  * Enable or disable VF spoof checking
2849  */
2850 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2851 {
2852 	struct ice_netdev_priv *np = netdev_priv(netdev);
2853 	struct ice_pf *pf = np->vsi->back;
2854 	struct ice_vsi_ctx *ctx;
2855 	struct ice_vsi *vf_vsi;
2856 	enum ice_status status;
2857 	struct device *dev;
2858 	struct ice_vf *vf;
2859 	int ret;
2860 
2861 	dev = ice_pf_to_dev(pf);
2862 	if (ice_validate_vf_id(pf, vf_id))
2863 		return -EINVAL;
2864 
2865 	vf = &pf->vf[vf_id];
2866 	ret = ice_check_vf_ready_for_cfg(vf);
2867 	if (ret)
2868 		return ret;
2869 
2870 	vf_vsi = ice_get_vf_vsi(vf);
2871 	if (!vf_vsi) {
2872 		netdev_err(netdev, "VSI %d for VF %d is null\n",
2873 			   vf->lan_vsi_idx, vf->vf_id);
2874 		return -EINVAL;
2875 	}
2876 
2877 	if (vf_vsi->type != ICE_VSI_VF) {
2878 		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
2879 			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2880 		return -ENODEV;
2881 	}
2882 
2883 	if (ena == vf->spoofchk) {
2884 		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2885 		return 0;
2886 	}
2887 
2888 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2889 	if (!ctx)
2890 		return -ENOMEM;
2891 
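	/* Spoof checking (MAC anti-spoof plus Tx VLAN pruning) lives in the
	 * VSI security section, so only that section is marked valid for
	 * the update.
	 */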
2892 	ctx->info.sec_flags = vf_vsi->info.sec_flags;
2893 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2894 	if (ena) {
2895 		ctx->info.sec_flags |=
2896 			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2897 			(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2898 			 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2899 	} else {
2900 		ctx->info.sec_flags &=
2901 			~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2902 			  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2903 			   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2904 	}
2905 
2906 	status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2907 	if (status) {
2908 		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2909 			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2910 			ice_stat_str(status));
2911 		ret = -EIO;
2912 		goto out;
2913 	}
2914 
2915 	/* only update spoofchk state and VSI context on success */
2916 	vf_vsi->info.sec_flags = ctx->info.sec_flags;
2917 	vf->spoofchk = ena;
2918 
2919 out:
2920 	kfree(ctx);
2921 	return ret;
2922 }
2923 
2924 /**
2925  * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2926  * @pf: PF structure for accessing VF(s)
2927  *
2928  * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2929  * else return true
2930  */
2931 bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2932 {
2933 	int vf_idx;
2934 
2935 	ice_for_each_vf(pf, vf_idx) {
2936 		struct ice_vf *vf = &pf->vf[vf_idx];
2937 
2938 		/* found a VF that has promiscuous mode configured */
2939 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2940 		    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2941 			return true;
2942 	}
2943 
2944 	return false;
2945 }
2946 
2947 /**
2948  * ice_vc_cfg_promiscuous_mode_msg
2949  * @vf: pointer to the VF info
2950  * @msg: pointer to the msg buffer
2951  *
2952  * called from the VF to configure VF VSIs promiscuous mode
2953  */
2954 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2955 {
2956 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2957 	bool rm_promisc, alluni = false, allmulti = false;
2958 	struct virtchnl_promisc_info *info =
2959 	    (struct virtchnl_promisc_info *)msg;
2960 	struct ice_pf *pf = vf->pf;
2961 	struct ice_vsi *vsi;
2962 	struct device *dev;
2963 	int ret = 0;
2964 
2965 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2966 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2967 		goto error_param;
2968 	}
2969 
2970 	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2971 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2972 		goto error_param;
2973 	}
2974 
2975 	vsi = ice_get_vf_vsi(vf);
2976 	if (!vsi) {
2977 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2978 		goto error_param;
2979 	}
2980 
2981 	dev = ice_pf_to_dev(pf);
2982 	if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2983 		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2984 			vf->vf_id);
2985 		/* Leave v_ret alone, lie to the VF on purpose. */
2986 		goto error_param;
2987 	}
2988 
2989 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2990 		alluni = true;
2991 
2992 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2993 		allmulti = true;
2994 
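	/* Promiscuous filtering is removed only when the VF turns off both
	 * unicast and multicast promiscuous mode.
	 */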
2995 	rm_promisc = !allmulti && !alluni;
2996 
2997 	if (vsi->num_vlan || vf->port_vlan_info) {
2998 		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2999 		struct net_device *pf_netdev;
3000 
3001 		if (!pf_vsi) {
3002 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3003 			goto error_param;
3004 		}
3005 
3006 		pf_netdev = pf_vsi->netdev;
3007 
3008 		ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
3009 		if (ret) {
3010 			dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
3011 				rm_promisc ? "ON" : "OFF", vf->vf_id,
3012 				vsi->vsi_num);
3013 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3014 		}
3015 
3016 		ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
3017 		if (ret) {
3018 			dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
3019 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3020 			goto error_param;
3021 		}
3022 	}
3023 
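	/* Without true promiscuous support enabled on the PF, promiscuous
	 * mode is approximated by making this VF's VSI the switch's default
	 * forwarding VSI; otherwise real promiscuous filter rules are used.
	 */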
3024 	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
3025 		bool set_dflt_vsi = alluni || allmulti;
3026 
3027 		if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
3028 			/* only attempt to set the default forwarding VSI if
3029 			 * it's not currently set
3030 			 */
3031 			ret = ice_set_dflt_vsi(pf->first_sw, vsi);
3032 		else if (!set_dflt_vsi &&
3033 			 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
3034 			/* only attempt to free the default forwarding VSI if we
3035 			 * are the owner
3036 			 */
3037 			ret = ice_clear_dflt_vsi(pf->first_sw);
3038 
3039 		if (ret) {
3040 			dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
3041 				set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
3042 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3043 			goto error_param;
3044 		}
3045 	} else {
3046 		enum ice_status status;
3047 		u8 promisc_m;
3048 
3049 		if (alluni) {
3050 			if (vf->port_vlan_info || vsi->num_vlan)
3051 				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
3052 			else
3053 				promisc_m = ICE_UCAST_PROMISC_BITS;
3054 		} else if (allmulti) {
3055 			if (vf->port_vlan_info || vsi->num_vlan)
3056 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
3057 			else
3058 				promisc_m = ICE_MCAST_PROMISC_BITS;
3059 		} else {
3060 			if (vf->port_vlan_info || vsi->num_vlan)
3061 				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
3062 			else
3063 				promisc_m = ICE_UCAST_PROMISC_BITS;
3064 		}
3065 
3066 		/* Configure multicast/unicast with or without VLAN promiscuous
3067 		 * mode
3068 		 */
3069 		status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
3070 		if (status) {
3071 			dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
3072 				rm_promisc ? "dis" : "en", vf->vf_id,
3073 				ice_stat_str(status));
3074 			v_ret = ice_err_to_virt_err(status);
3075 			goto error_param;
3076 		} else {
3077 			dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
3078 				rm_promisc ? "dis" : "en", vf->vf_id);
3079 		}
3080 	}
3081 
3082 	if (allmulti &&
3083 	    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3084 		dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id);
3085 	else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3086 		dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id);
3087 
3088 	if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3089 		dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id);
3090 	else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3091 		dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id);
3092 
3093 error_param:
3094 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3095 				     v_ret, NULL, 0);
3096 }
3097 
3098 /**
3099  * ice_vc_get_stats_msg
3100  * @vf: pointer to the VF info
3101  * @msg: pointer to the msg buffer
3102  *
3103  * called from the VF to get VSI stats
3104  */
3105 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
3106 {
3107 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3108 	struct virtchnl_queue_select *vqs =
3109 		(struct virtchnl_queue_select *)msg;
3110 	struct ice_eth_stats stats = { 0 };
3111 	struct ice_vsi *vsi;
3112 
3113 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3114 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3115 		goto error_param;
3116 	}
3117 
3118 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3119 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3120 		goto error_param;
3121 	}
3122 
3123 	vsi = ice_get_vf_vsi(vf);
3124 	if (!vsi) {
3125 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3126 		goto error_param;
3127 	}
3128 
3129 	ice_update_eth_stats(vsi);
3130 
3131 	stats = vsi->eth_stats;
3132 
3133 error_param:
3134 	/* send the response to the VF */
3135 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
3136 				     (u8 *)&stats, sizeof(stats));
3137 }
3138 
3139 /**
3140  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
3141  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
3142  *
3143  * Return true on successful validation, else false
3144  */
3145 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
3146 {
3147 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
3148 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
3149 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
3150 		return false;
3151 
3152 	return true;
3153 }
3154 
3155 /**
3156  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
3157  * @vsi: VSI of the VF to configure
3158  * @q_idx: VF queue index used to determine the queue in the PF's space
3159  */
3160 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3161 {
3162 	struct ice_hw *hw = &vsi->back->hw;
3163 	u32 pfq = vsi->txq_map[q_idx];
3164 	u32 reg;
3165 
3166 	reg = rd32(hw, QINT_TQCTL(pfq));
3167 
3168 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
3169 	 * this is most likely a poll mode VF driver, so don't enable an
3170 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3171 	 */
3172 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
3173 		return;
3174 
3175 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
3176 }
3177 
3178 /**
3179  * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
3180  * @vsi: VSI of the VF to configure
3181  * @q_idx: VF queue index used to determine the queue in the PF's space
3182  */
3183 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3184 {
3185 	struct ice_hw *hw = &vsi->back->hw;
3186 	u32 pfq = vsi->rxq_map[q_idx];
3187 	u32 reg;
3188 
3189 	reg = rd32(hw, QINT_RQCTL(pfq));
3190 
3191 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
3192 	 * this is most likely a poll mode VF driver, so don't enable an
3193 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3194 	 */
3195 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
3196 		return;
3197 
3198 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
3199 }
3200 
3201 /**
3202  * ice_vc_ena_qs_msg
3203  * @vf: pointer to the VF info
3204  * @msg: pointer to the msg buffer
3205  *
3206  * called from the VF to enable all or specific queue(s)
3207  */
3208 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
3209 {
3210 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3211 	struct virtchnl_queue_select *vqs =
3212 	    (struct virtchnl_queue_select *)msg;
3213 	struct ice_vsi *vsi;
3214 	unsigned long q_map;
3215 	u16 vf_q_id;
3216 
3217 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3218 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3219 		goto error_param;
3220 	}
3221 
3222 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3223 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3224 		goto error_param;
3225 	}
3226 
3227 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3228 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3229 		goto error_param;
3230 	}
3231 
3232 	vsi = ice_get_vf_vsi(vf);
3233 	if (!vsi) {
3234 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3235 		goto error_param;
3236 	}
3237 
3238 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
3239 	 * Tx queue group list was configured and the context bits were
3240 	 * programmed using ice_vsi_cfg_txqs
3241 	 */
3242 	q_map = vqs->rx_queues;
3243 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3244 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3245 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3246 			goto error_param;
3247 		}
3248 
3249 		/* Skip queue if enabled */
3250 		if (test_bit(vf_q_id, vf->rxq_ena))
3251 			continue;
3252 
3253 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
3254 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
3255 				vf_q_id, vsi->vsi_num);
3256 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3257 			goto error_param;
3258 		}
3259 
3260 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
3261 		set_bit(vf_q_id, vf->rxq_ena);
3262 	}
3263 
3264 	q_map = vqs->tx_queues;
3265 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3266 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3267 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3268 			goto error_param;
3269 		}
3270 
3271 		/* Skip queue if enabled */
3272 		if (test_bit(vf_q_id, vf->txq_ena))
3273 			continue;
3274 
3275 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
3276 		set_bit(vf_q_id, vf->txq_ena);
3277 	}
3278 
3279 	/* Set flag to indicate that queues are enabled */
3280 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
3281 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3282 
3283 error_param:
3284 	/* send the response to the VF */
3285 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
3286 				     NULL, 0);
3287 }
3288 
3289 /**
3290  * ice_vc_dis_qs_msg
3291  * @vf: pointer to the VF info
3292  * @msg: pointer to the msg buffer
3293  *
3294  * called from the VF to disable all or specific
3295  * queue(s)
3296  */
3297 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
3298 {
3299 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3300 	struct virtchnl_queue_select *vqs =
3301 	    (struct virtchnl_queue_select *)msg;
3302 	struct ice_vsi *vsi;
3303 	unsigned long q_map;
3304 	u16 vf_q_id;
3305 
3306 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
3307 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
3308 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3309 		goto error_param;
3310 	}
3311 
3312 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3313 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3314 		goto error_param;
3315 	}
3316 
3317 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3318 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3319 		goto error_param;
3320 	}
3321 
3322 	vsi = ice_get_vf_vsi(vf);
3323 	if (!vsi) {
3324 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3325 		goto error_param;
3326 	}
3327 
3328 	if (vqs->tx_queues) {
3329 		q_map = vqs->tx_queues;
3330 
3331 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3332 			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
3333 			struct ice_txq_meta txq_meta = { 0 };
3334 
3335 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3336 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3337 				goto error_param;
3338 			}
3339 
3340 			/* Skip queue if not enabled */
3341 			if (!test_bit(vf_q_id, vf->txq_ena))
3342 				continue;
3343 
3344 			ice_fill_txq_meta(vsi, ring, &txq_meta);
3345 
3346 			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
3347 						 ring, &txq_meta)) {
3348 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
3349 					vf_q_id, vsi->vsi_num);
3350 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3351 				goto error_param;
3352 			}
3353 
3354 			/* Clear enabled queues flag */
3355 			clear_bit(vf_q_id, vf->txq_ena);
3356 		}
3357 	}
3358 
3359 	q_map = vqs->rx_queues;
3360 	/* speed up Rx queue disable by batching them if possible */
3361 	if (q_map &&
3362 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
3363 		if (ice_vsi_stop_all_rx_rings(vsi)) {
3364 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
3365 				vsi->vsi_num);
3366 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367 			goto error_param;
3368 		}
3369 
3370 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
3371 	} else if (q_map) {
3372 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3373 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3374 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3375 				goto error_param;
3376 			}
3377 
3378 			/* Skip queue if not enabled */
3379 			if (!test_bit(vf_q_id, vf->rxq_ena))
3380 				continue;
3381 
3382 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
3383 						     true)) {
3384 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
3385 					vf_q_id, vsi->vsi_num);
3386 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3387 				goto error_param;
3388 			}
3389 
3390 			/* Clear enabled queues flag */
3391 			clear_bit(vf_q_id, vf->rxq_ena);
3392 		}
3393 	}
3394 
3395 	/* Clear enabled queues flag */
3396 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
3397 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3398 
3399 error_param:
3400 	/* send the response to the VF */
3401 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
3402 				     NULL, 0);
3403 }
3404 
3405 /**
3406  * ice_cfg_interrupt
3407  * @vf: pointer to the VF info
3408  * @vsi: the VSI being configured
3409  * @vector_id: vector ID
3410  * @map: vector map for mapping vectors to queues
3411  * @q_vector: structure for interrupt vector
3412  * configure the IRQ to queue map
3413  */
3414 static int
3415 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
3416 		  struct virtchnl_vector_map *map,
3417 		  struct ice_q_vector *q_vector)
3418 {
3419 	u16 vsi_q_id, vsi_q_id_idx;
3420 	unsigned long qmap;
3421 
3422 	q_vector->num_ring_rx = 0;
3423 	q_vector->num_ring_tx = 0;
3424 
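	/* Walk the Rx and Tx queue bitmaps supplied in the VF's vector map,
	 * validate each queue ID, and program the queue-to-vector mapping
	 * registers for this interrupt vector.
	 */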
3425 	qmap = map->rxq_map;
3426 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3427 		vsi_q_id = vsi_q_id_idx;
3428 
3429 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3430 			return VIRTCHNL_STATUS_ERR_PARAM;
3431 
3432 		q_vector->num_ring_rx++;
3433 		q_vector->rx.itr_idx = map->rxitr_idx;
3434 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
3435 		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
3436 				      q_vector->rx.itr_idx);
3437 	}
3438 
3439 	qmap = map->txq_map;
3440 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3441 		vsi_q_id = vsi_q_id_idx;
3442 
3443 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3444 			return VIRTCHNL_STATUS_ERR_PARAM;
3445 
3446 		q_vector->num_ring_tx++;
3447 		q_vector->tx.itr_idx = map->txitr_idx;
3448 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
3449 		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
3450 				      q_vector->tx.itr_idx);
3451 	}
3452 
3453 	return VIRTCHNL_STATUS_SUCCESS;
3454 }
3455 
3456 /**
3457  * ice_vc_cfg_irq_map_msg
3458  * @vf: pointer to the VF info
3459  * @msg: pointer to the msg buffer
3460  *
3461  * called from the VF to configure the IRQ to queue map
3462  */
3463 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
3464 {
3465 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3466 	u16 num_q_vectors_mapped, vsi_id, vector_id;
3467 	struct virtchnl_irq_map_info *irqmap_info;
3468 	struct virtchnl_vector_map *map;
3469 	struct ice_pf *pf = vf->pf;
3470 	struct ice_vsi *vsi;
3471 	int i;
3472 
3473 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
3474 	num_q_vectors_mapped = irqmap_info->num_vectors;
3475 
3476 	/* Check to make sure number of VF vectors mapped is not greater than
3477 	 * number of VF vectors originally allocated, and check that
3478 	 * there is actually at least a single VF queue vector mapped
3479 	 */
3480 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3481 	    pf->num_msix_per_vf < num_q_vectors_mapped ||
3482 	    !num_q_vectors_mapped) {
3483 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3484 		goto error_param;
3485 	}
3486 
3487 	vsi = ice_get_vf_vsi(vf);
3488 	if (!vsi) {
3489 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3490 		goto error_param;
3491 	}
3492 
3493 	for (i = 0; i < num_q_vectors_mapped; i++) {
3494 		struct ice_q_vector *q_vector;
3495 
3496 		map = &irqmap_info->vecmap[i];
3497 
3498 		vector_id = map->vector_id;
3499 		vsi_id = map->vsi_id;
3500 		/* vector_id is always 0-based for each VF, and can never be
3501 		 * larger than or equal to the max allowed interrupts per VF
3502 		 */
3503 		if (!(vector_id < pf->num_msix_per_vf) ||
3504 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
3505 		    (!vector_id && (map->rxq_map || map->txq_map))) {
3506 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3507 			goto error_param;
3508 		}
3509 
3510 		/* No need to map VF miscellaneous or rogue vector */
3511 		if (!vector_id)
3512 			continue;
3513 
3514 		/* Subtract the non-queue vector from the vector_id passed by
3515 		 * the VF to get the actual VSI queue vector array index
3516 		 */
3517 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
3518 		if (!q_vector) {
3519 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3520 			goto error_param;
3521 		}
3522 
3523 		/* check for an invalid queue index */
3524 		v_ret = (enum virtchnl_status_code)
3525 			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
3526 		if (v_ret)
3527 			goto error_param;
3528 	}
3529 
3530 error_param:
3531 	/* send the response to the VF */
3532 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
3533 				     NULL, 0);
3534 }
3535 
3536 /**
3537  * ice_vc_cfg_qs_msg
3538  * @vf: pointer to the VF info
3539  * @msg: pointer to the msg buffer
3540  *
3541  * called from the VF to configure the Rx/Tx queues
3542  */
3543 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
3544 {
3545 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3546 	struct virtchnl_vsi_queue_config_info *qci =
3547 	    (struct virtchnl_vsi_queue_config_info *)msg;
3548 	struct virtchnl_queue_pair_info *qpi;
3549 	struct ice_pf *pf = vf->pf;
3550 	struct ice_vsi *vsi;
3551 	int i, q_idx;
3552 
3553 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3554 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3555 		goto error_param;
3556 	}
3557 
3558 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
3559 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3560 		goto error_param;
3561 	}
3562 
3563 	vsi = ice_get_vf_vsi(vf);
3564 	if (!vsi) {
3565 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3566 		goto error_param;
3567 	}
3568 
3569 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
3570 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
3571 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
3572 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3573 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3574 		goto error_param;
3575 	}
3576 
3577 	for (i = 0; i < qci->num_queue_pairs; i++) {
3578 		qpi = &qci->qpair[i];
3579 		if (qpi->txq.vsi_id != qci->vsi_id ||
3580 		    qpi->rxq.vsi_id != qci->vsi_id ||
3581 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
3582 		    qpi->txq.headwb_enabled ||
3583 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
3584 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
3585 		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
3586 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3587 			goto error_param;
3588 		}
3589 
3590 		q_idx = qpi->rxq.queue_id;
3591 
3592 		/* make sure selected "q_idx" is in valid range of queues
3593 		 * for selected "vsi"
3594 		 */
3595 		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
3596 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3597 			goto error_param;
3598 		}
3599 
3600 		/* copy Tx queue info from VF into VSI */
3601 		if (qpi->txq.ring_len > 0) {
3602 			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
3603 			vsi->tx_rings[i]->count = qpi->txq.ring_len;
3604 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
3605 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3606 				goto error_param;
3607 			}
3608 		}
3609 
3610 		/* copy Rx queue info from VF into VSI */
3611 		if (qpi->rxq.ring_len > 0) {
3612 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
3613 
3614 			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
3615 			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
3616 
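			/* a databuffer_size of 0 means "use the driver default";
			 * otherwise it must fall within the range supported by
			 * the hardware: at least 1 KB and at most 16 KB - 128 B
			 */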
3617 			if (qpi->rxq.databuffer_size != 0 &&
3618 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
3619 			     qpi->rxq.databuffer_size < 1024)) {
3620 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3621 				goto error_param;
3622 			}
3623 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
3624 			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
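			/* the requested max packet size must be at least a
			 * minimum-sized Ethernet frame (64 bytes) and no larger
			 * than what this VF is allowed to receive
			 */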
3625 			if (qpi->rxq.max_pkt_size > max_frame_size ||
3626 			    qpi->rxq.max_pkt_size < 64) {
3627 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3628 				goto error_param;
3629 			}
3630 
3631 			vsi->max_frame = qpi->rxq.max_pkt_size;
3632 			/* add space for the port VLAN since the VF driver is not
3633 			 * expected to account for it in the MTU calculation
3634 			 */
3635 			if (vf->port_vlan_info)
3636 				vsi->max_frame += VLAN_HLEN;
3637 
3638 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
3639 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3640 				goto error_param;
3641 			}
3642 		}
3643 	}
3644 
3645 error_param:
3646 	/* send the response to the VF */
3647 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3648 				     NULL, 0);
3649 }
3650 
3651 /**
3652  * ice_is_vf_trusted
3653  * @vf: pointer to the VF info
3654  */
3655 static bool ice_is_vf_trusted(struct ice_vf *vf)
3656 {
3657 	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3658 }
3659 
3660 /**
3661  * ice_can_vf_change_mac
3662  * @vf: pointer to the VF info
3663  *
3664  * Return true if the VF is allowed to change its MAC filters, false otherwise
3665  */
3666 static bool ice_can_vf_change_mac(struct ice_vf *vf)
3667 {
3668 	/* If the VF MAC address has been set administratively (via the
3669 	 * ndo_set_vf_mac command), then deny permission to the VF to
3670 	 * add/delete unicast MAC addresses, unless the VF is trusted
3671 	 */
3672 	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3673 		return false;
3674 
3675 	return true;
3676 }
3677 
3678 /**
3679  * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
3680  * @vc_ether_addr: used to extract the type
3681  */
3682 static u8
3683 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
3684 {
3685 	return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
3686 }
3687 
3688 /**
3689  * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
3690  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
3691  */
3692 static bool
3693 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
3694 {
3695 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
3696 
3697 	return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
3698 }
3699 
3700 /**
3701  * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
3702  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
3703  *
3704  * This function should only be called when the MAC address in
3705  * virtchnl_ether_addr is a valid unicast MAC
3706  */
3707 static bool
3708 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
3709 {
3710 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
3711 
3712 	return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
3713 }
3714 
3715 /**
3716  * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
3717  * @vf: VF to update
3718  * @vc_ether_addr: structure from VIRTCHNL with MAC to add
3719  */
3720 static void
3721 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
3722 {
3723 	u8 *mac_addr = vc_ether_addr->addr;
3724 
3725 	if (!is_valid_ether_addr(mac_addr))
3726 		return;
3727 
3728 	/* only allow legacy VF drivers to set the device and hardware MAC if it
3729 	 * is zero and allow new VF drivers to set the hardware MAC if the type
3730 	 * was correctly specified over VIRTCHNL
3731 	 */
3732 	if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
3733 	     is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
3734 	    ice_is_vc_addr_primary(vc_ether_addr)) {
3735 		ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
3736 		ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
3737 	}
3738 
3739 	/* hardware and device MACs are already set, but it's possible that the
3740 	 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
3741 	 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
3742 	 * away for the legacy VF driver case as it will be updated in the
3743 	 * delete flow for this case
3744 	 */
3745 	if (ice_is_vc_addr_legacy(vc_ether_addr)) {
3746 		ether_addr_copy(vf->legacy_last_added_umac.addr,
3747 				mac_addr);
3748 		vf->legacy_last_added_umac.time_modified = jiffies;
3749 	}
3750 }
3751 
3752 /**
3753  * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3754  * @vf: pointer to the VF info
3755  * @vsi: pointer to the VF's VSI
3756  * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
3757  */
3758 static int
3759 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
3760 		    struct virtchnl_ether_addr *vc_ether_addr)
3761 {
3762 	struct device *dev = ice_pf_to_dev(vf->pf);
3763 	u8 *mac_addr = vc_ether_addr->addr;
3764 	enum ice_status status;
3765 
3766 	/* device MAC already added */
3767 	if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
3768 		return 0;
3769 
3770 	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3771 		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3772 		return -EPERM;
3773 	}
3774 
3775 	status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3776 	if (status == ICE_ERR_ALREADY_EXISTS) {
3777 		dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3778 			vf->vf_id);
3779 		return -EEXIST;
3780 	} else if (status) {
3781 		dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3782 			mac_addr, vf->vf_id, ice_stat_str(status));
3783 		return -EIO;
3784 	}
3785 
3786 	ice_vfhw_mac_add(vf, vc_ether_addr);
3787 
3788 	vf->num_mac++;
3789 
3790 	return 0;
3791 }
3792 
3793 /**
3794  * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
3795  * @last_added_umac: structure used to check expiration
3796  */
3797 static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
3798 {
3799 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME	msecs_to_jiffies(3000)
3800 	return time_is_before_jiffies(last_added_umac->time_modified +
3801 				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
3802 }
3803 
3804 /**
3805  * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
3806  * @vf: VF to update
3807  * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
3808  */
3809 static void
3810 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
3811 {
3812 	u8 *mac_addr = vc_ether_addr->addr;
3813 
3814 	if (!is_valid_ether_addr(mac_addr) ||
3815 	    !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
3816 		return;
3817 
3818 	/* allow the device MAC to be repopulated in the add flow and don't
3819 	 * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
3820 	 * to be persistent on VM reboot and across driver unload/load, which
3821 	 * won't work if we clear the hardware MAC here
3822 	 */
3823 	eth_zero_addr(vf->dev_lan_addr.addr);
3824 
3825 	/* only update cached hardware MAC for legacy VF drivers on delete
3826 	 * because we cannot guarantee order/type of MAC from the VF driver
3827 	 */
3828 	if (ice_is_vc_addr_legacy(vc_ether_addr) &&
3829 	    !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
3830 		ether_addr_copy(vf->dev_lan_addr.addr,
3831 				vf->legacy_last_added_umac.addr);
3832 		ether_addr_copy(vf->hw_lan_addr.addr,
3833 				vf->legacy_last_added_umac.addr);
3834 	}
3835 }
3836 
3837 /**
3838  * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3839  * @vf: pointer to the VF info
3840  * @vsi: pointer to the VF's VSI
3841  * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
3842  */
3843 static int
3844 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
3845 		    struct virtchnl_ether_addr *vc_ether_addr)
3846 {
3847 	struct device *dev = ice_pf_to_dev(vf->pf);
3848 	u8 *mac_addr = vc_ether_addr->addr;
3849 	enum ice_status status;
3850 
3851 	if (!ice_can_vf_change_mac(vf) &&
3852 	    ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
3853 		return 0;
3854 
3855 	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3856 	if (status == ICE_ERR_DOES_NOT_EXIST) {
3857 		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3858 			vf->vf_id);
3859 		return -ENOENT;
3860 	} else if (status) {
3861 		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3862 			mac_addr, vf->vf_id, ice_stat_str(status));
3863 		return -EIO;
3864 	}
3865 
3866 	ice_vfhw_mac_del(vf, vc_ether_addr);
3867 
3868 	vf->num_mac--;
3869 
3870 	return 0;
3871 }
3872 
3873 /**
3874  * ice_vc_handle_mac_addr_msg
3875  * @vf: pointer to the VF info
3876  * @msg: pointer to the msg buffer
3877  * @set: true if MAC filters are being set, false otherwise
3878  *
3879  * add guest MAC address filter
3880  */
3881 static int
3882 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3883 {
3884 	int (*ice_vc_cfg_mac)
3885 		(struct ice_vf *vf, struct ice_vsi *vsi,
3886 		 struct virtchnl_ether_addr *virtchnl_ether_addr);
3887 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3888 	struct virtchnl_ether_addr_list *al =
3889 	    (struct virtchnl_ether_addr_list *)msg;
3890 	struct ice_pf *pf = vf->pf;
3891 	enum virtchnl_ops vc_op;
3892 	struct ice_vsi *vsi;
3893 	int i;
3894 
3895 	if (set) {
3896 		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3897 		ice_vc_cfg_mac = ice_vc_add_mac_addr;
3898 	} else {
3899 		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3900 		ice_vc_cfg_mac = ice_vc_del_mac_addr;
3901 	}
3902 
3903 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3904 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3905 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3906 		goto handle_mac_exit;
3907 	}
3908 
3909 	/* If this VF is not privileged, then we can't add more than a
3910 	 * limited number of addresses. Check to make sure that the
3911 	 * additions do not push us over the limit.
3912 	 */
3913 	if (set && !ice_is_vf_trusted(vf) &&
3914 	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3915 		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
3916 			vf->vf_id);
3917 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3918 		goto handle_mac_exit;
3919 	}
3920 
3921 	vsi = ice_get_vf_vsi(vf);
3922 	if (!vsi) {
3923 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3924 		goto handle_mac_exit;
3925 	}
3926 
3927 	for (i = 0; i < al->num_elements; i++) {
3928 		u8 *mac_addr = al->list[i].addr;
3929 		int result;
3930 
3931 		if (is_broadcast_ether_addr(mac_addr) ||
3932 		    is_zero_ether_addr(mac_addr))
3933 			continue;
3934 
3935 		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
3936 		if (result == -EEXIST || result == -ENOENT) {
3937 			continue;
3938 		} else if (result) {
3939 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3940 			goto handle_mac_exit;
3941 		}
3942 	}
3943 
3944 handle_mac_exit:
3945 	/* send the response to the VF */
3946 	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3947 }
3948 
3949 /**
3950  * ice_vc_add_mac_addr_msg
3951  * @vf: pointer to the VF info
3952  * @msg: pointer to the msg buffer
3953  *
3954  * add guest MAC address filter
3955  */
3956 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3957 {
3958 	return ice_vc_handle_mac_addr_msg(vf, msg, true);
3959 }
3960 
3961 /**
3962  * ice_vc_del_mac_addr_msg
3963  * @vf: pointer to the VF info
3964  * @msg: pointer to the msg buffer
3965  *
3966  * remove guest MAC address filter
3967  */
3968 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3969 {
3970 	return ice_vc_handle_mac_addr_msg(vf, msg, false);
3971 }
3972 
3973 /**
3974  * ice_vc_request_qs_msg
3975  * @vf: pointer to the VF info
3976  * @msg: pointer to the msg buffer
3977  *
3978  * VFs get a default number of queues but can use this message to request a
3979  * different number. If the request is successful, PF will reset the VF and
3980  * return 0. If unsuccessful, PF will send message informing VF of number of
3981  * available queue pairs via virtchnl message response to VF.
3982  */
3983 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3984 {
3985 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3986 	struct virtchnl_vf_res_request *vfres =
3987 		(struct virtchnl_vf_res_request *)msg;
3988 	u16 req_queues = vfres->num_queue_pairs;
3989 	struct ice_pf *pf = vf->pf;
3990 	u16 max_allowed_vf_queues;
3991 	u16 tx_rx_queue_left;
3992 	struct device *dev;
3993 	u16 cur_queues;
3994 
3995 	dev = ice_pf_to_dev(pf);
3996 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3997 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3998 		goto error_param;
3999 	}
4000 
4001 	cur_queues = vf->num_vf_qs;
4002 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
4003 				 ice_get_avail_rxq_count(pf));
4004 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
4005 	if (!req_queues) {
4006 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
4007 			vf->vf_id);
4008 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
4009 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
4010 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
4011 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
4012 	} else if (req_queues > cur_queues &&
4013 		   req_queues - cur_queues > tx_rx_queue_left) {
4014 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
4015 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
4016 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
4017 					       ICE_MAX_RSS_QS_PER_VF);
4018 	} else {
4019 		/* request is successful, then reset VF */
4020 		vf->num_req_qs = req_queues;
4021 		ice_vc_reset_vf(vf);
4022 		dev_info(dev, "VF %d granted request of %u queues.\n",
4023 			 vf->vf_id, req_queues);
4024 		return 0;
4025 	}
4026 
4027 error_param:
4028 	/* send the response to the VF */
4029 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
4030 				     v_ret, (u8 *)vfres, sizeof(*vfres));
4031 }
4032 
4033 /**
4034  * ice_set_vf_port_vlan
4035  * @netdev: network interface device structure
4036  * @vf_id: VF identifier
4037  * @vlan_id: VLAN ID being set
4038  * @qos: priority setting
4039  * @vlan_proto: VLAN protocol
4040  *
4041  * program VF Port VLAN ID and/or QoS
4042  */
4043 int
4044 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
4045 		     __be16 vlan_proto)
4046 {
4047 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
4048 	struct device *dev;
4049 	struct ice_vf *vf;
4050 	u16 vlanprio;
4051 	int ret;
4052 
4053 	dev = ice_pf_to_dev(pf);
4054 	if (ice_validate_vf_id(pf, vf_id))
4055 		return -EINVAL;
4056 
4057 	if (vlan_id >= VLAN_N_VID || qos > 7) {
4058 		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
4059 			vf_id, vlan_id, qos);
4060 		return -EINVAL;
4061 	}
4062 
4063 	if (vlan_proto != htons(ETH_P_8021Q)) {
4064 		dev_err(dev, "VF VLAN protocol is not supported\n");
4065 		return -EPROTONOSUPPORT;
4066 	}
4067 
4068 	vf = &pf->vf[vf_id];
4069 	ret = ice_check_vf_ready_for_cfg(vf);
4070 	if (ret)
4071 		return ret;
4072 
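	/* pack the VLAN ID and priority into a single TCI-style value:
	 * VLAN ID in the low 12 bits, QoS/PCP in the upper 3 bits
	 */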
4073 	vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
4074 
4075 	if (vf->port_vlan_info == vlanprio) {
4076 		/* duplicate request, so just return success */
4077 		dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
4078 		return 0;
4079 	}
4080 
4081 	vf->port_vlan_info = vlanprio;
4082 
4083 	if (vf->port_vlan_info)
4084 		dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
4085 			 vlan_id, qos, vf_id);
4086 	else
4087 		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
4088 
4089 	ice_vc_reset_vf(vf);
4090 
4091 	return 0;
4092 }
4093 
4094 /**
4095  * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
4096  * @caps: VF driver negotiated capabilities
4097  *
4098  * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
4099  */
4100 static bool ice_vf_vlan_offload_ena(u32 caps)
4101 {
4102 	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
4103 }
4104 
4105 /**
4106  * ice_vc_process_vlan_msg
4107  * @vf: pointer to the VF info
4108  * @msg: pointer to the msg buffer
4109  * @add_v: Add VLAN if true, otherwise delete VLAN
4110  *
4111  * Process virtchnl op to add or remove programmed guest VLAN ID
4112  */
4113 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
4114 {
4115 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4116 	struct virtchnl_vlan_filter_list *vfl =
4117 	    (struct virtchnl_vlan_filter_list *)msg;
4118 	struct ice_pf *pf = vf->pf;
4119 	bool vlan_promisc = false;
4120 	struct ice_vsi *vsi;
4121 	struct device *dev;
4122 	struct ice_hw *hw;
4123 	int status = 0;
4124 	u8 promisc_m;
4125 	int i;
4126 
4127 	dev = ice_pf_to_dev(pf);
4128 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4129 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4130 		goto error_param;
4131 	}
4132 
4133 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4134 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4135 		goto error_param;
4136 	}
4137 
4138 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
4139 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4140 		goto error_param;
4141 	}
4142 
4143 	for (i = 0; i < vfl->num_elements; i++) {
4144 		if (vfl->vlan_id[i] >= VLAN_N_VID) {
4145 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4146 			dev_err(dev, "invalid VF VLAN id %d\n",
4147 				vfl->vlan_id[i]);
4148 			goto error_param;
4149 		}
4150 	}
4151 
4152 	hw = &pf->hw;
4153 	vsi = ice_get_vf_vsi(vf);
4154 	if (!vsi) {
4155 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4156 		goto error_param;
4157 	}
4158 
4159 	if (add_v && !ice_is_vf_trusted(vf) &&
4160 	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
4161 		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
4162 			 vf->vf_id);
4163 		/* There is no need to let VF know about being not trusted,
4164 		 * so we can just return success message here
4165 		 */
4166 		goto error_param;
4167 	}
4168 
4169 	if (vsi->info.pvid) {
4170 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4171 		goto error_param;
4172 	}
4173 
4174 	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
4175 	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
4176 	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
4177 		vlan_promisc = true;
4178 
4179 	if (add_v) {
4180 		for (i = 0; i < vfl->num_elements; i++) {
4181 			u16 vid = vfl->vlan_id[i];
4182 
4183 			if (!ice_is_vf_trusted(vf) &&
4184 			    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
4185 				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
4186 					 vf->vf_id);
4187 				/* There is no need to let VF know about being
4188 				 * not trusted, so we can just return success
4189 				 * message here as well.
4190 				 */
4191 				goto error_param;
4192 			}
4193 
4194 			/* we add VLAN 0 by default for each VF so we can enable
4195 			 * Tx VLAN anti-spoof without triggering MDD events so
4196 			 * we don't need to add it again here
4197 			 */
4198 			if (!vid)
4199 				continue;
4200 
4201 			status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
4202 			if (status) {
4203 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4204 				goto error_param;
4205 			}
4206 
4207 			/* Enable VLAN pruning when non-zero VLAN is added */
4208 			if (!vlan_promisc && vid &&
4209 			    !ice_vsi_is_vlan_pruning_ena(vsi)) {
4210 				status = ice_cfg_vlan_pruning(vsi, true, false);
4211 				if (status) {
4212 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4213 					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
4214 						vid, status);
4215 					goto error_param;
4216 				}
4217 			} else if (vlan_promisc) {
4218 				/* Enable Ucast/Mcast VLAN promiscuous mode */
4219 				promisc_m = ICE_PROMISC_VLAN_TX |
4220 					    ICE_PROMISC_VLAN_RX;
4221 
4222 				status = ice_set_vsi_promisc(hw, vsi->idx,
4223 							     promisc_m, vid);
4224 				if (status) {
4225 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4226 					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
4227 						vid, status);
4228 				}
4229 			}
4230 		}
4231 	} else {
4232 		/* In case of a non-trusted VF, the number of VLAN elements passed
4233 		 * to the PF for removal might be greater than the number of VLAN
4234 		 * filters programmed for that VF, so use the actual number of
4235 		 * VLANs added earlier with the add VLAN opcode. This avoids
4236 		 * removing a VLAN that doesn't exist, which would result in
4237 		 * sending an erroneous failure message back to the VF.
4238 		 */
4239 		int num_vf_vlan;
4240 
4241 		num_vf_vlan = vsi->num_vlan;
4242 		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
4243 			u16 vid = vfl->vlan_id[i];
4244 
4245 			/* we add VLAN 0 by default for each VF so we can enable
4246 			 * Tx VLAN anti-spoof without triggering MDD events so
4247 			 * we don't want a VIRTCHNL request to remove it
4248 			 */
4249 			if (!vid)
4250 				continue;
4251 
4252 			/* Make sure ice_vsi_kill_vlan is successful before
4253 			 * updating VLAN information
4254 			 */
4255 			status = ice_vsi_kill_vlan(vsi, vid);
4256 			if (status) {
4257 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4258 				goto error_param;
4259 			}
4260 
4261 			/* Disable VLAN pruning when only VLAN 0 is left */
4262 			if (vsi->num_vlan == 1 &&
4263 			    ice_vsi_is_vlan_pruning_ena(vsi))
4264 				ice_cfg_vlan_pruning(vsi, false, false);
4265 
4266 			/* Disable Unicast/Multicast VLAN promiscuous mode */
4267 			if (vlan_promisc) {
4268 				promisc_m = ICE_PROMISC_VLAN_TX |
4269 					    ICE_PROMISC_VLAN_RX;
4270 
4271 				ice_clear_vsi_promisc(hw, vsi->idx,
4272 						      promisc_m, vid);
4273 			}
4274 		}
4275 	}
4276 
4277 error_param:
4278 	/* send the response to the VF */
4279 	if (add_v)
4280 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
4281 					     NULL, 0);
4282 	else
4283 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
4284 					     NULL, 0);
4285 }
4286 
4287 /**
4288  * ice_vc_add_vlan_msg
4289  * @vf: pointer to the VF info
4290  * @msg: pointer to the msg buffer
4291  *
4292  * Add and program guest VLAN ID
4293  */
4294 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
4295 {
4296 	return ice_vc_process_vlan_msg(vf, msg, true);
4297 }
4298 
4299 /**
4300  * ice_vc_remove_vlan_msg
4301  * @vf: pointer to the VF info
4302  * @msg: pointer to the msg buffer
4303  *
4304  * remove programmed guest VLAN ID
4305  */
4306 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
4307 {
4308 	return ice_vc_process_vlan_msg(vf, msg, false);
4309 }
4310 
4311 /**
4312  * ice_vc_ena_vlan_stripping
4313  * @vf: pointer to the VF info
4314  *
4315  * Enable VLAN header stripping for a given VF
4316  */
4317 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
4318 {
4319 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4320 	struct ice_vsi *vsi;
4321 
4322 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4323 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4324 		goto error_param;
4325 	}
4326 
4327 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4328 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4329 		goto error_param;
4330 	}
4331 
4332 	vsi = ice_get_vf_vsi(vf);
4333 	if (ice_vsi_manage_vlan_stripping(vsi, true))
4334 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4335 
4336 error_param:
4337 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
4338 				     v_ret, NULL, 0);
4339 }
4340 
4341 /**
4342  * ice_vc_dis_vlan_stripping
4343  * @vf: pointer to the VF info
4344  *
4345  * Disable VLAN header stripping for a given VF
4346  */
4347 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
4348 {
4349 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4350 	struct ice_vsi *vsi;
4351 
4352 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4353 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4354 		goto error_param;
4355 	}
4356 
4357 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4358 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4359 		goto error_param;
4360 	}
4361 
4362 	vsi = ice_get_vf_vsi(vf);
4363 	if (!vsi) {
4364 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4365 		goto error_param;
4366 	}
4367 
4368 	if (ice_vsi_manage_vlan_stripping(vsi, false))
4369 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4370 
4371 error_param:
4372 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
4373 				     v_ret, NULL, 0);
4374 }
4375 
4376 /**
4377  * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
4378  * @vf: VF to enable/disable VLAN stripping for on initialization
4379  *
4380  * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
4381  * the flag is cleared then we want to disable stripping. For example, the flag
4382  * will be cleared when port VLANs are configured by the administrator before
4383  * passing the VF to the guest or if the AVF driver doesn't support VLAN
4384  * offloads.
4385  */
4386 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
4387 {
4388 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
4389 
4390 	if (!vsi)
4391 		return -EINVAL;
4392 
4393 	/* don't modify stripping if port VLAN is configured */
4394 	if (vsi->info.pvid)
4395 		return 0;
4396 
4397 	if (ice_vf_vlan_offload_ena(vf->driver_caps))
4398 		return ice_vsi_manage_vlan_stripping(vsi, true);
4399 	else
4400 		return ice_vsi_manage_vlan_stripping(vsi, false);
4401 }
4402 
4403 /**
4404  * ice_vc_process_vf_msg - Process request from VF
4405  * @pf: pointer to the PF structure
4406  * @event: pointer to the AQ event
4407  *
4408  * called from the common asq/arq handler to
4409  * process request from VF
4410  */
4411 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
4412 {
4413 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
4414 	s16 vf_id = le16_to_cpu(event->desc.retval);
4415 	u16 msglen = event->msg_len;
4416 	u8 *msg = event->msg_buf;
4417 	struct ice_vf *vf = NULL;
4418 	struct device *dev;
4419 	int err = 0;
4420 
4421 	/* if de-init is underway, don't process messages from VF */
4422 	if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
4423 		return;
4424 
4425 	dev = ice_pf_to_dev(pf);
4426 	if (ice_validate_vf_id(pf, vf_id)) {
4427 		err = -EINVAL;
4428 		goto error_handler;
4429 	}
4430 
4431 	vf = &pf->vf[vf_id];
4432 
4433 	/* Check if VF is disabled. */
4434 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
4435 		err = -EPERM;
4436 		goto error_handler;
4437 	}
4438 
4439 	/* Perform basic checks on the msg */
4440 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4441 	if (err) {
4442 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
4443 			err = -EPERM;
4444 		else
4445 			err = -EINVAL;
4446 	}
4447 
4448 	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
4449 		ice_vc_send_msg_to_vf(vf, v_opcode,
4450 				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
4451 				      0);
4452 		return;
4453 	}
4454 
4455 error_handler:
4456 	if (err) {
4457 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
4458 				      NULL, 0);
4459 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
4460 			vf_id, v_opcode, msglen, err);
4461 		return;
4462 	}
4463 
4464 	switch (v_opcode) {
4465 	case VIRTCHNL_OP_VERSION:
4466 		err = ice_vc_get_ver_msg(vf, msg);
4467 		break;
4468 	case VIRTCHNL_OP_GET_VF_RESOURCES:
4469 		err = ice_vc_get_vf_res_msg(vf, msg);
4470 		if (ice_vf_init_vlan_stripping(vf))
4471 			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
4472 				vf->vf_id);
4473 		ice_vc_notify_vf_link_state(vf);
4474 		break;
4475 	case VIRTCHNL_OP_RESET_VF:
4476 		ice_vc_reset_vf_msg(vf);
4477 		break;
4478 	case VIRTCHNL_OP_ADD_ETH_ADDR:
4479 		err = ice_vc_add_mac_addr_msg(vf, msg);
4480 		break;
4481 	case VIRTCHNL_OP_DEL_ETH_ADDR:
4482 		err = ice_vc_del_mac_addr_msg(vf, msg);
4483 		break;
4484 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4485 		err = ice_vc_cfg_qs_msg(vf, msg);
4486 		break;
4487 	case VIRTCHNL_OP_ENABLE_QUEUES:
4488 		err = ice_vc_ena_qs_msg(vf, msg);
4489 		ice_vc_notify_vf_link_state(vf);
4490 		break;
4491 	case VIRTCHNL_OP_DISABLE_QUEUES:
4492 		err = ice_vc_dis_qs_msg(vf, msg);
4493 		break;
4494 	case VIRTCHNL_OP_REQUEST_QUEUES:
4495 		err = ice_vc_request_qs_msg(vf, msg);
4496 		break;
4497 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4498 		err = ice_vc_cfg_irq_map_msg(vf, msg);
4499 		break;
4500 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
4501 		err = ice_vc_config_rss_key(vf, msg);
4502 		break;
4503 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
4504 		err = ice_vc_config_rss_lut(vf, msg);
4505 		break;
4506 	case VIRTCHNL_OP_GET_STATS:
4507 		err = ice_vc_get_stats_msg(vf, msg);
4508 		break;
4509 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4510 		err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
4511 		break;
4512 	case VIRTCHNL_OP_ADD_VLAN:
4513 		err = ice_vc_add_vlan_msg(vf, msg);
4514 		break;
4515 	case VIRTCHNL_OP_DEL_VLAN:
4516 		err = ice_vc_remove_vlan_msg(vf, msg);
4517 		break;
4518 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4519 		err = ice_vc_ena_vlan_stripping(vf);
4520 		break;
4521 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4522 		err = ice_vc_dis_vlan_stripping(vf);
4523 		break;
4524 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
4525 		err = ice_vc_add_fdir_fltr(vf, msg);
4526 		break;
4527 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
4528 		err = ice_vc_del_fdir_fltr(vf, msg);
4529 		break;
4530 	case VIRTCHNL_OP_ADD_RSS_CFG:
4531 		err = ice_vc_handle_rss_cfg(vf, msg, true);
4532 		break;
4533 	case VIRTCHNL_OP_DEL_RSS_CFG:
4534 		err = ice_vc_handle_rss_cfg(vf, msg, false);
4535 		break;
4536 	case VIRTCHNL_OP_UNKNOWN:
4537 	default:
4538 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
4539 			vf_id);
4540 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
4541 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
4542 					    NULL, 0);
4543 		break;
4544 	}
4545 	if (err) {
4546 		/* Helper function cares less about error return values here
4547 		 * as it is busy with pending work.
4548 		 */
4549 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
4550 			 vf_id, v_opcode, err);
4551 	}
4552 }
4553 
4554 /**
4555  * ice_get_vf_cfg
4556  * @netdev: network interface device structure
4557  * @vf_id: VF identifier
4558  * @ivi: VF configuration structure
4559  *
4560  * return VF configuration
4561  */
4562 int
4563 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
4564 {
4565 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
4566 	struct ice_vf *vf;
4567 
4568 	if (ice_validate_vf_id(pf, vf_id))
4569 		return -EINVAL;
4570 
4571 	vf = &pf->vf[vf_id];
4572 
4573 	if (ice_check_vf_init(pf, vf))
4574 		return -EBUSY;
4575 
4576 	ivi->vf = vf_id;
4577 	ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
4578 
4579 	/* VF configuration for VLAN and applicable QoS */
4580 	ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
4581 	ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
4582 
4583 	ivi->trusted = vf->trusted;
4584 	ivi->spoofchk = vf->spoofchk;
4585 	if (!vf->link_forced)
4586 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4587 	else if (vf->link_up)
4588 		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4589 	else
4590 		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4591 	ivi->max_tx_rate = vf->tx_rate;
4592 	ivi->min_tx_rate = 0;
4593 	return 0;
4594 }
4595 
4596 /**
4597  * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
4598  * @pf: PF used to reference the switch's rules
4599  * @umac: unicast MAC to compare against existing switch rules
4600  *
4601  * Return true on the first/any match, else return false
4602  */
4603 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
4604 {
4605 	struct ice_sw_recipe *mac_recipe_list =
4606 		&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
4607 	struct ice_fltr_mgmt_list_entry *list_itr;
4608 	struct list_head *rule_head;
4609 	struct mutex *rule_lock; /* protect MAC filter list access */
4610 
4611 	rule_head = &mac_recipe_list->filt_rules;
4612 	rule_lock = &mac_recipe_list->filt_rule_lock;
4613 
4614 	mutex_lock(rule_lock);
4615 	list_for_each_entry(list_itr, rule_head, list_entry) {
4616 		u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4617 
4618 		if (ether_addr_equal(existing_mac, umac)) {
4619 			mutex_unlock(rule_lock);
4620 			return true;
4621 		}
4622 	}
4623 
4624 	mutex_unlock(rule_lock);
4625 
4626 	return false;
4627 }
4628 
4629 /**
4630  * ice_set_vf_mac
4631  * @netdev: network interface device structure
4632  * @vf_id: VF identifier
4633  * @mac: MAC address
4634  *
4635  * program VF MAC address
4636  */
4637 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4638 {
4639 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
4640 	struct ice_vf *vf;
4641 	int ret;
4642 
4643 	if (ice_validate_vf_id(pf, vf_id))
4644 		return -EINVAL;
4645 
4646 	if (is_multicast_ether_addr(mac)) {
4647 		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
4648 		return -EINVAL;
4649 	}
4650 
4651 	vf = &pf->vf[vf_id];
4652 	/* nothing left to do, unicast MAC already set */
4653 	if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
4654 	    ether_addr_equal(vf->hw_lan_addr.addr, mac))
4655 		return 0;
4656 
4657 	ret = ice_check_vf_ready_for_cfg(vf);
4658 	if (ret)
4659 		return ret;
4660 
4661 	if (ice_unicast_mac_exists(pf, mac)) {
4662 		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
4663 			   mac, vf_id, mac);
4664 		return -EINVAL;
4665 	}
4666 
4667 	/* VF is notified of its new MAC via the PF's response to the
4668 	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
4669 	 */
4670 	ether_addr_copy(vf->dev_lan_addr.addr, mac);
4671 	ether_addr_copy(vf->hw_lan_addr.addr, mac);
4672 	if (is_zero_ether_addr(mac)) {
4673 		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
4674 		vf->pf_set_mac = false;
4675 		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
4676 			    vf->vf_id);
4677 	} else {
4678 		/* PF will add MAC rule for the VF */
4679 		vf->pf_set_mac = true;
4680 		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
4681 			    mac, vf_id);
4682 	}
4683 
4684 	ice_vc_reset_vf(vf);
4685 	return 0;
4686 }
4687 
4688 /**
4689  * ice_set_vf_trust
4690  * @netdev: network interface device structure
4691  * @vf_id: VF identifier
4692  * @trusted: Boolean value to enable/disable trusted VF
4693  *
4694  * Enable or disable a given VF as trusted
4695  */
4696 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
4697 {
4698 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
4699 	struct ice_vf *vf;
4700 	int ret;
4701 
4702 	if (ice_validate_vf_id(pf, vf_id))
4703 		return -EINVAL;
4704 
4705 	vf = &pf->vf[vf_id];
4706 	ret = ice_check_vf_ready_for_cfg(vf);
4707 	if (ret)
4708 		return ret;
4709 
4710 	/* Check if already trusted */
4711 	if (trusted == vf->trusted)
4712 		return 0;
4713 
4714 	vf->trusted = trusted;
4715 	ice_vc_reset_vf(vf);
4716 	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
4717 		 vf_id, trusted ? "" : "un");
4718 
4719 	return 0;
4720 }
4721 
4722 /**
4723  * ice_set_vf_link_state
4724  * @netdev: network interface device structure
4725  * @vf_id: VF identifier
4726  * @link_state: required link state
4727  *
4728  * Set VF's link state, irrespective of physical link state status
4729  */
4730 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
4731 {
4732 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
4733 	struct ice_vf *vf;
4734 	int ret;
4735 
4736 	if (ice_validate_vf_id(pf, vf_id))
4737 		return -EINVAL;
4738 
4739 	vf = &pf->vf[vf_id];
4740 	ret = ice_check_vf_ready_for_cfg(vf);
4741 	if (ret)
4742 		return ret;
4743 
4744 	switch (link_state) {
4745 	case IFLA_VF_LINK_STATE_AUTO:
4746 		vf->link_forced = false;
4747 		break;
4748 	case IFLA_VF_LINK_STATE_ENABLE:
4749 		vf->link_forced = true;
4750 		vf->link_up = true;
4751 		break;
4752 	case IFLA_VF_LINK_STATE_DISABLE:
4753 		vf->link_forced = true;
4754 		vf->link_up = false;
4755 		break;
4756 	default:
4757 		return -EINVAL;
4758 	}
4759 
4760 	ice_vc_notify_vf_link_state(vf);
4761 
4762 	return 0;
4763 }
4764 
4765 /**
4766  * ice_get_vf_stats - populate some stats for the VF
4767  * @netdev: the netdev of the PF
4768  * @vf_id: the host OS identifier (0-255)
4769  * @vf_stats: pointer to the OS memory to be initialized
4770  */
4771 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
4772 		     struct ifla_vf_stats *vf_stats)
4773 {
4774 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
4775 	struct ice_eth_stats *stats;
4776 	struct ice_vsi *vsi;
4777 	struct ice_vf *vf;
4778 	int ret;
4779 
4780 	if (ice_validate_vf_id(pf, vf_id))
4781 		return -EINVAL;
4782 
4783 	vf = &pf->vf[vf_id];
4784 	ret = ice_check_vf_ready_for_cfg(vf);
4785 	if (ret)
4786 		return ret;
4787 
4788 	vsi = ice_get_vf_vsi(vf);
4789 	if (!vsi)
4790 		return -EINVAL;
4791 
4792 	ice_update_eth_stats(vsi);
4793 	stats = &vsi->eth_stats;
4794 
4795 	memset(vf_stats, 0, sizeof(*vf_stats));
4796 
4797 	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4798 		stats->rx_multicast;
4799 	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4800 		stats->tx_multicast;
4801 	vf_stats->rx_bytes   = stats->rx_bytes;
4802 	vf_stats->tx_bytes   = stats->tx_bytes;
4803 	vf_stats->broadcast  = stats->rx_broadcast;
4804 	vf_stats->multicast  = stats->rx_multicast;
4805 	vf_stats->rx_dropped = stats->rx_discards;
4806 	vf_stats->tx_dropped = stats->tx_discards;
4807 
4808 	return 0;
4809 }
4810 
4811 /**
4812  * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4813  * @vf: pointer to the VF structure
4814  */
4815 void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4816 {
4817 	struct ice_pf *pf = vf->pf;
4818 	struct device *dev;
4819 
4820 	dev = ice_pf_to_dev(pf);
4821 
4822 	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4823 		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4824 		 vf->dev_lan_addr.addr,
4825 		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4826 			  ? "on" : "off");
4827 }
4828 
4829 /**
4830  * ice_print_vfs_mdd_events - print VFs malicious driver detect event
4831  * @pf: pointer to the PF structure
4832  *
4833  * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
4834  */
4835 void ice_print_vfs_mdd_events(struct ice_pf *pf)
4836 {
4837 	struct device *dev = ice_pf_to_dev(pf);
4838 	struct ice_hw *hw = &pf->hw;
4839 	int i;
4840 
4841 	/* check that there are pending MDD events to print */
4842 	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
4843 		return;
4844 
4845 	/* VF MDD event logs are rate limited to one second intervals */
4846 	if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4847 		return;
4848 
4849 	pf->last_printed_mdd_jiffies = jiffies;
4850 
4851 	ice_for_each_vf(pf, i) {
4852 		struct ice_vf *vf = &pf->vf[i];
4853 
4854 		/* only print Rx MDD event message if there are new events */
4855 		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4856 			vf->mdd_rx_events.last_printed =
4857 							vf->mdd_rx_events.count;
4858 			ice_print_vf_rx_mdd_event(vf);
4859 		}
4860 
4861 		/* only print Tx MDD event message if there are new events */
4862 		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4863 			vf->mdd_tx_events.last_printed =
4864 							vf->mdd_tx_events.count;
4865 
4866 			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4867 				 vf->mdd_tx_events.count, hw->pf_id, i,
4868 				 vf->dev_lan_addr.addr);
4869 		}
4870 	}
4871 }
4872 
4873 /**
4874  * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
4875  * @pdev: pointer to a pci_dev structure
4876  *
4877  * Called when recovering from a PF FLR to restore interrupt capability to
4878  * the VFs.
4879  */
4880 void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
4881 {
4882 	u16 vf_id;
4883 	int pos;
4884 
4885 	if (!pci_num_vf(pdev))
4886 		return;
4887 
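	/* read the VF device ID from the SR-IOV capability, then walk every
	 * matching VF device that belongs to this PF and restore its MSI
	 * state
	 */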
4888 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4889 	if (pos) {
4890 		struct pci_dev *vfdev;
4891 
4892 		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
4893 				     &vf_id);
4894 		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
4895 		while (vfdev) {
4896 			if (vfdev->is_virtfn && vfdev->physfn == pdev)
4897 				pci_restore_msi_state(vfdev);
4898 			vfdev = pci_get_device(pdev->vendor, vf_id,
4899 					       vfdev);
4900 		}
4901 	}
4902 }
4903 
4904 /**
4905  * ice_is_malicious_vf - helper function to detect a malicious VF
4906  * @pf: ptr to struct ice_pf
4907  * @event: pointer to the AQ event
4908  * @num_msg_proc: the number of messages processed so far
4909  * @num_msg_pending: the number of messages pending in the admin queue
4910  */
4911 bool
4912 ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
4913 		    u16 num_msg_proc, u16 num_msg_pending)
4914 {
4915 	s16 vf_id = le16_to_cpu(event->desc.retval);
4916 	struct device *dev = ice_pf_to_dev(pf);
4917 	struct ice_mbx_data mbxdata;
4918 	enum ice_status status;
4919 	bool malvf = false;
4920 	struct ice_vf *vf;
4921 
4922 	if (ice_validate_vf_id(pf, vf_id))
4923 		return false;
4924 
4925 	vf = &pf->vf[vf_id];
4926 	/* Check if VF is disabled. */
4927 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
4928 		return false;
4929 
4930 	mbxdata.num_msg_proc = num_msg_proc;
4931 	mbxdata.num_pending_arq = num_msg_pending;
4932 	mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
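	/* watermark of pending mailbox messages used by the mailbox overflow
	 * detection algorithm when deciding whether a VF may be malicious
	 */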
4933 #define ICE_MBX_OVERFLOW_WATERMARK 64
4934 	mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
4935 
4936 	/* check to see if we have a malicious VF */
4937 	status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
4938 	if (status)
4939 		return false;
4940 
4941 	if (malvf) {
4942 		bool report_vf = false;
4943 
4944 		/* if the VF is malicious and we haven't let the user
4945 		 * know about it, then let them know now
4946 		 */
4947 		status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
4948 					      ICE_MAX_VF_COUNT, vf_id,
4949 					      &report_vf);
4950 		if (status)
4951 			dev_dbg(dev, "Error reporting malicious VF\n");
4952 
4953 		if (report_vf) {
4954 			struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
4955 
4956 			if (pf_vsi)
4957 				dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
4958 					 &vf->dev_lan_addr.addr[0],
4959 					 pf_vsi->netdev->dev_addr);
4960 		}
4961 
4962 		return true;
4963 	}
4964 
4965 	/* if there was an error in detection or the VF is not malicious then
4966 	 * return false
4967 	 */
4968 	return false;
4969 }
4970