// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable amount of time and print a message if we
	 * fail to ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

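	/* The VSI maps its queues either noncontiguously (one queue_mapping
	 * entry per VSI queue) or contiguously (queue_mapping[0] holds the
	 * first PF queue and the rest follow in order).
	 */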
	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (which can be
		 * 1 to 16) as its own, they may actually belong to different
		 * VSIs (up to 4). We need to find which queue belongs to
		 * which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
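	/* Build a bitmap with I40E_VIRTCHNL_SUPPORTED_QTYPES bits per VSI
	 * queue: the even bit marks the queue's RX cause and the following
	 * odd bit its TX cause, so a bit's position encodes both the
	 * VSI-relative queue index and the queue type.
	 */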
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and using interrupt zero,
	 * we need to disable auto-mask when enabling interrupt zero for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
				       >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
				      >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

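	/* struct virtchnl_iwarp_qvlist_info already contains one qv_info
	 * element in its trailing array, so allocate room for only
	 * num_vectors - 1 additional elements.
	 */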
	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
	       (qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;

	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			     I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			      (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			      (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			      (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			      (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			      (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			      (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
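	/* the HMC context expects the ring base address in 128-byte units */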
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
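	/* as with the Tx context, base is in 128-byte units */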
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; don't apply the MAC filter here */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

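		/* Each VSILAN_QTABLE register packs two PF queue IDs, one in
		 * the low and one in the high 16 bits; 0x7FF
		 * (I40E_QUEUE_END_OF_LIST) in a field terminates the list.
		 */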
		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

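	/* The PF_PCI_CIAA/CIAD register pair gives the PF indirect access to
	 * the VF's PCI config space: write the config offset and VF number
	 * into CIAA, then read the VF's Device Status value back through
	 * CIAD and poll its Transactions Pending bit.
	 */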
	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give any VF config functions that may still
	 * be running at this point some time to finish.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);

	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* The first element in the array belongs to the primary VF VSI and we
	 * shouldn't delete it. We should, however, delete the rest of the
	 * VSIs that were created.
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
1805
1806 /**
1807 * i40e_vc_reset_vf_msg
1808 * @vf: pointer to the VF info
1809 *
1810 * called from the VF to reset itself,
1811 * unlike other virtchnl messages, PF driver
1812 * doesn't send the response back to the VF
1813 **/
i40e_vc_reset_vf_msg(struct i40e_vf * vf)1814 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1815 {
1816 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
1817 i40e_reset_vf(vf, false);
1818 }
1819
1820 /**
1821 * i40e_getnum_vf_vsi_vlan_filters
1822 * @vsi: pointer to the vsi
1823 *
1824 * called to get the number of VLANs offloaded on this VF
1825 **/
i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi * vsi)1826 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1827 {
1828 struct i40e_mac_filter *f;
1829 int num_vlans = 0, bkt;
1830
1831 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1832 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1833 num_vlans++;
1834 }
1835
1836 return num_vlans;
1837 }
1838
1839 /**
1840 * i40e_vc_config_promiscuous_mode_msg
1841 * @vf: pointer to the VF info
1842 * @msg: pointer to the msg buffer
1843 * @msglen: msg length
1844 *
1845 * called from the VF to configure the promiscuous mode of
1846 * VF VSIs
1847 **/
1848 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1849 u8 *msg, u16 msglen)
1850 {
1851 struct virtchnl_promisc_info *info =
1852 (struct virtchnl_promisc_info *)msg;
1853 struct i40e_pf *pf = vf->pf;
1854 struct i40e_hw *hw = &pf->hw;
1855 struct i40e_mac_filter *f;
1856 i40e_status aq_ret = 0;
1857 bool allmulti = false;
1858 struct i40e_vsi *vsi;
1859 bool alluni = false;
1860 int aq_err = 0;
1861 int bkt;
1862
1863 vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
1864 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
1865 !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1866 !vsi) {
1867 aq_ret = I40E_ERR_PARAM;
1868 goto error_param;
1869 }
1870 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
1871 dev_err(&pf->pdev->dev,
1872 "Unprivileged VF %d is attempting to configure promiscuous mode\n",
1873 vf->vf_id);
1874 /* Lie to the VF on purpose. */
1875 aq_ret = 0;
1876 goto error_param;
1877 }
1878 /* Multicast promiscuous handling */
1879 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
1880 allmulti = true;
1881
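/* apply the multicast setting at the narrowest granularity in use:
 * on the port VLAN when one is assigned, per VLAN filter when the
 * VF has VLAN filters, otherwise on the VSI as a whole
 */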
1882 if (vf->port_vlan_id) {
1883 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
1884 allmulti,
1885 vf->port_vlan_id,
1886 NULL);
1887 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1888 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1889 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1890 continue;
1891 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
1892 vsi->seid,
1893 allmulti,
1894 f->vlan,
1895 NULL);
1896 aq_err = pf->hw.aq.asq_last_status;
1897 if (aq_ret) {
1898 dev_err(&pf->pdev->dev,
1899 "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
1900 f->vlan,
1901 i40e_stat_str(&pf->hw, aq_ret),
1902 i40e_aq_str(&pf->hw, aq_err));
1903 break;
1904 }
1905 }
1906 } else {
1907 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1908 allmulti, NULL);
1909 aq_err = pf->hw.aq.asq_last_status;
1910 if (aq_ret) {
1911 dev_err(&pf->pdev->dev,
1912 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1913 vf->vf_id,
1914 i40e_stat_str(&pf->hw, aq_ret),
1915 i40e_aq_str(&pf->hw, aq_err));
1916 goto error_param;
1917 }
1918 }
1919
1920 if (!aq_ret) {
1921 dev_info(&pf->pdev->dev,
1922 "VF %d successfully set multicast promiscuous mode\n",
1923 vf->vf_id);
1924 if (allmulti)
1925 set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1926 else
1927 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1928 }
1929
1930 if (info->flags & FLAG_VF_UNICAST_PROMISC)
1931 alluni = true;
1932 if (vf->port_vlan_id) {
1933 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
1934 alluni,
1935 vf->port_vlan_id,
1936 NULL);
1937 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1938 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1939 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1940 continue;
1941 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
1942 vsi->seid,
1943 alluni,
1944 f->vlan,
1945 NULL);
1946 aq_err = pf->hw.aq.asq_last_status;
1947 if (aq_ret)
1948 dev_err(&pf->pdev->dev,
1949 "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
1950 f->vlan,
1951 i40e_stat_str(&pf->hw, aq_ret),
1952 i40e_aq_str(&pf->hw, aq_err));
1953 }
1954 } else {
1955 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1956 alluni, NULL,
1957 true);
1958 aq_err = pf->hw.aq.asq_last_status;
1959 if (aq_ret) {
1960 dev_err(&pf->pdev->dev,
1961 "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
1962 vf->vf_id, info->flags,
1963 i40e_stat_str(&pf->hw, aq_ret),
1964 i40e_aq_str(&pf->hw, aq_err));
1965 goto error_param;
1966 }
1967 }
1968
1969 if (!aq_ret) {
1970 dev_info(&pf->pdev->dev,
1971 "VF %d successfully set unicast promiscuous mode\n",
1972 vf->vf_id);
1973 if (alluni)
1974 set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1975 else
1976 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1977 }
1978
1979 error_param:
1980 /* send the response to the VF */
1981 return i40e_vc_send_resp_to_vf(vf,
1982 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1983 aq_ret);
1984 }
1985
1986 /**
1987 * i40e_vc_config_queues_msg
1988 * @vf: pointer to the VF info
1989 * @msg: pointer to the msg buffer
1990 * @msglen: msg length
1991 *
1992 * called from the VF to configure the rx/tx
1993 * queues
1994 **/
1995 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1996 {
1997 struct virtchnl_vsi_queue_config_info *qci =
1998 (struct virtchnl_vsi_queue_config_info *)msg;
1999 struct virtchnl_queue_pair_info *qpi;
2000 struct i40e_pf *pf = vf->pf;
2001 u16 vsi_id, vsi_queue_id = 0;
2002 i40e_status aq_ret = 0;
2003 int i, j = 0, idx = 0;
2004
2005 vsi_id = qci->vsi_id;
2006
2007 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2008 aq_ret = I40E_ERR_PARAM;
2009 goto error_param;
2010 }
2011
2012 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2013 aq_ret = I40E_ERR_PARAM;
2014 goto error_param;
2015 }
2016
2017 for (i = 0; i < qci->num_queue_pairs; i++) {
2018 qpi = &qci->qpair[i];
2019
2020 if (!vf->adq_enabled) {
2021 vsi_queue_id = qpi->txq.queue_id;
2022
2023 if (qpi->txq.vsi_id != qci->vsi_id ||
2024 qpi->rxq.vsi_id != qci->vsi_id ||
2025 qpi->rxq.queue_id != vsi_queue_id) {
2026 aq_ret = I40E_ERR_PARAM;
2027 goto error_param;
2028 }
2029 }
2030
2031 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
2032 aq_ret = I40E_ERR_PARAM;
2033 goto error_param;
2034 }
2035
2036 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2037 &qpi->rxq) ||
2038 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2039 &qpi->txq)) {
2040 aq_ret = I40E_ERR_PARAM;
2041 goto error_param;
2042 }
2043
2044 /* For ADq there can be up to 4 VSIs with max 4 queues each.
2045 * The VF does not know about these additional VSIs and all
2046 * it cares about is its own queues. The PF maps these queues
2047 * to the appropriate VSIs based on the TC mapping.
2048 **/
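/* e.g. with two TCs of four queue pairs each, VF queue pairs 0-3 land
 * on channel VSI 0 and queue pairs 4-7 land on channel VSI 1 as its
 * local queues 0-3
 */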
2049 if (vf->adq_enabled) {
2050 if (j == (vf->ch[idx].num_qps - 1)) {
2051 idx++;
2052 j = 0; /* resetting the queue count */
2053 vsi_queue_id = 0;
2054 } else {
2055 j++;
2056 vsi_queue_id++;
2057 }
2058 vsi_id = vf->ch[idx].vsi_id;
2059 }
2060 }
2061 /* set vsi num_queue_pairs in use to num configured by VF */
2062 if (!vf->adq_enabled) {
2063 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2064 qci->num_queue_pairs;
2065 } else {
2066 for (i = 0; i < vf->num_tc; i++)
2067 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2068 vf->ch[i].num_qps;
2069 }
2070
2071 error_param:
2072 /* send the response to the VF */
2073 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2074 aq_ret);
2075 }
2076
2077 /**
2078 * i40e_validate_queue_map
* @vf: pointer to the VF info
2079 * @vsi_id: vsi id
2080 * @queuemap: Tx or Rx queue map
2081 *
2082 * check if Tx or Rx queue map is valid
2083 **/
2084 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2085 unsigned long queuemap)
2086 {
2087 u16 vsi_queue_id, queue_id;
2088
2089 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2090 if (vf->adq_enabled) {
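/* the VF-relative queue id encodes both the channel VSI
 * (the quotient) and the queue within that VSI (the remainder)
 */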
2091 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2092 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2093 } else {
2094 queue_id = vsi_queue_id;
2095 }
2096
2097 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2098 return -EINVAL;
2099 }
2100
2101 return 0;
2102 }
2103
2104 /**
2105 * i40e_vc_config_irq_map_msg
2106 * @vf: pointer to the VF info
2107 * @msg: pointer to the msg buffer
2108 * @msglen: msg length
2109 *
2110 * called from the VF to configure the irq to
2111 * queue map
2112 **/
2113 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2114 {
2115 struct virtchnl_irq_map_info *irqmap_info =
2116 (struct virtchnl_irq_map_info *)msg;
2117 struct virtchnl_vector_map *map;
2118 u16 vsi_id, vector_id;
2119 i40e_status aq_ret = 0;
2120 int i;
2121
2122 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2123 aq_ret = I40E_ERR_PARAM;
2124 goto error_param;
2125 }
2126
2127 for (i = 0; i < irqmap_info->num_vectors; i++) {
2128 map = &irqmap_info->vecmap[i];
2129 vector_id = map->vector_id;
2130 vsi_id = map->vsi_id;
2131 /* validate msg params */
2132 if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
2133 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2134 aq_ret = I40E_ERR_PARAM;
2135 goto error_param;
2136 }
2137
2138 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2139 aq_ret = I40E_ERR_PARAM;
2140 goto error_param;
2141 }
2142
2143 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2144 aq_ret = I40E_ERR_PARAM;
2145 goto error_param;
2146 }
2147
2148 i40e_config_irq_link_list(vf, vsi_id, map);
2149 }
2150 error_param:
2151 /* send the response to the VF */
2152 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2153 aq_ret);
2154 }
2155
2156 /**
2157 * i40e_ctrl_vf_tx_rings
2158 * @vsi: the SRIOV VSI being configured
2159 * @q_map: bit map of the queues to be enabled
2160 * @enable: start or stop the queue
2161 **/
2162 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2163 bool enable)
2164 {
2165 struct i40e_pf *pf = vsi->back;
2166 int ret = 0;
2167 u16 q_id;
2168
2169 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2170 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2171 vsi->base_queue + q_id,
2172 false /*is xdp*/, enable);
2173 if (ret)
2174 break;
2175 }
2176 return ret;
2177 }
2178
2179 /**
2180 * i40e_ctrl_vf_rx_rings
2181 * @vsi: the SRIOV VSI being configured
2182 * @q_map: bit map of the queues to be enabled
2183 * @enable: start or stop the queue
2184 **/
2185 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2186 bool enable)
2187 {
2188 struct i40e_pf *pf = vsi->back;
2189 int ret = 0;
2190 u16 q_id;
2191
2192 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2193 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2194 enable);
2195 if (ret)
2196 break;
2197 }
2198 return ret;
2199 }
2200
2201 /**
2202 * i40e_vc_enable_queues_msg
2203 * @vf: pointer to the VF info
2204 * @msg: pointer to the msg buffer
2205 * @msglen: msg length
2206 *
2207 * called from the VF to enable all or specific queue(s)
2208 **/
2209 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2210 {
2211 struct virtchnl_queue_select *vqs =
2212 (struct virtchnl_queue_select *)msg;
2213 struct i40e_pf *pf = vf->pf;
2214 u16 vsi_id = vqs->vsi_id;
2215 i40e_status aq_ret = 0;
2216 int i;
2217
2218 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2219 aq_ret = I40E_ERR_PARAM;
2220 goto error_param;
2221 }
2222
2223 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2224 aq_ret = I40E_ERR_PARAM;
2225 goto error_param;
2226 }
2227
2228 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
2229 aq_ret = I40E_ERR_PARAM;
2230 goto error_param;
2231 }
2232
2233 /* Use the queue bit map sent by the VF */
2234 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2235 true)) {
2236 aq_ret = I40E_ERR_TIMEOUT;
2237 goto error_param;
2238 }
2239 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2240 true)) {
2241 aq_ret = I40E_ERR_TIMEOUT;
2242 goto error_param;
2243 }
2244
2245 /* need to start the rings for the additional ADq VSIs as well */
2246 if (vf->adq_enabled) {
2247 /* zero belongs to LAN VSI */
2248 for (i = 1; i < vf->num_tc; i++) {
2249 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2250 aq_ret = I40E_ERR_TIMEOUT;
2251 }
2252 }
2253
2254 error_param:
2255 /* send the response to the VF */
2256 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2257 aq_ret);
2258 }
2259
2260 /**
2261 * i40e_vc_disable_queues_msg
2262 * @vf: pointer to the VF info
2263 * @msg: pointer to the msg buffer
2264 * @msglen: msg length
2265 *
2266 * called from the VF to disable all or specific
2267 * queue(s)
2268 **/
2269 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2270 {
2271 struct virtchnl_queue_select *vqs =
2272 (struct virtchnl_queue_select *)msg;
2273 struct i40e_pf *pf = vf->pf;
2274 i40e_status aq_ret = 0;
2275
2276 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2277 aq_ret = I40E_ERR_PARAM;
2278 goto error_param;
2279 }
2280
2281 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2282 aq_ret = I40E_ERR_PARAM;
2283 goto error_param;
2284 }
2285
2286 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
2287 aq_ret = I40E_ERR_PARAM;
2288 goto error_param;
2289 }
2290
2291 /* Use the queue bit map sent by the VF */
2292 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2293 false)) {
2294 aq_ret = I40E_ERR_TIMEOUT;
2295 goto error_param;
2296 }
2297 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2298 false)) {
2299 aq_ret = I40E_ERR_TIMEOUT;
2300 goto error_param;
2301 }
2302 error_param:
2303 /* send the response to the VF */
2304 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2305 aq_ret);
2306 }
2307
2308 /**
2309 * i40e_vc_request_queues_msg
2310 * @vf: pointer to the VF info
2311 * @msg: pointer to the msg buffer
2312 * @msglen: msg length
2313 *
2314 * VFs get a default number of queues but can use this message to request a
2315 * different number. If the request is successful, PF will reset the VF and
2316 * return 0. If unsuccessful, PF will send message informing VF of number of
2317 * available queues and return result of sending VF a message.
2318 **/
2319 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
2320 {
2321 struct virtchnl_vf_res_request *vfres =
2322 (struct virtchnl_vf_res_request *)msg;
2323 int req_pairs = vfres->num_queue_pairs;
2324 int cur_pairs = vf->num_queue_pairs;
2325 struct i40e_pf *pf = vf->pf;
2326
2327 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2328 return -EINVAL;
2329
2330 if (req_pairs <= 0) {
2331 dev_err(&pf->pdev->dev,
2332 "VF %d tried to request %d queues. Ignoring.\n",
2333 vf->vf_id, req_pairs);
2334 } else if (req_pairs > I40E_MAX_VF_QUEUES) {
2335 dev_err(&pf->pdev->dev,
2336 "VF %d tried to request more than %d queues.\n",
2337 vf->vf_id,
2338 I40E_MAX_VF_QUEUES);
2339 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2340 } else if (req_pairs - cur_pairs > pf->queues_left) {
2341 dev_warn(&pf->pdev->dev,
2342 "VF %d requested %d more queues, but only %d left.\n",
2343 vf->vf_id,
2344 req_pairs - cur_pairs,
2345 pf->queues_left);
2346 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2347 } else {
2348 /* successful request */
2349 vf->num_req_queues = req_pairs;
2350 i40e_vc_notify_vf_reset(vf);
2351 i40e_reset_vf(vf, false);
2352 return 0;
2353 }
2354
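/* request denied or adjusted: send the (possibly corrected)
 * num_queue_pairs back to the VF instead of resetting it
 */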
2355 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2356 (u8 *)vfres, sizeof(*vfres));
2357 }
2358
2359 /**
2360 * i40e_vc_get_stats_msg
2361 * @vf: pointer to the VF info
2362 * @msg: pointer to the msg buffer
2363 * @msglen: msg length
2364 *
2365 * called from the VF to get vsi stats
2366 **/
2367 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2368 {
2369 struct virtchnl_queue_select *vqs =
2370 (struct virtchnl_queue_select *)msg;
2371 struct i40e_pf *pf = vf->pf;
2372 struct i40e_eth_stats stats;
2373 i40e_status aq_ret = 0;
2374 struct i40e_vsi *vsi;
2375
2376 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2377
2378 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2379 aq_ret = I40E_ERR_PARAM;
2380 goto error_param;
2381 }
2382
2383 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2384 aq_ret = I40E_ERR_PARAM;
2385 goto error_param;
2386 }
2387
2388 vsi = pf->vsi[vf->lan_vsi_idx];
2389 if (!vsi) {
2390 aq_ret = I40E_ERR_PARAM;
2391 goto error_param;
2392 }
2393 i40e_update_eth_stats(vsi);
2394 stats = vsi->eth_stats;
2395
2396 error_param:
2397 /* send the response back to the VF */
2398 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2399 (u8 *)&stats, sizeof(stats));
2400 }
2401
2402 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can program */
2403 #define I40E_VC_MAX_MAC_ADDR_PER_VF 12
2404 #define I40E_VC_MAX_VLAN_PER_VF 8
2405
2406 /**
2407 * i40e_check_vf_permission
2408 * @vf: pointer to the VF info
2409 * @al: MAC address list from virtchnl
2410 *
2411 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2412 * if any address in the list is not valid. Checks the following conditions:
2413 *
2414 * 1) broadcast and zero addresses are never valid
2415 * 2) unicast addresses are not allowed if the VMM has administratively set
2416 * the VF MAC address, unless the VF is marked as privileged.
2417 * 3) There is enough space to add all the addresses.
2418 *
2419 * Note that to guarantee consistency, it is expected this function be called
2420 * while holding the mac_filter_hash_lock, as otherwise the current number of
2421 * addresses might not be accurate.
2422 **/
2423 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2424 struct virtchnl_ether_addr_list *al)
2425 {
2426 struct i40e_pf *pf = vf->pf;
2427 int i;
2428
2429 /* If this VF is not privileged, then we can't add more than a limited
2430 * number of addresses. Check to make sure that the additions do not
2431 * push us over the limit.
2432 */
2433 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2434 (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
2435 dev_err(&pf->pdev->dev,
2436 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2437 return -EPERM;
2438 }
2439
2440 for (i = 0; i < al->num_elements; i++) {
2441 u8 *addr = al->list[i].addr;
2442
2443 if (is_broadcast_ether_addr(addr) ||
2444 is_zero_ether_addr(addr)) {
2445 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2446 addr);
2447 return I40E_ERR_INVALID_MAC_ADDR;
2448 }
2449
2450 /* If the host VMM administrator has set the VF MAC address
2451 * administratively via the ndo_set_vf_mac command then deny
2452 * permission to the VF to add or delete unicast MAC addresses.
2453 * Unless the VF is privileged and then it can do whatever.
2454 * The VF may request to set the MAC address filter already
2455 * assigned to it so do not return an error in that case.
2456 */
2457 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2458 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2459 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2460 dev_err(&pf->pdev->dev,
2461 "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
2462 return -EPERM;
2463 }
2464 }
2465
2466 return 0;
2467 }
2468
2469 /**
2470 * i40e_vc_add_mac_addr_msg
2471 * @vf: pointer to the VF info
2472 * @msg: pointer to the msg buffer
2473 * @msglen: msg length
2474 *
2475 * add guest mac address filter
2476 **/
2477 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2478 {
2479 struct virtchnl_ether_addr_list *al =
2480 (struct virtchnl_ether_addr_list *)msg;
2481 struct i40e_pf *pf = vf->pf;
2482 struct i40e_vsi *vsi = NULL;
2483 u16 vsi_id = al->vsi_id;
2484 i40e_status ret = 0;
2485 int i;
2486
2487 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2488 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2489 ret = I40E_ERR_PARAM;
2490 goto error_param;
2491 }
2492
2493 vsi = pf->vsi[vf->lan_vsi_idx];
2494
2495 /* Lock once, because every function inside the loop accesses the
2496 * VSI's MAC filter list, which must be protected by the same lock.
2497 */
2498 spin_lock_bh(&vsi->mac_filter_hash_lock);
2499
2500 ret = i40e_check_vf_permission(vf, al);
2501 if (ret) {
2502 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2503 goto error_param;
2504 }
2505
2506 /* add new addresses to the list */
2507 for (i = 0; i < al->num_elements; i++) {
2508 struct i40e_mac_filter *f;
2509
2510 f = i40e_find_mac(vsi, al->list[i].addr);
2511 if (!f) {
2512 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2513
2514 if (!f) {
2515 dev_err(&pf->pdev->dev,
2516 "Unable to add MAC filter %pM for VF %d\n",
2517 al->list[i].addr, vf->vf_id);
2518 ret = I40E_ERR_PARAM;
2519 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2520 goto error_param;
2521 } else {
2522 vf->num_mac++;
2523 }
2524 }
2525 }
2526 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2527
2528 /* program the updated filter list */
2529 ret = i40e_sync_vsi_filters(vsi);
2530 if (ret)
2531 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2532 vf->vf_id, ret);
2533
2534 error_param:
2535 /* send the response to the VF */
2536 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2537 ret);
2538 }
2539
2540 /**
2541 * i40e_vc_del_mac_addr_msg
2542 * @vf: pointer to the VF info
2543 * @msg: pointer to the msg buffer
2544 * @msglen: msg length
2545 *
2546 * remove guest mac address filter
2547 **/
2548 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2549 {
2550 struct virtchnl_ether_addr_list *al =
2551 (struct virtchnl_ether_addr_list *)msg;
2552 struct i40e_pf *pf = vf->pf;
2553 struct i40e_vsi *vsi = NULL;
2554 u16 vsi_id = al->vsi_id;
2555 i40e_status ret = 0;
2556 int i;
2557
2558 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2559 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2560 ret = I40E_ERR_PARAM;
2561 goto error_param;
2562 }
2563
2564 for (i = 0; i < al->num_elements; i++) {
2565 if (is_broadcast_ether_addr(al->list[i].addr) ||
2566 is_zero_ether_addr(al->list[i].addr)) {
2567 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2568 al->list[i].addr, vf->vf_id);
2569 ret = I40E_ERR_INVALID_MAC_ADDR;
2570 goto error_param;
2571 }
2572 }
2573 vsi = pf->vsi[vf->lan_vsi_idx];
2574
2575 spin_lock_bh(&vsi->mac_filter_hash_lock);
2576 /* delete addresses from the list */
2577 for (i = 0; i < al->num_elements; i++) {
2578 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2579 ret = I40E_ERR_INVALID_MAC_ADDR;
2580 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2581 goto error_param;
2582 }
2583 vf->num_mac--;
2584 }
2585
2586 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2587
2588 /* program the updated filter list */
2589 ret = i40e_sync_vsi_filters(vsi);
2590 if (ret)
2591 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2592 vf->vf_id, ret);
2593
2594 error_param:
2595 /* send the response to the VF */
2596 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2597 ret);
2598 }
2599
2600 /**
2601 * i40e_vc_add_vlan_msg
2602 * @vf: pointer to the VF info
2603 * @msg: pointer to the msg buffer
2604 * @msglen: msg length
2605 *
2606 * program guest vlan id
2607 **/
2608 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2609 {
2610 struct virtchnl_vlan_filter_list *vfl =
2611 (struct virtchnl_vlan_filter_list *)msg;
2612 struct i40e_pf *pf = vf->pf;
2613 struct i40e_vsi *vsi = NULL;
2614 u16 vsi_id = vfl->vsi_id;
2615 i40e_status aq_ret = 0;
2616 int i;
2617
2618 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2619 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2620 dev_err(&pf->pdev->dev,
2621 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2622 goto error_param;
2623 }
2624 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2625 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2626 aq_ret = I40E_ERR_PARAM;
2627 goto error_param;
2628 }
2629
2630 for (i = 0; i < vfl->num_elements; i++) {
2631 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2632 aq_ret = I40E_ERR_PARAM;
2633 dev_err(&pf->pdev->dev,
2634 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2635 goto error_param;
2636 }
2637 }
2638 vsi = pf->vsi[vf->lan_vsi_idx];
2639 if (vsi->info.pvid) {
2640 aq_ret = I40E_ERR_PARAM;
2641 goto error_param;
2642 }
2643
2644 i40e_vlan_stripping_enable(vsi);
2645 for (i = 0; i < vfl->num_elements; i++) {
2646 /* add new VLAN filter */
2647 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2648 if (!ret)
2649 vf->num_vlan++;
2650
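/* if the VF is already in promiscuous mode, extend the unicast
 * and/or multicast promiscuous setting to the newly added VLAN
 */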
2651 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2652 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2653 true,
2654 vfl->vlan_id[i],
2655 NULL);
2656 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2657 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2658 true,
2659 vfl->vlan_id[i],
2660 NULL);
2661
2662 if (ret)
2663 dev_err(&pf->pdev->dev,
2664 "Unable to add VLAN filter %d for VF %d, error %d\n",
2665 vfl->vlan_id[i], vf->vf_id, ret);
2666 }
2667
2668 error_param:
2669 /* send the response to the VF */
2670 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2671 }
2672
2673 /**
2674 * i40e_vc_remove_vlan_msg
2675 * @vf: pointer to the VF info
2676 * @msg: pointer to the msg buffer
2677 * @msglen: msg length
2678 *
2679 * remove programmed guest vlan id
2680 **/
2681 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2682 {
2683 struct virtchnl_vlan_filter_list *vfl =
2684 (struct virtchnl_vlan_filter_list *)msg;
2685 struct i40e_pf *pf = vf->pf;
2686 struct i40e_vsi *vsi = NULL;
2687 u16 vsi_id = vfl->vsi_id;
2688 i40e_status aq_ret = 0;
2689 int i;
2690
2691 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2692 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2693 aq_ret = I40E_ERR_PARAM;
2694 goto error_param;
2695 }
2696
2697 for (i = 0; i < vfl->num_elements; i++) {
2698 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2699 aq_ret = I40E_ERR_PARAM;
2700 goto error_param;
2701 }
2702 }
2703
2704 vsi = pf->vsi[vf->lan_vsi_idx];
2705 if (vsi->info.pvid) {
2706 aq_ret = I40E_ERR_PARAM;
2707 goto error_param;
2708 }
2709
2710 for (i = 0; i < vfl->num_elements; i++) {
2711 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2712 vf->num_vlan--;
2713
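/* drop the per-VLAN promiscuous settings that were added along
 * with this VLAN
 */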
2714 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2715 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2716 false,
2717 vfl->vlan_id[i],
2718 NULL);
2719 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2720 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2721 false,
2722 vfl->vlan_id[i],
2723 NULL);
2724 }
2725
2726 error_param:
2727 /* send the response to the VF */
2728 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2729 }
2730
2731 /**
2732 * i40e_vc_iwarp_msg
2733 * @vf: pointer to the VF info
2734 * @msg: pointer to the msg buffer
2735 * @msglen: msg length
2736 *
2737 * called from the VF for the iwarp msgs
2738 **/
2739 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2740 {
2741 struct i40e_pf *pf = vf->pf;
2742 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2743 i40e_status aq_ret = 0;
2744
2745 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2746 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2747 aq_ret = I40E_ERR_PARAM;
2748 goto error_param;
2749 }
2750
2751 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2752 msg, msglen);
2753
2754 error_param:
2755 /* send the response to the VF */
2756 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2757 aq_ret);
2758 }
2759
2760 /**
2761 * i40e_vc_iwarp_qvmap_msg
2762 * @vf: pointer to the VF info
2763 * @msg: pointer to the msg buffer
2764 * @msglen: msg length
2765 * @config: config qvmap or release it
2766 *
2767 * called from the VF for the iwarp msgs
2768 **/
2769 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2770 bool config)
2771 {
2772 struct virtchnl_iwarp_qvlist_info *qvlist_info =
2773 (struct virtchnl_iwarp_qvlist_info *)msg;
2774 i40e_status aq_ret = 0;
2775
2776 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2777 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2778 aq_ret = I40E_ERR_PARAM;
2779 goto error_param;
2780 }
2781
2782 if (config) {
2783 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2784 aq_ret = I40E_ERR_PARAM;
2785 } else {
2786 i40e_release_iwarp_qvlist(vf);
2787 }
2788
2789 error_param:
2790 /* send the response to the VF */
2791 return i40e_vc_send_resp_to_vf(vf,
2792 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2793 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2794 aq_ret);
2795 }
2796
2797 /**
2798 * i40e_vc_config_rss_key
2799 * @vf: pointer to the VF info
2800 * @msg: pointer to the msg buffer
2801 * @msglen: msg length
2802 *
2803 * Configure the VF's RSS key
2804 **/
2805 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
2806 {
2807 struct virtchnl_rss_key *vrk =
2808 (struct virtchnl_rss_key *)msg;
2809 struct i40e_pf *pf = vf->pf;
2810 struct i40e_vsi *vsi = NULL;
2811 u16 vsi_id = vrk->vsi_id;
2812 i40e_status aq_ret = 0;
2813
2814 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2815 !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2816 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2817 aq_ret = I40E_ERR_PARAM;
2818 goto err;
2819 }
2820
2821 vsi = pf->vsi[vf->lan_vsi_idx];
2822 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2823 err:
2824 /* send the response to the VF */
2825 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2826 aq_ret);
2827 }
2828
2829 /**
2830 * i40e_vc_config_rss_lut
2831 * @vf: pointer to the VF info
2832 * @msg: pointer to the msg buffer
2833 * @msglen: msg length
2834 *
2835 * Configure the VF's RSS LUT
2836 **/
2837 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
2838 {
2839 struct virtchnl_rss_lut *vrl =
2840 (struct virtchnl_rss_lut *)msg;
2841 struct i40e_pf *pf = vf->pf;
2842 struct i40e_vsi *vsi = NULL;
2843 u16 vsi_id = vrl->vsi_id;
2844 i40e_status aq_ret = 0;
2845
2846 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2847 !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2848 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2849 aq_ret = I40E_ERR_PARAM;
2850 goto err;
2851 }
2852
2853 vsi = pf->vsi[vf->lan_vsi_idx];
2854 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2855 /* send the response to the VF */
2856 err:
2857 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
2858 aq_ret);
2859 }
2860
2861 /**
2862 * i40e_vc_get_rss_hena
2863 * @vf: pointer to the VF info
2864 * @msg: pointer to the msg buffer
2865 * @msglen: msg length
2866 *
2867 * Return the RSS HENA bits allowed by the hardware
2868 **/
2869 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2870 {
2871 struct virtchnl_rss_hena *vrh = NULL;
2872 struct i40e_pf *pf = vf->pf;
2873 i40e_status aq_ret = 0;
2874 int len = 0;
2875
2876 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2877 aq_ret = I40E_ERR_PARAM;
2878 goto err;
2879 }
2880 len = sizeof(struct virtchnl_rss_hena);
2881
2882 vrh = kzalloc(len, GFP_KERNEL);
2883 if (!vrh) {
2884 aq_ret = I40E_ERR_NO_MEMORY;
2885 len = 0;
2886 goto err;
2887 }
2888 vrh->hena = i40e_pf_get_default_rss_hena(pf);
2889 err:
2890 /* send the response back to the VF */
2891 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
2892 aq_ret, (u8 *)vrh, len);
2893 kfree(vrh);
2894 return aq_ret;
2895 }
2896
2897 /**
2898 * i40e_vc_set_rss_hena
2899 * @vf: pointer to the VF info
2900 * @msg: pointer to the msg buffer
2901 * @msglen: msg length
2902 *
2903 * Set the RSS HENA bits for the VF
2904 **/
2905 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2906 {
2907 struct virtchnl_rss_hena *vrh =
2908 (struct virtchnl_rss_hena *)msg;
2909 struct i40e_pf *pf = vf->pf;
2910 struct i40e_hw *hw = &pf->hw;
2911 i40e_status aq_ret = 0;
2912
2913 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2914 aq_ret = I40E_ERR_PARAM;
2915 goto err;
2916 }
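/* hena is a 64-bit bitmap; program it as two 32-bit register writes */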
2917 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
2918 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
2919 (u32)(vrh->hena >> 32));
2920
2921 /* send the response to the VF */
2922 err:
2923 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
2924 }
2925
2926 /**
2927 * i40e_vc_enable_vlan_stripping
2928 * @vf: pointer to the VF info
2929 * @msg: pointer to the msg buffer
2930 * @msglen: msg length
2931 *
2932 * Enable vlan header stripping for the VF
2933 **/
2934 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2935 u16 msglen)
2936 {
2937 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2938 i40e_status aq_ret = 0;
2939
2940 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2941 aq_ret = I40E_ERR_PARAM;
2942 goto err;
2943 }
2944
2945 i40e_vlan_stripping_enable(vsi);
2946
2947 /* send the response to the VF */
2948 err:
2949 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2950 aq_ret);
2951 }
2952
2953 /**
2954 * i40e_vc_disable_vlan_stripping
2955 * @vf: pointer to the VF info
2956 * @msg: pointer to the msg buffer
2957 * @msglen: msg length
2958 *
2959 * Disable vlan header stripping for the VF
2960 **/
2961 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2962 u16 msglen)
2963 {
2964 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2965 i40e_status aq_ret = 0;
2966
2967 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2968 aq_ret = I40E_ERR_PARAM;
2969 goto err;
2970 }
2971
2972 i40e_vlan_stripping_disable(vsi);
2973
2974 /* send the response to the VF */
2975 err:
2976 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2977 aq_ret);
2978 }
2979
2980 /**
2981 * i40e_validate_cloud_filter
2982 * @vf: pointer to the VF info
2983 * @tc_filter: pointer to the virtchnl_filter to validate
2984 *
2985 * This function validates a cloud filter programmed as a TC filter for ADq
2986 **/
2987 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
2988 struct virtchnl_filter *tc_filter)
2989 {
2990 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
2991 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
2992 struct i40e_pf *pf = vf->pf;
2993 struct i40e_vsi *vsi = NULL;
2994 struct i40e_mac_filter *f;
2995 struct hlist_node *h;
2996 bool found = false;
2997 int bkt;
2998
2999 if (!tc_filter->action) {
3000 dev_info(&pf->pdev->dev,
3001 "VF %d: Currently ADq doesn't support Drop Action\n",
3002 vf->vf_id);
3003 goto err;
3004 }
3005
3006 /* action_meta is the TC number to which the filter is applied */
3007 if (!tc_filter->action_meta ||
3008 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3009 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3010 vf->vf_id, tc_filter->action_meta);
3011 goto err;
3012 }
3013
3014 /* Check filter if it's programmed for advanced mode or basic mode.
3015 * There are two ADq modes (for VF only),
3016 * 1. Basic mode: intended to allow as many filter options as possible
3017 * to be added to a VF in Non-trusted mode. Main goal is
3018 * to add filters to its own MAC and VLAN id.
3019 * 2. Advanced mode: is for allowing filters to be applied other than
3020 * its own MAC or VLAN. This mode requires the VF to be
3021 * Trusted.
3022 */
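/* a filter keyed on a destination MAC but not on an IP address is
 * treated as basic mode and checked against the VF's own MAC/VLAN
 * filters; anything else requires a trusted VF
 */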
3023 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3024 vsi = pf->vsi[vf->lan_vsi_idx];
3025 f = i40e_find_mac(vsi, data.dst_mac);
3026
3027 if (!f) {
3028 dev_info(&pf->pdev->dev,
3029 "Destination MAC %pM doesn't belong to VF %d\n",
3030 data.dst_mac, vf->vf_id);
3031 goto err;
3032 }
3033
3034 if (mask.vlan_id) {
3035 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3036 hlist) {
3037 if (f->vlan == ntohs(data.vlan_id)) {
3038 found = true;
3039 break;
3040 }
3041 }
3042 if (!found) {
3043 dev_info(&pf->pdev->dev,
3044 "VF %d doesn't have any VLAN id %u\n",
3045 vf->vf_id, ntohs(data.vlan_id));
3046 goto err;
3047 }
3048 }
3049 } else {
3050 /* Check if VF is trusted */
3051 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3052 dev_err(&pf->pdev->dev,
3053 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3054 vf->vf_id);
3055 return I40E_ERR_CONFIG;
3056 }
3057 }
3058
3059 if (mask.dst_mac[0] & data.dst_mac[0]) {
3060 if (is_broadcast_ether_addr(data.dst_mac) ||
3061 is_zero_ether_addr(data.dst_mac)) {
3062 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3063 vf->vf_id, data.dst_mac);
3064 goto err;
3065 }
3066 }
3067
3068 if (mask.src_mac[0] & data.src_mac[0]) {
3069 if (is_broadcast_ether_addr(data.src_mac) ||
3070 is_zero_ether_addr(data.src_mac)) {
3071 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3072 vf->vf_id, data.src_mac);
3073 goto err;
3074 }
3075 }
3076
3077 if (mask.dst_port & data.dst_port) {
3078 if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) {
3079 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3080 vf->vf_id);
3081 goto err;
3082 }
3083 }
3084
3085 if (mask.src_port & data.src_port) {
3086 if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) {
3087 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3088 vf->vf_id);
3089 goto err;
3090 }
3091 }
3092
3093 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3094 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3095 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3096 vf->vf_id);
3097 goto err;
3098 }
3099
3100 if (mask.vlan_id & data.vlan_id) {
3101 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3102 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3103 vf->vf_id);
3104 goto err;
3105 }
3106 }
3107
3108 return I40E_SUCCESS;
3109 err:
3110 return I40E_ERR_CONFIG;
3111 }
3112
3113 /**
3114 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3115 * @vf: pointer to the VF info
3116 * @seid: seid of the vsi it is searching for
3117 **/
3118 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3119 {
3120 struct i40e_pf *pf = vf->pf;
3121 struct i40e_vsi *vsi = NULL;
3122 int i;
3123
3124 for (i = 0; i < vf->num_tc; i++) {
3125 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3126 if (vsi && vsi->seid == seid)
3127 return vsi;
3128 }
3129 return NULL;
3130 }
3131
3132 /**
3133 * i40e_del_all_cloud_filters
3134 * @vf: pointer to the VF info
3135 *
3136 * This function deletes all cloud filters
3137 **/
3138 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3139 {
3140 struct i40e_cloud_filter *cfilter = NULL;
3141 struct i40e_pf *pf = vf->pf;
3142 struct i40e_vsi *vsi = NULL;
3143 struct hlist_node *node;
3144 int ret;
3145
3146 hlist_for_each_entry_safe(cfilter, node,
3147 &vf->cloud_filter_list, cloud_node) {
3148 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3149
3150 if (!vsi) {
3151 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3152 vf->vf_id, cfilter->seid);
3153 continue;
3154 }
3155
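/* filters that match on a destination port must use the big-buffer
 * admin queue variant
 */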
3156 if (cfilter->dst_port)
3157 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3158 false);
3159 else
3160 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3161 if (ret)
3162 dev_err(&pf->pdev->dev,
3163 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3164 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3165 i40e_aq_str(&pf->hw,
3166 pf->hw.aq.asq_last_status));
3167
3168 hlist_del(&cfilter->cloud_node);
3169 kfree(cfilter);
3170 vf->num_cloud_filters--;
3171 }
3172 }
3173
3174 /**
3175 * i40e_vc_del_cloud_filter
3176 * @vf: pointer to the VF info
3177 * @msg: pointer to the msg buffer
3178 *
3179 * This function deletes a cloud filter programmed as TC filter for ADq
3180 **/
3181 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3182 {
3183 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3184 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3185 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3186 struct i40e_cloud_filter cfilter, *cf = NULL;
3187 struct i40e_pf *pf = vf->pf;
3188 struct i40e_vsi *vsi = NULL;
3189 struct hlist_node *node;
3190 i40e_status aq_ret = 0;
3191 int i, ret;
3192
3193 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3194 aq_ret = I40E_ERR_PARAM;
3195 goto err;
3196 }
3197
3198 if (!vf->adq_enabled) {
3199 dev_info(&pf->pdev->dev,
3200 "VF %d: ADq not enabled, can't apply cloud filter\n",
3201 vf->vf_id);
3202 aq_ret = I40E_ERR_PARAM;
3203 goto err;
3204 }
3205
3206 if (i40e_validate_cloud_filter(vf, vcf)) {
3207 dev_info(&pf->pdev->dev,
3208 "VF %d: Invalid input, can't apply cloud filter\n",
3209 vf->vf_id);
3210 aq_ret = I40E_ERR_PARAM;
3211 goto err;
3212 }
3213
3214 memset(&cfilter, 0, sizeof(cfilter));
3215 /* parse destination mac address */
3216 for (i = 0; i < ETH_ALEN; i++)
3217 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3218
3219 /* parse source mac address */
3220 for (i = 0; i < ETH_ALEN; i++)
3221 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3222
3223 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3224 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3225 cfilter.src_port = mask.src_port & tcf.src_port;
3226
3227 switch (vcf->flow_type) {
3228 case VIRTCHNL_TCP_V4_FLOW:
3229 cfilter.n_proto = ETH_P_IP;
3230 if (mask.dst_ip[0] & tcf.dst_ip[0])
3231 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3232 ARRAY_SIZE(tcf.dst_ip));
3233 else if (mask.src_ip[0] & tcf.dst_ip[0])
3234 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3235 ARRAY_SIZE(tcf.dst_ip));
3236 break;
3237 case VIRTCHNL_TCP_V6_FLOW:
3238 cfilter.n_proto = ETH_P_IPV6;
3239 if (mask.dst_ip[3] & tcf.dst_ip[3])
3240 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3241 sizeof(cfilter.ip.v6.dst_ip6));
3242 if (mask.src_ip[3] & tcf.src_ip[3])
3243 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3244 sizeof(cfilter.ip.v6.src_ip6));
3245 break;
3246 default:
3247 /* TC filter can be configured based on different combinations
3248 * and in this case IP is not a part of filter config
3249 */
3250 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3251 vf->vf_id);
3252 }
3253
3254 /* get the VSI to which the TC belongs */
3255 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3256 cfilter.seid = vsi->seid;
3257 cfilter.flags = vcf->field_flags;
3258
3259 /* Deleting TC filter */
3260 if (tcf.dst_port)
3261 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3262 else
3263 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3264 if (ret) {
3265 dev_err(&pf->pdev->dev,
3266 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3267 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3268 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3269 goto err;
3270 }
3271
3272 hlist_for_each_entry_safe(cf, node,
3273 &vf->cloud_filter_list, cloud_node) {
3274 if (cf->seid != cfilter.seid)
3275 continue;
3276 if (mask.dst_port)
3277 if (cfilter.dst_port != cf->dst_port)
3278 continue;
3279 if (mask.dst_mac[0])
3280 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3281 continue;
3282 /* for ipv4 data to be valid, only the first word of the mask is set */
3283 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3284 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3285 ARRAY_SIZE(tcf.dst_ip)))
3286 continue;
3287 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3288 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3289 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3290 sizeof(cfilter.ip.v6.src_ip6)))
3291 continue;
3292 if (mask.vlan_id)
3293 if (cfilter.vlan_id != cf->vlan_id)
3294 continue;
3295
3296 hlist_del(&cf->cloud_node);
3297 kfree(cf);
3298 vf->num_cloud_filters--;
3299 }
3300
3301 err:
3302 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3303 aq_ret);
3304 }
3305
3306 /**
3307 * i40e_vc_add_cloud_filter
3308 * @vf: pointer to the VF info
3309 * @msg: pointer to the msg buffer
3310 *
3311 * This function adds a cloud filter programmed as TC filter for ADq
3312 **/
3313 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3314 {
3315 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3316 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3317 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3318 struct i40e_cloud_filter *cfilter = NULL;
3319 struct i40e_pf *pf = vf->pf;
3320 struct i40e_vsi *vsi = NULL;
3321 i40e_status aq_ret = 0;
3322 int i, ret;
3323
3324 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3325 aq_ret = I40E_ERR_PARAM;
3326 goto err;
3327 }
3328
3329 if (!vf->adq_enabled) {
3330 dev_info(&pf->pdev->dev,
3331 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3332 vf->vf_id);
3333 aq_ret = I40E_ERR_PARAM;
3334 goto err;
3335 }
3336
3337 if (i40e_validate_cloud_filter(vf, vcf)) {
3338 dev_info(&pf->pdev->dev,
3339 "VF %d: Invalid input/s, can't apply cloud filter\n",
3340 vf->vf_id);
3341 aq_ret = I40E_ERR_PARAM;
3342 goto err;
3343 }
3344
3345 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3346 if (!cfilter)
3347 return -ENOMEM;
3348
3349 /* parse destination mac address */
3350 for (i = 0; i < ETH_ALEN; i++)
3351 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3352
3353 /* parse source mac address */
3354 for (i = 0; i < ETH_ALEN; i++)
3355 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3356
3357 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3358 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3359 cfilter->src_port = mask.src_port & tcf.src_port;
3360
3361 switch (vcf->flow_type) {
3362 case VIRTCHNL_TCP_V4_FLOW:
3363 cfilter->n_proto = ETH_P_IP;
3364 if (mask.dst_ip[0] & tcf.dst_ip[0])
3365 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3366 ARRAY_SIZE(tcf.dst_ip));
3367 else if (mask.src_ip[0] & tcf.dst_ip[0])
3368 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3369 ARRAY_SIZE(tcf.dst_ip));
3370 break;
3371 case VIRTCHNL_TCP_V6_FLOW:
3372 cfilter->n_proto = ETH_P_IPV6;
3373 if (mask.dst_ip[3] & tcf.dst_ip[3])
3374 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3375 sizeof(cfilter->ip.v6.dst_ip6));
3376 if (mask.src_ip[3] & tcf.src_ip[3])
3377 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3378 sizeof(cfilter->ip.v6.src_ip6));
3379 break;
3380 default:
3381 /* TC filter can be configured based on different combinations
3382 * and in this case IP is not a part of filter config
3383 */
3384 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3385 vf->vf_id);
3386 }
3387
3388 /* get the VSI to which the TC belongs */
3389 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3390 cfilter->seid = vsi->seid;
3391 cfilter->flags = vcf->field_flags;
3392
3393 /* Adding cloud filter programmed as TC filter */
3394 if (tcf.dst_port)
3395 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3396 else
3397 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3398 if (ret) {
3399 dev_err(&pf->pdev->dev,
3400 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3401 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3402 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3403 goto err;
3404 }
3405
3406 INIT_HLIST_NODE(&cfilter->cloud_node);
3407 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3408 vf->num_cloud_filters++;
3409 err:
3410 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3411 aq_ret);
3412 }
3413
3414 /**
3415 * i40e_vc_add_qch_msg - Add queue channel and enable ADq
3416 * @vf: pointer to the VF info
3417 * @msg: pointer to the msg buffer
3418 **/
3419 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3420 {
3421 struct virtchnl_tc_info *tci =
3422 (struct virtchnl_tc_info *)msg;
3423 struct i40e_pf *pf = vf->pf;
3424 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3425 int i, adq_request_qps = 0, speed = 0;
3426 i40e_status aq_ret = 0;
3427
3428 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3429 aq_ret = I40E_ERR_PARAM;
3430 goto err;
3431 }
3432
3433 /* ADq cannot be applied if spoof check is ON */
3434 if (vf->spoofchk) {
3435 dev_err(&pf->pdev->dev,
3436 "Spoof check is ON, turn it OFF to enable ADq\n");
3437 aq_ret = I40E_ERR_PARAM;
3438 goto err;
3439 }
3440
3441 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3442 dev_err(&pf->pdev->dev,
3443 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3444 vf->vf_id);
3445 aq_ret = I40E_ERR_PARAM;
3446 goto err;
3447 }
3448
3449 /* max number of traffic classes for VF currently capped at 4 */
3450 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3451 dev_err(&pf->pdev->dev,
3452 "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
3453 vf->vf_id, tci->num_tc);
3454 aq_ret = I40E_ERR_PARAM;
3455 goto err;
3456 }
3457
3458 /* validate queues for each TC */
3459 for (i = 0; i < tci->num_tc; i++)
3460 if (!tci->list[i].count ||
3461 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3462 dev_err(&pf->pdev->dev,
3463 "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
3464 vf->vf_id, i, tci->list[i].count);
3465 aq_ret = I40E_ERR_PARAM;
3466 goto err;
3467 }
3468
3469 /* ADq needs the max VF queues; the VF already has the default number, so request only the difference */
3470 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3471
3472 if (pf->queues_left < adq_request_qps) {
3473 dev_err(&pf->pdev->dev,
3474 "No queues left to allocate to VF %d\n",
3475 vf->vf_id);
3476 aq_ret = I40E_ERR_PARAM;
3477 goto err;
3478 } else {
3479 /* we need to allocate max VF queues to enable ADq so as to
3480 * make sure ADq enabled VF always gets back queues when it
3481 * goes through a reset.
3482 */
3483 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3484 }
3485
3486 /* get link speed in Mbps to validate the rate limit */
3487 switch (ls->link_speed) {
3488 case VIRTCHNL_LINK_SPEED_100MB:
3489 speed = SPEED_100;
3490 break;
3491 case VIRTCHNL_LINK_SPEED_1GB:
3492 speed = SPEED_1000;
3493 break;
3494 case VIRTCHNL_LINK_SPEED_10GB:
3495 speed = SPEED_10000;
3496 break;
3497 case VIRTCHNL_LINK_SPEED_20GB:
3498 speed = SPEED_20000;
3499 break;
3500 case VIRTCHNL_LINK_SPEED_25GB:
3501 speed = SPEED_25000;
3502 break;
3503 case VIRTCHNL_LINK_SPEED_40GB:
3504 speed = SPEED_40000;
3505 break;
3506 default:
3507 dev_err(&pf->pdev->dev,
3508 "Cannot detect link speed\n");
3509 aq_ret = I40E_ERR_PARAM;
3510 goto err;
3511 }
3512
3513 /* parse data from the queue channel info */
3514 vf->num_tc = tci->num_tc;
3515 for (i = 0; i < vf->num_tc; i++) {
3516 if (tci->list[i].max_tx_rate) {
3517 if (tci->list[i].max_tx_rate > speed) {
3518 dev_err(&pf->pdev->dev,
3519 "Invalid max tx rate %llu specified for VF %d.",
3520 tci->list[i].max_tx_rate,
3521 vf->vf_id);
3522 aq_ret = I40E_ERR_PARAM;
3523 goto err;
3524 } else {
3525 vf->ch[i].max_tx_rate =
3526 tci->list[i].max_tx_rate;
3527 }
3528 }
3529 vf->ch[i].num_qps = tci->list[i].count;
3530 }
3531
3532 /* set this flag only after making sure all inputs are sane */
3533 vf->adq_enabled = true;
3534 /* num_req_queues is set when the user changes the number of queues
3535 * via ethtool, and it causes issues for the default VSI (which depends
3536 * on this variable) once ADq is enabled, hence reset it here.
3537 */
3538 vf->num_req_queues = 0;
3539
3540 /* reset the VF in order to allocate resources */
3541 i40e_vc_notify_vf_reset(vf);
3542 i40e_reset_vf(vf, false);
3543
3544 return I40E_SUCCESS;
3545
3546 /* send the response to the VF */
3547 err:
3548 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3549 aq_ret);
3550 }
3551
3552 /**
3553 * i40e_vc_del_qch_msg
3554 * @vf: pointer to the VF info
3555 * @msg: pointer to the msg buffer
3556 **/
3557 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3558 {
3559 struct i40e_pf *pf = vf->pf;
3560 i40e_status aq_ret = 0;
3561
3562 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3563 aq_ret = I40E_ERR_PARAM;
3564 goto err;
3565 }
3566
3567 if (vf->adq_enabled) {
3568 i40e_del_all_cloud_filters(vf);
3569 i40e_del_qch(vf);
3570 vf->adq_enabled = false;
3571 vf->num_tc = 0;
3572 dev_info(&pf->pdev->dev,
3573 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3574 vf->vf_id);
3575 } else {
3576 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3577 vf->vf_id);
3578 aq_ret = I40E_ERR_PARAM;
3579 }
3580
3581 /* reset the VF in order to allocate resources */
3582 i40e_vc_notify_vf_reset(vf);
3583 i40e_reset_vf(vf, false);
3584
3585 return I40E_SUCCESS;
3586
3587 err:
3588 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3589 aq_ret);
3590 }
3591
3592 /**
3593 * i40e_vc_process_vf_msg
3594 * @pf: pointer to the PF structure
3595 * @vf_id: source VF id
3596 * @v_opcode: operation code
3597 * @v_retval: unused return value code
3598 * @msg: pointer to the msg buffer
3599 * @msglen: msg length
3600 *
3601 * called from the common aeq/arq handler to
3602 * process requests from a VF
3603 **/
3604 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3605 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3606 {
3607 struct i40e_hw *hw = &pf->hw;
3608 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3609 struct i40e_vf *vf;
3610 int ret;
3611
3612 pf->vf_aq_requests++;
3613 if (local_vf_id >= pf->num_alloc_vfs)
3614 return -EINVAL;
3615 vf = &(pf->vf[local_vf_id]);
3616
3617 /* Check if VF is disabled. */
3618 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3619 return I40E_ERR_PARAM;
3620
3621 /* perform basic checks on the msg */
3622 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3623
3624 /* perform additional checks specific to this driver */
3625 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
3626 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
3627
3628 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
3629 ret = -EINVAL;
3630 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
3631 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
3632
3633 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
3634 ret = -EINVAL;
3635 }
3636
3637 if (ret) {
3638 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3639 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3640 local_vf_id, v_opcode, msglen);
3641 switch (ret) {
3642 case VIRTCHNL_ERR_PARAM:
3643 return -EPERM;
3644 default:
3645 return -EINVAL;
3646 }
3647 }

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_IWARP:
		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
		break;
	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ret = i40e_vc_config_rss_key(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		ret = i40e_vc_add_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		ret = i40e_vc_del_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		ret = i40e_vc_add_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		ret = i40e_vc_del_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
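
/* Note: the value returned here only propagates to the AdminQ subtask;
 * each opcode handler above is responsible for sending its own virtchnl
 * reply (success or error) back to the VF over the mailbox.
 */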

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
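		/* Worked example with hypothetical values: vf_base_id 64 and
		 * vf_id 3 give absolute VF id 67, so reg_idx = 67 / 32 = 2
		 * and bit_idx = 67 % 32 = 3, i.e. bit 3 of GLGEN_VFLRSTAT[2].
		 */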
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			i40e_reset_vf(vf, true);
	}

	return 0;
}

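/* i40e_ndo_set_vf_mac() implements the .ndo_set_vf_mac hook registered in
 * the driver's net_device_ops. It is typically reached via rtnetlink, e.g.
 * from iproute2 (the interface name here is hypothetical):
 *
 *	ip link set dev enp1s0f0 vf 0 mac 02:00:00:00:00:01
 */
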
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because the add/del_filter functions invoked below
	 * require mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF driver stop so it has to reload with new MAC address */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");

error_param:
	return ret;
}

/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the vsi
 *
 * Returns true if the VSI has at least one VLAN configured; returns false
 * if a port VLAN is set or if no VLANs are configured. Do not call while
 * holding the mac_filter_hash_lock.
 */
static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
{
	bool have_vlans;

	/* If we have a port VLAN, then the VSI cannot have any VLANs
	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
	 */
	if (vsi->info.pvid)
		return false;

	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	have_vlans = i40e_is_vsi_in_vlan(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return have_vlans;
}

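/* i40e_ndo_set_vf_port_vlan() backs the .ndo_set_vf_vlan hook. A typical
 * invocation via iproute2 (interface name hypothetical):
 *
 *	ip link set dev enp1s0f0 vf 0 vlan 100 qos 3
 *
 * Setting vlan 0 removes the port VLAN again.
 */
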
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID to set
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_id > I40E_MAX_VLANID || qos > 7) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator error - knock the VF offline until the
		 * network is reconfigured correctly and the VF driver is
		 * reloaded.
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Locked once because multiple functions below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for the condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}

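/* i40e_ndo_set_vf_bw() backs the .ndo_set_vf_rate hook. Only a maximum
 * Tx rate is supported; a nonzero minimum rate is rejected below. For
 * example, via iproute2 (interface name hypothetical, rate in Mbps):
 *
 *	ip link set dev enp1s0f0 vf 0 max_tx_rate 1000
 */
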
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min Tx rate (%d) specified for VF %d; a minimum Tx rate is not supported.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	return ret;
}

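/* i40e_ndo_get_vf_config() backs the .ndo_get_vf_config hook; the fields
 * filled in below are what "ip link show" reports for each VF of the PF.
 */
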
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	return ret;
}

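/* i40e_ndo_set_vf_link_state() backs the .ndo_set_vf_link_state hook,
 * e.g. via iproute2 (interface name hypothetical):
 *
 *	ip link set dev enp1s0f0 vf 0 state disable
 */
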
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}

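/* i40e_ndo_set_vf_spoofchk() backs the .ndo_set_vf_spoofchk hook, e.g.
 * via iproute2 (interface name hypothetical):
 *
 *	ip link set dev enp1s0f0 vf 0 spoofchk off
 */
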
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}

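/* i40e_ndo_set_vf_trust() backs the .ndo_set_vf_trust hook, e.g. via
 * iproute2 (interface name hypothetical):
 *
 *	ip link set dev enp1s0f0 vf 0 trust on
 */
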
/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		return -EINVAL;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}

out:
	return ret;
}
