// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}

/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: VF whose VSI will be freed and invalidated
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_release(vsi);
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable the VF's MSIX and queue mappings in hardware
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_eswitch_release(pf);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		/* clear malicious info since the VF is getting released */
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
					ICE_MAX_SRIOV_VFS, vf->vf_id))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				vf->vf_id);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
}
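
/* Worked example (hypothetical values): with sriov_base_vector = 888 and
 * num_msix_per = 17, VF 0's block starts at PF-space vector 888 and VF 1's
 * at 888 + 1 * 17 = 905; the first vector of each block is that VF's OICR.
 */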

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}
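
/* Illustrative mapping (hypothetical values): with msix_vector_first_id = 0
 * and vf_base_id = 16, VF 3's PF-based vector range is written device-based
 * into VPINT_ALLOC/VPINT_ALLOC_PCI, while each GLINT_VECT2FUNC entry is
 * tagged with device-based VF ID 16 + 3 = 19 so hardware routes those
 * vectors to that VF.
 */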

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
		q_vector->v_idx + 1;
}
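
/* Example (hypothetical values, continuing the layout above): with
 * sriov_base_vector = 888 and num_msix_per = 17, q_vector 0 of VF 1
 * resolves to 888 + 17 * 1 + 0 + 1 = 906, skipping vector 905, which is
 * reserved for that VF's OICR.
 */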

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}
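
/* Sketch of the resulting split (hypothetical values): with 1024 total
 * MSI-X vectors, 64 of them tracked by pf->irq_tracker, and a request of
 * 8 VFs * 17 vectors = 136, sriov_base_vector becomes 1024 - 136 = 888,
 * which is >= 64, so the SR-IOV block [888, 1023] stays clear of the
 * irq_tracker entries at the front of the space.
 */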

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	if (max_valid_res_idx < 0)
		return -ENOSPC;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}
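
/* Example walk-through (hypothetical values): with 150 vectors left for
 * SR-IOV and 8 VFs requested, msix_avail_per_vf = 18, which lands in the
 * medium tier of 17 vectors / 16 queue pairs per VF described above. If
 * only 96 Tx queues remain, avail_qs = 96 / 8 = 12, so num_txq is rounded
 * down to the power of two 8, and num_qps_per follows the smaller of the
 * Tx/Rx counts.
 */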

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
 * VF VSI's broadcast filter and is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	dev = ice_pf_to_dev(pf);
	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vsi_add_vlan_zero(vsi);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err) {
		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	eth_broadcast_addr(broadcast);
	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
			vf->vf_id, err);
		goto release_vsi;
	}

	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
	if (err) {
		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	vf->num_mac = 1;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}
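
/* Note on the teardown loop above: it_cnt bounds the undo work to the VFs
 * that completed initialization; since vfs.table_lock is held and the
 * table is not modified in between, both loops walk the hash table in the
 * same order, so exactly the initialized VFs are torn down.
 */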

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}
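
/* kfree_rcu() defers the free for an RCU grace period, so lockless
 * readers still walking the table under ice_for_each_vf_rcu() after
 * hash_del_rcu() in ice_free_vf_entries() never touch freed memory.
 */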

/**
 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
 * @vf: the vf to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and cleanup after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}
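
/* With 10 iterations of usleep_range(10, 20), the poll above waits at
 * most roughly 200 usecs for VPGEN_VFRSTAT to report completion before
 * giving up.
 */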

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
 * @vf: VF to release and setup the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
 * configuration change, etc.).
 */
static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	ice_vf_vsi_release(vf);
	if (!ice_vf_vsi_setup(vf)) {
		dev_err(ice_pf_to_dev(pf),
			"Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.vsi_rebuild = ice_sriov_vsi_rebuild,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_vf *vf;
	u16 vf_id;
	int err;

	lockdep_assert_held(&vfs->table_lock);

	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		vf->vf_sw_id = pf->first_sw;
		/* assign default capabilities */
		vf->spoofchk = true;
		vf->num_vf_qs = pf->vfs.num_qps_per;
		ice_vc_set_default_allowlist(vf);

		/* ctrl_vsi_idx will be set to a valid value only when VF
		 * creates its first fdir rule.
		 */
		ice_vf_ctrl_invalidate_vsi(vf);
		ice_vf_fdir_init(vf);

		ice_virtchnl_set_dflt_ops(vf);

		mutex_init(&vf->cfg_lock);

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	ret = ice_eswitch_configure(pf);
	if (ret) {
		dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
		goto err_unroll_sriov;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			ice_mbx_deinit_snapshot(&pf->hw);
			if (pf->lag)
				ice_enable_lag(pf->lag);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
	if (err)
		return err;

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err) {
		ice_mbx_deinit_snapshot(&pf->hw);
		return err;
	}

	if (pf->lag)
		ice_disable_lag(pf->lag);
	return num_vfs;
}
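
/* This entry point is normally exercised through the standard SR-IOV
 * sysfs attribute, e.g. (illustrative shell commands):
 *
 *   echo 4 > /sys/class/net/<iface>/device/sriov_numvfs   # create 4 VFs
 *   echo 0 > /sys/class/net/<iface>/device/sriov_numvfs   # free them
 *
 * The PCI core translates both writes into calls to ice_sriov_configure()
 * above.
 */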

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state
 * variables.
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
 * reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_cfg
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr.addr, mac);
	ether_addr_copy(vf->hw_lan_addr.addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		return -EOPNOTSUPP;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of total
 * min_tx_rate based on the current link speed and all other VFs configured
 * min_tx_rate
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}
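
/* Example (hypothetical numbers): on a 10000 Mbps link where all VFs
 * together reserve 9500 Mbps and this VF's current share is 200 Mbps, a
 * request for 800 Mbps evaluates 9500 - 200 + 800 = 10100 > 10000 and is
 * rejected as 100 Mbps of oversubscription.
 */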

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF Port VLAN ID and/or QoS
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr.addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
				vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
				vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr.addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}
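
/* The jiffies check above means at most one batch of VF MDD messages is
 * printed per second; events that arrive faster are coalesced and
 * reported on a later pass via the last_printed counters.
 */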

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vfdev;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}

/**
 * ice_is_malicious_vf - helper function to detect a malicious VF
 * @pf: ptr to struct ice_pf
 * @event: pointer to the AQ event
 * @num_msg_proc: the number of messages processed so far
 * @num_msg_pending: the number of messages pending in admin queue
 */
bool
ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
		    u16 num_msg_proc, u16 num_msg_pending)
{
	s16 vf_id = le16_to_cpu(event->desc.retval);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_mbx_data mbxdata;
	bool malvf = false;
	struct ice_vf *vf;
	int status;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return false;

	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		goto out_put_vf;

	mbxdata.num_msg_proc = num_msg_proc;
	mbxdata.num_pending_arq = num_msg_pending;
	mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
#define ICE_MBX_OVERFLOW_WATERMARK 64
	mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

	/* check to see if we have a malicious VF */
	status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
	if (status)
		goto out_put_vf;

	if (malvf) {
		bool report_vf = false;

		/* if the VF is malicious and we haven't let the user
		 * know about it, then let them know now
		 */
		status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
					      ICE_MAX_SRIOV_VFS, vf_id,
					      &report_vf);
		if (status)
			dev_dbg(dev, "Error reporting malicious VF\n");

		if (report_vf) {
			struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);

			if (pf_vsi)
				dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
					 &vf->dev_lan_addr.addr[0],
					 pf_vsi->netdev->dev_addr);
		}
	}

out_put_vf:
	ice_put_vf(vf);
	return malvf;
}