1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/crc32.h>
35 #include <linux/vmalloc.h>
36 #include <linux/qed/qed_iov_if.h>
37 #include "qed_cxt.h"
38 #include "qed_hsi.h"
39 #include "qed_hw.h"
40 #include "qed_init_ops.h"
41 #include "qed_int.h"
42 #include "qed_mcp.h"
43 #include "qed_reg_addr.h"
44 #include "qed_sp.h"
45 #include "qed_sriov.h"
46 #include "qed_vf.h"
47 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
48 u8 opcode,
49 __le16 echo,
50 union event_ring_data *data, u8 fw_return_code);
51 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
52
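/* Derive the legacy-mode flags for a VF's queue CIDs from its ACQUIRE
 * request: a pre-packet-len-tunneling fastpath HSI implies the legacy Rx
 * producer location, and lack of the QUEUE_QIDS capability implies legacy
 * CID numbering.
 */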
53 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
54 {
55 u8 legacy = 0;
56
57 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
58 ETH_HSI_VER_NO_PKT_LEN_TUNN)
59 legacy |= QED_QCID_LEGACY_VF_RX_PROD;
60
61 if (!(p_vf->acquire.vfdev_info.capabilities &
62 VFPF_ACQUIRE_CAP_QUEUE_QIDS))
63 legacy |= QED_QCID_LEGACY_VF_CID;
64
65 return legacy;
66 }
67
68 /* IOV ramrods */
69 static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
70 {
71 struct vf_start_ramrod_data *p_ramrod = NULL;
72 struct qed_spq_entry *p_ent = NULL;
73 struct qed_sp_init_data init_data;
74 int rc = -EINVAL;
75 u8 fp_minor;
76
77 /* Get SPQ entry */
78 memset(&init_data, 0, sizeof(init_data));
79 init_data.cid = qed_spq_get_cid(p_hwfn);
80 init_data.opaque_fid = p_vf->opaque_fid;
81 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
82
83 rc = qed_sp_init_request(p_hwfn, &p_ent,
84 COMMON_RAMROD_VF_START,
85 PROTOCOLID_COMMON, &init_data);
86 if (rc)
87 return rc;
88
89 p_ramrod = &p_ent->ramrod.vf_start;
90
91 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
92 p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
93
94 switch (p_hwfn->hw_info.personality) {
95 case QED_PCI_ETH:
96 p_ramrod->personality = PERSONALITY_ETH;
97 break;
98 case QED_PCI_ETH_ROCE:
99 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
100 break;
101 default:
102 DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
103 p_hwfn->hw_info.personality);
104 qed_sp_destroy_request(p_hwfn, p_ent);
105 return -EINVAL;
106 }
107
108 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
109 if (fp_minor > ETH_HSI_VER_MINOR &&
110 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
111 DP_VERBOSE(p_hwfn,
112 QED_MSG_IOV,
113 "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
114 p_vf->abs_vf_id,
115 ETH_HSI_VER_MAJOR,
116 fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
117 fp_minor = ETH_HSI_VER_MINOR;
118 }
119
120 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
121 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
122
123 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
124 "VF[%d] - Starting using HSI %02x.%02x\n",
125 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
126
127 return qed_spq_post(p_hwfn, p_ent, NULL);
128 }
129
130 static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
131 u32 concrete_vfid, u16 opaque_vfid)
132 {
133 struct vf_stop_ramrod_data *p_ramrod = NULL;
134 struct qed_spq_entry *p_ent = NULL;
135 struct qed_sp_init_data init_data;
136 int rc = -EINVAL;
137
138 /* Get SPQ entry */
139 memset(&init_data, 0, sizeof(init_data));
140 init_data.cid = qed_spq_get_cid(p_hwfn);
141 init_data.opaque_fid = opaque_vfid;
142 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
143
144 rc = qed_sp_init_request(p_hwfn, &p_ent,
145 COMMON_RAMROD_VF_STOP,
146 PROTOCOLID_COMMON, &init_data);
147 if (rc)
148 return rc;
149
150 p_ramrod = &p_ent->ramrod.vf_stop;
151
152 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
153
154 return qed_spq_post(p_hwfn, p_ent, NULL);
155 }
156
157 bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
158 int rel_vf_id,
159 bool b_enabled_only, bool b_non_malicious)
160 {
161 if (!p_hwfn->pf_iov_info) {
162 DP_NOTICE(p_hwfn->cdev, "No iov info\n");
163 return false;
164 }
165
166 if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
167 (rel_vf_id < 0))
168 return false;
169
170 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
171 b_enabled_only)
172 return false;
173
174 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
175 b_non_malicious)
176 return false;
177
178 return true;
179 }
180
181 static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
182 u16 relative_vf_id,
183 bool b_enabled_only)
184 {
185 struct qed_vf_info *vf = NULL;
186
187 if (!p_hwfn->pf_iov_info) {
188 DP_NOTICE(p_hwfn->cdev, "No iov info\n");
189 return NULL;
190 }
191
192 if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
193 b_enabled_only, false))
194 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
195 else
196 DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
197 relative_vf_id);
198
199 return vf;
200 }
201
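/* Return the Rx queue-cid of the given queue-zone, or NULL if none exists */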
202 static struct qed_queue_cid *
203 qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
204 {
205 int i;
206
207 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
208 if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
209 return p_queue->cids[i].p_cid;
210 }
211
212 return NULL;
213 }
214
215 enum qed_iov_validate_q_mode {
216 QED_IOV_VALIDATE_Q_NA,
217 QED_IOV_VALIDATE_Q_ENABLE,
218 QED_IOV_VALIDATE_Q_DISABLE,
219 };
220
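/* Check whether queue-zone 'qid' holds a CID of the given direction in the
 * requested state; QED_IOV_VALIDATE_Q_NA matches unconditionally.
 */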
221 static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
222 struct qed_vf_info *p_vf,
223 u16 qid,
224 enum qed_iov_validate_q_mode mode,
225 bool b_is_tx)
226 {
227 int i;
228
229 if (mode == QED_IOV_VALIDATE_Q_NA)
230 return true;
231
232 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
233 struct qed_vf_queue_cid *p_qcid;
234
235 p_qcid = &p_vf->vf_queues[qid].cids[i];
236
237 if (!p_qcid->p_cid)
238 continue;
239
240 if (p_qcid->b_is_tx != b_is_tx)
241 continue;
242
243 return mode == QED_IOV_VALIDATE_Q_ENABLE;
244 }
245
246 /* In case we haven't found any valid cid, then it's disabled */
247 return mode == QED_IOV_VALIDATE_Q_DISABLE;
248 }
249
250 static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
251 struct qed_vf_info *p_vf,
252 u16 rx_qid,
253 enum qed_iov_validate_q_mode mode)
254 {
255 if (rx_qid >= p_vf->num_rxqs) {
256 DP_VERBOSE(p_hwfn,
257 QED_MSG_IOV,
258 "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
259 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
260 return false;
261 }
262
263 return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
264 }
265
266 static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
267 struct qed_vf_info *p_vf,
268 u16 tx_qid,
269 enum qed_iov_validate_q_mode mode)
270 {
271 if (tx_qid >= p_vf->num_txqs) {
272 DP_VERBOSE(p_hwfn,
273 QED_MSG_IOV,
274 "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
275 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
276 return false;
277 }
278
279 return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
280 }
281
282 static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
283 struct qed_vf_info *p_vf, u16 sb_idx)
284 {
285 int i;
286
287 for (i = 0; i < p_vf->num_sbs; i++)
288 if (p_vf->igu_sbs[i] == sb_idx)
289 return true;
290
291 DP_VERBOSE(p_hwfn,
292 QED_MSG_IOV,
293 "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
294 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
295
296 return false;
297 }
298
299 static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
300 struct qed_vf_info *p_vf)
301 {
302 u8 i;
303
304 for (i = 0; i < p_vf->num_rxqs; i++)
305 if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
306 QED_IOV_VALIDATE_Q_ENABLE,
307 false))
308 return true;
309
310 return false;
311 }
312
313 static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
314 struct qed_vf_info *p_vf)
315 {
316 u8 i;
317
318 for (i = 0; i < p_vf->num_txqs; i++)
319 if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
320 QED_IOV_VALIDATE_Q_ENABLE,
321 true))
322 return true;
323
324 return false;
325 }
326
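/* Bump the bulletin board version, recompute its CRC and DMA the updated
 * copy into the VF's own bulletin buffer.
 */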
327 static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
328 int vfid, struct qed_ptt *p_ptt)
329 {
330 struct qed_bulletin_content *p_bulletin;
331 int crc_size = sizeof(p_bulletin->crc);
332 struct qed_dmae_params params;
333 struct qed_vf_info *p_vf;
334
335 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
336 if (!p_vf)
337 return -EINVAL;
338
339 if (!p_vf->vf_bulletin)
340 return -EINVAL;
341
342 p_bulletin = p_vf->bulletin.p_virt;
343
344 /* Increment bulletin board version and compute crc */
345 p_bulletin->version++;
346 p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
347 p_vf->bulletin.size - crc_size);
348
349 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
350 "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
351 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
352
353 /* propagate bulletin board via dmae to vm memory */
354 memset(&params, 0, sizeof(params));
355 params.flags = QED_DMAE_FLAG_VF_DST;
356 params.dst_vfid = p_vf->abs_vf_id;
357 return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
358 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
359 &params);
360 }
361
362 static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
363 {
364 struct qed_hw_sriov_info *iov = cdev->p_iov_info;
365 int pos = iov->pos;
366
367 DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
368 pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
369
370 pci_read_config_word(cdev->pdev,
371 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
372 pci_read_config_word(cdev->pdev,
373 pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
374
375 pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
376 if (iov->num_vfs) {
377 DP_VERBOSE(cdev,
378 QED_MSG_IOV,
379 "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
380 iov->num_vfs = 0;
381 }
382
383 pci_read_config_word(cdev->pdev,
384 pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
385
386 pci_read_config_word(cdev->pdev,
387 pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
388
389 pci_read_config_word(cdev->pdev,
390 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
391
392 pci_read_config_dword(cdev->pdev,
393 pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
394
395 pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
396
397 pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
398
399 DP_VERBOSE(cdev,
400 QED_MSG_IOV,
401 "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
402 iov->nres,
403 iov->cap,
404 iov->ctrl,
405 iov->total_vfs,
406 iov->initial_vfs,
407 iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
408
409 /* Some sanity checks */
410 if (iov->num_vfs > NUM_OF_VFS(cdev) ||
411 iov->total_vfs > NUM_OF_VFS(cdev)) {
412 /* This can happen only due to a bug. In this case we set
413 * num_vfs to zero to avoid memory corruption in the code that
414 * assumes max number of vfs
415 */
416 DP_NOTICE(cdev,
417 "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
418 iov->num_vfs);
419
420 iov->num_vfs = 0;
421 iov->total_vfs = 0;
422 }
423
424 return 0;
425 }
426
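/* Initialize the per-VF database - mailbox request/reply and bulletin
 * addresses, relative/absolute IDs, FIDs, vport-id and default filter counts.
 */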
427 static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
428 {
429 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
430 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
431 struct qed_bulletin_content *p_bulletin_virt;
432 dma_addr_t req_p, rply_p, bulletin_p;
433 union pfvf_tlvs *p_reply_virt_addr;
434 union vfpf_tlvs *p_req_virt_addr;
435 u8 idx = 0;
436
437 memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
438
439 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
440 req_p = p_iov_info->mbx_msg_phys_addr;
441 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
442 rply_p = p_iov_info->mbx_reply_phys_addr;
443 p_bulletin_virt = p_iov_info->p_bulletins;
444 bulletin_p = p_iov_info->bulletins_phys;
445 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
446 DP_ERR(p_hwfn,
447 "qed_iov_setup_vfdb called without allocating mem first\n");
448 return;
449 }
450
451 for (idx = 0; idx < p_iov->total_vfs; idx++) {
452 struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
453 u32 concrete;
454
455 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
456 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
457 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
458 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
459
460 vf->state = VF_STOPPED;
461 vf->b_init = false;
462
463 vf->bulletin.phys = idx *
464 sizeof(struct qed_bulletin_content) +
465 bulletin_p;
466 vf->bulletin.p_virt = p_bulletin_virt + idx;
467 vf->bulletin.size = sizeof(struct qed_bulletin_content);
468
469 vf->relative_vf_id = idx;
470 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
471 concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
472 vf->concrete_fid = concrete;
473 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
474 (vf->abs_vf_id << 8);
475 vf->vport_id = idx + 1;
476
477 vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
478 vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
479 }
480 }
481
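/* Allocate the DMA-coherent arrays backing the per-VF mailbox requests,
 * mailbox replies and bulletin boards.
 */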
482 static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
483 {
484 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
485 void **p_v_addr;
486 u16 num_vfs = 0;
487
488 num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
489
490 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
491 "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
492
493 /* Allocate PF Mailbox buffer (per-VF) */
494 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
495 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
496 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
497 p_iov_info->mbx_msg_size,
498 &p_iov_info->mbx_msg_phys_addr,
499 GFP_KERNEL);
500 if (!*p_v_addr)
501 return -ENOMEM;
502
503 /* Allocate PF Mailbox Reply buffer (per-VF) */
504 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
505 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
506 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
507 p_iov_info->mbx_reply_size,
508 &p_iov_info->mbx_reply_phys_addr,
509 GFP_KERNEL);
510 if (!*p_v_addr)
511 return -ENOMEM;
512
513 p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
514 num_vfs;
515 p_v_addr = &p_iov_info->p_bulletins;
516 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
517 p_iov_info->bulletins_size,
518 &p_iov_info->bulletins_phys,
519 GFP_KERNEL);
520 if (!*p_v_addr)
521 return -ENOMEM;
522
523 DP_VERBOSE(p_hwfn,
524 QED_MSG_IOV,
525 "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
526 p_iov_info->mbx_msg_virt_addr,
527 (u64) p_iov_info->mbx_msg_phys_addr,
528 p_iov_info->mbx_reply_virt_addr,
529 (u64) p_iov_info->mbx_reply_phys_addr,
530 p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
531
532 return 0;
533 }
534
535 static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
536 {
537 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
538
539 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
540 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
541 p_iov_info->mbx_msg_size,
542 p_iov_info->mbx_msg_virt_addr,
543 p_iov_info->mbx_msg_phys_addr);
544
545 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
546 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
547 p_iov_info->mbx_reply_size,
548 p_iov_info->mbx_reply_virt_addr,
549 p_iov_info->mbx_reply_phys_addr);
550
551 if (p_iov_info->p_bulletins)
552 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
553 p_iov_info->bulletins_size,
554 p_iov_info->p_bulletins,
555 p_iov_info->bulletins_phys);
556 }
557
558 int qed_iov_alloc(struct qed_hwfn *p_hwfn)
559 {
560 struct qed_pf_iov *p_sriov;
561
562 if (!IS_PF_SRIOV(p_hwfn)) {
563 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
564 "No SR-IOV - no need for IOV db\n");
565 return 0;
566 }
567
568 p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
569 if (!p_sriov)
570 return -ENOMEM;
571
572 p_hwfn->pf_iov_info = p_sriov;
573
574 qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
575 qed_sriov_eqe_event);
576
577 return qed_iov_allocate_vfdb(p_hwfn);
578 }
579
580 void qed_iov_setup(struct qed_hwfn *p_hwfn)
581 {
582 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
583 return;
584
585 qed_iov_setup_vfdb(p_hwfn);
586 }
587
588 void qed_iov_free(struct qed_hwfn *p_hwfn)
589 {
590 qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
591
592 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
593 qed_iov_free_vfdb(p_hwfn);
594 kfree(p_hwfn->pf_iov_info);
595 }
596 }
597
598 void qed_iov_free_hw_info(struct qed_dev *cdev)
599 {
600 kfree(cdev->p_iov_info);
601 cdev->p_iov_info = NULL;
602 }
603
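/* Learn the SR-IOV PCI capability, cache its contents in cdev->p_iov_info
 * and derive the index of the first VF belonging to this PF.
 */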
604 int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
605 {
606 struct qed_dev *cdev = p_hwfn->cdev;
607 int pos;
608 int rc;
609
610 if (IS_VF(p_hwfn->cdev))
611 return 0;
612
613 /* Learn the PCI configuration */
614 pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
615 PCI_EXT_CAP_ID_SRIOV);
616 if (!pos) {
617 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
618 return 0;
619 }
620
621 /* Allocate a new struct for IOV information */
622 cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
623 if (!cdev->p_iov_info)
624 return -ENOMEM;
625
626 cdev->p_iov_info->pos = pos;
627
628 rc = qed_iov_pci_cfg_info(cdev);
629 if (rc)
630 return rc;
631
632 /* We want PF IOV to be synonymous with the existence of p_iov_info;
633 * In case the capability is published but there are no VFs, simply
634 * de-allocate the struct.
635 */
636 if (!cdev->p_iov_info->total_vfs) {
637 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
638 "IOV capabilities, but no VFs are published\n");
639 kfree(cdev->p_iov_info);
640 cdev->p_iov_info = NULL;
641 return 0;
642 }
643
644 /* First VF index based on offset is tricky:
645 * - If ARI is supported [likely], offset - (16 - pf_id) would
646 * provide the number for eng0. 2nd engine VFs would begin
647 * after the first engine's VFs.
648 * - If !ARI, VFs would start on next device.
649 * so offset - (256 - pf_id) would provide the number.
650 * Utilize the fact that (256 - pf_id) is achieved only by the latter
651 * to differentiate between the two.
652 */
653
654 if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
655 u32 first = p_hwfn->cdev->p_iov_info->offset +
656 p_hwfn->abs_pf_id - 16;
657
658 cdev->p_iov_info->first_vf_in_pf = first;
659
660 if (QED_PATH_ID(p_hwfn))
661 cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
662 } else {
663 u32 first = p_hwfn->cdev->p_iov_info->offset +
664 p_hwfn->abs_pf_id - 256;
665
666 cdev->p_iov_info->first_vf_in_pf = first;
667 }
668
669 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
670 "First VF in hwfn 0x%08x\n",
671 cdev->p_iov_info->first_vf_in_pf);
672
673 return 0;
674 }
675
676 static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
677 int vfid, bool b_fail_malicious)
678 {
679 /* Check PF supports sriov */
680 if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
681 !IS_PF_SRIOV_ALLOC(p_hwfn))
682 return false;
683
684 /* Check VF validity */
685 if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
686 return false;
687
688 return true;
689 }
690
691 static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
692 {
693 return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
694 }
695
696 static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
697 u16 rel_vf_id, u8 to_disable)
698 {
699 struct qed_vf_info *vf;
700 int i;
701
702 for_each_hwfn(cdev, i) {
703 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
704
705 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
706 if (!vf)
707 continue;
708
709 vf->to_disable = to_disable;
710 }
711 }
712
713 static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
714 {
715 u16 i;
716
717 if (!IS_QED_SRIOV(cdev))
718 return;
719
720 for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
721 qed_iov_set_vf_to_disable(cdev, i, to_disable);
722 }
723
724 static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
725 struct qed_ptt *p_ptt, u8 abs_vfid)
726 {
727 qed_wr(p_hwfn, p_ptt,
728 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
729 1 << (abs_vfid & 0x1f));
730 }
731
732 static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
733 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
734 {
735 int i;
736
737 /* Set VF masks and configuration - pretend */
738 qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
739
740 qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
741
742 /* unpretend */
743 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
744
745 /* iterate over all queues, clear sb consumer */
746 for (i = 0; i < vf->num_sbs; i++)
747 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
748 vf->igu_sbs[i],
749 vf->opaque_fid, true);
750 }
751
752 static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
753 struct qed_ptt *p_ptt,
754 struct qed_vf_info *vf, bool enable)
755 {
756 u32 igu_vf_conf;
757
758 qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
759
760 igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
761
762 if (enable)
763 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
764 else
765 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
766
767 qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
768
769 /* unpretend */
770 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
771 }
772
773 static int
774 qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
775 struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
776 {
777 u8 current_max = 0;
778 int i;
779
780 /* For AH onward, configuration is per-PF. Find maximum of all
781 * the currently enabled child VFs, and set the number to be that.
782 */
783 if (!QED_IS_BB(p_hwfn->cdev)) {
784 qed_for_each_vf(p_hwfn, i) {
785 struct qed_vf_info *p_vf;
786
787 p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
788 if (!p_vf)
789 continue;
790
791 current_max = max_t(u8, current_max, p_vf->num_sbs);
792 }
793 }
794
795 if (num_sbs > current_max)
796 return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
797 abs_vf_id, num_sbs);
798
799 return 0;
800 }
801
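/* Clear any stale PGLUE error indication, reset the VF's IGU state,
 * request MSI-X configuration from the MFW and run the VF init-phase,
 * moving the VF into the VF_FREE state.
 */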
802 static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
803 struct qed_ptt *p_ptt,
804 struct qed_vf_info *vf)
805 {
806 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
807 int rc;
808
809 /* It's possible VF was previously considered malicious -
810 * clear the indication even if we're only going to disable VF.
811 */
812 vf->b_malicious = false;
813
814 if (vf->to_disable)
815 return 0;
816
817 DP_VERBOSE(p_hwfn,
818 QED_MSG_IOV,
819 "Enable internal access for vf %x [abs %x]\n",
820 vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
821
822 qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
823
824 qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
825
826 rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
827 vf->abs_vf_id, vf->num_sbs);
828 if (rc)
829 return rc;
830
831 qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
832
833 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
834 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
835
836 qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
837 p_hwfn->hw_info.hw_mode);
838
839 /* unpretend */
840 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
841
842 vf->state = VF_FREE;
843
844 return rc;
845 }
846
847 /**
848 * @brief qed_iov_config_perm_table - configure the permission
849 * zone table.
850 * In E4, queue zone permission table size is 320x9. There
851 * are 320 VF queues for single engine device (256 for dual
852 * engine device), and each entry has the following format:
853 * {Valid, VF[7:0]}
854 * @param p_hwfn
855 * @param p_ptt
856 * @param vf
857 * @param enable
858 */
859 static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
860 struct qed_ptt *p_ptt,
861 struct qed_vf_info *vf, u8 enable)
862 {
863 u32 reg_addr, val;
864 u16 qzone_id = 0;
865 int qid;
866
867 for (qid = 0; qid < vf->num_rxqs; qid++) {
868 qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
869 &qzone_id);
870
871 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
872 val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
873 qed_wr(p_hwfn, p_ptt, reg_addr, val);
874 }
875 }
876
877 static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
878 struct qed_ptt *p_ptt,
879 struct qed_vf_info *vf)
880 {
881 /* Reset vf in IGU - interrupts are still disabled */
882 qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
883
884 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
885
886 /* Permission Table */
887 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
888 }
889
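/* Carve status blocks for the VF out of the PF's free IOV pool, program the
 * IGU mapping lines and the matching CAU entries, and return how many SBs
 * were actually allocated.
 */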
890 static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
891 struct qed_ptt *p_ptt,
892 struct qed_vf_info *vf, u16 num_rx_queues)
893 {
894 struct qed_igu_block *p_block;
895 struct cau_sb_entry sb_entry;
896 int qid = 0;
897 u32 val = 0;
898
899 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
900 num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
901 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
902
903 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
904 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
905 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
906
907 for (qid = 0; qid < num_rx_queues; qid++) {
908 p_block = qed_get_igu_free_sb(p_hwfn, false);
909 vf->igu_sbs[qid] = p_block->igu_sb_id;
910 p_block->status &= ~QED_IGU_STATUS_FREE;
911 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
912
913 qed_wr(p_hwfn, p_ptt,
914 IGU_REG_MAPPING_MEMORY +
915 sizeof(u32) * p_block->igu_sb_id, val);
916
917 /* Configure igu sb in CAU which were marked valid */
918 qed_init_cau_sb_entry(p_hwfn, &sb_entry,
919 p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
920
921 qed_dmae_host2grc(p_hwfn, p_ptt,
922 (u64)(uintptr_t)&sb_entry,
923 CAU_REG_SB_VAR_MEMORY +
924 p_block->igu_sb_id * sizeof(u64), 2, NULL);
925 }
926
927 vf->num_sbs = (u8) num_rx_queues;
928
929 return vf->num_sbs;
930 }
931
932 static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
933 struct qed_ptt *p_ptt,
934 struct qed_vf_info *vf)
935 {
936 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
937 int idx, igu_id;
938 u32 addr, val;
939
940 /* Invalidate igu CAM lines and mark them as free */
941 for (idx = 0; idx < vf->num_sbs; idx++) {
942 igu_id = vf->igu_sbs[idx];
943 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
944
945 val = qed_rd(p_hwfn, p_ptt, addr);
946 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
947 qed_wr(p_hwfn, p_ptt, addr, val);
948
949 p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
950 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
951 }
952
953 vf->num_sbs = 0;
954 }
955
956 static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
957 u16 vfid,
958 struct qed_mcp_link_params *params,
959 struct qed_mcp_link_state *link,
960 struct qed_mcp_link_capabilities *p_caps)
961 {
962 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
963 vfid,
964 false);
965 struct qed_bulletin_content *p_bulletin;
966
967 if (!p_vf)
968 return;
969
970 p_bulletin = p_vf->bulletin.p_virt;
971 p_bulletin->req_autoneg = params->speed.autoneg;
972 p_bulletin->req_adv_speed = params->speed.advertised_speeds;
973 p_bulletin->req_forced_speed = params->speed.forced_speed;
974 p_bulletin->req_autoneg_pause = params->pause.autoneg;
975 p_bulletin->req_forced_rx = params->pause.forced_rx;
976 p_bulletin->req_forced_tx = params->pause.forced_tx;
977 p_bulletin->req_loopback = params->loopback_mode;
978
979 p_bulletin->link_up = link->link_up;
980 p_bulletin->speed = link->speed;
981 p_bulletin->full_duplex = link->full_duplex;
982 p_bulletin->autoneg = link->an;
983 p_bulletin->autoneg_complete = link->an_complete;
984 p_bulletin->parallel_detection = link->parallel_detection;
985 p_bulletin->pfc_enabled = link->pfc_enabled;
986 p_bulletin->partner_adv_speed = link->partner_adv_speed;
987 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
988 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
989 p_bulletin->partner_adv_pause = link->partner_adv_pause;
990 p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
991
992 p_bulletin->capability_speed = p_caps->speed_capabilities;
993 }
994
995 static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
996 struct qed_ptt *p_ptt,
997 struct qed_iov_vf_init_params *p_params)
998 {
999 struct qed_mcp_link_capabilities link_caps;
1000 struct qed_mcp_link_params link_params;
1001 struct qed_mcp_link_state link_state;
1002 u8 num_of_vf_avaiable_chains = 0;
1003 struct qed_vf_info *vf = NULL;
1004 u16 qid, num_irqs;
1005 int rc = 0;
1006 u32 cids;
1007 u8 i;
1008
1009 vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1010 if (!vf) {
1011 DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
1012 return -EINVAL;
1013 }
1014
1015 if (vf->b_init) {
1016 DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
1017 p_params->rel_vf_id);
1018 return -EINVAL;
1019 }
1020
1021 /* Perform sanity checking on the requested queue_id */
1022 for (i = 0; i < p_params->num_queues; i++) {
1023 u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
1024 u16 max_vf_qzone = min_vf_qzone +
1025 FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;
1026
1027 qid = p_params->req_rx_queue[i];
1028 if (qid < min_vf_qzone || qid > max_vf_qzone) {
1029 DP_NOTICE(p_hwfn,
1030 "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
1031 qid,
1032 p_params->rel_vf_id,
1033 min_vf_qzone, max_vf_qzone);
1034 return -EINVAL;
1035 }
1036
1037 qid = p_params->req_tx_queue[i];
1038 if (qid > max_vf_qzone) {
1039 DP_NOTICE(p_hwfn,
1040 "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
1041 qid, p_params->rel_vf_id, max_vf_qzone);
1042 return -EINVAL;
1043 }
1044
1045 /* If client *really* wants, Tx qid can be shared with PF */
1046 if (qid < min_vf_qzone)
1047 DP_VERBOSE(p_hwfn,
1048 QED_MSG_IOV,
1049 "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
1050 p_params->rel_vf_id, qid, i);
1051 }
1052
1053 /* Limit number of queues according to number of CIDs */
1054 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1055 DP_VERBOSE(p_hwfn,
1056 QED_MSG_IOV,
1057 "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
1058 vf->relative_vf_id, p_params->num_queues, (u16)cids);
1059 num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
1060
1061 num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
1062 p_ptt,
1063 vf, num_irqs);
1064 if (!num_of_vf_avaiable_chains) {
1065 DP_ERR(p_hwfn, "no available igu sbs\n");
1066 return -ENOMEM;
1067 }
1068
1069 /* Choose queue number and index ranges */
1070 vf->num_rxqs = num_of_vf_avaiable_chains;
1071 vf->num_txqs = num_of_vf_avaiable_chains;
1072
1073 for (i = 0; i < vf->num_rxqs; i++) {
1074 struct qed_vf_queue *p_queue = &vf->vf_queues[i];
1075
1076 p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1077 p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1078
1079 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1080 "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
1081 vf->relative_vf_id, i, vf->igu_sbs[i],
1082 p_queue->fw_rx_qid, p_queue->fw_tx_qid);
1083 }
1084
1085 /* Update the link configuration in bulletin */
1086 memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
1087 sizeof(link_params));
1088 memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
1089 memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
1090 sizeof(link_caps));
1091 qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
1092 &link_params, &link_state, &link_caps);
1093
1094 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1095 if (!rc) {
1096 vf->b_init = true;
1097
1098 if (IS_LEAD_HWFN(p_hwfn))
1099 p_hwfn->cdev->p_iov_info->num_vfs++;
1100 }
1101
1102 return rc;
1103 }
1104
1105 static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
1106 struct qed_ptt *p_ptt, u16 rel_vf_id)
1107 {
1108 struct qed_mcp_link_capabilities caps;
1109 struct qed_mcp_link_params params;
1110 struct qed_mcp_link_state link;
1111 struct qed_vf_info *vf = NULL;
1112
1113 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1114 if (!vf) {
1115 DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
1116 return -EINVAL;
1117 }
1118
1119 if (vf->bulletin.p_virt)
1120 memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
1121
1122 memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1123
1124 /* Get the link configuration back in bulletin so
1125 * that when VFs are re-enabled they get the actual
1126 * link configuration.
1127 */
1128 memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
1129 memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
1130 memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
1131 qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1132
1133 /* Forget the VF's acquisition message */
1134 memset(&vf->acquire, 0, sizeof(vf->acquire));
1135
1136 /* disabling interrupts and resetting permission table was done during
1137 * vf-close, however, we could get here without going through vf_close
1138 */
1139 /* Disable Interrupts for VF */
1140 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1141
1142 /* Reset Permission table */
1143 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1144
1145 vf->num_rxqs = 0;
1146 vf->num_txqs = 0;
1147 qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1148
1149 if (vf->b_init) {
1150 vf->b_init = false;
1151
1152 if (IS_LEAD_HWFN(p_hwfn))
1153 p_hwfn->cdev->p_iov_info->num_vfs--;
1154 }
1155
1156 return 0;
1157 }
1158
1159 static bool qed_iov_tlv_supported(u16 tlvtype)
1160 {
1161 return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
1162 }
1163
1164 /* place a given tlv on the tlv buffer, continuing current tlv list */
1165 void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
1166 {
1167 struct channel_tlv *tl = (struct channel_tlv *)*offset;
1168
1169 tl->type = type;
1170 tl->length = length;
1171
1172 /* Offset should keep pointing to next TLV (the end of the last) */
1173 *offset += length;
1174
1175 /* Return a pointer to the start of the added tlv */
1176 return *offset - length;
1177 }
1178
1179 /* list the types and lengths of the tlvs on the buffer */
1180 void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
1181 {
1182 u16 i = 1, total_length = 0;
1183 struct channel_tlv *tlv;
1184
1185 do {
1186 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1187
1188 /* output tlv */
1189 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1190 "TLV number %d: type %d, length %d\n",
1191 i, tlv->type, tlv->length);
1192
1193 if (tlv->type == CHANNEL_TLV_LIST_END)
1194 return;
1195
1196 /* Validate entry - protect against malicious VFs */
1197 if (!tlv->length) {
1198 DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
1199 return;
1200 }
1201
1202 total_length += tlv->length;
1203
1204 if (total_length >= sizeof(struct tlv_buffer_size)) {
1205 DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
1206 return;
1207 }
1208
1209 i++;
1210 } while (1);
1211 }
1212
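/* DMA the prepared TLV reply into the VF's mailbox. The first 8 bytes,
 * carrying the status header, are written last - only after the channel is
 * marked ready - so the VF only ever observes a complete reply.
 */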
1213 static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
1214 struct qed_ptt *p_ptt,
1215 struct qed_vf_info *p_vf,
1216 u16 length, u8 status)
1217 {
1218 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1219 struct qed_dmae_params params;
1220 u8 eng_vf_id;
1221
1222 mbx->reply_virt->default_resp.hdr.status = status;
1223
1224 qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
1225
1226 eng_vf_id = p_vf->abs_vf_id;
1227
1228 memset(&params, 0, sizeof(struct qed_dmae_params));
1229 params.flags = QED_DMAE_FLAG_VF_DST;
1230 params.dst_vfid = eng_vf_id;
1231
1232 qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1233 mbx->req_virt->first_tlv.reply_address +
1234 sizeof(u64),
1235 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1236 &params);
1237
1238 /* Once PF copies the rc to the VF, the latter can continue
1239 * and send an additional message. So we have to make sure the
1240 * channel would be re-set to ready prior to that.
1241 */
1242 REG_WR(p_hwfn,
1243 GTT_BAR0_MAP_REG_USDM_RAM +
1244 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1245
1246 qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1247 mbx->req_virt->first_tlv.reply_address,
1248 sizeof(u64) / 4, &params);
1249 }
1250
1251 static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
1252 enum qed_iov_vport_update_flag flag)
1253 {
1254 switch (flag) {
1255 case QED_IOV_VP_UPDATE_ACTIVATE:
1256 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1257 case QED_IOV_VP_UPDATE_VLAN_STRIP:
1258 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1259 case QED_IOV_VP_UPDATE_TX_SWITCH:
1260 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1261 case QED_IOV_VP_UPDATE_MCAST:
1262 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1263 case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
1264 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1265 case QED_IOV_VP_UPDATE_RSS:
1266 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1267 case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1268 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1269 case QED_IOV_VP_UPDATE_SGE_TPA:
1270 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1271 default:
1272 return 0;
1273 }
1274 }
1275
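/* Build the vport-update reply: a default response TLV followed by one
 * status TLV for every extended TLV the PF recognized in the request.
 */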
1276 static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
1277 struct qed_vf_info *p_vf,
1278 struct qed_iov_vf_mbx *p_mbx,
1279 u8 status,
1280 u16 tlvs_mask, u16 tlvs_accepted)
1281 {
1282 struct pfvf_def_resp_tlv *resp;
1283 u16 size, total_len, i;
1284
1285 memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1286 p_mbx->offset = (u8 *)p_mbx->reply_virt;
1287 size = sizeof(struct pfvf_def_resp_tlv);
1288 total_len = size;
1289
1290 qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1291
1292 /* Prepare response for all extended tlvs if they are found by PF */
1293 for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
1294 if (!(tlvs_mask & BIT(i)))
1295 continue;
1296
1297 resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
1298 qed_iov_vport_to_tlv(p_hwfn, i), size);
1299
1300 if (tlvs_accepted & BIT(i))
1301 resp->hdr.status = status;
1302 else
1303 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1304
1305 DP_VERBOSE(p_hwfn,
1306 QED_MSG_IOV,
1307 "VF[%d] - vport_update response: TLV %d, status %02x\n",
1308 p_vf->relative_vf_id,
1309 qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1310
1311 total_len += size;
1312 }
1313
1314 qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1315 sizeof(struct channel_list_end_tlv));
1316
1317 return total_len;
1318 }
1319
1320 static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
1321 struct qed_ptt *p_ptt,
1322 struct qed_vf_info *vf_info,
1323 u16 type, u16 length, u8 status)
1324 {
1325 struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1326
1327 mbx->offset = (u8 *)mbx->reply_virt;
1328
1329 qed_add_tlv(p_hwfn, &mbx->offset, type, length);
1330 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1331 sizeof(struct channel_list_end_tlv));
1332
1333 qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1334 }
1335
1336 static struct
1337 qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
1338 u16 relative_vf_id,
1339 bool b_enabled_only)
1340 {
1341 struct qed_vf_info *vf = NULL;
1342
1343 vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1344 if (!vf)
1345 return NULL;
1346
1347 return &vf->p_vf_info;
1348 }
1349
1350 static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
1351 {
1352 struct qed_public_vf_info *vf_info;
1353
1354 vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
1355
1356 if (!vf_info)
1357 return;
1358
1359 /* Clear the VF mac */
1360 eth_zero_addr(vf_info->mac);
1361
1362 vf_info->rx_accept_mode = 0;
1363 vf_info->tx_accept_mode = 0;
1364 }
1365
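/* Release all queue-cids held by the VF and reset its runtime and shadow
 * configuration back to defaults.
 */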
1366 static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
1367 struct qed_vf_info *p_vf)
1368 {
1369 u32 i, j;
1370
1371 p_vf->vf_bulletin = 0;
1372 p_vf->vport_instance = 0;
1373 p_vf->configured_features = 0;
1374
1375 /* If VF previously requested less resources, go back to default */
1376 p_vf->num_rxqs = p_vf->num_sbs;
1377 p_vf->num_txqs = p_vf->num_sbs;
1378
1379 p_vf->num_active_rxqs = 0;
1380
1381 for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1382 struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
1383
1384 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
1385 if (!p_queue->cids[j].p_cid)
1386 continue;
1387
1388 qed_eth_queue_cid_release(p_hwfn,
1389 p_queue->cids[j].p_cid);
1390 p_queue->cids[j].p_cid = NULL;
1391 }
1392 }
1393
1394 memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1395 memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1396 qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
1397 }
1398
1399 /* Returns either 0, or log(size) */
1400 static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
1401 struct qed_ptt *p_ptt)
1402 {
1403 u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
1404
1405 if (val)
1406 return val + 11;
1407 return 0;
1408 }
1409
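/* Clamp the number of CIDs granted to the VF: never more than it requested
 * or than the PF reserved per VF, and - for VFs using queue-ids - never more
 * than its doorbell BAR (or regview window) can cover.
 */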
1410 static void
1411 qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
1412 struct qed_ptt *p_ptt,
1413 struct qed_vf_info *p_vf,
1414 struct vf_pf_resc_request *p_req,
1415 struct pf_vf_resc *p_resp)
1416 {
1417 u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
1418 u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
1419 qed_db_addr_vf(0, DQ_DEMS_LEGACY);
1420 u32 bar_size;
1421
1422 p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);
1423
1424 /* If VF didn't bother asking for QIDs then don't bother limiting
1425 * number of CIDs. The VF doesn't care about the number, and this
1426 * has the likely result of causing an additional acquisition.
1427 */
1428 if (!(p_vf->acquire.vfdev_info.capabilities &
1429 VFPF_ACQUIRE_CAP_QUEUE_QIDS))
1430 return;
1431
1432 /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
1433 * that would make sure doorbells for all CIDs fall within the bar.
1434 * If it doesn't, make sure regview window is sufficient.
1435 */
1436 if (p_vf->acquire.vfdev_info.capabilities &
1437 VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
1438 bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
1439 if (bar_size)
1440 bar_size = 1 << bar_size;
1441
1442 if (p_hwfn->cdev->num_hwfns > 1)
1443 bar_size /= 2;
1444 } else {
1445 bar_size = PXP_VF_BAR0_DQ_LENGTH;
1446 }
1447
1448 if (bar_size / db_size < 256)
1449 p_resp->num_cids = min_t(u8, p_resp->num_cids,
1450 (u8)(bar_size / db_size));
1451 }
1452
1453 static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
1454 struct qed_ptt *p_ptt,
1455 struct qed_vf_info *p_vf,
1456 struct vf_pf_resc_request *p_req,
1457 struct pf_vf_resc *p_resp)
1458 {
1459 u8 i;
1460
1461 /* Queue related information */
1462 p_resp->num_rxqs = p_vf->num_rxqs;
1463 p_resp->num_txqs = p_vf->num_txqs;
1464 p_resp->num_sbs = p_vf->num_sbs;
1465
1466 for (i = 0; i < p_resp->num_sbs; i++) {
1467 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1468 p_resp->hw_sbs[i].sb_qid = 0;
1469 }
1470
1471 /* These fields are filled for backward compatibility.
1472 * Unused by modern vfs.
1473 */
1474 for (i = 0; i < p_resp->num_rxqs; i++) {
1475 qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1476 (u16 *)&p_resp->hw_qid[i]);
1477 p_resp->cid[i] = i;
1478 }
1479
1480 /* Filter related information */
1481 p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
1482 p_req->num_mac_filters);
1483 p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
1484 p_req->num_vlan_filters);
1485
1486 qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
1487
1488 /* This isn't really needed/enforced, but some legacy VFs might depend
1489 * on the correct filling of this field.
1490 */
1491 p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
1492
1493 /* Validate sufficient resources for VF */
1494 if (p_resp->num_rxqs < p_req->num_rxqs ||
1495 p_resp->num_txqs < p_req->num_txqs ||
1496 p_resp->num_sbs < p_req->num_sbs ||
1497 p_resp->num_mac_filters < p_req->num_mac_filters ||
1498 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1499 p_resp->num_mc_filters < p_req->num_mc_filters ||
1500 p_resp->num_cids < p_req->num_cids) {
1501 DP_VERBOSE(p_hwfn,
1502 QED_MSG_IOV,
1503 "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
1504 p_vf->abs_vf_id,
1505 p_req->num_rxqs,
1506 p_resp->num_rxqs,
1507 p_req->num_txqs,
1508 p_resp->num_txqs,
1509 p_req->num_sbs,
1510 p_resp->num_sbs,
1511 p_req->num_mac_filters,
1512 p_resp->num_mac_filters,
1513 p_req->num_vlan_filters,
1514 p_resp->num_vlan_filters,
1515 p_req->num_mc_filters,
1516 p_resp->num_mc_filters,
1517 p_req->num_cids, p_resp->num_cids);
1518
1519 /* Some legacy OSes are incapable of correctly handling this
1520 * failure.
1521 */
1522 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1523 ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1524 (p_vf->acquire.vfdev_info.os_type ==
1525 VFPF_ACQUIRE_OS_WINDOWS))
1526 return PFVF_STATUS_SUCCESS;
1527
1528 return PFVF_STATUS_NO_RESOURCE;
1529 }
1530
1531 return PFVF_STATUS_SUCCESS;
1532 }
1533
1534 static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
1535 struct pfvf_stats_info *p_stats)
1536 {
1537 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1538 offsetof(struct mstorm_vf_zone,
1539 non_trigger.eth_queue_stat);
1540 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1541 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1542 offsetof(struct ustorm_vf_zone,
1543 non_trigger.eth_queue_stat);
1544 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1545 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1546 offsetof(struct pstorm_vf_zone,
1547 non_trigger.eth_queue_stat);
1548 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1549 p_stats->tstats.address = 0;
1550 p_stats->tstats.len = 0;
1551 }
1552
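/* Handle the VF's ACQUIRE mailbox request - validate fastpath-HSI
 * compatibility, fill in PF/device information and the granted resources,
 * start the VF in FW and post its first bulletin.
 */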
1553 static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1554 struct qed_ptt *p_ptt,
1555 struct qed_vf_info *vf)
1556 {
1557 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1558 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1559 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1560 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1561 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1562 struct pf_vf_resc *resc = &resp->resc;
1563 int rc;
1564
1565 memset(resp, 0, sizeof(*resp));
1566
1567 /* Write the PF version so that VF would know which version
1568 * is supported - might be later overridden. This guarantees that
1569 * VF could recognize legacy PF based on lack of versions in reply.
1570 */
1571 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1572 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1573
1574 if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
1575 DP_VERBOSE(p_hwfn,
1576 QED_MSG_IOV,
1577 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1578 vf->abs_vf_id, vf->state);
1579 goto out;
1580 }
1581
1582 /* Validate FW compatibility */
1583 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1584 if (req->vfdev_info.capabilities &
1585 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1586 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1587
1588 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1589 "VF[%d] is pre-fastpath HSI\n",
1590 vf->abs_vf_id);
1591 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1592 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1593 } else {
1594 DP_INFO(p_hwfn,
1595 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
1596 vf->abs_vf_id,
1597 req->vfdev_info.eth_fp_hsi_major,
1598 req->vfdev_info.eth_fp_hsi_minor,
1599 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1600
1601 goto out;
1602 }
1603 }
1604
1605 /* On 100g PFs, prevent old VFs from loading */
1606 if ((p_hwfn->cdev->num_hwfns > 1) &&
1607 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1608 DP_INFO(p_hwfn,
1609 "VF[%d] is running an old driver that doesn't support 100g\n",
1610 vf->abs_vf_id);
1611 goto out;
1612 }
1613
1614 /* Store the acquire message */
1615 memcpy(&vf->acquire, req, sizeof(vf->acquire));
1616
1617 vf->opaque_fid = req->vfdev_info.opaque_fid;
1618
1619 vf->vf_bulletin = req->bulletin_addr;
1620 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1621 vf->bulletin.size : req->bulletin_size;
1622
1623 /* fill in pfdev info */
1624 pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1625 pfdev_info->db_size = 0;
1626 pfdev_info->indices_per_sb = PIS_PER_SB_E4;
1627
1628 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1629 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1630 if (p_hwfn->cdev->num_hwfns > 1)
1631 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1632
1633 /* Share our ability to use multiple queue-ids only with VFs
1634 * that request it.
1635 */
1636 if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
1637 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
1638
1639 /* Share the sizes of the bars with VF */
1640 resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
1641
1642 qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1643
1644 memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1645
1646 pfdev_info->fw_major = FW_MAJOR_VERSION;
1647 pfdev_info->fw_minor = FW_MINOR_VERSION;
1648 pfdev_info->fw_rev = FW_REVISION_VERSION;
1649 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1650
1651 /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1652 * this field.
1653 */
1654 pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
1655 req->vfdev_info.eth_fp_hsi_minor);
1656 pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1657 qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1658
1659 pfdev_info->dev_type = p_hwfn->cdev->type;
1660 pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1661
1662 /* Fill resources available to VF; Make sure there are enough to
1663 * satisfy the VF's request.
1664 */
1665 vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1666 &req->resc_request, resc);
1667 if (vfpf_status != PFVF_STATUS_SUCCESS)
1668 goto out;
1669
1670 /* Start the VF in FW */
1671 rc = qed_sp_vf_start(p_hwfn, vf);
1672 if (rc) {
1673 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1674 vfpf_status = PFVF_STATUS_FAILURE;
1675 goto out;
1676 }
1677
1678 /* Fill agreed size of bulletin board in response */
1679 resp->bulletin_size = vf->bulletin.size;
1680 qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1681
1682 DP_VERBOSE(p_hwfn,
1683 QED_MSG_IOV,
1684 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1685 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1686 vf->abs_vf_id,
1687 resp->pfdev_info.chip_num,
1688 resp->pfdev_info.db_size,
1689 resp->pfdev_info.indices_per_sb,
1690 resp->pfdev_info.capabilities,
1691 resc->num_rxqs,
1692 resc->num_txqs,
1693 resc->num_sbs,
1694 resc->num_mac_filters,
1695 resc->num_vlan_filters);
1696 vf->state = VF_ACQUIRED;
1697
1698 /* Prepare Response */
1699 out:
1700 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1701 sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1702 }
1703
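/* Configure anti-spoofing for the VF's vport via a vport-update ramrod */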
1704 static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
1705 struct qed_vf_info *p_vf, bool val)
1706 {
1707 struct qed_sp_vport_update_params params;
1708 int rc;
1709
1710 if (val == p_vf->spoof_chk) {
1711 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1712 "Spoofchk value[%d] is already configured\n", val);
1713 return 0;
1714 }
1715
1716 	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
1717 params.opaque_fid = p_vf->opaque_fid;
1718 params.vport_id = p_vf->vport_id;
1719 params.update_anti_spoofing_en_flg = 1;
1720 params.anti_spoofing_en = val;
1721
1722 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1723 if (!rc) {
1724 p_vf->spoof_chk = val;
1725 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1726 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1727 "Spoofchk val[%d] configured\n", val);
1728 } else {
1729 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1730 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1731 val, p_vf->relative_vf_id);
1732 }
1733
1734 return rc;
1735 }
1736
1737 static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
1738 struct qed_vf_info *p_vf)
1739 {
1740 struct qed_filter_ucast filter;
1741 int rc = 0;
1742 int i;
1743
1744 memset(&filter, 0, sizeof(filter));
1745 filter.is_rx_filter = 1;
1746 filter.is_tx_filter = 1;
1747 filter.vport_to_add_to = p_vf->vport_id;
1748 filter.opcode = QED_FILTER_ADD;
1749
1750 /* Reconfigure vlans */
1751 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1752 if (!p_vf->shadow_config.vlans[i].used)
1753 continue;
1754
1755 filter.type = QED_FILTER_VLAN;
1756 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1757 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1758 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1759 filter.vlan, p_vf->relative_vf_id);
1760 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1761 &filter, QED_SPQ_MODE_CB, NULL);
1762 if (rc) {
1763 DP_NOTICE(p_hwfn,
1764 "Failed to configure VLAN [%04x] to VF [%04x]\n",
1765 filter.vlan, p_vf->relative_vf_id);
1766 break;
1767 }
1768 }
1769
1770 return rc;
1771 }
1772
1773 static int
1774 qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
1775 struct qed_vf_info *p_vf, u64 events)
1776 {
1777 int rc = 0;
1778
1779 if ((events & BIT(VLAN_ADDR_FORCED)) &&
1780 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1781 rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1782
1783 return rc;
1784 }
1785
1786 static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
1787 struct qed_vf_info *p_vf, u64 events)
1788 {
1789 int rc = 0;
1790 struct qed_filter_ucast filter;
1791
1792 if (!p_vf->vport_instance)
1793 return -EINVAL;
1794
1795 if ((events & BIT(MAC_ADDR_FORCED)) ||
1796 p_vf->p_vf_info.is_trusted_configured) {
1797 /* Since there's no way [currently] of removing the MAC,
1798 * we can always assume this means we need to force it.
1799 */
1800 memset(&filter, 0, sizeof(filter));
1801 filter.type = QED_FILTER_MAC;
1802 filter.opcode = QED_FILTER_REPLACE;
1803 filter.is_rx_filter = 1;
1804 filter.is_tx_filter = 1;
1805 filter.vport_to_add_to = p_vf->vport_id;
1806 ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1807
1808 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1809 &filter, QED_SPQ_MODE_CB, NULL);
1810 if (rc) {
1811 DP_NOTICE(p_hwfn,
1812 "PF failed to configure MAC for VF\n");
1813 return rc;
1814 }
1815 if (p_vf->p_vf_info.is_trusted_configured)
1816 p_vf->configured_features |=
1817 BIT(VFPF_BULLETIN_MAC_ADDR);
1818 else
1819 p_vf->configured_features |=
1820 BIT(MAC_ADDR_FORCED);
1821 }
1822
1823 if (events & BIT(VLAN_ADDR_FORCED)) {
1824 struct qed_sp_vport_update_params vport_update;
1825 u8 removal;
1826 int i;
1827
1828 memset(&filter, 0, sizeof(filter));
1829 filter.type = QED_FILTER_VLAN;
1830 filter.is_rx_filter = 1;
1831 filter.is_tx_filter = 1;
1832 filter.vport_to_add_to = p_vf->vport_id;
1833 filter.vlan = p_vf->bulletin.p_virt->pvid;
1834 filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
1835 QED_FILTER_FLUSH;
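		/* A zero pvid in the bulletin means the forced VLAN is being
		 * removed, hence QED_FILTER_FLUSH above; a non-zero pvid
		 * replaces whatever VLAN filters the VF had configured.
		 */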
1836
1837 /* Send the ramrod */
1838 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1839 &filter, QED_SPQ_MODE_CB, NULL);
1840 if (rc) {
1841 DP_NOTICE(p_hwfn,
1842 "PF failed to configure VLAN for VF\n");
1843 return rc;
1844 }
1845
1846 /* Update the default-vlan & silent vlan stripping */
1847 memset(&vport_update, 0, sizeof(vport_update));
1848 vport_update.opaque_fid = p_vf->opaque_fid;
1849 vport_update.vport_id = p_vf->vport_id;
1850 vport_update.update_default_vlan_enable_flg = 1;
1851 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1852 vport_update.update_default_vlan_flg = 1;
1853 vport_update.default_vlan = filter.vlan;
1854
1855 vport_update.update_inner_vlan_removal_flg = 1;
1856 removal = filter.vlan ? 1
1857 : p_vf->shadow_config.inner_vlan_removal;
1858 vport_update.inner_vlan_removal_flg = removal;
1859 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1860 rc = qed_sp_vport_update(p_hwfn,
1861 &vport_update,
1862 QED_SPQ_MODE_EBLOCK, NULL);
1863 if (rc) {
1864 DP_NOTICE(p_hwfn,
1865 "PF failed to configure VF vport for vlan\n");
1866 return rc;
1867 }
1868
1869 /* Update all the Rx queues */
1870 for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1871 struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
1872 struct qed_queue_cid *p_cid = NULL;
1873
1874 			/* There can be at most one Rx queue per qzone; find it */
1875 p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
1876 if (!p_cid)
1877 continue;
1878
1879 rc = qed_sp_eth_rx_queues_update(p_hwfn,
1880 (void **)&p_cid,
1881 1, 0, 1,
1882 QED_SPQ_MODE_EBLOCK,
1883 NULL);
1884 if (rc) {
1885 DP_NOTICE(p_hwfn,
1886 "Failed to send Rx update fo queue[0x%04x]\n",
1887 p_cid->rel.queue_id);
1888 return rc;
1889 }
1890 }
1891
1892 if (filter.vlan)
1893 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1894 else
1895 p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
1896 }
1897
1898 	/* If forced features were removed, we need to restore the shadow
1899 	 * configuration.
1900 	 */
1901 if (events)
1902 qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1903
1904 return rc;
1905 }
1906
1907 static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1908 struct qed_ptt *p_ptt,
1909 struct qed_vf_info *vf)
1910 {
1911 struct qed_sp_vport_start_params params = { 0 };
1912 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1913 struct vfpf_vport_start_tlv *start;
1914 u8 status = PFVF_STATUS_SUCCESS;
1915 struct qed_vf_info *vf_info;
1916 u64 *p_bitmap;
1917 int sb_id;
1918 int rc;
1919
1920 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1921 if (!vf_info) {
1922 DP_NOTICE(p_hwfn->cdev,
1923 "Failed to get VF info, invalid vfid [%d]\n",
1924 vf->relative_vf_id);
1925 return;
1926 }
1927
1928 vf->state = VF_ENABLED;
1929 start = &mbx->req_virt->start_vport;
1930
1931 qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1932
1933 /* Initialize Status block in CAU */
1934 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1935 if (!start->sb_addr[sb_id]) {
1936 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1937 "VF[%d] did not fill the address of SB %d\n",
1938 vf->relative_vf_id, sb_id);
1939 break;
1940 }
1941
1942 qed_int_cau_conf_sb(p_hwfn, p_ptt,
1943 start->sb_addr[sb_id],
1944 vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
1945 }
1946
1947 vf->mtu = start->mtu;
1948 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1949
1950 	/* Take into consideration any configuration forced by the hypervisor;
1951 	 * if none is configured, use the values supplied by the VF [for old
1952 	 * VFs that is still fine, since they passed '0' as padding].
1953 	 */
1954 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1955 if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1956 u8 vf_req = start->only_untagged;
1957
1958 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1959 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1960 }
1961
1962 params.tpa_mode = start->tpa_mode;
1963 params.remove_inner_vlan = start->inner_vlan_removal;
1964 params.tx_switching = true;
1965
1966 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1967 params.drop_ttl0 = false;
1968 params.concrete_fid = vf->concrete_fid;
1969 params.opaque_fid = vf->opaque_fid;
1970 params.vport_id = vf->vport_id;
1971 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1972 params.mtu = vf->mtu;
1973
1974 /* Non trusted VFs should enable control frame filtering */
1975 params.check_mac = !vf->p_vf_info.is_trusted_configured;
1976
1977 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
1978 if (rc) {
1979 DP_ERR(p_hwfn,
1980 "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1981 status = PFVF_STATUS_FAILURE;
1982 } else {
1983 vf->vport_instance++;
1984
1985 /* Force configuration if needed on the newly opened vport */
1986 qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1987
1988 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1989 }
1990 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1991 sizeof(struct pfvf_def_resp_tlv), status);
1992 }
1993
1994 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1995 struct qed_ptt *p_ptt,
1996 struct qed_vf_info *vf)
1997 {
1998 u8 status = PFVF_STATUS_SUCCESS;
1999 int rc;
2000
2001 vf->vport_instance--;
2002 vf->spoof_chk = false;
2003
2004 if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
2005 (qed_iov_validate_active_txq(p_hwfn, vf))) {
2006 vf->b_malicious = true;
2007 DP_NOTICE(p_hwfn,
2008 "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
2009 vf->abs_vf_id);
2010 status = PFVF_STATUS_MALICIOUS;
2011 goto out;
2012 }
2013
2014 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2015 if (rc) {
2016 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
2017 rc);
2018 status = PFVF_STATUS_FAILURE;
2019 }
2020
2021 /* Forget the configuration on the vport */
2022 vf->configured_features = 0;
2023 memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2024
2025 out:
2026 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2027 sizeof(struct pfvf_def_resp_tlv), status);
2028 }
2029
2030 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
2031 struct qed_ptt *p_ptt,
2032 struct qed_vf_info *vf,
2033 u8 status, bool b_legacy)
2034 {
2035 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2036 struct pfvf_start_queue_resp_tlv *p_tlv;
2037 struct vfpf_start_rxq_tlv *req;
2038 u16 length;
2039
2040 mbx->offset = (u8 *)mbx->reply_virt;
2041
2042 	/* Taking a bigger struct instead of adding a TLV to the list was a
2043 	 * mistake, but one which we're now stuck with, as some older
2044 	 * clients assume the size of the previous response.
2045 	 */
2046 if (!b_legacy)
2047 length = sizeof(*p_tlv);
2048 else
2049 length = sizeof(struct pfvf_def_resp_tlv);
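	/* Legacy VFs expect the smaller pfvf_def_resp_tlv here; newer VFs get
	 * the full pfvf_start_queue_resp_tlv, which carries the Rx producer
	 * offset filled in below.
	 */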
2050
2051 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2052 length);
2053 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2054 sizeof(struct channel_list_end_tlv));
2055
2056 /* Update the TLV with the response */
2057 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2058 req = &mbx->req_virt->start_rxq;
2059 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2060 offsetof(struct mstorm_vf_zone,
2061 non_trigger.eth_rx_queue_producers) +
2062 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2063 }
2064
2065 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2066 }
2067
2068 static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
2069 struct qed_vf_info *p_vf, bool b_is_tx)
2070 {
2071 struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2072 struct vfpf_qid_tlv *p_qid_tlv;
2073
2074 	/* Search for the qid if the VF published that it's going to provide it */
2075 if (!(p_vf->acquire.vfdev_info.capabilities &
2076 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2077 if (b_is_tx)
2078 return QED_IOV_LEGACY_QID_TX;
2079 else
2080 return QED_IOV_LEGACY_QID_RX;
2081 }
2082
2083 p_qid_tlv = (struct vfpf_qid_tlv *)
2084 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2085 CHANNEL_TLV_QID);
2086 if (!p_qid_tlv) {
2087 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2088 "VF[%2x]: Failed to provide qid\n",
2089 p_vf->relative_vf_id);
2090
2091 return QED_IOV_QID_INVALID;
2092 }
2093
2094 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2095 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2096 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2097 p_vf->relative_vf_id, p_qid_tlv->qid);
2098 return QED_IOV_QID_INVALID;
2099 }
2100
2101 return p_qid_tlv->qid;
2102 }
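/* Minimal sketch of how callers consume the helper above (illustrative only,
 * mirroring qed_iov_vf_mbx_start_rxq() below):
 *
 *	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
 *	if (qid_usage_idx == QED_IOV_QID_INVALID)
 *		goto out;				// reply with failure
 *	p_queue = &vf->vf_queues[req->rx_qid];
 *	if (p_queue->cids[qid_usage_idx].p_cid)
 *		goto out;				// index already in use
 */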
2103
2104 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
2105 struct qed_ptt *p_ptt,
2106 struct qed_vf_info *vf)
2107 {
2108 struct qed_queue_start_common_params params;
2109 struct qed_queue_cid_vf_params vf_params;
2110 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2111 u8 status = PFVF_STATUS_NO_RESOURCE;
2112 u8 qid_usage_idx, vf_legacy = 0;
2113 struct vfpf_start_rxq_tlv *req;
2114 struct qed_vf_queue *p_queue;
2115 struct qed_queue_cid *p_cid;
2116 struct qed_sb_info sb_dummy;
2117 int rc;
2118
2119 req = &mbx->req_virt->start_rxq;
2120
2121 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2122 QED_IOV_VALIDATE_Q_DISABLE) ||
2123 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2124 goto out;
2125
2126 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2127 if (qid_usage_idx == QED_IOV_QID_INVALID)
2128 goto out;
2129
2130 p_queue = &vf->vf_queues[req->rx_qid];
2131 if (p_queue->cids[qid_usage_idx].p_cid)
2132 goto out;
2133
2134 vf_legacy = qed_vf_calculate_legacy(vf);
2135
2136 /* Acquire a new queue-cid */
2137 	memset(&params, 0, sizeof(params));
2138 params.queue_id = p_queue->fw_rx_qid;
2139 params.vport_id = vf->vport_id;
2140 params.stats_id = vf->abs_vf_id + 0x10;
2141 /* Since IGU index is passed via sb_info, construct a dummy one */
2142 memset(&sb_dummy, 0, sizeof(sb_dummy));
2143 sb_dummy.igu_sb_id = req->hw_sb;
2144 params.p_sb = &sb_dummy;
2145 params.sb_idx = req->sb_index;
2146
2147 memset(&vf_params, 0, sizeof(vf_params));
2148 vf_params.vfid = vf->relative_vf_id;
2149 vf_params.vf_qid = (u8)req->rx_qid;
2150 vf_params.vf_legacy = vf_legacy;
2151 vf_params.qid_usage_idx = qid_usage_idx;
2152 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2153 				     &params, true, &vf_params);
2154 if (!p_cid)
2155 goto out;
2156
2157 /* Legacy VFs have their Producers in a different location, which they
2158 * calculate on their own and clean the producer prior to this.
2159 */
2160 if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
2161 REG_WR(p_hwfn,
2162 GTT_BAR0_MAP_REG_MSDM_RAM +
2163 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2164 0);
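	/* For non-legacy VFs the PF clears the Rx producer in MSDM RAM before
	 * the queue is started; legacy VFs place the producer elsewhere and
	 * clear it themselves, as noted above.
	 */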
2165
2166 rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
2167 req->bd_max_bytes,
2168 req->rxq_addr,
2169 req->cqe_pbl_addr, req->cqe_pbl_size);
2170 if (rc) {
2171 status = PFVF_STATUS_FAILURE;
2172 qed_eth_queue_cid_release(p_hwfn, p_cid);
2173 } else {
2174 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2175 p_queue->cids[qid_usage_idx].b_is_tx = false;
2176 status = PFVF_STATUS_SUCCESS;
2177 vf->num_active_rxqs++;
2178 }
2179
2180 out:
2181 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2182 !!(vf_legacy &
2183 QED_QCID_LEGACY_VF_RX_PROD));
2184 }
2185
2186 static void
2187 qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2188 struct qed_tunnel_info *p_tun,
2189 u16 tunn_feature_mask)
2190 {
2191 p_resp->tunn_feature_mask = tunn_feature_mask;
2192 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2193 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2194 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2195 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2196 	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2197 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2198 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2199 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2200 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2201 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2202 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2203 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2204 }
2205
2206 static void
2207 __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2208 struct qed_tunn_update_type *p_tun,
2209 enum qed_tunn_mode mask, u8 tun_cls)
2210 {
2211 if (p_req->tun_mode_update_mask & BIT(mask)) {
2212 p_tun->b_update_mode = true;
2213
2214 if (p_req->tunn_mode & BIT(mask))
2215 p_tun->b_mode_enabled = true;
2216 }
2217
2218 p_tun->tun_cls = tun_cls;
2219 }
2220
2221 static void
2222 qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2223 struct qed_tunn_update_type *p_tun,
2224 struct qed_tunn_update_udp_port *p_port,
2225 enum qed_tunn_mode mask,
2226 u8 tun_cls, u8 update_port, u16 port)
2227 {
2228 if (update_port) {
2229 p_port->b_update_port = true;
2230 p_port->port = port;
2231 }
2232
2233 __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2234 }
2235
2236 static bool
2237 qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2238 {
2239 bool b_update_requested = false;
2240
2241 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2242 p_req->update_geneve_port || p_req->update_vxlan_port)
2243 b_update_requested = true;
2244
2245 return b_update_requested;
2246 }
2247
2248 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2249 {
2250 if (tun->b_update_mode && !tun->b_mode_enabled) {
2251 tun->b_update_mode = false;
2252 *rc = -EINVAL;
2253 }
2254 }
2255
2256 static int
2257 qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2258 u16 *tun_features, bool *update,
2259 struct qed_tunnel_info *tun_src)
2260 {
2261 struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2262 struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2263 u16 bultn_vxlan_port, bultn_geneve_port;
2264 void *cookie = p_hwfn->cdev->ops_cookie;
2265 int i, rc = 0;
2266
2267 *tun_features = p_hwfn->cdev->tunn_feature_mask;
2268 bultn_vxlan_port = tun->vxlan_port.port;
2269 bultn_geneve_port = tun->geneve_port.port;
2270 qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
2271 qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
2272 qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
2273 qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
2274 qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2275
2276 if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
2277 (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2278 tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2279 tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2280 tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2281 tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
2282 tun_src->b_update_rx_cls = false;
2283 tun_src->b_update_tx_cls = false;
2284 rc = -EINVAL;
2285 }
2286
2287 if (tun_src->vxlan_port.b_update_port) {
2288 if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
2289 tun_src->vxlan_port.b_update_port = false;
2290 } else {
2291 *update = true;
2292 bultn_vxlan_port = tun_src->vxlan_port.port;
2293 }
2294 }
2295
2296 if (tun_src->geneve_port.b_update_port) {
2297 if (tun_src->geneve_port.port == tun->geneve_port.port) {
2298 tun_src->geneve_port.b_update_port = false;
2299 } else {
2300 *update = true;
2301 bultn_geneve_port = tun_src->geneve_port.port;
2302 }
2303 }
2304
2305 qed_for_each_vf(p_hwfn, i) {
2306 qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
2307 bultn_geneve_port);
2308 }
2309
2310 qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2311 ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
2312
2313 return rc;
2314 }
2315
2316 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2317 struct qed_ptt *p_ptt,
2318 struct qed_vf_info *p_vf)
2319 {
2320 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2321 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2322 struct pfvf_update_tunn_param_tlv *p_resp;
2323 struct vfpf_update_tunn_param_tlv *p_req;
2324 u8 status = PFVF_STATUS_SUCCESS;
2325 bool b_update_required = false;
2326 struct qed_tunnel_info tunn;
2327 u16 tunn_feature_mask = 0;
2328 int i, rc = 0;
2329
2330 mbx->offset = (u8 *)mbx->reply_virt;
2331
2332 memset(&tunn, 0, sizeof(tunn));
2333 p_req = &mbx->req_virt->tunn_param_update;
2334
2335 if (!qed_iov_pf_validate_tunn_param(p_req)) {
2336 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2337 "No tunnel update requested by VF\n");
2338 status = PFVF_STATUS_FAILURE;
2339 goto send_resp;
2340 }
2341
2342 tunn.b_update_rx_cls = p_req->update_tun_cls;
2343 tunn.b_update_tx_cls = p_req->update_tun_cls;
2344
2345 qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2346 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2347 p_req->update_vxlan_port,
2348 p_req->vxlan_port);
2349 qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2350 QED_MODE_L2GENEVE_TUNN,
2351 p_req->l2geneve_clss,
2352 p_req->update_geneve_port,
2353 p_req->geneve_port);
2354 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2355 QED_MODE_IPGENEVE_TUNN,
2356 p_req->ipgeneve_clss);
2357 __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2358 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2359 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2360 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2361
2362 	/* If the PF modifies the VF's request, it should still return an
2363 	 * error, since the resulting configuration is partial or different
2364 	 * from the one requested.
2365 	 */
2366 rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2367 &b_update_required, &tunn);
2368
2369 if (rc)
2370 status = PFVF_STATUS_FAILURE;
2371
2372 	/* Check whether the QED client is willing to update anything */
2373 if (b_update_required) {
2374 u16 geneve_port;
2375
2376 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2377 QED_SPQ_MODE_EBLOCK, NULL);
2378 if (rc)
2379 status = PFVF_STATUS_FAILURE;
2380
2381 geneve_port = p_tun->geneve_port.port;
2382 qed_for_each_vf(p_hwfn, i) {
2383 qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2384 p_tun->vxlan_port.port,
2385 geneve_port);
2386 }
2387 }
2388
2389 send_resp:
2390 p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2391 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2392
2393 qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2394 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2395 sizeof(struct channel_list_end_tlv));
2396
2397 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2398 }
2399
2400 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2401 struct qed_ptt *p_ptt,
2402 struct qed_vf_info *p_vf,
2403 u32 cid, u8 status)
2404 {
2405 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2406 struct pfvf_start_queue_resp_tlv *p_tlv;
2407 bool b_legacy = false;
2408 u16 length;
2409
2410 mbx->offset = (u8 *)mbx->reply_virt;
2411
2412 	/* Taking a bigger struct instead of adding a TLV to the list was a
2413 	 * mistake, but one which we're now stuck with, as some older
2414 	 * clients assume the size of the previous response.
2415 	 */
2416 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2417 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2418 b_legacy = true;
2419
2420 if (!b_legacy)
2421 length = sizeof(*p_tlv);
2422 else
2423 length = sizeof(struct pfvf_def_resp_tlv);
2424
2425 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2426 length);
2427 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2428 sizeof(struct channel_list_end_tlv));
2429
2430 /* Update the TLV with the response */
2431 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2432 p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
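	/* The returned offset is the VF doorbell-bar offset for this CID,
	 * which the VF is expected to ring when publishing new Tx BDs.
	 */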
2433
2434 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2435 }
2436
2437 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2438 struct qed_ptt *p_ptt,
2439 struct qed_vf_info *vf)
2440 {
2441 struct qed_queue_start_common_params params;
2442 struct qed_queue_cid_vf_params vf_params;
2443 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2444 u8 status = PFVF_STATUS_NO_RESOURCE;
2445 struct vfpf_start_txq_tlv *req;
2446 struct qed_vf_queue *p_queue;
2447 struct qed_queue_cid *p_cid;
2448 struct qed_sb_info sb_dummy;
2449 u8 qid_usage_idx, vf_legacy;
2450 u32 cid = 0;
2451 int rc;
2452 u16 pq;
2453
2454 	memset(&params, 0, sizeof(params));
2455 req = &mbx->req_virt->start_txq;
2456
2457 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2458 QED_IOV_VALIDATE_Q_NA) ||
2459 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2460 goto out;
2461
2462 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2463 if (qid_usage_idx == QED_IOV_QID_INVALID)
2464 goto out;
2465
2466 p_queue = &vf->vf_queues[req->tx_qid];
2467 if (p_queue->cids[qid_usage_idx].p_cid)
2468 goto out;
2469
2470 vf_legacy = qed_vf_calculate_legacy(vf);
2471
2472 /* Acquire a new queue-cid */
2473 params.queue_id = p_queue->fw_tx_qid;
2474 params.vport_id = vf->vport_id;
2475 params.stats_id = vf->abs_vf_id + 0x10;
2476
2477 /* Since IGU index is passed via sb_info, construct a dummy one */
2478 memset(&sb_dummy, 0, sizeof(sb_dummy));
2479 sb_dummy.igu_sb_id = req->hw_sb;
2480 params.p_sb = &sb_dummy;
2481 params.sb_idx = req->sb_index;
2482
2483 memset(&vf_params, 0, sizeof(vf_params));
2484 vf_params.vfid = vf->relative_vf_id;
2485 vf_params.vf_qid = (u8)req->tx_qid;
2486 vf_params.vf_legacy = vf_legacy;
2487 vf_params.qid_usage_idx = qid_usage_idx;
2488
2489 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2490 				     &params, false, &vf_params);
2491 if (!p_cid)
2492 goto out;
2493
2494 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2495 rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
2496 req->pbl_addr, req->pbl_size, pq);
2497 if (rc) {
2498 status = PFVF_STATUS_FAILURE;
2499 qed_eth_queue_cid_release(p_hwfn, p_cid);
2500 } else {
2501 status = PFVF_STATUS_SUCCESS;
2502 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2503 p_queue->cids[qid_usage_idx].b_is_tx = true;
2504 cid = p_cid->cid;
2505 }
2506
2507 out:
2508 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
2509 }
2510
2511 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2512 struct qed_vf_info *vf,
2513 u16 rxq_id,
2514 u8 qid_usage_idx, bool cqe_completion)
2515 {
2516 struct qed_vf_queue *p_queue;
2517 int rc = 0;
2518
2519 if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
2520 DP_VERBOSE(p_hwfn,
2521 QED_MSG_IOV,
2522 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2523 vf->relative_vf_id, rxq_id, qid_usage_idx);
2524 return -EINVAL;
2525 }
2526
2527 p_queue = &vf->vf_queues[rxq_id];
2528
2529 /* We've validated the index and the existence of the active RXQ -
2530 * now we need to make sure that it's using the correct qid.
2531 */
2532 if (!p_queue->cids[qid_usage_idx].p_cid ||
2533 p_queue->cids[qid_usage_idx].b_is_tx) {
2534 struct qed_queue_cid *p_cid;
2535
2536 p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
2537 DP_VERBOSE(p_hwfn,
2538 QED_MSG_IOV,
2539 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2540 vf->relative_vf_id,
2541 rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
2542 return -EINVAL;
2543 }
2544
2545 /* Now that we know we have a valid Rx-queue - close it */
2546 rc = qed_eth_rx_queue_stop(p_hwfn,
2547 p_queue->cids[qid_usage_idx].p_cid,
2548 false, cqe_completion);
2549 if (rc)
2550 return rc;
2551
2552 p_queue->cids[qid_usage_idx].p_cid = NULL;
2553 vf->num_active_rxqs--;
2554
2555 return 0;
2556 }
2557
2558 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2559 struct qed_vf_info *vf,
2560 u16 txq_id, u8 qid_usage_idx)
2561 {
2562 struct qed_vf_queue *p_queue;
2563 int rc = 0;
2564
2565 if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2566 return -EINVAL;
2567
2568 p_queue = &vf->vf_queues[txq_id];
2569 if (!p_queue->cids[qid_usage_idx].p_cid ||
2570 !p_queue->cids[qid_usage_idx].b_is_tx)
2571 return -EINVAL;
2572
2573 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2574 if (rc)
2575 return rc;
2576
2577 p_queue->cids[qid_usage_idx].p_cid = NULL;
2578 return 0;
2579 }
2580
2581 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2582 struct qed_ptt *p_ptt,
2583 struct qed_vf_info *vf)
2584 {
2585 u16 length = sizeof(struct pfvf_def_resp_tlv);
2586 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2587 u8 status = PFVF_STATUS_FAILURE;
2588 struct vfpf_stop_rxqs_tlv *req;
2589 u8 qid_usage_idx;
2590 int rc;
2591
2592 /* There has never been an official driver that used this interface
2593 * for stopping multiple queues, and it is now considered deprecated.
2594 * Validate this isn't used here.
2595 */
2596 req = &mbx->req_virt->stop_rxqs;
2597 if (req->num_rxqs != 1) {
2598 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2599 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2600 vf->relative_vf_id);
2601 status = PFVF_STATUS_NOT_SUPPORTED;
2602 goto out;
2603 }
2604
2605 /* Find which qid-index is associated with the queue */
2606 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2607 if (qid_usage_idx == QED_IOV_QID_INVALID)
2608 goto out;
2609
2610 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2611 qid_usage_idx, req->cqe_completion);
2612 if (!rc)
2613 status = PFVF_STATUS_SUCCESS;
2614 out:
2615 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2616 length, status);
2617 }
2618
2619 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2620 struct qed_ptt *p_ptt,
2621 struct qed_vf_info *vf)
2622 {
2623 u16 length = sizeof(struct pfvf_def_resp_tlv);
2624 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2625 u8 status = PFVF_STATUS_FAILURE;
2626 struct vfpf_stop_txqs_tlv *req;
2627 u8 qid_usage_idx;
2628 int rc;
2629
2630 /* There has never been an official driver that used this interface
2631 * for stopping multiple queues, and it is now considered deprecated.
2632 * Validate this isn't used here.
2633 */
2634 req = &mbx->req_virt->stop_txqs;
2635 if (req->num_txqs != 1) {
2636 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2637 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2638 vf->relative_vf_id);
2639 status = PFVF_STATUS_NOT_SUPPORTED;
2640 goto out;
2641 }
2642
2643 /* Find which qid-index is associated with the queue */
2644 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2645 if (qid_usage_idx == QED_IOV_QID_INVALID)
2646 goto out;
2647
2648 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
2649 if (!rc)
2650 status = PFVF_STATUS_SUCCESS;
2651
2652 out:
2653 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2654 length, status);
2655 }
2656
2657 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2658 struct qed_ptt *p_ptt,
2659 struct qed_vf_info *vf)
2660 {
2661 struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2662 u16 length = sizeof(struct pfvf_def_resp_tlv);
2663 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2664 struct vfpf_update_rxq_tlv *req;
2665 u8 status = PFVF_STATUS_FAILURE;
2666 u8 complete_event_flg;
2667 u8 complete_cqe_flg;
2668 u8 qid_usage_idx;
2669 int rc;
2670 u8 i;
2671
2672 req = &mbx->req_virt->update_rxq;
2673 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2674 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2675
2676 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2677 if (qid_usage_idx == QED_IOV_QID_INVALID)
2678 goto out;
2679
2680 /* There shouldn't exist a VF that uses queue-qids yet uses this
2681 * API with multiple Rx queues. Validate this.
2682 */
2683 if ((vf->acquire.vfdev_info.capabilities &
2684 VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
2685 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2686 "VF[%d] supports QIDs but sends multiple queues\n",
2687 vf->relative_vf_id);
2688 goto out;
2689 }
2690
2691 /* Validate inputs - for the legacy case this is still true since
2692 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2693 */
2694 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2695 if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2696 QED_IOV_VALIDATE_Q_NA) ||
2697 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2698 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2699 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2700 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2701 vf->relative_vf_id, req->rx_qid,
2702 req->num_rxqs);
2703 goto out;
2704 }
2705 }
2706
2707 /* Prepare the handlers */
2708 for (i = 0; i < req->num_rxqs; i++) {
2709 u16 qid = req->rx_qid + i;
2710
2711 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2712 }
2713
2714 rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2715 req->num_rxqs,
2716 complete_cqe_flg,
2717 complete_event_flg,
2718 QED_SPQ_MODE_EBLOCK, NULL);
2719 if (rc)
2720 goto out;
2721
2722 status = PFVF_STATUS_SUCCESS;
2723 out:
2724 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2725 length, status);
2726 }
2727
2728 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2729 void *p_tlvs_list, u16 req_type)
2730 {
2731 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2732 int len = 0;
2733
2734 do {
2735 if (!p_tlv->length) {
2736 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2737 return NULL;
2738 }
2739
2740 if (p_tlv->type == req_type) {
2741 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2742 "Extended tlv type %d, length %d found\n",
2743 p_tlv->type, p_tlv->length);
2744 return p_tlv;
2745 }
2746
2747 len += p_tlv->length;
2748 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2749
2750 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2751 DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
2752 return NULL;
2753 }
2754 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2755
2756 return NULL;
2757 }
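/* Illustrative use of the TLV walker above (mirrors qed_iov_vf_mbx_qid()):
 *
 *	struct vfpf_qid_tlv *p_qid_tlv;
 *
 *	p_qid_tlv = (struct vfpf_qid_tlv *)
 *		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 *					     CHANNEL_TLV_QID);
 *	if (!p_qid_tlv)
 *		return QED_IOV_QID_INVALID;
 *
 * The walk stops on CHANNEL_TLV_LIST_END, on a zero-length TLV, or once the
 * accumulated length would exceed TLV_BUFFER_SIZE.
 */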
2758
2759 static void
2760 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2761 struct qed_sp_vport_update_params *p_data,
2762 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2763 {
2764 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2765 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2766
2767 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2768 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2769 if (!p_act_tlv)
2770 return;
2771
2772 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2773 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2774 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2775 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2776 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2777 }
2778
2779 static void
2780 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2781 struct qed_sp_vport_update_params *p_data,
2782 struct qed_vf_info *p_vf,
2783 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2784 {
2785 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2786 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2787
2788 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2789 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2790 if (!p_vlan_tlv)
2791 return;
2792
2793 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2794
2795 /* Ignore the VF request if we're forcing a vlan */
2796 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2797 p_data->update_inner_vlan_removal_flg = 1;
2798 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2799 }
2800
2801 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2802 }
2803
2804 static void
2805 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2806 struct qed_sp_vport_update_params *p_data,
2807 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2808 {
2809 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2810 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2811
2812 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2813 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2814 tlv);
2815 if (!p_tx_switch_tlv)
2816 return;
2817
2818 p_data->update_tx_switching_flg = 1;
2819 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2820 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2821 }
2822
2823 static void
2824 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2825 struct qed_sp_vport_update_params *p_data,
2826 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2827 {
2828 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2829 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2830
2831 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2832 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2833 if (!p_mcast_tlv)
2834 return;
2835
2836 p_data->update_approx_mcast_flg = 1;
2837 memcpy(p_data->bins, p_mcast_tlv->bins,
2838 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2839 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2840 }
2841
2842 static void
2843 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2844 struct qed_sp_vport_update_params *p_data,
2845 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2846 {
2847 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2848 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2849 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2850
2851 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2852 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2853 if (!p_accept_tlv)
2854 return;
2855
2856 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2857 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2858 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2859 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2860 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2861 }
2862
2863 static void
2864 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2865 struct qed_sp_vport_update_params *p_data,
2866 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2867 {
2868 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2869 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2870
2871 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2872 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2873 tlv);
2874 if (!p_accept_any_vlan)
2875 return;
2876
2877 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2878 p_data->update_accept_any_vlan_flg =
2879 p_accept_any_vlan->update_accept_any_vlan_flg;
2880 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2881 }
2882
2883 static void
2884 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2885 struct qed_vf_info *vf,
2886 struct qed_sp_vport_update_params *p_data,
2887 struct qed_rss_params *p_rss,
2888 struct qed_iov_vf_mbx *p_mbx,
2889 u16 *tlvs_mask, u16 *tlvs_accepted)
2890 {
2891 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2892 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2893 bool b_reject = false;
2894 u16 table_size;
2895 u16 i, q_idx;
2896
2897 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2898 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2899 if (!p_rss_tlv) {
2900 p_data->rss_params = NULL;
2901 return;
2902 }
2903
2904 memset(p_rss, 0, sizeof(struct qed_rss_params));
2905
2906 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2907 VFPF_UPDATE_RSS_CONFIG_FLAG);
2908 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2909 VFPF_UPDATE_RSS_CAPS_FLAG);
2910 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2911 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2912 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2913 VFPF_UPDATE_RSS_KEY_FLAG);
2914
2915 p_rss->rss_enable = p_rss_tlv->rss_enable;
2916 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2917 p_rss->rss_caps = p_rss_tlv->rss_caps;
2918 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2919 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2920
2921 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2922 (1 << p_rss_tlv->rss_table_size_log));
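	/* Illustrative sizing (hypothetical value): rss_table_size_log == 7
	 * requests 1 << 7 = 128 indirection entries, which is then capped at
	 * ARRAY_SIZE(p_rss->rss_ind_table).
	 */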
2923
2924 for (i = 0; i < table_size; i++) {
2925 struct qed_queue_cid *p_cid;
2926
2927 q_idx = p_rss_tlv->rss_ind_table[i];
2928 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2929 QED_IOV_VALIDATE_Q_ENABLE)) {
2930 DP_VERBOSE(p_hwfn,
2931 QED_MSG_IOV,
2932 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2933 vf->relative_vf_id, q_idx);
2934 b_reject = true;
2935 goto out;
2936 }
2937
2938 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
2939 p_rss->rss_ind_table[i] = p_cid;
2940 }
2941
2942 p_data->rss_params = p_rss;
2943 out:
2944 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2945 if (!b_reject)
2946 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
2947 }
2948
2949 static void
2950 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2951 struct qed_vf_info *vf,
2952 struct qed_sp_vport_update_params *p_data,
2953 struct qed_sge_tpa_params *p_sge_tpa,
2954 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2955 {
2956 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2957 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2958
2959 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2960 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2961
2962 if (!p_sge_tpa_tlv) {
2963 p_data->sge_tpa_params = NULL;
2964 return;
2965 }
2966
2967 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2968
2969 p_sge_tpa->update_tpa_en_flg =
2970 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2971 p_sge_tpa->update_tpa_param_flg =
2972 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2973 VFPF_UPDATE_TPA_PARAM_FLAG);
2974
2975 p_sge_tpa->tpa_ipv4_en_flg =
2976 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2977 p_sge_tpa->tpa_ipv6_en_flg =
2978 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2979 p_sge_tpa->tpa_pkt_split_flg =
2980 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2981 p_sge_tpa->tpa_hdr_data_split_flg =
2982 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2983 p_sge_tpa->tpa_gro_consistent_flg =
2984 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2985
2986 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2987 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2988 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2989 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2990 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2991
2992 p_data->sge_tpa_params = p_sge_tpa;
2993
2994 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2995 }
2996
2997 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2998 u8 vfid,
2999 struct qed_sp_vport_update_params *params,
3000 u16 *tlvs)
3001 {
3002 u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
3003 	struct qed_filter_accept_flags *flags = &params->accept_flags;
3004 struct qed_public_vf_info *vf_info;
3005
3006 /* Untrusted VFs can't even be trusted to know that fact.
3007 * Simply indicate everything is configured fine, and trace
3008 * configuration 'behind their back'.
3009 */
3010 if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
3011 return 0;
3012
3013 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3014
3015 if (flags->update_rx_mode_config) {
3016 vf_info->rx_accept_mode = flags->rx_accept_filter;
3017 if (!vf_info->is_trusted_configured)
3018 flags->rx_accept_filter &= ~mask;
3019 }
3020
3021 if (flags->update_tx_mode_config) {
3022 vf_info->tx_accept_mode = flags->tx_accept_filter;
3023 if (!vf_info->is_trusted_configured)
3024 flags->tx_accept_filter &= ~mask;
3025 }
3026
3027 return 0;
3028 }
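/* Example of the effect above (illustrative flags): an untrusted VF asking
 * for QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_UCAST_UNMATCHED on Rx only gets
 * QED_ACCEPT_UCAST_MATCHED programmed, while rx_accept_mode still records
 * the original request for the PF's own bookkeeping.
 */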
3029
3030 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
3031 struct qed_ptt *p_ptt,
3032 struct qed_vf_info *vf)
3033 {
3034 struct qed_rss_params *p_rss_params = NULL;
3035 struct qed_sp_vport_update_params params;
3036 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3037 struct qed_sge_tpa_params sge_tpa_params;
3038 u16 tlvs_mask = 0, tlvs_accepted = 0;
3039 u8 status = PFVF_STATUS_SUCCESS;
3040 u16 length;
3041 int rc;
3042
3043 	/* Validate that the VF is allowed to send such a request */
3044 if (!vf->vport_instance) {
3045 DP_VERBOSE(p_hwfn,
3046 QED_MSG_IOV,
3047 "No VPORT instance available for VF[%d], failing vport update\n",
3048 vf->abs_vf_id);
3049 status = PFVF_STATUS_FAILURE;
3050 goto out;
3051 }
3052 p_rss_params = vzalloc(sizeof(*p_rss_params));
3053 if (p_rss_params == NULL) {
3054 status = PFVF_STATUS_FAILURE;
3055 goto out;
3056 }
3057
3058 	memset(&params, 0, sizeof(params));
3059 params.opaque_fid = vf->opaque_fid;
3060 params.vport_id = vf->vport_id;
3061 params.rss_params = NULL;
3062
3063 	/* Search the extended TLV list and update the values supplied by the
3064 	 * VF in struct qed_sp_vport_update_params.
3065 	 */
3066 	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3067 	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3068 	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3069 	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3070 	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3071 	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3072 	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3073 &sge_tpa_params, mbx, &tlvs_mask);
3074
3075 tlvs_accepted = tlvs_mask;
3076
3077 	/* Some of the extended TLVs need to be validated first; in that case,
3078 	 * they can update the mask without updating the accepted list [so that
3079 	 * the PF can communicate to the VF that it has rejected the request].
3080 	 */
3081 	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3082 mbx, &tlvs_mask, &tlvs_accepted);
3083
3084 if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
3085 				     &params, &tlvs_accepted)) {
3086 tlvs_accepted = 0;
3087 status = PFVF_STATUS_NOT_SUPPORTED;
3088 goto out;
3089 }
3090
3091 if (!tlvs_accepted) {
3092 if (tlvs_mask)
3093 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3094 "Upper-layer prevents VF vport configuration\n");
3095 else
3096 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3097 "No feature tlvs found for vport update\n");
3098 status = PFVF_STATUS_NOT_SUPPORTED;
3099 goto out;
3100 }
3101
3102 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3103
3104 if (rc)
3105 status = PFVF_STATUS_FAILURE;
3106
3107 out:
3108 vfree(p_rss_params);
3109 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3110 tlvs_mask, tlvs_accepted);
3111 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3112 }
3113
3114 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
3115 struct qed_vf_info *p_vf,
3116 struct qed_filter_ucast *p_params)
3117 {
3118 int i;
3119
3120 /* First remove entries and then add new ones */
3121 if (p_params->opcode == QED_FILTER_REMOVE) {
3122 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3123 if (p_vf->shadow_config.vlans[i].used &&
3124 p_vf->shadow_config.vlans[i].vid ==
3125 p_params->vlan) {
3126 p_vf->shadow_config.vlans[i].used = false;
3127 break;
3128 }
3129 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3130 DP_VERBOSE(p_hwfn,
3131 QED_MSG_IOV,
3132 "VF [%d] - Tries to remove a non-existing vlan\n",
3133 p_vf->relative_vf_id);
3134 return -EINVAL;
3135 }
3136 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3137 p_params->opcode == QED_FILTER_FLUSH) {
3138 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3139 p_vf->shadow_config.vlans[i].used = false;
3140 }
3141
3142 /* In forced mode, we're willing to remove entries - but we don't add
3143 * new ones.
3144 */
3145 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
3146 return 0;
3147
3148 if (p_params->opcode == QED_FILTER_ADD ||
3149 p_params->opcode == QED_FILTER_REPLACE) {
3150 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3151 if (p_vf->shadow_config.vlans[i].used)
3152 continue;
3153
3154 p_vf->shadow_config.vlans[i].used = true;
3155 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3156 break;
3157 }
3158
3159 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3160 DP_VERBOSE(p_hwfn,
3161 QED_MSG_IOV,
3162 "VF [%d] - Tries to configure more than %d vlan filters\n",
3163 p_vf->relative_vf_id,
3164 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
3165 return -EINVAL;
3166 }
3167 }
3168
3169 return 0;
3170 }
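/* The VLAN shadow maintained above is what qed_iov_reconfigure_unicast_vlan()
 * replays once a forced-VLAN configuration is removed, restoring exactly the
 * filters the VF had configured.
 */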
3171
3172 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
3173 struct qed_vf_info *p_vf,
3174 struct qed_filter_ucast *p_params)
3175 {
3176 int i;
3177
3178 /* If we're in forced-mode, we don't allow any change */
3179 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
3180 return 0;
3181
3182 	/* Don't keep track of the shadow copy since we don't intend to restore it. */
3183 if (p_vf->p_vf_info.is_trusted_configured)
3184 return 0;
3185
3186 /* First remove entries and then add new ones */
3187 if (p_params->opcode == QED_FILTER_REMOVE) {
3188 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3189 if (ether_addr_equal(p_vf->shadow_config.macs[i],
3190 p_params->mac)) {
3191 eth_zero_addr(p_vf->shadow_config.macs[i]);
3192 break;
3193 }
3194 }
3195
3196 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3197 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3198 "MAC isn't configured\n");
3199 return -EINVAL;
3200 }
3201 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3202 p_params->opcode == QED_FILTER_FLUSH) {
3203 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
3204 eth_zero_addr(p_vf->shadow_config.macs[i]);
3205 }
3206
3207 /* List the new MAC address */
3208 if (p_params->opcode != QED_FILTER_ADD &&
3209 p_params->opcode != QED_FILTER_REPLACE)
3210 return 0;
3211
3212 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3213 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
3214 ether_addr_copy(p_vf->shadow_config.macs[i],
3215 p_params->mac);
3216 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3217 "Added MAC at %d entry in shadow\n", i);
3218 break;
3219 }
3220 }
3221
3222 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3223 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3224 return -EINVAL;
3225 }
3226
3227 return 0;
3228 }
3229
3230 static int
3231 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3232 struct qed_vf_info *p_vf,
3233 struct qed_filter_ucast *p_params)
3234 {
3235 int rc = 0;
3236
3237 if (p_params->type == QED_FILTER_MAC) {
3238 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3239 if (rc)
3240 return rc;
3241 }
3242
3243 if (p_params->type == QED_FILTER_VLAN)
3244 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3245
3246 return rc;
3247 }
3248
3249 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3250 int vfid, struct qed_filter_ucast *params)
3251 {
3252 struct qed_public_vf_info *vf;
3253
3254 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3255 if (!vf)
3256 return -EINVAL;
3257
3258 /* No real decision to make; Store the configured MAC */
3259 if (params->type == QED_FILTER_MAC ||
3260 params->type == QED_FILTER_MAC_VLAN) {
3261 ether_addr_copy(vf->mac, params->mac);
3262
3263 if (vf->is_trusted_configured) {
3264 qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
3265
3266 			/* Update and post the bulletin again */
3267 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3268 }
3269 }
3270
3271 return 0;
3272 }
3273
3274 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3275 struct qed_ptt *p_ptt,
3276 struct qed_vf_info *vf)
3277 {
3278 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3279 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3280 struct vfpf_ucast_filter_tlv *req;
3281 u8 status = PFVF_STATUS_SUCCESS;
3282 struct qed_filter_ucast params;
3283 int rc;
3284
3285 /* Prepare the unicast filter params */
3286 	memset(&params, 0, sizeof(struct qed_filter_ucast));
3287 req = &mbx->req_virt->ucast_filter;
3288 params.opcode = (enum qed_filter_opcode)req->opcode;
3289 params.type = (enum qed_filter_ucast_type)req->type;
3290
3291 params.is_rx_filter = 1;
3292 params.is_tx_filter = 1;
3293 params.vport_to_remove_from = vf->vport_id;
3294 params.vport_to_add_to = vf->vport_id;
3295 memcpy(params.mac, req->mac, ETH_ALEN);
3296 params.vlan = req->vlan;
3297
3298 DP_VERBOSE(p_hwfn,
3299 QED_MSG_IOV,
3300 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3301 vf->abs_vf_id, params.opcode, params.type,
3302 params.is_rx_filter ? "RX" : "",
3303 params.is_tx_filter ? "TX" : "",
3304 params.vport_to_add_to,
3305 params.mac[0], params.mac[1],
3306 params.mac[2], params.mac[3],
3307 params.mac[4], params.mac[5], params.vlan);
3308
3309 if (!vf->vport_instance) {
3310 DP_VERBOSE(p_hwfn,
3311 QED_MSG_IOV,
3312 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3313 vf->abs_vf_id);
3314 status = PFVF_STATUS_FAILURE;
3315 goto out;
3316 }
3317
3318 /* Update shadow copy of the VF configuration */
3319 	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3320 status = PFVF_STATUS_FAILURE;
3321 goto out;
3322 }
3323
3324 	/* Determine if the unicast filtering is acceptable to the PF */
3325 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3326 (params.type == QED_FILTER_VLAN ||
3327 params.type == QED_FILTER_MAC_VLAN)) {
3328 /* Once VLAN is forced or PVID is set, do not allow
3329 * to add/replace any further VLANs.
3330 */
3331 if (params.opcode == QED_FILTER_ADD ||
3332 params.opcode == QED_FILTER_REPLACE)
3333 status = PFVF_STATUS_FORCED;
3334 goto out;
3335 }
3336
3337 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3338 (params.type == QED_FILTER_MAC ||
3339 params.type == QED_FILTER_MAC_VLAN)) {
3340 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3341 (params.opcode != QED_FILTER_ADD &&
3342 params.opcode != QED_FILTER_REPLACE))
3343 status = PFVF_STATUS_FORCED;
3344 goto out;
3345 }
3346
3347 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3348 if (rc) {
3349 status = PFVF_STATUS_FAILURE;
3350 goto out;
3351 }
3352
3353 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3354 QED_SPQ_MODE_CB, NULL);
3355 if (rc)
3356 status = PFVF_STATUS_FAILURE;
3357
3358 out:
3359 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3360 sizeof(struct pfvf_def_resp_tlv), status);
3361 }
3362
3363 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3364 struct qed_ptt *p_ptt,
3365 struct qed_vf_info *vf)
3366 {
3367 int i;
3368
3369 /* Reset the SBs */
3370 for (i = 0; i < vf->num_sbs; i++)
3371 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3372 vf->igu_sbs[i],
3373 vf->opaque_fid, false);
3374
3375 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3376 sizeof(struct pfvf_def_resp_tlv),
3377 PFVF_STATUS_SUCCESS);
3378 }
3379
3380 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3381 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3382 {
3383 u16 length = sizeof(struct pfvf_def_resp_tlv);
3384 u8 status = PFVF_STATUS_SUCCESS;
3385
3386 /* Disable Interrupts for VF */
3387 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3388
3389 /* Reset Permission table */
3390 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3391
3392 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3393 length, status);
3394 }
3395
3396 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3397 struct qed_ptt *p_ptt,
3398 struct qed_vf_info *p_vf)
3399 {
3400 u16 length = sizeof(struct pfvf_def_resp_tlv);
3401 u8 status = PFVF_STATUS_SUCCESS;
3402 int rc = 0;
3403
3404 qed_iov_vf_cleanup(p_hwfn, p_vf);
3405
3406 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3407 /* Stopping the VF */
3408 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3409 p_vf->opaque_fid);
3410
3411 if (rc) {
3412 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3413 rc);
3414 status = PFVF_STATUS_FAILURE;
3415 }
3416
3417 p_vf->state = VF_STOPPED;
3418 }
3419
3420 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3421 length, status);
3422 }
3423
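/* CHANNEL_TLV_COALESCE_READ handler: read the current Rx or Tx
 * interrupt coalescing value of the queue requested by the VF and
 * return it in a pfvf_read_coal_resp_tlv.
 */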
3424 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
3425 struct qed_ptt *p_ptt,
3426 struct qed_vf_info *p_vf)
3427 {
3428 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3429 struct pfvf_read_coal_resp_tlv *p_resp;
3430 struct vfpf_read_coal_req_tlv *req;
3431 u8 status = PFVF_STATUS_FAILURE;
3432 struct qed_vf_queue *p_queue;
3433 struct qed_queue_cid *p_cid;
3434 u16 coal = 0, qid, i;
3435 bool b_is_rx;
3436 int rc = 0;
3437
3438 mbx->offset = (u8 *)mbx->reply_virt;
3439 req = &mbx->req_virt->read_coal_req;
3440
3441 qid = req->qid;
3442 b_is_rx = req->is_rx ? true : false;
3443
3444 if (b_is_rx) {
3445 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
3446 QED_IOV_VALIDATE_Q_ENABLE)) {
3447 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3448 "VF[%d]: Invalid Rx queue_id = %d\n",
3449 p_vf->abs_vf_id, qid);
3450 goto send_resp;
3451 }
3452
3453 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3454 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3455 if (rc)
3456 goto send_resp;
3457 } else {
3458 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
3459 QED_IOV_VALIDATE_Q_ENABLE)) {
3460 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3461 "VF[%d]: Invalid Tx queue_id = %d\n",
3462 p_vf->abs_vf_id, qid);
3463 goto send_resp;
3464 }
3465 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3466 p_queue = &p_vf->vf_queues[qid];
3467 if ((!p_queue->cids[i].p_cid) ||
3468 (!p_queue->cids[i].b_is_tx))
3469 continue;
3470
3471 p_cid = p_queue->cids[i].p_cid;
3472
3473 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3474 if (rc)
3475 goto send_resp;
3476 break;
3477 }
3478 }
3479
3480 status = PFVF_STATUS_SUCCESS;
3481
3482 send_resp:
3483 p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
3484 sizeof(*p_resp));
3485 p_resp->coal = coal;
3486
3487 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
3488 sizeof(struct channel_list_end_tlv));
3489
3490 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3491 }
3492
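/* CHANNEL_TLV_COALESCE_UPDATE handler: validate the queue index
 * supplied by the VF and apply the requested Rx/Tx coalescing values
 * to the matching queue CIDs.
 */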
3493 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
3494 struct qed_ptt *p_ptt,
3495 struct qed_vf_info *vf)
3496 {
3497 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3498 struct vfpf_update_coalesce *req;
3499 u8 status = PFVF_STATUS_FAILURE;
3500 struct qed_queue_cid *p_cid;
3501 u16 rx_coal, tx_coal;
3502 int rc = 0, i;
3503 u16 qid;
3504
3505 req = &mbx->req_virt->update_coalesce;
3506
3507 rx_coal = req->rx_coal;
3508 tx_coal = req->tx_coal;
3509 qid = req->qid;
3510
3511 if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
3512 QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
3513 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3514 "VF[%d]: Invalid Rx queue_id = %d\n",
3515 vf->abs_vf_id, qid);
3516 goto out;
3517 }
3518
3519 if (!qed_iov_validate_txq(p_hwfn, vf, qid,
3520 QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
3521 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3522 "VF[%d]: Invalid Tx queue_id = %d\n",
3523 vf->abs_vf_id, qid);
3524 goto out;
3525 }
3526
3527 DP_VERBOSE(p_hwfn,
3528 QED_MSG_IOV,
3529 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3530 vf->abs_vf_id, rx_coal, tx_coal, qid);
3531
3532 if (rx_coal) {
3533 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3534
3535 rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3536 if (rc) {
3537 DP_VERBOSE(p_hwfn,
3538 QED_MSG_IOV,
3539 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3540 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3541 goto out;
3542 }
3543 vf->rx_coal = rx_coal;
3544 }
3545
3546 if (tx_coal) {
3547 struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
3548
3549 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3550 if (!p_queue->cids[i].p_cid)
3551 continue;
3552
3553 if (!p_queue->cids[i].b_is_tx)
3554 continue;
3555
3556 rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3557 p_queue->cids[i].p_cid);
3558
3559 if (rc) {
3560 DP_VERBOSE(p_hwfn,
3561 QED_MSG_IOV,
3562 "VF[%d]: Unable to set tx queue coalesce\n",
3563 vf->abs_vf_id);
3564 goto out;
3565 }
3566 }
3567 vf->tx_coal = tx_coal;
3568 }
3569
3570 status = PFVF_STATUS_SUCCESS;
3571 out:
3572 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3573 sizeof(struct pfvf_def_resp_tlv), status);
3574 }
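
/* Pretend to be the VF and poll until its DORQ usage counter drops to
 * zero, i.e. no doorbells remain in flight. Gives up after roughly a
 * second (50 iterations of 20ms).
 */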
3575 static int
3576 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3577 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3578 {
3579 int cnt;
3580 u32 val;
3581
3582 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3583
3584 for (cnt = 0; cnt < 50; cnt++) {
3585 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3586 if (!val)
3587 break;
3588 msleep(20);
3589 }
3590 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3591
3592 if (cnt == 50) {
3593 DP_ERR(p_hwfn,
3594 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3595 p_vf->abs_vf_id, val);
3596 return -EBUSY;
3597 }
3598
3599 return 0;
3600 }
3601
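/* Poll the PBF block until, for every VOQ, the consumer has advanced by
 * at least the producer-consumer distance sampled on entry, meaning all
 * blocks that were queued when the FLR began have drained.
 */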
3602 static int
3603 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3604 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3605 {
3606 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3607 int i, cnt;
3608
3609 /* Read initial consumers & producers */
3610 for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3611 u32 prod;
3612
3613 cons[i] = qed_rd(p_hwfn, p_ptt,
3614 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3615 i * 0x40);
3616 prod = qed_rd(p_hwfn, p_ptt,
3617 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3618 i * 0x40);
3619 distance[i] = prod - cons[i];
3620 }
3621
3622 /* Wait for consumers to pass the producers */
3623 i = 0;
3624 for (cnt = 0; cnt < 50; cnt++) {
3625 for (; i < MAX_NUM_VOQS_E4; i++) {
3626 u32 tmp;
3627
3628 tmp = qed_rd(p_hwfn, p_ptt,
3629 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3630 i * 0x40);
3631 if (distance[i] > tmp - cons[i])
3632 break;
3633 }
3634
3635 if (i == MAX_NUM_VOQS_E4)
3636 break;
3637
3638 msleep(20);
3639 }
3640
3641 if (cnt == 50) {
3642 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3643 p_vf->abs_vf_id, i);
3644 return -EBUSY;
3645 }
3646
3647 return 0;
3648 }
3649
3650 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3651 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3652 {
3653 int rc;
3654
3655 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3656 if (rc)
3657 return rc;
3658
3659 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3660 if (rc)
3661 return rc;
3662
3663 return 0;
3664 }
3665
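/* Per-VF FLR handling: clean up SW state, wait for the HW (DORQ/PBF) to
 * drain, run the final-cleanup ramrod, re-open the VF-PF channel and
 * re-enable VF access, then mark the VF for an FLR ack towards the MFW
 * and clear its pending-FLR bit.
 */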
3666 static int
3667 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3668 struct qed_ptt *p_ptt,
3669 u16 rel_vf_id, u32 *ack_vfs)
3670 {
3671 struct qed_vf_info *p_vf;
3672 int rc = 0;
3673
3674 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3675 if (!p_vf)
3676 return 0;
3677
3678 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3679 (1ULL << (rel_vf_id % 64))) {
3680 u16 vfid = p_vf->abs_vf_id;
3681
3682 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3683 "VF[%d] - Handling FLR\n", vfid);
3684
3685 qed_iov_vf_cleanup(p_hwfn, p_vf);
3686
3687 /* If VF isn't active, no need for anything but SW */
3688 if (!p_vf->b_init)
3689 goto cleanup;
3690
3691 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3692 if (rc)
3693 goto cleanup;
3694
3695 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3696 if (rc) {
3697 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
3698 return rc;
3699 }
3700
3701 /* Workaround to make VF-PF channel ready, as FW
3702 * doesn't do that as a part of FLR.
3703 */
3704 REG_WR(p_hwfn,
3705 GTT_BAR0_MAP_REG_USDM_RAM +
3706 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3707
3708 /* VF_STOPPED has to be set only after final cleanup
3709 * but prior to re-enabling the VF.
3710 */
3711 p_vf->state = VF_STOPPED;
3712
3713 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3714 if (rc) {
3715 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
3716 vfid);
3717 return rc;
3718 }
3719 cleanup:
3720 /* Mark VF for ack and clean pending state */
3721 if (p_vf->state == VF_RESET)
3722 p_vf->state = VF_STOPPED;
3723 ack_vfs[vfid / 32] |= BIT((vfid % 32));
3724 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3725 ~(1ULL << (rel_vf_id % 64));
3726 p_vf->vf_mbx.b_pending_msg = false;
3727 }
3728
3729 return rc;
3730 }
3731
3732 static int
3733 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3734 {
3735 u32 ack_vfs[VF_MAX_STATIC / 32];
3736 int rc = 0;
3737 u16 i;
3738
3739 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3740
3741 /* Since BRB <-> PRS interface can't be tested as part of the flr
3742 * polling due to HW limitations, simply sleep a bit. And since
3743 * there's no need to wait per-vf, do it before looping.
3744 */
3745 msleep(100);
3746
3747 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3748 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3749
3750 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3751 return rc;
3752 }
3753
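/* Called with the bitmap of VFs the MFW reports as disabled (one bit per
 * absolute VF id, 32 VFs per u32). Marks each matching VF as VF_RESET
 * and sets its pending_flr bit; returns true if at least one VF was
 * marked, in which case the caller is expected to schedule the FLR
 * cleanup flow.
 */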
3754 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3755 {
3756 bool found = false;
3757 u16 i;
3758
3759 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3760 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3761 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3762 "[%08x,...,%08x]: %08x\n",
3763 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3764
3765 if (!p_hwfn->cdev->p_iov_info) {
3766 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3767 return false;
3768 }
3769
3770 /* Mark VFs */
3771 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3772 struct qed_vf_info *p_vf;
3773 u8 vfid;
3774
3775 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3776 if (!p_vf)
3777 continue;
3778
3779 vfid = p_vf->abs_vf_id;
3780 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3781 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3782 u16 rel_vf_id = p_vf->relative_vf_id;
3783
3784 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3785 "VF[%d] [rel %d] got FLR-ed\n",
3786 vfid, rel_vf_id);
3787
3788 p_vf->state = VF_RESET;
3789
3790 /* No need to lock here, since pending_flr should
3791 * only change here and before ACKing the MFW. Since
3792 * the MFW will not trigger an additional attention for
3793 * VF FLR until the previous one is ACKed, we're safe.
3794 */
3795 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3796 found = true;
3797 }
3798 }
3799
3800 return found;
3801 }
3802
3803 static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
3804 u16 vfid,
3805 struct qed_mcp_link_params *p_params,
3806 struct qed_mcp_link_state *p_link,
3807 struct qed_mcp_link_capabilities *p_caps)
3808 {
3809 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3810 vfid,
3811 false);
3812 struct qed_bulletin_content *p_bulletin;
3813
3814 if (!p_vf)
3815 return;
3816
3817 p_bulletin = p_vf->bulletin.p_virt;
3818
3819 if (p_params)
3820 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3821 if (p_link)
3822 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3823 if (p_caps)
3824 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3825 }
3826
3827 static int
3828 qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
3829 struct qed_ptt *p_ptt,
3830 struct qed_vf_info *p_vf)
3831 {
3832 struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
3833 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3834 struct vfpf_bulletin_update_mac_tlv *p_req;
3835 u8 status = PFVF_STATUS_SUCCESS;
3836 int rc = 0;
3837
3838 if (!p_vf->p_vf_info.is_trusted_configured) {
3839 DP_VERBOSE(p_hwfn,
3840 QED_MSG_IOV,
3841 "Blocking bulletin update request from untrusted VF[%d]\n",
3842 p_vf->abs_vf_id);
3843 status = PFVF_STATUS_NOT_SUPPORTED;
3844 rc = -EINVAL;
3845 goto send_status;
3846 }
3847
3848 p_req = &mbx->req_virt->bulletin_update_mac;
3849 ether_addr_copy(p_bulletin->mac, p_req->mac);
3850 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3851 "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
3852 p_vf->abs_vf_id, p_req->mac);
3853
3854 send_status:
3855 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3856 CHANNEL_TLV_BULLETIN_UPDATE_MAC,
3857 sizeof(struct pfvf_def_resp_tlv), status);
3858 return rc;
3859 }
3860
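/* Dispatch a single pending mailbox message from a VF. Supported TLVs
 * from well-behaved VFs are routed to the matching handler; requests
 * from VFs already marked malicious are answered with
 * PFVF_STATUS_MALICIOUS, and unknown TLVs are answered with
 * PFVF_STATUS_NOT_SUPPORTED when a valid reply address is available.
 */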
3861 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3862 struct qed_ptt *p_ptt, int vfid)
3863 {
3864 struct qed_iov_vf_mbx *mbx;
3865 struct qed_vf_info *p_vf;
3866
3867 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3868 if (!p_vf)
3869 return;
3870
3871 mbx = &p_vf->vf_mbx;
3872
3873 /* qed_iov_process_mbx_request */
3874 if (!mbx->b_pending_msg) {
3875 DP_NOTICE(p_hwfn,
3876 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3877 p_vf->abs_vf_id);
3878 return;
3879 }
3880 mbx->b_pending_msg = false;
3881
3882 mbx->first_tlv = mbx->req_virt->first_tlv;
3883
3884 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3885 "VF[%02x]: Processing mailbox message [type %04x]\n",
3886 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3887
3888 /* check if tlv type is known */
3889 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3890 !p_vf->b_malicious) {
3891 switch (mbx->first_tlv.tl.type) {
3892 case CHANNEL_TLV_ACQUIRE:
3893 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3894 break;
3895 case CHANNEL_TLV_VPORT_START:
3896 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3897 break;
3898 case CHANNEL_TLV_VPORT_TEARDOWN:
3899 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3900 break;
3901 case CHANNEL_TLV_START_RXQ:
3902 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3903 break;
3904 case CHANNEL_TLV_START_TXQ:
3905 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3906 break;
3907 case CHANNEL_TLV_STOP_RXQS:
3908 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3909 break;
3910 case CHANNEL_TLV_STOP_TXQS:
3911 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3912 break;
3913 case CHANNEL_TLV_UPDATE_RXQ:
3914 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3915 break;
3916 case CHANNEL_TLV_VPORT_UPDATE:
3917 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3918 break;
3919 case CHANNEL_TLV_UCAST_FILTER:
3920 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3921 break;
3922 case CHANNEL_TLV_CLOSE:
3923 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3924 break;
3925 case CHANNEL_TLV_INT_CLEANUP:
3926 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3927 break;
3928 case CHANNEL_TLV_RELEASE:
3929 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3930 break;
3931 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3932 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3933 break;
3934 case CHANNEL_TLV_COALESCE_UPDATE:
3935 qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3936 break;
3937 case CHANNEL_TLV_COALESCE_READ:
3938 qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
3939 break;
3940 case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
3941 qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
3942 break;
3943 }
3944 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3945 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3946 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3947 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3948
3949 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3950 mbx->first_tlv.tl.type,
3951 sizeof(struct pfvf_def_resp_tlv),
3952 PFVF_STATUS_MALICIOUS);
3953 } else {
3954 /* unknown TLV - this may belong to a VF driver from the future
3955 * - a version written after this PF driver was written, which
3956 * supports features unknown as of yet. Too bad since we don't
3957 * support them. Or this may be because someone wrote a crappy
3958 * VF driver and is sending garbage over the channel.
3959 */
3960 DP_NOTICE(p_hwfn,
3961 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3962 p_vf->abs_vf_id,
3963 mbx->first_tlv.tl.type,
3964 mbx->first_tlv.tl.length,
3965 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3966
3967 /* Try replying in case reply address matches the acquisition's
3968 * posted address.
3969 */
3970 if (p_vf->acquire.first_tlv.reply_address &&
3971 (mbx->first_tlv.reply_address ==
3972 p_vf->acquire.first_tlv.reply_address)) {
3973 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3974 mbx->first_tlv.tl.type,
3975 sizeof(struct pfvf_def_resp_tlv),
3976 PFVF_STATUS_NOT_SUPPORTED);
3977 } else {
3978 DP_VERBOSE(p_hwfn,
3979 QED_MSG_IOV,
3980 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3981 p_vf->abs_vf_id);
3982 }
3983 }
3984 }
3985
3986 static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
3987 {
3988 int i;
3989
3990 memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3991
3992 qed_for_each_vf(p_hwfn, i) {
3993 struct qed_vf_info *p_vf;
3994
3995 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
3996 if (p_vf->vf_mbx.b_pending_msg)
3997 events[i / 64] |= 1ULL << (i % 64);
3998 }
3999 }
4000
4001 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
4002 u16 abs_vfid)
4003 {
4004 u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
4005
4006 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
4007 DP_VERBOSE(p_hwfn,
4008 QED_MSG_IOV,
4009 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
4010 abs_vfid);
4011 return NULL;
4012 }
4013
4014 return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
4015 }
4016
4017 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
4018 u16 abs_vfid, struct regpair *vf_msg)
4019 {
4020 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
4021 abs_vfid);
4022
4023 if (!p_vf)
4024 return 0;
4025
4026 /* Record the physical address of the request so that the
4027 * handler can later copy the message from it.
4028 */
4029 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
4030
4031 /* Mark the event and schedule the workqueue */
4032 p_vf->vf_mbx.b_pending_msg = true;
4033 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
4034
4035 return 0;
4036 }
4037
4038 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
4039 struct malicious_vf_eqe_data *p_data)
4040 {
4041 struct qed_vf_info *p_vf;
4042
4043 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4044
4045 if (!p_vf)
4046 return;
4047
4048 if (!p_vf->b_malicious) {
4049 DP_NOTICE(p_hwfn,
4050 "VF [%d] - Malicious behavior [%02x]\n",
4051 p_vf->abs_vf_id, p_data->err_id);
4052
4053 p_vf->b_malicious = true;
4054 } else {
4055 DP_INFO(p_hwfn,
4056 "VF [%d] - Malicious behavior [%02x]\n",
4057 p_vf->abs_vf_id, p_data->err_id);
4058 }
4059 }
4060
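/* Async event-queue callback for SR-IOV events: a VF-PF channel event
 * records the request address and schedules the IOV workqueue, while a
 * malicious-VF event marks the offending VF.
 */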
4061 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
4062 u8 opcode,
4063 __le16 echo,
4064 union event_ring_data *data, u8 fw_return_code)
4065 {
4066 switch (opcode) {
4067 case COMMON_EVENT_VF_PF_CHANNEL:
4068 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
4069 &data->vf_pf_channel.msg_addr);
4070 case COMMON_EVENT_MALICIOUS_VF:
4071 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4072 return 0;
4073 default:
4074 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
4075 opcode);
4076 return -EINVAL;
4077 }
4078 }
4079
4080 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4081 {
4082 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
4083 u16 i;
4084
4085 if (!p_iov)
4086 goto out;
4087
4088 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4089 if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
4090 return i;
4091
4092 out:
4093 return MAX_NUM_VFS;
4094 }
4095
4096 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
4097 int vfid)
4098 {
4099 struct qed_dmae_params params;
4100 struct qed_vf_info *vf_info;
4101
4102 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4103 if (!vf_info)
4104 return -EINVAL;
4105
4106 memset(¶ms, 0, sizeof(struct qed_dmae_params));
4107 params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
4108 params.src_vfid = vf_info->abs_vf_id;
4109
4110 if (qed_dmae_host2host(p_hwfn, ptt,
4111 vf_info->vf_mbx.pending_req,
4112 vf_info->vf_mbx.req_phys,
4113 sizeof(union vfpf_tlvs) / 4, &params)) {
4114 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4115 "Failed to copy message from VF 0x%02x\n", vfid);
4116
4117 return -EIO;
4118 }
4119
4120 return 0;
4121 }
4122
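/* Publish a forced MAC for the VF in its bulletin board. When trust
 * mode is configured the address is advertised as a regular
 * (non-forced) MAC instead, since trust disables forced-MAC semantics.
 */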
4123 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
4124 u8 *mac, int vfid)
4125 {
4126 struct qed_vf_info *vf_info;
4127 u64 feature;
4128
4129 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4130 if (!vf_info) {
4131 DP_NOTICE(p_hwfn->cdev,
4132 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4133 return;
4134 }
4135
4136 if (vf_info->b_malicious) {
4137 DP_NOTICE(p_hwfn->cdev,
4138 "Can't set forced MAC to malicious VF [%d]\n", vfid);
4139 return;
4140 }
4141
4142 if (vf_info->p_vf_info.is_trusted_configured) {
4143 feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4144 /* Trust mode will disable Forced MAC */
4145 vf_info->bulletin.p_virt->valid_bitmap &=
4146 ~BIT(MAC_ADDR_FORCED);
4147 } else {
4148 feature = BIT(MAC_ADDR_FORCED);
4149 /* Forced MAC will disable MAC_ADDR */
4150 vf_info->bulletin.p_virt->valid_bitmap &=
4151 ~BIT(VFPF_BULLETIN_MAC_ADDR);
4152 }
4153
4154 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4155
4156 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4157
4158 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4159 }
4160
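/* Publish a non-forced MAC in the VF's bulletin board. Fails if a
 * forced MAC is currently configured; for trusted VFs the forced-vport
 * configuration is refreshed as well.
 */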
4161 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
4162 {
4163 struct qed_vf_info *vf_info;
4164 u64 feature;
4165
4166 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4167 if (!vf_info) {
4168 DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
4169 vfid);
4170 return -EINVAL;
4171 }
4172
4173 if (vf_info->b_malicious) {
4174 DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
4175 vfid);
4176 return -EINVAL;
4177 }
4178
4179 if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
4180 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4181 "Can not set MAC, Forced MAC is configured\n");
4182 return -EINVAL;
4183 }
4184
4185 feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4186 ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
4187
4188 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4189
4190 if (vf_info->p_vf_info.is_trusted_configured)
4191 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4192
4193 return 0;
4194 }
4195
4196 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
4197 u16 pvid, int vfid)
4198 {
4199 struct qed_vf_info *vf_info;
4200 u64 feature;
4201
4202 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4203 if (!vf_info) {
4204 DP_NOTICE(p_hwfn->cdev,
4205 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4206 return;
4207 }
4208
4209 if (vf_info->b_malicious) {
4210 DP_NOTICE(p_hwfn->cdev,
4211 "Can't set forced vlan to malicious VF [%d]\n", vfid);
4212 return;
4213 }
4214
4215 feature = 1 << VLAN_ADDR_FORCED;
4216 vf_info->bulletin.p_virt->pvid = pvid;
4217 if (pvid)
4218 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4219 else
4220 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4221
4222 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4223 }
4224
4225 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
4226 int vfid, u16 vxlan_port, u16 geneve_port)
4227 {
4228 struct qed_vf_info *vf_info;
4229
4230 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4231 if (!vf_info) {
4232 DP_NOTICE(p_hwfn->cdev,
4233 "Can not set udp ports, invalid vfid [%d]\n", vfid);
4234 return;
4235 }
4236
4237 if (vf_info->b_malicious) {
4238 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4239 "Can not set udp ports to malicious VF [%d]\n",
4240 vfid);
4241 return;
4242 }
4243
4244 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4245 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4246 }
4247
4248 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
4249 {
4250 struct qed_vf_info *p_vf_info;
4251
4252 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4253 if (!p_vf_info)
4254 return false;
4255
4256 return !!p_vf_info->vport_instance;
4257 }
4258
4259 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
4260 {
4261 struct qed_vf_info *p_vf_info;
4262
4263 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4264 if (!p_vf_info)
4265 return true;
4266
4267 return p_vf_info->state == VF_STOPPED;
4268 }
4269
4270 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
4271 {
4272 struct qed_vf_info *vf_info;
4273
4274 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4275 if (!vf_info)
4276 return false;
4277
4278 return vf_info->spoof_chk;
4279 }
4280
4281 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
4282 {
4283 struct qed_vf_info *vf;
4284 int rc = -EINVAL;
4285
4286 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4287 DP_NOTICE(p_hwfn,
4288 "SR-IOV sanity check failed, can't set spoofchk\n");
4289 goto out;
4290 }
4291
4292 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4293 if (!vf)
4294 goto out;
4295
4296 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4297 /* After VF VPORT start PF will configure spoof check */
4298 vf->req_spoofchk_val = val;
4299 rc = 0;
4300 goto out;
4301 }
4302
4303 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
4304
4305 out:
4306 return rc;
4307 }
4308
4309 static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4310 {
4311 struct qed_vf_info *p_vf;
4312
4313 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4314 if (!p_vf || !p_vf->bulletin.p_virt)
4315 return NULL;
4316
4317 if (!(p_vf->bulletin.p_virt->valid_bitmap &
4318 BIT(VFPF_BULLETIN_MAC_ADDR)))
4319 return NULL;
4320
4321 return p_vf->bulletin.p_virt->mac;
4322 }
4323
4324 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
4325 u16 rel_vf_id)
4326 {
4327 struct qed_vf_info *p_vf;
4328
4329 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4330 if (!p_vf || !p_vf->bulletin.p_virt)
4331 return NULL;
4332
4333 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
4334 return NULL;
4335
4336 return p_vf->bulletin.p_virt->mac;
4337 }
4338
4339 static u16
4340 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4341 {
4342 struct qed_vf_info *p_vf;
4343
4344 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4345 if (!p_vf || !p_vf->bulletin.p_virt)
4346 return 0;
4347
4348 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
4349 return 0;
4350
4351 return p_vf->bulletin.p_virt->pvid;
4352 }
4353
4354 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
4355 struct qed_ptt *p_ptt, int vfid, int val)
4356 {
4357 struct qed_mcp_link_state *p_link;
4358 struct qed_vf_info *vf;
4359 u8 abs_vp_id = 0;
4360 int rc;
4361
4362 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4363 if (!vf)
4364 return -EINVAL;
4365
4366 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4367 if (rc)
4368 return rc;
4369
4370 p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
4371
4372 return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
4373 p_link->speed);
4374 }
4375
4376 static int
4377 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
4378 {
4379 struct qed_vf_info *vf;
4380 u8 vport_id;
4381 int i;
4382
4383 for_each_hwfn(cdev, i) {
4384 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4385
4386 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4387 DP_NOTICE(p_hwfn,
4388 "SR-IOV sanity check failed, can't set min rate\n");
4389 return -EINVAL;
4390 }
4391 }
4392
4393 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
4394 vport_id = vf->vport_id;
4395
4396 return qed_configure_vport_wfq(cdev, vport_id, rate);
4397 }
4398
4399 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
4400 {
4401 struct qed_wfq_data *vf_vp_wfq;
4402 struct qed_vf_info *vf_info;
4403
4404 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4405 if (!vf_info)
4406 return 0;
4407
4408 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4409
4410 if (vf_vp_wfq->configured)
4411 return vf_vp_wfq->min_speed;
4412 else
4413 return 0;
4414 }
4415
4416 /**
4417 * qed_schedule_iov - schedules IOV task for VF and PF
4418 * @hwfn: hardware function pointer
4419 * @flag: IOV flag for VF/PF
4420 */
4421 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
4422 {
4423 smp_mb__before_atomic();
4424 set_bit(flag, &hwfn->iov_task_flags);
4425 smp_mb__after_atomic();
4426 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
4427 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
4428 }
4429
4430 void qed_vf_start_iov_wq(struct qed_dev *cdev)
4431 {
4432 int i;
4433
4434 for_each_hwfn(cdev, i)
4435 queue_delayed_work(cdev->hwfns[i].iov_wq,
4436 &cdev->hwfns[i].iov_task, 0);
4437 }
4438
4439 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
4440 {
4441 int i, j;
4442
4443 for_each_hwfn(cdev, i)
4444 if (cdev->hwfns[i].iov_wq)
4445 flush_workqueue(cdev->hwfns[i].iov_wq);
4446
4447 /* Mark VFs for disablement */
4448 qed_iov_set_vfs_to_disable(cdev, true);
4449
4450 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
4451 pci_disable_sriov(cdev->pdev);
4452
4453 if (cdev->recov_in_prog) {
4454 DP_VERBOSE(cdev,
4455 QED_MSG_IOV,
4456 "Skip SRIOV disable operations in the device since a recovery is in progress\n");
4457 goto out;
4458 }
4459
4460 for_each_hwfn(cdev, i) {
4461 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4462 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4463
4464 /* Failure to acquire the ptt in 100G creates an odd error
4465 * where the first engine has already released IOV.
4466 */
4467 if (!ptt) {
4468 DP_ERR(hwfn, "Failed to acquire ptt\n");
4469 return -EBUSY;
4470 }
4471
4472 /* Clean WFQ db and configure equal weight for all vports */
4473 qed_clean_wfq_db(hwfn, ptt);
4474
4475 qed_for_each_vf(hwfn, j) {
4476 int k;
4477
4478 if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
4479 continue;
4480
4481 /* Wait until VF is disabled before releasing */
4482 for (k = 0; k < 100; k++) {
4483 if (!qed_iov_is_vf_stopped(hwfn, j))
4484 msleep(20);
4485 else
4486 break;
4487 }
4488
4489 if (k < 100)
4490 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
4491 ptt, j);
4492 else
4493 DP_ERR(hwfn,
4494 "Timeout waiting for VF's FLR to end\n");
4495 }
4496
4497 qed_ptt_release(hwfn, ptt);
4498 }
4499 out:
4500 qed_iov_set_vfs_to_disable(cdev, false);
4501
4502 return 0;
4503 }
4504
4505 static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4506 u16 vfid,
4507 struct qed_iov_vf_init_params *params)
4508 {
4509 u16 base, i;
4510
4511 /* Since we have an equal resource distribution per-VF, and we assume
4512 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
4513 * sequentially from there.
4514 */
4515 base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
4516
4517 params->rel_vf_id = vfid;
4518 for (i = 0; i < params->num_queues; i++) {
4519 params->req_rx_queue[i] = base + i;
4520 params->req_tx_queue[i] = base + i;
4521 }
4522 }
4523
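/* Enable 'num' VFs: distribute L2 queues among the VFs (at most 16
 * queues each), initialize the HW for every VF on each hwfn, enable the
 * PCIe SR-IOV capability and switch the eswitch to VEB mode. On failure
 * any partially enabled state is torn down via qed_sriov_disable().
 */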
4524 static int qed_sriov_enable(struct qed_dev *cdev, int num)
4525 {
4526 struct qed_iov_vf_init_params params;
4527 struct qed_hwfn *hwfn;
4528 struct qed_ptt *ptt;
4529 int i, j, rc;
4530
4531 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
4532 DP_NOTICE(cdev, "Can start at most %d VFs\n",
4533 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
4534 return -EINVAL;
4535 }
4536
4537 memset(¶ms, 0, sizeof(params));
4538
4539 /* Initialize HW for VF access */
4540 for_each_hwfn(cdev, j) {
4541 hwfn = &cdev->hwfns[j];
4542 ptt = qed_ptt_acquire(hwfn);
4543
4544 /* Make sure not to use more than 16 queues per VF */
4545 params.num_queues = min_t(int,
4546 FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
4547 16);
4548
4549 if (!ptt) {
4550 DP_ERR(hwfn, "Failed to acquire ptt\n");
4551 rc = -EBUSY;
4552 goto err;
4553 }
4554
4555 for (i = 0; i < num; i++) {
4556 if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
4557 continue;
4558
4559 qed_sriov_enable_qid_config(hwfn, i, &params);
4560 rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
4561 if (rc) {
4562 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
4563 qed_ptt_release(hwfn, ptt);
4564 goto err;
4565 }
4566 }
4567
4568 qed_ptt_release(hwfn, ptt);
4569 }
4570
4571 /* Enable SRIOV PCIe functions */
4572 rc = pci_enable_sriov(cdev->pdev, num);
4573 if (rc) {
4574 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
4575 goto err;
4576 }
4577
4578 hwfn = QED_LEADING_HWFN(cdev);
4579 ptt = qed_ptt_acquire(hwfn);
4580 if (!ptt) {
4581 DP_ERR(hwfn, "Failed to acquire ptt\n");
4582 rc = -EBUSY;
4583 goto err;
4584 }
4585
4586 rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
4587 if (rc)
4588 DP_INFO(cdev, "Failed to update eswitch mode\n");
4589 qed_ptt_release(hwfn, ptt);
4590
4591 return num;
4592
4593 err:
4594 qed_sriov_disable(cdev, false);
4595 return rc;
4596 }
4597
4598 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
4599 {
4600 if (!IS_QED_SRIOV(cdev)) {
4601 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
4602 return -EOPNOTSUPP;
4603 }
4604
4605 if (num_vfs_param)
4606 return qed_sriov_enable(cdev, num_vfs_param);
4607 else
4608 return qed_sriov_disable(cdev, true);
4609 }
4610
4611 static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
4612 {
4613 int i;
4614
4615 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4616 DP_VERBOSE(cdev, QED_MSG_IOV,
4617 "Cannot set a VF MAC; Sriov is not enabled\n");
4618 return -EINVAL;
4619 }
4620
4621 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4622 DP_VERBOSE(cdev, QED_MSG_IOV,
4623 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4624 return -EINVAL;
4625 }
4626
4627 for_each_hwfn(cdev, i) {
4628 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4629 struct qed_public_vf_info *vf_info;
4630
4631 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4632 if (!vf_info)
4633 continue;
4634
4635 /* Set the MAC, and schedule the IOV task */
4636 if (vf_info->is_trusted_configured)
4637 ether_addr_copy(vf_info->mac, mac);
4638 else
4639 ether_addr_copy(vf_info->forced_mac, mac);
4640
4641 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4642 }
4643
4644 return 0;
4645 }
4646
4647 static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
4648 {
4649 int i;
4650
4651 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4652 DP_VERBOSE(cdev, QED_MSG_IOV,
4653 "Cannot set a VF MAC; Sriov is not enabled\n");
4654 return -EINVAL;
4655 }
4656
4657 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4658 DP_VERBOSE(cdev, QED_MSG_IOV,
4659 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4660 return -EINVAL;
4661 }
4662
4663 for_each_hwfn(cdev, i) {
4664 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4665 struct qed_public_vf_info *vf_info;
4666
4667 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4668 if (!vf_info)
4669 continue;
4670
4671 /* Set the forced vlan, and schedule the IOV task */
4672 vf_info->forced_vlan = vid;
4673 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4674 }
4675
4676 return 0;
4677 }
4678
4679 static int qed_get_vf_config(struct qed_dev *cdev,
4680 int vf_id, struct ifla_vf_info *ivi)
4681 {
4682 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
4683 struct qed_public_vf_info *vf_info;
4684 struct qed_mcp_link_state link;
4685 u32 tx_rate;
4686
4687 /* Sanitize request */
4688 if (IS_VF(cdev))
4689 return -EINVAL;
4690
4691 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
4692 DP_VERBOSE(cdev, QED_MSG_IOV,
4693 "VF index [%d] isn't active\n", vf_id);
4694 return -EINVAL;
4695 }
4696
4697 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4698
4699 qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
4700
4701 /* Fill information about VF */
4702 ivi->vf = vf_id;
4703
4704 if (is_valid_ether_addr(vf_info->forced_mac))
4705 ether_addr_copy(ivi->mac, vf_info->forced_mac);
4706 else
4707 ether_addr_copy(ivi->mac, vf_info->mac);
4708
4709 ivi->vlan = vf_info->forced_vlan;
4710 ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
4711 ivi->linkstate = vf_info->link_state;
4712 tx_rate = vf_info->tx_rate;
4713 ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
4714 ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
4715
4716 return 0;
4717 }
4718
4719 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
4720 {
4721 struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
4722 struct qed_mcp_link_capabilities caps;
4723 struct qed_mcp_link_params params;
4724 struct qed_mcp_link_state link;
4725 int i;
4726
4727 if (!hwfn->pf_iov_info)
4728 return;
4729
4730 /* Update bulletin of all future possible VFs with link configuration */
4731 for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
4732 struct qed_public_vf_info *vf_info;
4733
4734 vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
4735 if (!vf_info)
4736 continue;
4737
4738 /* Only hwfn0 is actually interested in the link speed.
4739 * But since only it would receive an MFW indication of link,
4740 * need to take configuration from it - otherwise things like
4741 * rate limiting for hwfn1 VF would not work.
4742 */
4743 memcpy(¶ms, qed_mcp_get_link_params(lead_hwfn),
4744 sizeof(params));
4745 memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
4746 memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
4747 sizeof(caps));
4748
4749 /* Modify link according to the VF's configured link state */
4750 switch (vf_info->link_state) {
4751 case IFLA_VF_LINK_STATE_DISABLE:
4752 link.link_up = false;
4753 break;
4754 case IFLA_VF_LINK_STATE_ENABLE:
4755 link.link_up = true;
4756 /* Set speed according to the maximum supported by HW,
4757 * i.e. 40G for regular devices and 100G for CMT
4758 * mode devices.
4759 */
4760 link.speed = (hwfn->cdev->num_hwfns > 1) ?
4761 100000 : 40000;
4762 default:
4763 /* In auto mode pass PF link image to VF */
4764 break;
4765 }
4766
4767 if (link.link_up && vf_info->tx_rate) {
4768 struct qed_ptt *ptt;
4769 int rate;
4770
4771 rate = min_t(int, vf_info->tx_rate, link.speed);
4772
4773 ptt = qed_ptt_acquire(hwfn);
4774 if (!ptt) {
4775 DP_NOTICE(hwfn, "Failed to acquire PTT\n");
4776 return;
4777 }
4778
4779 if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
4780 vf_info->tx_rate = rate;
4781 link.speed = rate;
4782 }
4783
4784 qed_ptt_release(hwfn, ptt);
4785 }
4786
4787 qed_iov_set_link(hwfn, i, &params, &link, &caps);
4788 }
4789
4790 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4791 }
4792
4793 static int qed_set_vf_link_state(struct qed_dev *cdev,
4794 int vf_id, int link_state)
4795 {
4796 int i;
4797
4798 /* Sanitize request */
4799 if (IS_VF(cdev))
4800 return -EINVAL;
4801
4802 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
4803 DP_VERBOSE(cdev, QED_MSG_IOV,
4804 "VF index [%d] isn't active\n", vf_id);
4805 return -EINVAL;
4806 }
4807
4808 /* Handle configuration of link state */
4809 for_each_hwfn(cdev, i) {
4810 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4811 struct qed_public_vf_info *vf;
4812
4813 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4814 if (!vf)
4815 continue;
4816
4817 if (vf->link_state == link_state)
4818 continue;
4819
4820 vf->link_state = link_state;
4821 qed_inform_vf_link_state(&cdev->hwfns[i]);
4822 }
4823
4824 return 0;
4825 }
4826
4827 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
4828 {
4829 int i, rc = -EINVAL;
4830
4831 for_each_hwfn(cdev, i) {
4832 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4833
4834 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
4835 if (rc)
4836 break;
4837 }
4838
4839 return rc;
4840 }
4841
4842 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
4843 {
4844 int i;
4845
4846 for_each_hwfn(cdev, i) {
4847 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4848 struct qed_public_vf_info *vf;
4849
4850 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4851 DP_NOTICE(p_hwfn,
4852 "SR-IOV sanity check failed, can't set tx rate\n");
4853 return -EINVAL;
4854 }
4855
4856 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
4857
4858 vf->tx_rate = rate;
4859
4860 qed_inform_vf_link_state(p_hwfn);
4861 }
4862
4863 return 0;
4864 }
4865
4866 static int qed_set_vf_rate(struct qed_dev *cdev,
4867 int vfid, u32 min_rate, u32 max_rate)
4868 {
4869 int rc_min = 0, rc_max = 0;
4870
4871 if (max_rate)
4872 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
4873
4874 if (min_rate)
4875 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
4876
4877 if (rc_max | rc_min)
4878 return -EINVAL;
4879
4880 return 0;
4881 }
4882
4883 static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
4884 {
4885 int i;
4886
4887 for_each_hwfn(cdev, i) {
4888 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4889 struct qed_public_vf_info *vf;
4890
4891 if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
4892 DP_NOTICE(hwfn,
4893 "SR-IOV sanity check failed, can't set trust\n");
4894 return -EINVAL;
4895 }
4896
4897 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
4898
4899 if (vf->is_trusted_request == trust)
4900 return 0;
4901 vf->is_trusted_request = trust;
4902
4903 qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4904 }
4905
4906 return 0;
4907 }
4908
4909 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4910 {
4911 u64 events[QED_VF_ARRAY_LENGTH];
4912 struct qed_ptt *ptt;
4913 int i;
4914
4915 ptt = qed_ptt_acquire(hwfn);
4916 if (!ptt) {
4917 DP_VERBOSE(hwfn, QED_MSG_IOV,
4918 "Can't acquire PTT; re-scheduling\n");
4919 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4920 return;
4921 }
4922
4923 qed_iov_pf_get_pending_events(hwfn, events);
4924
4925 DP_VERBOSE(hwfn, QED_MSG_IOV,
4926 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4927 events[0], events[1], events[2]);
4928
4929 qed_for_each_vf(hwfn, i) {
4930 /* Skip VFs with no pending messages */
4931 if (!(events[i / 64] & (1ULL << (i % 64))))
4932 continue;
4933
4934 DP_VERBOSE(hwfn, QED_MSG_IOV,
4935 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4936 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4937
4938 /* Copy VF's message to PF's request buffer for that VF */
4939 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4940 continue;
4941
4942 qed_iov_process_mbx_req(hwfn, ptt, i);
4943 }
4944
4945 qed_ptt_release(hwfn, ptt);
4946 }
4947
4948 static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
4949 u8 *mac,
4950 struct qed_public_vf_info *info)
4951 {
4952 if (info->is_trusted_configured) {
4953 if (is_valid_ether_addr(info->mac) &&
4954 (!mac || !ether_addr_equal(mac, info->mac)))
4955 return true;
4956 } else {
4957 if (is_valid_ether_addr(info->forced_mac) &&
4958 (!mac || !ether_addr_equal(mac, info->forced_mac)))
4959 return true;
4960 }
4961
4962 return false;
4963 }
4964
4965 static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
4966 struct qed_public_vf_info *info,
4967 int vfid)
4968 {
4969 if (info->is_trusted_configured)
4970 qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
4971 else
4972 qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
4973 }
4974
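/* Worker for QED_IOV_WQ_SET_UNICAST_FILTER_FLAG: propagate PF-requested
 * MAC/VLAN settings into each VF's bulletin board and schedule a
 * bulletin post whenever something changed.
 */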
4975 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4976 {
4977 int i;
4978
4979 qed_for_each_vf(hwfn, i) {
4980 struct qed_public_vf_info *info;
4981 bool update = false;
4982 u8 *mac;
4983
4984 info = qed_iov_get_public_vf_info(hwfn, i, true);
4985 if (!info)
4986 continue;
4987
4988 /* Update data on bulletin board */
4989 if (info->is_trusted_configured)
4990 mac = qed_iov_bulletin_get_mac(hwfn, i);
4991 else
4992 mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4993
4994 if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
4995 DP_VERBOSE(hwfn,
4996 QED_MSG_IOV,
4997 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
4998 i,
4999 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
5000
5001 /* Update bulletin board with MAC */
5002 qed_set_bulletin_mac(hwfn, info, i);
5003 update = true;
5004 }
5005
5006 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
5007 info->forced_vlan) {
5008 DP_VERBOSE(hwfn,
5009 QED_MSG_IOV,
5010 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
5011 info->forced_vlan,
5012 i,
5013 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
5014 qed_iov_bulletin_set_forced_vlan(hwfn,
5015 info->forced_vlan, i);
5016 update = true;
5017 }
5018
5019 if (update)
5020 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5021 }
5022 }
5023
5024 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
5025 {
5026 struct qed_ptt *ptt;
5027 int i;
5028
5029 ptt = qed_ptt_acquire(hwfn);
5030 if (!ptt) {
5031 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
5032 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5033 return;
5034 }
5035
5036 qed_for_each_vf(hwfn, i)
5037 qed_iov_post_vf_bulletin(hwfn, i, ptt);
5038
5039 qed_ptt_release(hwfn, ptt);
5040 }
5041
5042 static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
5043 {
5044 struct qed_public_vf_info *vf_info;
5045 struct qed_vf_info *vf;
5046 u8 *force_mac;
5047 int i;
5048
5049 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
5050 vf = qed_iov_get_vf_info(hwfn, vf_id, true);
5051
5052 if (!vf_info || !vf)
5053 return;
5054
5055 /* Force MAC converted to generic MAC in case of VF trust on */
5056 if (vf_info->is_trusted_configured &&
5057 (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
5058 force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
5059
5060 if (force_mac) {
5061 /* Clear existing shadow copy of MAC to have a clean
5062 * slate.
5063 */
5064 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5065 if (ether_addr_equal(vf->shadow_config.macs[i],
5066 vf_info->mac)) {
5067 memset(vf->shadow_config.macs[i], 0,
5068 ETH_ALEN);
5069 DP_VERBOSE(hwfn, QED_MSG_IOV,
5070 "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
5071 vf_info->mac, vf_id);
5072 break;
5073 }
5074 }
5075
5076 ether_addr_copy(vf_info->mac, force_mac);
5077 memset(vf_info->forced_mac, 0, ETH_ALEN);
5078 vf->bulletin.p_virt->valid_bitmap &=
5079 ~BIT(MAC_ADDR_FORCED);
5080 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5081 }
5082 }
5083
5084 /* Update shadow copy with VF MAC when trust mode is turned off */
5085 if (!vf_info->is_trusted_configured) {
5086 u8 empty_mac[ETH_ALEN];
5087
5088 memset(empty_mac, 0, ETH_ALEN);
5089 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5090 if (ether_addr_equal(vf->shadow_config.macs[i],
5091 empty_mac)) {
5092 ether_addr_copy(vf->shadow_config.macs[i],
5093 vf_info->mac);
5094 DP_VERBOSE(hwfn, QED_MSG_IOV,
5095 "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
5096 vf_info->mac, vf_id);
5097 break;
5098 }
5099 }
5100 /* Clear bulletin when trust mode is turned off,
5101 * to have a clean slate for next (normal) operations.
5102 */
5103 qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
5104 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5105 }
5106 }
5107
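/* Worker for QED_IOV_WQ_TRUST_FLAG: apply pending trust-mode changes
 * per VF, switching between forced and regular MAC handling and
 * updating the vport's accept flags and control-frame check.
 */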
5108 static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5109 {
5110 struct qed_sp_vport_update_params params;
5111 struct qed_filter_accept_flags *flags;
5112 struct qed_public_vf_info *vf_info;
5113 struct qed_vf_info *vf;
5114 u8 mask;
5115 int i;
5116
5117 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
5118 flags = ¶ms.accept_flags;
5119
5120 qed_for_each_vf(hwfn, i) {
5121 /* Make sure the currently requested configuration didn't
5122 * flip, so that we don't end up configuring something that's
5123 * no longer needed.
5124 */
5125 vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
5126 if (vf_info->is_trusted_configured ==
5127 vf_info->is_trusted_request)
5128 continue;
5129 vf_info->is_trusted_configured = vf_info->is_trusted_request;
5130
5131 /* Handle forced MAC mode */
5132 qed_update_mac_for_vf_trust_change(hwfn, i);
5133
5134 /* Validate that the VF has a configured vport */
5135 vf = qed_iov_get_vf_info(hwfn, i, true);
5136 if (!vf->vport_instance)
5137 continue;
5138
5139 memset(¶ms, 0, sizeof(params));
5140 params.opaque_fid = vf->opaque_fid;
5141 params.vport_id = vf->vport_id;
5142
5143 params.update_ctl_frame_check = 1;
5144 params.mac_chk_en = !vf_info->is_trusted_configured;
5145
5146 if (vf_info->rx_accept_mode & mask) {
5147 flags->update_rx_mode_config = 1;
5148 flags->rx_accept_filter = vf_info->rx_accept_mode;
5149 }
5150
5151 if (vf_info->tx_accept_mode & mask) {
5152 flags->update_tx_mode_config = 1;
5153 flags->tx_accept_filter = vf_info->tx_accept_mode;
5154 }
5155
5156 /* Remove if needed; Otherwise this would set the mask */
5157 if (!vf_info->is_trusted_configured) {
5158 flags->rx_accept_filter &= ~mask;
5159 flags->tx_accept_filter &= ~mask;
5160 }
5161
5162 if (flags->update_rx_mode_config ||
5163 flags->update_tx_mode_config ||
5164 params.update_ctl_frame_check)
5165 qed_sp_vport_update(hwfn, &params,
5166 QED_SPQ_MODE_EBLOCK, NULL);
5167 }
5168 }
5169
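/* PF IOV delayed-work handler: services the flags set by
 * qed_schedule_iov(): FLR cleanup, VF mailbox messages, unicast-filter
 * updates, bulletin posting and trust-mode changes; re-arms the FLR
 * flag when a PTT cannot be acquired.
 */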
5170 static void qed_iov_pf_task(struct work_struct *work)
5171
5172 {
5173 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
5174 iov_task.work);
5175 int rc;
5176
5177 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
5178 return;
5179
5180 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
5181 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
5182
5183 if (!ptt) {
5184 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5185 return;
5186 }
5187
5188 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
5189 if (rc)
5190 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5191
5192 qed_ptt_release(hwfn, ptt);
5193 }
5194
5195 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
5196 qed_handle_vf_msg(hwfn);
5197
5198 if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
5199 &hwfn->iov_task_flags))
5200 qed_handle_pf_set_vf_unicast(hwfn);
5201
5202 if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
5203 &hwfn->iov_task_flags))
5204 qed_handle_bulletin_post(hwfn);
5205
5206 if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
5207 qed_iov_handle_trust_change(hwfn);
5208 }
5209
5210 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
5211 {
5212 int i;
5213
5214 for_each_hwfn(cdev, i) {
5215 if (!cdev->hwfns[i].iov_wq)
5216 continue;
5217
5218 if (schedule_first) {
5219 qed_schedule_iov(&cdev->hwfns[i],
5220 QED_IOV_WQ_STOP_WQ_FLAG);
5221 cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
5222 }
5223
5224 flush_workqueue(cdev->hwfns[i].iov_wq);
5225 destroy_workqueue(cdev->hwfns[i].iov_wq);
5226 }
5227 }
5228
5229 int qed_iov_wq_start(struct qed_dev *cdev)
5230 {
5231 char name[NAME_SIZE];
5232 int i;
5233
5234 for_each_hwfn(cdev, i) {
5235 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5236
5237 /* PFs need a dedicated workqueue only if they support IOV.
5238 * VFs always require one.
5239 */
5240 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
5241 continue;
5242
5243 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
5244 cdev->pdev->bus->number,
5245 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
5246
5247 p_hwfn->iov_wq = create_singlethread_workqueue(name);
5248 if (!p_hwfn->iov_wq) {
5249 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
5250 return -ENOMEM;
5251 }
5252
5253 if (IS_PF(cdev))
5254 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
5255 else
5256 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
5257 }
5258
5259 return 0;
5260 }
5261
5262 const struct qed_iov_hv_ops qed_iov_ops_pass = {
5263 .configure = &qed_sriov_configure,
5264 .set_mac = &qed_sriov_pf_set_mac,
5265 .set_vlan = &qed_sriov_pf_set_vlan,
5266 .get_config = &qed_get_vf_config,
5267 .set_link_state = &qed_set_vf_link_state,
5268 .set_spoof = &qed_spoof_configure,
5269 .set_rate = &qed_set_vf_rate,
5270 .set_trust = &qed_set_vf_trust,
5271 };
5272