/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u8 num_cids;
	u8 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
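
/* The vf->pf mailbox is a chain of such TLVs: a request-specific first TLV,
 * optional extension TLVs, and a terminating list-end TLV.  As a rough,
 * illustrative sketch (the helper name below is hypothetical; the driver's
 * real TLV helpers live in qed_vf.c), appending one TLV to the request
 * buffer amounts to:
 */
static inline void *qed_example_add_tlv(u8 *buf, u16 *offset,
					u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)(buf + *offset);

	/* Describe this TLV and advance the running offset so the next TLV
	 * (eventually CHANNEL_TLV_LIST_END) lands right behind it.
	 */
	tl->type = type;
	tl->length = length;
	*offset += length;

	return tl;
}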

#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI	(1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
/* A requirement for supporting multi-Tx queues on a single queue-zone,
 * VF would pass qids as additional information whenever passing queue
 * references.
 */
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS	BIT(2)

/* The VF is using the physical bar. While this is mostly internal
 * to the VF, might affect the number of CIDs supported assuming
 * QUEUE_QIDS is set.
 */
#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR	BIT(3)
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG	BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG	BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG	BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG	BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
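
/* Illustrative sketch only: rss_table_size_log carries the base-2 log of the
 * number of valid indirection-table entries, so the usable portion of
 * rss_ind_table[] could be derived roughly as below (the helper name is
 * hypothetical, not part of the driver API).
 */
static inline u16 qed_example_rss_ind_table_entries(u8 rss_table_size_log)
{
	/* Clamp to what rss_ind_table[] can actually hold (128 == 2^7) */
	if (rss_table_size_log >= 7)
		return T_ETH_INDIRECTION_TABLE_SIZE;

	return 1U << rss_table_size_log;
}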

struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS		BIT(3)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		/* Doorbell bar size configured in HW: log(size) or 0 */
		u8 bar_size;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible PF had to configure an older fastpath HSI
		 * [in case VF is newer than PF]. This is communicated back
		 * to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in VF about failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 num_cids;
		u8 padding;
	} resc;

	u32 bulletin_size;
	u32 padding;
};

struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Extended queue information - additional index for reference inside qzone.
 * If communicated between VF/PF, each TLV relating to queues should be
 * extended by one such [or have a future base TLV that already contains info].
 */
struct vfpf_qid_tlv {
	struct channel_tlv tl;
	u8 qid;
	u8 padding[3];
};
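
/* When the QUEUE_QIDS capability has been negotiated, every queue-related
 * request TLV is expected to be followed by one of the above.  A purely
 * illustrative sketch of filling it (the helper name is hypothetical, and
 * the CHANNEL_TLV_QID type value is defined further below):
 */
static inline void qed_example_fill_qid_tlv(struct vfpf_qid_tlv *p_qid,
					    u16 qid_tlv_type, u8 qid)
{
	p_qid->tl.type = qid_tlv_type;	/* CHANNEL_TLV_QID */
	p_qid->tl.length = sizeof(*p_qid);
	p_qid->qid = qid;
}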

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG	BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG		BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG	BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID	0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID	0x02
#define VFPF_Q_FILTER_SET_MAC		0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	/* There are only 256 approximate multicast bins, and in HSI they are
	 * divided into 32-bit values. As old VFs used to set bits in these
	 * values on their side, the upper half of the array is never expected
	 * to contain any data.
	 */
	u64 bins[4];
	u64 obsolete_bins[4];
};
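
/* Illustrative sketch only: assuming a multicast approximation bin index in
 * the 0..255 range (the hashing itself happens elsewhere), marking the bin
 * in bins[] could look like this.  The helper name is hypothetical.
 */
static inline void
qed_example_set_mcast_bin(struct vfpf_vport_update_mcast_bin_tlv *p_mcast,
			  u16 bin)
{
	/* 4 x 64-bit words give exactly 256 bin bits */
	p_mcast->bins[bin / 64] |= 1ULL << (bin % 64);
}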

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv as a header for various extended tlvs carrying the
 * different functionality of the vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 tun_mode_update_mask;
	u8 tunn_mode;
	u8 update_tun_cls;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u8 update_geneve_port;
	u8 update_vxlan_port;
	u16 geneve_port;
	u16 vxlan_port;
	u8 padding[2];
};

struct pfvf_update_tunn_param_tlv {
	struct pfvf_tlv hdr;

	u16 tunn_feature_mask;
	u8 vxlan_mode;
	u8 l2geneve_mode;
	u8 ipgeneve_mode;
	u8 l2gre_mode;
	u8 ipgre_mode;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

struct vfpf_update_coalesce {
	struct vfpf_first_tlv first_tlv;
	u16 rx_coal;
	u16 tx_coal;
	u16 qid;
	u8 padding[2];
};

struct vfpf_read_coal_req_tlv {
	struct vfpf_first_tlv first_tlv;
	u16 qid;
	u8 is_rx;
	u8 padding[5];
};

struct pfvf_read_coal_resp_tlv {
	struct pfvf_tlv hdr;
	u16 coal;
	u8 padding[6];
};

struct vfpf_bulletin_update_mac_tlv {
	struct vfpf_first_tlv first_tlv;
	u8 mac[ETH_ALEN];
	u8 padding[2];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct vfpf_update_tunn_param_tlv tunn_param_update;
	struct vfpf_update_coalesce update_coalesce;
	struct vfpf_read_coal_req_tlv read_coal_req;
	struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
	struct pfvf_update_tunn_param_tlv tunn_param_resp;
	struct pfvf_read_coal_resp_tlv read_coal_resp;
};

enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that a suggested MAC was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};
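
/* The values above index into the valid_bitmap of the bulletin content
 * defined below.  A minimal, purely illustrative check (hypothetical helper
 * name) of whether the PF forced a MAC address would therefore be:
 */
static inline bool qed_example_bulletin_mac_forced(u64 valid_bitmap)
{
	return !!(valid_bitmap & (1ULL << MAC_ADDR_FORCED));
}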

struct qed_bulletin_content {
	/* crc of the structure, used to ensure it is not read mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since those
	 * structs may grow further along the road, we cannot simply embed
	 * them here; instead we need to duplicate all of their fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 padding4[2];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};
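
/* The PF republishes the bulletin asynchronously, so a VF is expected to
 * copy it into a shadow buffer and validate the CRC (computed over
 * everything that follows the crc field) before using any of its fields;
 * qed_vf_read_bulletin() below handles that.  A rough sketch, assuming
 * crc32() from <linux/crc32.h> is available:
 *
 *	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
 *	crc = crc32(0, (u8 *)&shadow + sizeof(u32),
 *		    p_iov->bulletin.size - sizeof(u32));
 *	if (crc == shadow.crc)
 *		... contents may be consumed ...
 */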

enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_UPDATE_TUNN_PARAM,
	CHANNEL_TLV_COALESCE_UPDATE,
	CHANNEL_TLV_QID,
	CHANNEL_TLV_COALESCE_READ,
	CHANNEL_TLV_BULLETIN_UPDATE_MAC,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break if the vport-update tlvs stop being sequential.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
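
/* Because the vport-update extension TLV types are consecutive, code can
 * walk the range [CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
 * CHANNEL_TLV_VPORT_UPDATE_MAX) to visit every possible extension in a
 * request.  A minimal, purely illustrative sketch:
 *
 *	u16 type;
 *
 *	for (type = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 *	     type < CHANNEL_TLV_VPORT_UPDATE_MAX; type++)
 *		... search the request buffer for a TLV of this type ...
 */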

/* Default number of CIDs [total of both Rx and Tx] to be requested,
 * and the maximum possible number.
 */
#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
#define QED_ETH_VF_MAX_NUM_CIDS (250)

/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case PF originates prior to the fp-hsi version comparison,
	 * this has to be propagated as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;

	/* Current day VFs are passing the SBs physical address on vport
	 * start, and as they lack an IGU mapping they need to store the
	 * addresses of previously registered SBs.
	 * Even if we were to change configuration flow, due to backward
	 * compatibility [with older PFs] we'd still need to store these.
	 */
	struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];

	/* Determines whether VF utilizes doorbells via limited register
	 * bar or via the doorbell bar.
	 */
	bool b_doorbell_bar;
};
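
/* The request/reply buffers above form a single shared mailbox, so every
 * transaction is expected to hold the iov mutex from the moment the request
 * is composed until the reply has been consumed.  A rough, illustrative
 * sketch of that discipline (the helper name is hypothetical):
 */
static inline void qed_example_mbox_transaction(struct qed_vf_iov *p_iov)
{
	mutex_lock(&p_iov->mutex);

	/* ... compose TLVs in p_iov->vf2pf_request, notify the PF,
	 * then read the status out of p_iov->pf2vf_reply ...
	 */

	mutex_unlock(&p_iov->mutex);
}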

/**
 * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
 *        Coalesce value '0' will omit the configuration.
 *
 * @param p_hwfn
 * @param rx_coal - coalesce value in microseconds for rx queue
 * @param tx_coal - coalesce value in microseconds for tx queue
 * @param p_cid - queue cid
 *
 **/
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
			   u16 rx_coal,
			   u16 tx_coal, struct qed_queue_cid *p_cid);

/**
 * @brief VF - Get coalesce per VF's relative queue.
 *
 * @param p_hwfn
 * @param p_coal - coalesce value in microseconds for VF queues.
 * @param p_cid - queue cid
 *
 **/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid);

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * @return enum _qed_status
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get number of Tx queues allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_txqs - allocated TX queues
 */
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);

/**
 * @brief Get number of available connections [both Rx and Tx] for VF
 *
 * @param p_hwfn
 * @param num_cids - allocated number of connections
 */
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Get number of MAC filters allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_mac_filters - allocated MAC filters
 */
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from the VF's acquire
 *        response tlv
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF - sends the ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid - Only relative fields are relevant
 * @param bd_max_bytes - maximum number of bytes per bd
 * @param bd_chain_phys_addr - physical address of bd chain
 * @param cqe_pbl_addr - physical address of pbl
 * @param cqe_pbl_size - pbl size
 * @param pp_prod - pointer to the producer to be used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);

/**
 * @brief VF - start the TX queue by sending a message to the PF.
 *
 * @param p_hwfn
 * @param p_cid - Only relative fields are relevant
 * @param pbl_addr - physical address of the tx pbl
 * @param pbl_size - pbl size
 * @param pp_doorbell - pointer to address to which to write the doorbell
 *
 * @return int
 */
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memories
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs, IGU SBs don't have to be contiguous.
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief Stores [or removes] a configured sb_info.
 *
 * @param p_hwfn
 * @param sb_id - zero-based SB index [for fastpath]
 * @param p_sb - may be NULL [during removal].
 */
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link_caps - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_tunn);

u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);

/**
 * @brief - Ask PF to update the MAC address in its bulletin board
 *
 * @param p_hwfn
 * @param p_mac - mac address to be updated in bulletin board
 */
int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);

#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
}

static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
						u8 *num_vlan_filters)
{
}

static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_mac_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_adr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
				      struct qed_sb_info *p_sb)
{
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params *p_params,
					    struct qed_bulletin_content *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}

static inline void
qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
}

static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
						struct qed_tunnel_info *p_tunn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
						u8 *p_mac)
{
	return -EINVAL;
}

static inline u32
qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
		   enum BAR_ID bar_id)
{
	return 0;
}
#endif

#endif