1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2013-2022, Intel Corporation. */
3
4 #ifndef _VIRTCHNL_H_
5 #define _VIRTCHNL_H_
6
7 /* Description:
8 * This header file describes the Virtual Function (VF) - Physical Function
9 * (PF) communication protocol used by the drivers for all devices starting
10 * from our 40G product line
11 *
12 * Admin queue buffer usage:
13 * desc->opcode is always aqc_opc_send_msg_to_pf
14 * flags, retval, datalen, and data addr are all used normally.
15 * The Firmware copies the cookie fields when sending messages between the
16 * PF and VF, but uses all other fields internally. Due to this limitation,
17 * we must send all messages as "indirect", i.e. using an external buffer.
18 *
 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
 * have a maximum of sixteen queues for all of its VSIs.
22 *
23 * The PF is required to return a status code in v_retval for all messages
24 * except RESET_VF, which does not require any response. The returned value
25 * is of virtchnl_status_code type, defined here.
26 *
27 * In general, VF driver initialization should roughly follow the order of
28 * these opcodes. The VF driver must first validate the API version of the
29 * PF driver, then request a reset, then get resources, then configure
30 * queues and interrupts. After these operations are complete, the VF
31 * driver may start its queues, optionally add MAC and VLAN filters, and
32 * process traffic.
33 */
34
35 /* START GENERIC DEFINES
36 * Need to ensure the following enums and defines hold the same meaning and
37 * value in current and future projects
38 */
39
40 /* Error Codes */
41 enum virtchnl_status_code {
42 VIRTCHNL_STATUS_SUCCESS = 0,
43 VIRTCHNL_STATUS_ERR_PARAM = -5,
44 VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
45 VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
46 VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
47 VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
48 VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
49 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
50 };
51
52 /* Backward compatibility */
53 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
54 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
55
56 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
57 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
58 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
59 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
60 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
61 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
62 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
63 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
64
65 enum virtchnl_link_speed {
66 VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
67 VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
68 VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
69 VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
70 VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
71 VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
72 VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
73 VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
74 VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
75 };
76
77 /* for hsplit_0 field of Rx HMC context */
78 /* deprecated with AVF 1.0 */
79 enum virtchnl_rx_hsplit {
80 VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
81 VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
82 VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
83 VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
84 VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
85 };
86
87 /* END GENERIC DEFINES */
88
89 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
90 * of the virtchnl_msg structure.
91 */
92 enum virtchnl_ops {
93 /* The PF sends status change events to VFs using
94 * the VIRTCHNL_OP_EVENT opcode.
95 * VFs send requests to the PF using the other ops.
	 * Use of "advanced opcode" features must be negotiated as part of the
	 * capabilities exchange and is not considered part of the base mode
	 * feature set.
98 */
99 VIRTCHNL_OP_UNKNOWN = 0,
100 VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
101 VIRTCHNL_OP_RESET_VF = 2,
102 VIRTCHNL_OP_GET_VF_RESOURCES = 3,
103 VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
104 VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
105 VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
106 VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
107 VIRTCHNL_OP_ENABLE_QUEUES = 8,
108 VIRTCHNL_OP_DISABLE_QUEUES = 9,
109 VIRTCHNL_OP_ADD_ETH_ADDR = 10,
110 VIRTCHNL_OP_DEL_ETH_ADDR = 11,
111 VIRTCHNL_OP_ADD_VLAN = 12,
112 VIRTCHNL_OP_DEL_VLAN = 13,
113 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
114 VIRTCHNL_OP_GET_STATS = 15,
115 VIRTCHNL_OP_RSVD = 16,
116 VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
117 /* opcode 19 is reserved */
118 VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
119 VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
120 VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
121 VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
122 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
123 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
124 VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
125 VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
126 VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
127 VIRTCHNL_OP_SET_RSS_HENA = 26,
128 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
129 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
130 VIRTCHNL_OP_REQUEST_QUEUES = 29,
131 VIRTCHNL_OP_ENABLE_CHANNELS = 30,
132 VIRTCHNL_OP_DISABLE_CHANNELS = 31,
133 VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
134 VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
	/* opcodes 34 - 43 are reserved */
136 VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
137 VIRTCHNL_OP_ADD_RSS_CFG = 45,
138 VIRTCHNL_OP_DEL_RSS_CFG = 46,
139 VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
140 VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
141 VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
142 VIRTCHNL_OP_ADD_VLAN_V2 = 52,
143 VIRTCHNL_OP_DEL_VLAN_V2 = 53,
144 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
145 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
146 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
147 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
148 VIRTCHNL_OP_MAX,
149 };
150
151 /* These macros are used to generate compilation errors if a structure/union
152 * is not exactly the correct length. It gives a divide by zero error if the
153 * structure/union is not of the correct size, otherwise it creates an enum
154 * that is never used.
155 */
156 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
157 { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
159 { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
160
161 /* Message descriptions and data structures. */
162
163 /* VIRTCHNL_OP_VERSION
164 * VF posts its version number to the PF. PF responds with its version number
165 * in the same format, along with a return code.
166 * Reply from PF has its major/minor versions also in param0 and param1.
167 * If there is a major version mismatch, then the VF cannot operate.
168 * If there is a minor version mismatch, then the VF can operate but should
169 * add a warning to the system log.
170 *
171 * This enum element MUST always be specified as == 1, regardless of other
172 * changes in the API. The PF must always respond to this message without
173 * error regardless of version mismatch.
174 */
175 #define VIRTCHNL_VERSION_MAJOR 1
176 #define VIRTCHNL_VERSION_MINOR 1
177 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
178
179 struct virtchnl_version_info {
180 u32 major;
181 u32 minor;
182 };
183
184 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
185
#define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
187 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
188
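/* Illustrative sketch, not part of the virtchnl ABI: one way a VF driver might
 * evaluate the PF's reply to VIRTCHNL_OP_VERSION. How the reply buffer is
 * obtained is driver-specific and not shown here.
 */
static inline bool
virtchnl_example_version_usable(const struct virtchnl_version_info *pf_ver)
{
	/* A major version mismatch means the VF cannot operate at all; a
	 * minor mismatch (e.g. a 1.0 PF) only limits the available features
	 * and should be logged as a warning.
	 */
	return pf_ver->major == VIRTCHNL_VERSION_MAJOR;
}
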
189 /* VIRTCHNL_OP_RESET_VF
190 * VF sends this request to PF with no parameters
191 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
192 * until reset completion is indicated. The admin queue must be reinitialized
193 * after this operation.
194 *
195 * When reset is complete, PF must ensure that all queues in all VSIs associated
196 * with the VF are stopped, all queue configurations in the HMC are set to 0,
197 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
198 * are cleared.
199 */
200
/* VSI types that use the VIRTCHNL interface for VF-PF communication.
 * VIRTCHNL_VSI_SRIOV must always be 6 for backward compatibility. Add other
 * types as needed.
204 */
205 enum virtchnl_vsi_type {
206 VIRTCHNL_VSI_TYPE_INVALID = 0,
207 VIRTCHNL_VSI_SRIOV = 6,
208 };
209
210 /* VIRTCHNL_OP_GET_VF_RESOURCES
211 * Version 1.0 VF sends this request to PF with no parameters
212 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
213 * PF responds with an indirect message containing
214 * virtchnl_vf_resource and one or more
215 * virtchnl_vsi_resource structures.
216 */
217
218 struct virtchnl_vsi_resource {
219 u16 vsi_id;
220 u16 num_queue_pairs;
221
222 /* see enum virtchnl_vsi_type */
223 s32 vsi_type;
224 u16 qset_handle;
225 u8 default_mac_addr[ETH_ALEN];
226 };
227
228 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
229
230 /* VF capability flags
 * The VIRTCHNL_VF_OFFLOAD_L2 flag covers the base mode L2 offloads, including
 * Tx/Rx checksum offload and TSO for non-tunnelled packets.
233 */
234 #define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
235 #define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
236 #define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
237 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
238 #define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
239 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
240 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
241 /* used to negotiate communicating link speeds in Mbps */
242 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
243 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
244 #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
245 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
246 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
247 #define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
248 #define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
249 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
250 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
251 #define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
252 #define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
253 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
254 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
255 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
256
257 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
258 VIRTCHNL_VF_OFFLOAD_VLAN | \
259 VIRTCHNL_VF_OFFLOAD_RSS_PF)
260
261 struct virtchnl_vf_resource {
262 u16 num_vsis;
263 u16 num_queue_pairs;
264 u16 max_vectors;
265 u16 max_mtu;
266
267 u32 vf_cap_flags;
268 u32 rss_key_size;
269 u32 rss_lut_size;
270
271 struct virtchnl_vsi_resource vsi_res[];
272 };
273
274 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
275 #define virtchnl_vf_resource_LEGACY_SIZEOF 36
276
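/* Illustrative sketch, not part of the virtchnl ABI: the u32 capability bitmap
 * a version 1.1 VF might send with VIRTCHNL_OP_GET_VF_RESOURCES. Which flags
 * to request is a driver policy decision; this sketch asks for base mode plus
 * advanced link speed reporting.
 */
static inline u32 virtchnl_example_requested_caps(void)
{
	return VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
}
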
277 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
278 * VF sends this message to set up parameters for one TX queue.
279 * External data buffer contains one instance of virtchnl_txq_info.
280 * PF configures requested queue and returns a status code.
281 */
282
283 /* Tx queue config info */
284 struct virtchnl_txq_info {
285 u16 vsi_id;
286 u16 queue_id;
287 u16 ring_len; /* number of descriptors, multiple of 8 */
288 u16 headwb_enabled; /* deprecated with AVF 1.0 */
289 u64 dma_ring_addr;
290 u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
291 };
292
293 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
294
295 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
296 * VF sends this message to set up parameters for one RX queue.
297 * External data buffer contains one instance of virtchnl_rxq_info.
298 * PF configures requested queue and returns a status code.
299 */
300
301 /* Rx queue config info */
302 struct virtchnl_rxq_info {
303 u16 vsi_id;
304 u16 queue_id;
305 u32 ring_len; /* number of descriptors, multiple of 32 */
306 u16 hdr_size;
307 u16 splithdr_enabled; /* deprecated with AVF 1.0 */
308 u32 databuffer_size;
309 u32 max_pkt_size;
310 u8 pad0;
311 u8 rxdid;
312 u8 pad1[2];
313 u64 dma_ring_addr;
314
315 /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
316 s32 rx_split_pos;
317 u32 pad2;
318 };
319
320 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
321
322 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
323 * VF sends this message to set parameters for all active TX and RX queues
324 * associated with the specified VSI.
325 * PF configures queues and returns status.
326 * If the number of queues specified is greater than the number of queues
327 * associated with the VSI, an error is returned and no queues are configured.
328 * NOTE: The VF is not required to configure all queues in a single request.
329 * It may send multiple messages. PF drivers must correctly handle all VF
330 * requests.
331 */
332 struct virtchnl_queue_pair_info {
333 /* NOTE: vsi_id and queue_id should be identical for both queues. */
334 struct virtchnl_txq_info txq;
335 struct virtchnl_rxq_info rxq;
336 };
337
338 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
339
340 struct virtchnl_vsi_queue_config_info {
341 u16 vsi_id;
342 u16 num_queue_pairs;
343 u32 pad;
344 struct virtchnl_queue_pair_info qpair[];
345 };
346
347 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
348 #define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
349
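/* Illustrative sketch, not part of the virtchnl ABI: filling one element of the
 * qpair[] array for a VIRTCHNL_OP_CONFIG_VSI_QUEUES request. Ring lengths and
 * buffer sizes are arbitrary example values chosen to satisfy the constraints
 * noted in virtchnl_txq_info and virtchnl_rxq_info.
 */
static inline void
virtchnl_example_fill_qpair(struct virtchnl_queue_pair_info *qpi, u16 vsi_id,
			    u16 queue_id, u64 tx_ring_dma, u64 rx_ring_dma)
{
	qpi->txq.vsi_id = vsi_id;
	qpi->txq.queue_id = queue_id;
	qpi->txq.ring_len = 512;		/* multiple of 8 */
	qpi->txq.dma_ring_addr = tx_ring_dma;

	qpi->rxq.vsi_id = vsi_id;
	qpi->rxq.queue_id = queue_id;		/* must match txq.queue_id */
	qpi->rxq.ring_len = 512;		/* multiple of 32 */
	qpi->rxq.databuffer_size = 2048;
	qpi->rxq.max_pkt_size = 1522;
	qpi->rxq.dma_ring_addr = rx_ring_dma;
}
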
350 /* VIRTCHNL_OP_REQUEST_QUEUES
351 * VF sends this message to request the PF to allocate additional queues to
 * this VF. Each VF gets a guaranteed number of queues on init, but any
 * additional queues must be negotiated. This is a best-effort request, as it
 * is possible the PF does not have enough queues left to support it.
 * If the PF cannot support the number requested, it will respond with the
 * maximum number it is able to support. If the request is successful, the PF
 * will then reset the VF to apply the required changes.
358 */
359
360 /* VF resource request */
361 struct virtchnl_vf_res_request {
362 u16 num_queue_pairs;
363 };
364
365 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
366 * VF uses this message to map vectors to queues.
367 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
368 * are to be associated with the specified vector.
369 * The "other" causes are always mapped to vector 0. The VF may not request
370 * that vector 0 be used for traffic.
371 * PF configures interrupt mapping and returns status.
372 * NOTE: due to hardware requirements, all active queues (both TX and RX)
373 * should be mapped to interrupts, even if the driver intends to operate
374 * only in polling mode. In this case the interrupt may be disabled, but
375 * the ITR timer will still run to trigger writebacks.
376 */
377 struct virtchnl_vector_map {
378 u16 vsi_id;
379 u16 vector_id;
380 u16 rxq_map;
381 u16 txq_map;
382 u16 rxitr_idx;
383 u16 txitr_idx;
384 };
385
386 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
387
388 struct virtchnl_irq_map_info {
389 u16 num_vectors;
390 struct virtchnl_vector_map vecmap[];
391 };
392
393 VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
394 #define virtchnl_irq_map_info_LEGACY_SIZEOF 14
395
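/* Illustrative sketch, not part of the virtchnl ABI: mapping Tx/Rx queue 0 of a
 * VSI to vector 1. Vector 0 carries the "other" causes and may not be
 * requested for traffic, as noted above.
 */
static inline void
virtchnl_example_fill_vecmap(struct virtchnl_vector_map *vm, u16 vsi_id)
{
	vm->vsi_id = vsi_id;
	vm->vector_id = 1;
	vm->rxq_map = BIT(0);	/* bit n selects Rx queue n */
	vm->txq_map = BIT(0);	/* bit n selects Tx queue n */
	vm->rxitr_idx = 0;
	vm->txitr_idx = 0;
}
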
396 /* VIRTCHNL_OP_ENABLE_QUEUES
397 * VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
399 * The queues fields are bitmaps indicating which queues to act upon.
400 * (Currently, we only support 16 queues per VF, but we make the field
401 * u32 to allow for expansion.)
402 * PF performs requested action and returns status.
403 * NOTE: The VF is not required to enable/disable all queues in a single
404 * request. It may send multiple messages.
405 * PF drivers must correctly handle all VF requests.
406 */
407 struct virtchnl_queue_select {
408 u16 vsi_id;
409 u16 pad;
410 u32 rx_queues;
411 u32 tx_queues;
412 };
413
414 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
415
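/* Illustrative sketch, not part of the virtchnl ABI: building the payload for
 * VIRTCHNL_OP_ENABLE_QUEUES or VIRTCHNL_OP_DISABLE_QUEUES. Sending the message
 * over the admin queue is driver-specific and not shown.
 */
static inline void
virtchnl_example_fill_queue_select(struct virtchnl_queue_select *vqs,
				   u16 vsi_id, u32 rx_queue_mask,
				   u32 tx_queue_mask)
{
	vqs->vsi_id = vsi_id;
	vqs->pad = 0;
	vqs->rx_queues = rx_queue_mask;	/* bit n selects Rx queue n */
	vqs->tx_queues = tx_queue_mask;	/* bit n selects Tx queue n */
}
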
416 /* VIRTCHNL_OP_ADD_ETH_ADDR
417 * VF sends this message in order to add one or more unicast or multicast
418 * address filters for the specified VSI.
419 * PF adds the filters and returns status.
420 */
421
422 /* VIRTCHNL_OP_DEL_ETH_ADDR
423 * VF sends this message in order to remove one or more unicast or multicast
424 * filters for the specified VSI.
425 * PF removes the filters and returns status.
426 */
427
428 /* VIRTCHNL_ETHER_ADDR_LEGACY
 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
 * bytes. Going forward, VF drivers should not set @type to
 * VIRTCHNL_ETHER_ADDR_LEGACY; the value exists only to avoid breaking
 * previous/legacy behavior. The control plane function (i.e. PF) can use a
 * best effort method of tracking the primary/device unicast address in this
 * case, but there is no guarantee and functionality depends on the PF
 * implementation.
435 */
436
437 /* VIRTCHNL_ETHER_ADDR_PRIMARY
438 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
439 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
440 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
441 * function (i.e. PF) to accurately track and use this MAC address for
442 * displaying on the host and for VM/function reset.
443 */
444
445 /* VIRTCHNL_ETHER_ADDR_EXTRA
446 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
447 * unicast and/or multicast filters that are being added/deleted via
448 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
449 */
450 struct virtchnl_ether_addr {
451 u8 addr[ETH_ALEN];
452 u8 type;
453 #define VIRTCHNL_ETHER_ADDR_LEGACY 0
454 #define VIRTCHNL_ETHER_ADDR_PRIMARY 1
455 #define VIRTCHNL_ETHER_ADDR_EXTRA 2
456 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
457 u8 pad;
458 };
459
460 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
461
462 struct virtchnl_ether_addr_list {
463 u16 vsi_id;
464 u16 num_elements;
465 struct virtchnl_ether_addr list[];
466 };
467
468 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
469 #define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
470
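/* Illustrative sketch, not part of the virtchnl ABI: a single-entry
 * VIRTCHNL_OP_ADD_ETH_ADDR payload carrying the primary/device unicast MAC.
 * Tagging it VIRTCHNL_ETHER_ADDR_PRIMARY lets the PF track it accurately, as
 * described above. The caller must have allocated room for one list[] element.
 */
static inline void
virtchnl_example_fill_primary_mac(struct virtchnl_ether_addr_list *veal,
				  u16 vsi_id, const u8 mac[ETH_ALEN])
{
	int i;

	veal->vsi_id = vsi_id;
	veal->num_elements = 1;
	for (i = 0; i < ETH_ALEN; i++)
		veal->list[0].addr[i] = mac[i];
	veal->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
}
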
471 /* VIRTCHNL_OP_ADD_VLAN
472 * VF sends this message to add one or more VLAN tag filters for receives.
473 * PF adds the filters and returns status.
474 * If a port VLAN is configured by the PF, this operation will return an
475 * error to the VF.
476 */
477
478 /* VIRTCHNL_OP_DEL_VLAN
479 * VF sends this message to remove one or more VLAN tag filters for receives.
480 * PF removes the filters and returns status.
481 * If a port VLAN is configured by the PF, this operation will return an
482 * error to the VF.
483 */
484
485 struct virtchnl_vlan_filter_list {
486 u16 vsi_id;
487 u16 num_elements;
488 u16 vlan_id[];
489 };
490
491 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
492 #define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
493
494 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
495 * structures and opcodes.
496 *
497 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
498 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
499 *
500 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
501 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
502 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
503 *
504 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
505 * by the PF concurrently. For example, if the PF can support
506 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
507 * would OR the following bits:
508 *
 *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
510 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
511 * VIRTCHNL_VLAN_ETHERTYPE_AND;
512 *
513 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
514 * and 0x88A8 VLAN ethertypes.
515 *
 * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
 * supported
517 * by the PF concurrently. For example if the PF can support
518 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
519 * offload it would OR the following bits:
520 *
521 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
522 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
523 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
524 *
525 * The VF would interpret this as VLAN stripping can be supported on either
526 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
527 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
528 * the previously set value.
529 *
530 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
531 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
532 *
533 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
534 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
535 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
537 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
538 *
539 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
540 * VLAN filtering if the underlying PF supports it.
541 *
 * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
543 * certain VLAN capability can be toggled. For example if the underlying PF/CP
544 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
545 * set this bit along with the supported ethertypes.
546 */
547 enum virtchnl_vlan_support {
548 VIRTCHNL_VLAN_UNSUPPORTED = 0,
549 VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
550 VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
551 VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
552 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
553 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
554 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
555 VIRTCHNL_VLAN_PRIO = BIT(24),
556 VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
557 VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
558 VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
559 VIRTCHNL_VLAN_TOGGLE = BIT(31),
560 };
561
562 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
563 * for filtering, insertion, and stripping capabilities.
564 *
565 * If only outer capabilities are supported (for filtering, insertion, and/or
566 * stripping) then this refers to the outer most or single VLAN from the VF's
567 * perspective.
568 *
569 * If only inner capabilities are supported (for filtering, insertion, and/or
570 * stripping) then this refers to the outer most or single VLAN from the VF's
571 * perspective. Functionally this is the same as if only outer capabilities are
572 * supported. The VF driver is just forced to use the inner fields when
573 * adding/deleting filters and enabling/disabling offloads (if supported).
574 *
575 * If both outer and inner capabilities are supported (for filtering, insertion,
576 * and/or stripping) then outer refers to the outer most or single VLAN and
577 * inner refers to the second VLAN, if it exists, in the packet.
578 *
579 * There is no support for tunneled VLAN offloads, so outer or inner are never
580 * referring to a tunneled packet from the VF's perspective.
581 */
582 struct virtchnl_vlan_supported_caps {
583 u32 outer;
584 u32 inner;
585 };
586
587 /* The PF populates these fields based on the supported VLAN filtering. If a
588 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
589 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
590 * the unsupported fields.
591 *
592 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
593 * VIRTCHNL_VLAN_TOGGLE bit is set.
594 *
595 * The ethertype(s) specified in the ethertype_init field are the ethertypes
596 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
597 * most VLAN from the VF's perspective. If both inner and outer filtering are
 * allowed then ethertype_init only refers to the outer most VLAN as the only
599 * VLAN ethertype supported for inner VLAN filtering is
600 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
601 * when both inner and outer filtering are allowed.
602 *
603 * The max_filters field tells the VF how many VLAN filters it's allowed to have
604 * at any one time. If it exceeds this amount and tries to add another filter,
605 * then the request will be rejected by the PF. To prevent failures, the VF
606 * should keep track of how many VLAN filters it has added and not attempt to
607 * add more than max_filters.
608 */
609 struct virtchnl_vlan_filtering_caps {
610 struct virtchnl_vlan_supported_caps filtering_support;
611 u32 ethertype_init;
612 u16 max_filters;
613 u8 pad[2];
614 };
615
616 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
617
618 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
619 * if the PF supports a different ethertype for stripping and insertion.
620 *
621 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
 * for stripping affect the ethertype(s) specified for insertion and vice versa
623 * as well. If the VF tries to configure VLAN stripping via
624 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
625 * that will be the ethertype for both stripping and insertion.
626 *
627 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
 * stripping do not affect the ethertype(s) specified for insertion and vice
 * versa.
630 */
631 enum virtchnl_vlan_ethertype_match {
632 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
633 VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
634 };
635
636 /* The PF populates these fields based on the supported VLAN offloads. If a
637 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
638 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
639 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
640 *
641 * Also, a VF is only allowed to toggle its VLAN offload setting if the
 * VIRTCHNL_VLAN_TOGGLE bit is set.
643 *
644 * The VF driver needs to be aware of how the tags are stripped by hardware and
645 * inserted by the VF driver based on the level of offload support. The PF will
646 * populate these fields based on where the VLAN tags are expected to be
 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
648 * interpret these fields. See the definition of the
649 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
650 * enumeration.
651 */
652 struct virtchnl_vlan_offload_caps {
653 struct virtchnl_vlan_supported_caps stripping_support;
654 struct virtchnl_vlan_supported_caps insertion_support;
655 u32 ethertype_init;
656 u8 ethertype_match;
657 u8 pad[3];
658 };
659
660 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
661
662 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
663 * VF sends this message to determine its VLAN capabilities.
664 *
665 * PF will mark which capabilities it supports based on hardware support and
666 * current configuration. For example, if a port VLAN is configured the PF will
667 * not allow outer VLAN filtering, stripping, or insertion to be configured so
668 * it will block these features from the VF.
669 *
 * The VF will need to cross-reference its capabilities with the PF's
671 * capabilities in the response message from the PF to determine the VLAN
672 * support.
673 */
674 struct virtchnl_vlan_caps {
675 struct virtchnl_vlan_filtering_caps filtering;
676 struct virtchnl_vlan_offload_caps offloads;
677 };
678
679 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
680
681 struct virtchnl_vlan {
682 u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
683 u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
684 * filtering caps
685 */
686 u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
687 * filtering caps. Note that tpid here does not refer to
688 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
689 * actual 2-byte VLAN TPID
690 */
691 u8 pad[2];
692 };
693
694 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
695
696 struct virtchnl_vlan_filter {
697 struct virtchnl_vlan inner;
698 struct virtchnl_vlan outer;
699 u8 pad[16];
700 };
701
702 VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
703
704 /* VIRTCHNL_OP_ADD_VLAN_V2
705 * VIRTCHNL_OP_DEL_VLAN_V2
706 *
707 * VF sends these messages to add/del one or more VLAN tag filters for Rx
708 * traffic.
709 *
710 * The PF attempts to add the filters and returns status.
711 *
712 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
713 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
714 */
715 struct virtchnl_vlan_filter_list_v2 {
716 u16 vport_id;
717 u16 num_elements;
718 u8 pad[4];
719 struct virtchnl_vlan_filter filters[];
720 };
721
722 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
723 #define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
724
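/* Illustrative sketch, not part of the virtchnl ABI: a single inner 802.1Q
 * filter for VIRTCHNL_OP_ADD_VLAN_V2. Whether the inner or outer member may be
 * used must follow the capabilities returned by
 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS. The caller must have allocated room
 * for one filters[] element.
 */
static inline void
virtchnl_example_fill_vlan_v2(struct virtchnl_vlan_filter_list_v2 *vfl,
			      u16 vport_id, u16 vlan_id)
{
	vfl->vport_id = vport_id;
	vfl->num_elements = 1;
	vfl->filters[0].inner.tci = vlan_id;	/* VID in tci[11:0], PCP 0 */
	vfl->filters[0].inner.tpid = 0x8100;
}
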
725 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
726 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
727 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
728 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
729 *
730 * VF sends this message to enable or disable VLAN stripping or insertion. It
731 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
732 * allowed and whether or not it's allowed to enable/disable the specific
733 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
734 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
735 * messages are allowed.
736 *
737 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
738 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
739 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
740 * case means the outer most or single VLAN from the VF's perspective. This is
741 * because no outer offloads are supported. See the comments above the
742 * virtchnl_vlan_supported_caps structure for more details.
743 *
744 * virtchnl_vlan_caps.offloads.stripping_support.inner =
745 * VIRTCHNL_VLAN_TOGGLE |
746 * VIRTCHNL_VLAN_ETHERTYPE_8100;
747 *
748 * virtchnl_vlan_caps.offloads.insertion_support.inner =
749 * VIRTCHNL_VLAN_TOGGLE |
750 * VIRTCHNL_VLAN_ETHERTYPE_8100;
751 *
752 * In order to enable inner (again note that in this case inner is the outer
753 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
754 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
755 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
756 *
757 * virtchnl_vlan_setting.inner_ethertype_setting =
758 * VIRTCHNL_VLAN_ETHERTYPE_8100;
759 *
760 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
761 * initialization.
762 *
763 * The reason that VLAN TPID(s) are not being used for the
764 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
765 * possible a device could support VLAN insertion and/or stripping offload on
766 * multiple ethertypes concurrently, so this method allows a VF to request
767 * multiple ethertypes in one message using the virtchnl_vlan_support
768 * enumeration.
769 *
770 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
771 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
772 * VLAN insertion and stripping simultaneously. The
773 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
774 * populated based on what the PF can support.
775 *
776 * virtchnl_vlan_caps.offloads.stripping_support.outer =
777 * VIRTCHNL_VLAN_TOGGLE |
778 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
779 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
780 * VIRTCHNL_VLAN_ETHERTYPE_AND;
781 *
782 * virtchnl_vlan_caps.offloads.insertion_support.outer =
783 * VIRTCHNL_VLAN_TOGGLE |
784 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
785 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
786 * VIRTCHNL_VLAN_ETHERTYPE_AND;
787 *
788 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
 * would populate the virtchnl_vlan_setting structure in the following manner
790 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
791 *
792 * virtchnl_vlan_setting.outer_ethertype_setting =
 *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *	VIRTCHNL_VLAN_ETHERTYPE_88A8;
795 *
796 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
797 * initialization.
798 *
799 * There is also the case where a PF and the underlying hardware can support
800 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
801 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
802 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
803 * offloads. The ethertypes must match for stripping and insertion.
804 *
805 * virtchnl_vlan_caps.offloads.stripping_support.outer =
806 * VIRTCHNL_VLAN_TOGGLE |
807 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
808 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
809 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
810 *
811 * virtchnl_vlan_caps.offloads.insertion_support.outer =
812 * VIRTCHNL_VLAN_TOGGLE |
813 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
814 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
815 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
816 *
817 * virtchnl_vlan_caps.offloads.ethertype_match =
818 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
819 *
820 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
821 * populate the virtchnl_vlan_setting structure in the following manner and send
822 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
823 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
824 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
825 *
 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
827 *
828 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
829 * initialization.
830 */
831 struct virtchnl_vlan_setting {
832 u32 outer_ethertype_setting;
833 u32 inner_ethertype_setting;
834 u16 vport_id;
835 u8 pad[6];
836 };
837
838 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
839
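/* Illustrative sketch, not part of the virtchnl ABI: checking whether inner
 * 0x8100 stripping may be toggled and, if so, building the payload for
 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 as described in the comment above.
 */
static inline bool
virtchnl_example_setup_inner_strip(const struct virtchnl_vlan_caps *caps,
				   u16 vport_id,
				   struct virtchnl_vlan_setting *vs)
{
	u32 inner = caps->offloads.stripping_support.inner;

	if (!(inner & VIRTCHNL_VLAN_TOGGLE) ||
	    !(inner & VIRTCHNL_VLAN_ETHERTYPE_8100))
		return false;

	vs->outer_ethertype_setting = 0;
	vs->inner_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_8100;
	vs->vport_id = vport_id;
	return true;
}
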
840 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
841 * VF sends VSI id and flags.
842 * PF returns status code in retval.
843 * Note: we assume that broadcast accept mode is always enabled.
844 */
845 struct virtchnl_promisc_info {
846 u16 vsi_id;
847 u16 flags;
848 };
849
850 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
851
852 #define FLAG_VF_UNICAST_PROMISC 0x00000001
853 #define FLAG_VF_MULTICAST_PROMISC 0x00000002
854
855 /* VIRTCHNL_OP_GET_STATS
856 * VF sends this message to request stats for the selected VSI. VF uses
857 * the virtchnl_queue_select struct to specify the VSI. The queue_id
858 * field is ignored by the PF.
859 *
860 * PF replies with struct eth_stats in an external buffer.
861 */
862
863 /* VIRTCHNL_OP_CONFIG_RSS_KEY
864 * VIRTCHNL_OP_CONFIG_RSS_LUT
865 * VF sends these messages to configure RSS. Only supported if both PF
866 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
867 * configuration negotiation. If this is the case, then the RSS fields in
868 * the VF resource struct are valid.
869 * Both the key and LUT are initialized to 0 by the PF, meaning that
870 * RSS is effectively disabled until set up by the VF.
871 */
872 struct virtchnl_rss_key {
873 u16 vsi_id;
874 u16 key_len;
875 u8 key[]; /* RSS hash key, packed bytes */
876 };
877
878 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
879 #define virtchnl_rss_key_LEGACY_SIZEOF 6
880
881 struct virtchnl_rss_lut {
882 u16 vsi_id;
883 u16 lut_entries;
884 u8 lut[]; /* RSS lookup table */
885 };
886
887 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
888 #define virtchnl_rss_lut_LEGACY_SIZEOF 6
889
890 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
891 * VIRTCHNL_OP_SET_RSS_HENA
892 * VF sends these messages to get and set the hash filter enable bits for RSS.
893 * By default, the PF sets these to all possible traffic types that the
894 * hardware supports. The VF can query this value if it wants to change the
895 * traffic types that are hashed by the hardware.
896 */
897 struct virtchnl_rss_hena {
898 u64 hena;
899 };
900
901 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
902
903 /* VIRTCHNL_OP_ENABLE_CHANNELS
904 * VIRTCHNL_OP_DISABLE_CHANNELS
905 * VF sends these messages to enable or disable channels based on
906 * the user specified queue count and queue offset for each traffic class.
907 * This struct encompasses all the information that the PF needs from
908 * VF to create a channel.
909 */
910 struct virtchnl_channel_info {
911 u16 count; /* number of queues in a channel */
912 u16 offset; /* queues in a channel start from 'offset' */
913 u32 pad;
914 u64 max_tx_rate;
915 };
916
917 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
918
919 struct virtchnl_tc_info {
920 u32 num_tc;
921 u32 pad;
922 struct virtchnl_channel_info list[];
923 };
924
925 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
926 #define virtchnl_tc_info_LEGACY_SIZEOF 24
927
/* VIRTCHNL_OP_ADD_CLOUD_FILTER
 * VIRTCHNL_OP_DEL_CLOUD_FILTER
930 * VF sends these messages to add or delete a cloud filter based on the
931 * user specified match and action filters. These structures encompass
932 * all the information that the PF needs from the VF to add/delete a
933 * cloud filter.
934 */
935
936 struct virtchnl_l4_spec {
937 u8 src_mac[ETH_ALEN];
938 u8 dst_mac[ETH_ALEN];
939 __be16 vlan_id;
940 __be16 pad; /* reserved for future use */
941 __be32 src_ip[4];
942 __be32 dst_ip[4];
943 __be16 src_port;
944 __be16 dst_port;
945 };
946
947 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
948
949 union virtchnl_flow_spec {
950 struct virtchnl_l4_spec tcp_spec;
951 u8 buffer[128]; /* reserved for future use */
952 };
953
954 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
955
956 enum virtchnl_action {
957 /* action types */
958 VIRTCHNL_ACTION_DROP = 0,
959 VIRTCHNL_ACTION_TC_REDIRECT,
960 VIRTCHNL_ACTION_PASSTHRU,
961 VIRTCHNL_ACTION_QUEUE,
962 VIRTCHNL_ACTION_Q_REGION,
963 VIRTCHNL_ACTION_MARK,
964 VIRTCHNL_ACTION_COUNT,
965 };
966
967 enum virtchnl_flow_type {
968 /* flow types */
969 VIRTCHNL_TCP_V4_FLOW = 0,
970 VIRTCHNL_TCP_V6_FLOW,
971 };
972
973 struct virtchnl_filter {
974 union virtchnl_flow_spec data;
975 union virtchnl_flow_spec mask;
976
977 /* see enum virtchnl_flow_type */
978 s32 flow_type;
979
980 /* see enum virtchnl_action */
981 s32 action;
982 u32 action_meta;
983 u8 field_flags;
984 u8 pad[3];
985 };
986
987 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
988
989 struct virtchnl_supported_rxdids {
990 u64 supported_rxdids;
991 };
992
993 /* VIRTCHNL_OP_EVENT
994 * PF sends this message to inform the VF driver of events that may affect it.
995 * No direct response is expected from the VF, though it may generate other
996 * messages in response to this one.
997 */
998 enum virtchnl_event_codes {
999 VIRTCHNL_EVENT_UNKNOWN = 0,
1000 VIRTCHNL_EVENT_LINK_CHANGE,
1001 VIRTCHNL_EVENT_RESET_IMPENDING,
1002 VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
1003 };
1004
1005 #define PF_EVENT_SEVERITY_INFO 0
1006 #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
1007
1008 struct virtchnl_pf_event {
1009 /* see enum virtchnl_event_codes */
1010 s32 event;
1011 union {
1012 /* If the PF driver does not support the new speed reporting
1013 * capabilities then use link_event else use link_event_adv to
1014 * get the speed and link information. The ability to understand
1015 * new speeds is indicated by setting the capability flag
1016 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
1017 * in virtchnl_vf_resource struct and can be used to determine
1018 * which link event struct to use below.
1019 */
1020 struct {
1021 enum virtchnl_link_speed link_speed;
1022 bool link_status;
1023 u8 pad[3];
1024 } link_event;
1025 struct {
1026 /* link_speed provided in Mbps */
1027 u32 link_speed;
1028 u8 link_status;
1029 u8 pad[3];
1030 } link_event_adv;
1031 } event_data;
1032
1033 s32 severity;
1034 };
1035
1036 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
1037
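/* Illustrative sketch, not part of the virtchnl ABI: reading link state from a
 * VIRTCHNL_EVENT_LINK_CHANGE event. Which union member is valid depends on
 * whether VIRTCHNL_VF_CAP_ADV_LINK_SPEED was negotiated, as explained above.
 */
static inline bool
virtchnl_example_link_is_up(const struct virtchnl_pf_event *pfe,
			    u32 vf_cap_flags)
{
	if (pfe->event != VIRTCHNL_EVENT_LINK_CHANGE)
		return false;

	if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return pfe->event_data.link_event_adv.link_status != 0;

	return pfe->event_data.link_event.link_status;
}
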
1038 /* used to specify if a ceq_idx or aeq_idx is invalid */
1039 #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
1040 /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
1041 * VF uses this message to request PF to map RDMA vectors to RDMA queues.
1042 * The request for this originates from the VF RDMA driver through
1043 * a client interface between VF LAN and VF RDMA driver.
 * A vector could have an AEQ and a CEQ attached to it. Since there is a
 * single AEQ per VF RDMA instance, most vectors will have
 * VIRTCHNL_RDMA_INVALID_QUEUE_IDX for the AEQ index and a valid index for
 * the CEQ. There will never be a case where multiple CEQs are attached to a
 * single vector.
1049 * PF configures interrupt mapping and returns status.
1050 */
1051
1052 struct virtchnl_rdma_qv_info {
1053 u32 v_idx; /* msix_vector */
1054 u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1055 u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1056 u8 itr_idx;
1057 u8 pad[3];
1058 };
1059
1060 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
1061
1062 struct virtchnl_rdma_qvlist_info {
1063 u32 num_vectors;
1064 struct virtchnl_rdma_qv_info qv_info[];
1065 };
1066
1067 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
1068 #define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
1069
1070 /* VF reset states - these are written into the RSTAT register:
1071 * VFGEN_RSTAT on the VF
1072 * When the PF initiates a reset, it writes 0
1073 * When the reset is complete, it writes 1
1074 * When the PF detects that the VF has recovered, it writes 2
1075 * VF checks this register periodically to determine if a reset has occurred,
1076 * then polls it to know when the reset is complete.
1077 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked,
 * will result in 3.
1080 */
1081 enum virtchnl_vfr_states {
1082 VIRTCHNL_VFR_INPROGRESS = 0,
1083 VIRTCHNL_VFR_COMPLETED,
1084 VIRTCHNL_VFR_VFACTIVE,
1085 };
1086
1087 /* Type of RSS algorithm */
1088 enum virtchnl_rss_algorithm {
1089 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
1090 VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
1091 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
1092 VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
1093 };
1094
1095 #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
1096 #define PROTO_HDR_SHIFT 5
1097 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
1098 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
1099
/* The VF uses these macros to configure each protocol header.
 * They specify which protocol headers and protocol header fields to use,
 * based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
1103 * @param hdr: a struct of virtchnl_proto_hdr
1104 * @param hdr_type: ETH/IPV4/TCP, etc
1105 * @param field: SRC/DST/TEID/SPI, etc
1106 */
1107 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
1108 ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
1109 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
1110 ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
1111 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
1112 ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
1113 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
1114
1115 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1116 (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
1117 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1118 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1119 (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
1120 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1121
1122 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
1123 ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
1124 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
1125 (((hdr)->type) >> PROTO_HDR_SHIFT)
1126 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
1127 ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
1128 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
1129 (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
1130 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
1131
1132 /* Protocol header type within a packet segment. A segment consists of one or
1133 * more protocol headers that make up a logical group of protocol headers. Each
1134 * logical group of protocol headers encapsulates or is encapsulated using/by
1135 * tunneling or encapsulation protocols for network virtualization.
1136 */
1137 enum virtchnl_proto_hdr_type {
1138 VIRTCHNL_PROTO_HDR_NONE,
1139 VIRTCHNL_PROTO_HDR_ETH,
1140 VIRTCHNL_PROTO_HDR_S_VLAN,
1141 VIRTCHNL_PROTO_HDR_C_VLAN,
1142 VIRTCHNL_PROTO_HDR_IPV4,
1143 VIRTCHNL_PROTO_HDR_IPV6,
1144 VIRTCHNL_PROTO_HDR_TCP,
1145 VIRTCHNL_PROTO_HDR_UDP,
1146 VIRTCHNL_PROTO_HDR_SCTP,
1147 VIRTCHNL_PROTO_HDR_GTPU_IP,
1148 VIRTCHNL_PROTO_HDR_GTPU_EH,
1149 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
1150 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
1151 VIRTCHNL_PROTO_HDR_PPPOE,
1152 VIRTCHNL_PROTO_HDR_L2TPV3,
1153 VIRTCHNL_PROTO_HDR_ESP,
1154 VIRTCHNL_PROTO_HDR_AH,
1155 VIRTCHNL_PROTO_HDR_PFCP,
1156 };
1157
1158 /* Protocol header field within a protocol header. */
1159 enum virtchnl_proto_hdr_field {
1160 /* ETHER */
1161 VIRTCHNL_PROTO_HDR_ETH_SRC =
1162 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
1163 VIRTCHNL_PROTO_HDR_ETH_DST,
1164 VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
1165 /* S-VLAN */
1166 VIRTCHNL_PROTO_HDR_S_VLAN_ID =
1167 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
1168 /* C-VLAN */
1169 VIRTCHNL_PROTO_HDR_C_VLAN_ID =
1170 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
1171 /* IPV4 */
1172 VIRTCHNL_PROTO_HDR_IPV4_SRC =
1173 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
1174 VIRTCHNL_PROTO_HDR_IPV4_DST,
1175 VIRTCHNL_PROTO_HDR_IPV4_DSCP,
1176 VIRTCHNL_PROTO_HDR_IPV4_TTL,
1177 VIRTCHNL_PROTO_HDR_IPV4_PROT,
1178 /* IPV6 */
1179 VIRTCHNL_PROTO_HDR_IPV6_SRC =
1180 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
1181 VIRTCHNL_PROTO_HDR_IPV6_DST,
1182 VIRTCHNL_PROTO_HDR_IPV6_TC,
1183 VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
1184 VIRTCHNL_PROTO_HDR_IPV6_PROT,
1185 /* TCP */
1186 VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
1187 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
1188 VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
1189 /* UDP */
1190 VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
1191 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
1192 VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
1193 /* SCTP */
1194 VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
1195 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
1196 VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
1197 /* GTPU_IP */
1198 VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
1199 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
1200 /* GTPU_EH */
1201 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
1202 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
1203 VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
1204 /* PPPOE */
1205 VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
1206 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
1207 /* L2TPV3 */
1208 VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
1209 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
1210 /* ESP */
1211 VIRTCHNL_PROTO_HDR_ESP_SPI =
1212 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
1213 /* AH */
1214 VIRTCHNL_PROTO_HDR_AH_SPI =
1215 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
1216 /* PFCP */
1217 VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
1218 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
1219 VIRTCHNL_PROTO_HDR_PFCP_SEID,
1220 };
1221
1222 struct virtchnl_proto_hdr {
1223 /* see enum virtchnl_proto_hdr_type */
1224 s32 type;
1225 u32 field_selector; /* a bit mask to select field for header type */
1226 u8 buffer[64];
1227 /**
1228 * binary buffer in network order for specific header type.
	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
1230 * header is expected to be copied into the buffer.
1231 */
1232 };
1233
1234 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1235
1236 struct virtchnl_proto_hdrs {
1237 u8 tunnel_level;
1238 u8 pad[3];
1239 /**
	 * specify where the protocol headers start from.
1241 * 0 - from the outer layer
1242 * 1 - from the first inner layer
1243 * 2 - from the second inner layer
1244 * ....
1245 **/
	int count; /* number of proto layers; must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
1247 struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1248 };
1249
1250 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1251
1252 struct virtchnl_rss_cfg {
1253 struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
1254
1255 /* see enum virtchnl_rss_algorithm; rss algorithm type */
1256 s32 rss_algorithm;
1257 u8 reserved[128]; /* reserve for future */
1258 };
1259
1260 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
1261
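/* Illustrative sketch, not part of the virtchnl ABI: a VIRTCHNL_OP_ADD_RSS_CFG
 * payload that hashes on the IPv4 source/destination addresses and TCP ports,
 * built with the VIRTCHNL_*_PROTO_HDR_* helper macros defined above. The cfg
 * structure is assumed to be zero-initialized by the caller.
 */
static inline void
virtchnl_example_fill_rss_tcp4(struct virtchnl_rss_cfg *cfg)
{
	struct virtchnl_proto_hdr *hdr;

	cfg->proto_hdrs.tunnel_level = 0;	/* match from the outer layer */
	cfg->proto_hdrs.count = 2;
	cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;

	hdr = &cfg->proto_hdrs.proto_hdr[0];
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);

	hdr = &cfg->proto_hdrs.proto_hdr[1];
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
}
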
1262 /* action configuration for FDIR */
1263 struct virtchnl_filter_action {
1264 /* see enum virtchnl_action type */
1265 s32 type;
1266 union {
1267 /* used for queue and qgroup action */
1268 struct {
1269 u16 index;
1270 u8 region;
1271 } queue;
1272 /* used for count action */
1273 struct {
1274 /* share counter ID with other flow rules */
1275 u8 shared;
1276 u32 id; /* counter ID */
1277 } count;
1278 /* used for mark action */
1279 u32 mark_id;
1280 u8 reserve[32];
1281 } act_conf;
1282 };
1283
1284 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1285
1286 #define VIRTCHNL_MAX_NUM_ACTIONS 8
1287
1288 struct virtchnl_filter_action_set {
	/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1290 int count;
1291 struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1292 };
1293
1294 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1295
1296 /* pattern and action for FDIR rule */
1297 struct virtchnl_fdir_rule {
1298 struct virtchnl_proto_hdrs proto_hdrs;
1299 struct virtchnl_filter_action_set action_set;
1300 };
1301
1302 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1303
/* Status returned to VF after VF requests FDIR commands
 * VIRTCHNL_FDIR_SUCCESS
 * VF FDIR related request was successfully done by PF.
 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 * OP_ADD_FDIR_FILTER request failed due to insufficient hardware resources.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
 * OP_ADD_FDIR_FILTER request failed because the rule already exists.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
 * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
 * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
 * OP_ADD_FDIR_FILTER request failed due to parameter validation
 * or because the hardware doesn't support the rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
 * OP_ADD/DEL_FDIR_FILTER request failed because programming timed out.
 *
 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
 * OP_QUERY_FDIR_FILTER request failed due to parameter validation,
 * for example, the VF queried the counter of a rule that has no counter
 * action.
 */
1333 enum virtchnl_fdir_prgm_status {
1334 VIRTCHNL_FDIR_SUCCESS = 0,
1335 VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1336 VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1337 VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1338 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1339 VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1340 VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1341 VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1342 };
1343
1344 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1345 * VF sends this request to PF by filling out vsi_id,
1346 * validate_only and rule_cfg. PF will return flow_id
 * if the request succeeds and will return the status to the VF.
1348 */
1349 struct virtchnl_fdir_add {
1350 u16 vsi_id; /* INPUT */
1351 /*
	 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
	 * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
1354 */
1355 u16 validate_only; /* INPUT */
1356 u32 flow_id; /* OUTPUT */
1357 struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1358
1359 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1360 s32 status;
1361 };
1362
1363 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
1364
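/* Illustrative sketch, not part of the virtchnl ABI: the fixed-size part of a
 * VIRTCHNL_OP_ADD_FDIR_FILTER request that steers matching packets to a queue.
 * The rule_cfg.proto_hdrs match pattern would be filled in the same way as in
 * the RSS sketch above and is omitted here.
 */
static inline void
virtchnl_example_fdir_to_queue(struct virtchnl_fdir_add *add, u16 vsi_id,
			       u16 queue_index)
{
	struct virtchnl_filter_action *act =
		&add->rule_cfg.action_set.actions[0];

	add->vsi_id = vsi_id;
	add->validate_only = 0;		/* 1 would only validate the rule */
	add->rule_cfg.action_set.count = 1;
	act->type = VIRTCHNL_ACTION_QUEUE;
	act->act_conf.queue.index = queue_index;
}
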
1365 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1366 * VF sends this request to PF by filling out vsi_id
 * and flow_id. PF will return the status to the VF.
1368 */
1369 struct virtchnl_fdir_del {
1370 u16 vsi_id; /* INPUT */
1371 u16 pad;
1372 u32 flow_id; /* INPUT */
1373
1374 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1375 s32 status;
1376 };
1377
1378 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
1379
1380 #define __vss_byone(p, member, count, old) \
1381 (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
1382
1383 #define __vss_byelem(p, member, count, old) \
1384 (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
1385
1386 #define __vss_full(p, member, count, old) \
1387 (struct_size(p, member, count) + (old - struct_size(p, member, 0)))
1388
1389 #define __vss(type, func, p, member, count) \
1390 struct type: func(p, member, count, type##_LEGACY_SIZEOF)
1391
1392 #define virtchnl_struct_size(p, m, c) \
1393 _Generic(*p, \
1394 __vss(virtchnl_vf_resource, __vss_full, p, m, c), \
1395 __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \
1396 __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \
1397 __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \
1398 __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \
1399 __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
1400 __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
1401 __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
1402 __vss(virtchnl_rss_key, __vss_byone, p, m, c), \
1403 __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
1404
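/* Illustrative sketch, not part of the virtchnl ABI: computing the number of
 * bytes to send for a variable-length message, here an address list carrying
 * its num_elements entries.
 */
static inline size_t
virtchnl_example_eal_msg_size(struct virtchnl_ether_addr_list *veal)
{
	return virtchnl_struct_size(veal, list, veal->num_elements);
}
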
1405 /**
1406 * virtchnl_vc_validate_vf_msg
1407 * @ver: Virtchnl version info
1408 * @v_opcode: Opcode for the message
1409 * @msg: pointer to the msg buffer
1410 * @msglen: msg length
1411 *
 * Validate the message format against the structure expected for each opcode.
1413 */
1414 static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
1416 u8 *msg, u16 msglen)
1417 {
1418 bool err_msg_format = false;
1419 u32 valid_len = 0;
1420
1421 /* Validate message length. */
1422 switch (v_opcode) {
1423 case VIRTCHNL_OP_VERSION:
1424 valid_len = sizeof(struct virtchnl_version_info);
1425 break;
1426 case VIRTCHNL_OP_RESET_VF:
1427 break;
1428 case VIRTCHNL_OP_GET_VF_RESOURCES:
1429 if (VF_IS_V11(ver))
1430 valid_len = sizeof(u32);
1431 break;
1432 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1433 valid_len = sizeof(struct virtchnl_txq_info);
1434 break;
1435 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1436 valid_len = sizeof(struct virtchnl_rxq_info);
1437 break;
1438 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1439 valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
1440 if (msglen >= valid_len) {
1441 struct virtchnl_vsi_queue_config_info *vqc =
1442 (struct virtchnl_vsi_queue_config_info *)msg;
1443 valid_len = virtchnl_struct_size(vqc, qpair,
1444 vqc->num_queue_pairs);
1445 if (vqc->num_queue_pairs == 0)
1446 err_msg_format = true;
1447 }
1448 break;
1449 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1450 valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
1451 if (msglen >= valid_len) {
1452 struct virtchnl_irq_map_info *vimi =
1453 (struct virtchnl_irq_map_info *)msg;
1454 valid_len = virtchnl_struct_size(vimi, vecmap,
1455 vimi->num_vectors);
1456 if (vimi->num_vectors == 0)
1457 err_msg_format = true;
1458 }
1459 break;
1460 case VIRTCHNL_OP_ENABLE_QUEUES:
1461 case VIRTCHNL_OP_DISABLE_QUEUES:
1462 valid_len = sizeof(struct virtchnl_queue_select);
1463 break;
1464 case VIRTCHNL_OP_ADD_ETH_ADDR:
1465 case VIRTCHNL_OP_DEL_ETH_ADDR:
1466 valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
1467 if (msglen >= valid_len) {
1468 struct virtchnl_ether_addr_list *veal =
1469 (struct virtchnl_ether_addr_list *)msg;
1470 valid_len = virtchnl_struct_size(veal, list,
1471 veal->num_elements);
1472 if (veal->num_elements == 0)
1473 err_msg_format = true;
1474 }
1475 break;
1476 case VIRTCHNL_OP_ADD_VLAN:
1477 case VIRTCHNL_OP_DEL_VLAN:
1478 valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
1479 if (msglen >= valid_len) {
1480 struct virtchnl_vlan_filter_list *vfl =
1481 (struct virtchnl_vlan_filter_list *)msg;
1482 valid_len = virtchnl_struct_size(vfl, vlan_id,
1483 vfl->num_elements);
1484 if (vfl->num_elements == 0)
1485 err_msg_format = true;
1486 }
1487 break;
1488 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1489 valid_len = sizeof(struct virtchnl_promisc_info);
1490 break;
1491 case VIRTCHNL_OP_GET_STATS:
1492 valid_len = sizeof(struct virtchnl_queue_select);
1493 break;
1494 case VIRTCHNL_OP_RDMA:
1495 /* These messages are opaque to us and will be validated in
1496 * the RDMA client code. We just need to check for nonzero
1497 * length. The firmware will enforce max length restrictions.
1498 */
1499 if (msglen)
1500 valid_len = msglen;
1501 else
1502 err_msg_format = true;
1503 break;
1504 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
1505 break;
1506 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
1507 valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
1508 if (msglen >= valid_len) {
1509 struct virtchnl_rdma_qvlist_info *qv =
1510 (struct virtchnl_rdma_qvlist_info *)msg;
1511
1512 valid_len = virtchnl_struct_size(qv, qv_info,
1513 qv->num_vectors);
1514 }
1515 break;
1516 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1517 valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
1518 if (msglen >= valid_len) {
1519 struct virtchnl_rss_key *vrk =
1520 (struct virtchnl_rss_key *)msg;
1521 valid_len = virtchnl_struct_size(vrk, key,
1522 vrk->key_len);
1523 }
1524 break;
1525 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1526 valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
1527 if (msglen >= valid_len) {
1528 struct virtchnl_rss_lut *vrl =
1529 (struct virtchnl_rss_lut *)msg;
1530 valid_len = virtchnl_struct_size(vrl, lut,
1531 vrl->lut_entries);
1532 }
1533 break;
1534 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
1535 break;
1536 case VIRTCHNL_OP_SET_RSS_HENA:
1537 valid_len = sizeof(struct virtchnl_rss_hena);
1538 break;
1539 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1540 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1541 break;
1542 case VIRTCHNL_OP_REQUEST_QUEUES:
1543 valid_len = sizeof(struct virtchnl_vf_res_request);
1544 break;
1545 case VIRTCHNL_OP_ENABLE_CHANNELS:
1546 valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
1547 if (msglen >= valid_len) {
1548 struct virtchnl_tc_info *vti =
1549 (struct virtchnl_tc_info *)msg;
1550 valid_len = virtchnl_struct_size(vti, list,
1551 vti->num_tc);
1552 if (vti->num_tc == 0)
1553 err_msg_format = true;
1554 }
1555 break;
1556 case VIRTCHNL_OP_DISABLE_CHANNELS:
1557 break;
1558 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
1559 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
1560 valid_len = sizeof(struct virtchnl_filter);
1561 break;
1562 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
1563 break;
1564 case VIRTCHNL_OP_ADD_RSS_CFG:
1565 case VIRTCHNL_OP_DEL_RSS_CFG:
1566 valid_len = sizeof(struct virtchnl_rss_cfg);
1567 break;
1568 case VIRTCHNL_OP_ADD_FDIR_FILTER:
1569 valid_len = sizeof(struct virtchnl_fdir_add);
1570 break;
1571 case VIRTCHNL_OP_DEL_FDIR_FILTER:
1572 valid_len = sizeof(struct virtchnl_fdir_del);
1573 break;
1574 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
1575 break;
1576 case VIRTCHNL_OP_ADD_VLAN_V2:
1577 case VIRTCHNL_OP_DEL_VLAN_V2:
1578 valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
1579 if (msglen >= valid_len) {
1580 struct virtchnl_vlan_filter_list_v2 *vfl =
1581 (struct virtchnl_vlan_filter_list_v2 *)msg;
1582
1583 valid_len = virtchnl_struct_size(vfl, filters,
1584 vfl->num_elements);
1585
1586 if (vfl->num_elements == 0) {
1587 err_msg_format = true;
1588 break;
1589 }
1590 }
1591 break;
1592 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1593 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1594 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1595 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1596 valid_len = sizeof(struct virtchnl_vlan_setting);
1597 break;
1598 /* These are always errors coming from the VF. */
1599 case VIRTCHNL_OP_EVENT:
1600 case VIRTCHNL_OP_UNKNOWN:
1601 default:
1602 return VIRTCHNL_STATUS_ERR_PARAM;
1603 }
1604 /* few more checks */
1605 if (err_msg_format || valid_len != msglen)
1606 return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
1607
1608 return 0;
1609 }
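
/* Illustrative usage sketch, not part of the virtchnl ABI: a PF-side message
 * handler would typically validate before dispatching, for example:
 *
 *	err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode, msg, msglen);
 *	if (err)
 *		send the error back to the VF and stop processing the message
 *
 * where vf->version is a hypothetical copy of the API version previously
 * negotiated with that VF.
 */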
1610 #endif /* _VIRTCHNL_H_ */
1611