/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_LAN_TX_RX_H_
#define _ICE_LAN_TX_RX_H_

union ice_32byte_rx_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
			/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		struct {
			struct {
				__le16 mirroring_status;
				__le16 l2tag1;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				__le32 fd_id; /* Flow Director filter id */
			} hi_dword;
		} qword0;
		struct {
			/* status/error/PTYPE/length */
			__le64 status_error_len;
		} qword1;
		struct {
			__le16 ext_status; /* extended status */
			__le16 rsvd;
			__le16 l2tag2_1;
			__le16 l2tag2_2;
		} qword2;
		struct {
			__le32 reserved;
			__le32 fd_id;
		} qword3;
	} wb; /* writeback */
};
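
/* A usage sketch, not part of the original header: polling the legacy
 * descriptor for completion. This assumes the DD (descriptor done) flag
 * is bit 0 of qword1.status_error_len in the writeback format; the macro
 * and helper names below are hypothetical.
 */
#define ICE_EXAMPLE_RX_DESC_DD_BIT	0x1ULL

static inline bool
ice_example_legacy_rx_desc_done(const union ice_32byte_rx_desc *desc)
{
	u64 qword1 = le64_to_cpu(desc->wb.qword1.status_error_len);

	return (qword1 & ICE_EXAMPLE_RX_DESC_DD_BIT) != 0;
}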

struct ice_rx_ptype_decoded {
	u32 ptype:10;
	u32 known:1;
	u32 outer_ip:1;
	u32 outer_ip_ver:2;
	u32 outer_frag:1;
	u32 tunnel_type:3;
	u32 tunnel_end_prot:2;
	u32 tunnel_end_frag:1;
	u32 inner_prot:4;
	u32 payload_layer:3;
};

enum ice_rx_ptype_outer_ip {
	ICE_RX_PTYPE_OUTER_L2 = 0,
	ICE_RX_PTYPE_OUTER_IP = 1,
};

enum ice_rx_ptype_outer_ip_ver {
	ICE_RX_PTYPE_OUTER_NONE = 0,
	ICE_RX_PTYPE_OUTER_IPV4 = 1,
	ICE_RX_PTYPE_OUTER_IPV6 = 2,
};

enum ice_rx_ptype_outer_fragmented {
	ICE_RX_PTYPE_NOT_FRAG = 0,
	ICE_RX_PTYPE_FRAG = 1,
};

enum ice_rx_ptype_tunnel_type {
	ICE_RX_PTYPE_TUNNEL_NONE = 0,
	ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};

enum ice_rx_ptype_tunnel_end_prot {
	ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
	ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
	ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};

enum ice_rx_ptype_inner_prot {
	ICE_RX_PTYPE_INNER_PROT_NONE = 0,
	ICE_RX_PTYPE_INNER_PROT_UDP = 1,
	ICE_RX_PTYPE_INNER_PROT_TCP = 2,
	ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
	ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
	ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
};

enum ice_rx_ptype_payload_layer {
	ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};

/* RX Flex Descriptor
 * This descriptor is used instead of the legacy version descriptor when
 * ice_rlan_ctx.adv_desc is set
 */
union ice_32b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
			/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile id */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11:11] */
						/* ff1/ext=[15:12] */

		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;

		/* Qword 2 */
		__le16 status_error1;
		u8 flex_flags2;
		u8 time_stamp_low;
		__le16 l2tag2_1st;
		__le16 l2tag2_2nd;

		/* Qword 3 */
		__le16 flex_meta2;
		__le16 flex_meta3;
		union {
			struct {
				__le16 flex_meta4;
				__le16 flex_meta5;
			} flex;
			__le32 ts_high;
		} flex_ts;
	} wb; /* writeback */
};

/* Rx Flex Descriptor NIC Profile
 * This descriptor corresponds to RxDID 2 which contains
 * metadata fields for RSS, flow id and timestamp info
 */
struct ice_32b_rx_flex_desc_nic {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;

	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 rss_hash;

	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;

	/* Qword 3 */
	__le32 flow_id;
	union {
		struct {
			__le16 vlan_id;
			__le16 flow_id_ipv6;
		} flex;
		__le32 ts_high;
	} flex_ts;
};

/* Receive Flex Descriptor profile IDs: There are a total
 * of 64 profiles where profile IDs 0/1 are for legacy; and
 * profiles 2-63 are flex profiles that can be programmed
 * with a specific metadata (profile 7 reserved for HW)
 */
enum ice_rxdid {
	ICE_RXDID_START = 0,
	ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
	ICE_RXDID_LEGACY_1,
	ICE_RXDID_FLX_START,
	ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
	ICE_RXDID_FLX_LAST = 63,
	ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
};

/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01

/* Receive Descriptor MDID values */
#define ICE_RX_MDID_FLOW_ID_LOWER	5
#define ICE_RX_MDID_FLOW_ID_HIGH	6
#define ICE_RX_MDID_HASH_LOW		56
#define ICE_RX_MDID_HASH_HIGH		57

/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
	ICE_RXFLG_PKT_DSI = 0,
	ICE_RXFLG_EVLAN_x8100 = 15,
	ICE_RXFLG_EVLAN_x9100,
	ICE_RXFLG_VLAN_x8100,
	ICE_RXFLG_TNL_MAC = 22,
	ICE_RXFLG_TNL_VLAN,
	ICE_RXFLG_PKT_FRG,
	ICE_RXFLG_FIN = 32,
	ICE_RXFLG_SYN,
	ICE_RXFLG_RST,
	ICE_RXFLG_TNL0 = 38,
	ICE_RXFLG_TNL1,
	ICE_RXFLG_TNL2,
	ICE_RXFLG_UDP_GRE,
	ICE_RXFLG_RSVD = 63
};

/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
#define ICE_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */

/* for ice_32byte_rx_flex_desc.pkt_length member */
#define ICE_RX_FLX_DESC_PKT_LEN_M	(0x3FFF) /* 14-bits */
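
/* Illustrative helpers, not in the original header: extracting the packet
 * type and packet length from a flex descriptor writeback with the masks
 * defined above. Helper names are hypothetical; le16_to_cpu() comes from
 * the kernel byteorder helpers.
 */
static inline u16
ice_example_flex_rx_ptype(const union ice_32b_rx_flex_desc *desc)
{
	return le16_to_cpu(desc->wb.ptype_flex_flags0) & ICE_RX_FLEX_DESC_PTYPE_M;
}

static inline u16
ice_example_flex_rx_pkt_len(const union ice_32b_rx_flex_desc *desc)
{
	return le16_to_cpu(desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M;
}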

enum ice_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
	ICE_RX_FLEX_DESC_STATUS0_EOF_S,
	ICE_RX_FLEX_DESC_STATUS0_HBO_S,
	ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
	ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	ICE_RX_FLEX_DESC_STATUS0_RXE_S,
	ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
	ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
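
/* A hedged sketch, not part of the original header: using the
 * status_error0 bit offsets above against the RxDID 2 (NIC) profile to
 * decide whether the RSS hash field may be consumed. The helper name is
 * hypothetical; BIT() comes from linux/bits.h.
 */
static inline bool
ice_example_flex_nic_rss_hash(const struct ice_32b_rx_flex_desc_nic *desc,
			      u32 *hash)
{
	u16 status0 = le16_to_cpu(desc->status_error0);

	if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)))
		return false;	/* descriptor not written back yet */
	if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S)))
		return false;	/* no RSS hash reported for this packet */

	*hash = le32_to_cpu(desc->rss_hash);
	return true;
}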

#define ICE_RXQ_CTX_SIZE_DWORDS		8
#define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))

/* RLAN Rx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_rlan_ctx {
	u16 head;
	u16 cpuid; /* bigger than needed, see above for reason */
#define ICE_RLAN_BASE_S 7
	u64 base;
	u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
	u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
	u16 hbuf; /* bigger than needed, see above for reason */
	u8 dtype;
	u8 dsize;
	u8 crcstrip;
	u8 l2tsel;
	u8 hsplit_0;
	u8 hsplit_1;
	u8 showiv;
	u32 rxmax; /* bigger than needed, see above for reason */
	u8 tphrdesc_ena;
	u8 tphwdesc_ena;
	u8 tphdata_ena;
	u8 tphhead_ena;
	u16 lrxqthresh; /* bigger than needed, see above for reason */
};

struct ice_ctx_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) {	\
	.offset = offsetof(struct _struct, _ele),	\
	.size_of = FIELD_SIZEOF(struct _struct, _ele),	\
	.width = _width,				\
	.lsb = _lsb,					\
}
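
/* Sketch of how ICE_CTX_STORE() is meant to be used, not part of the
 * original header: an array of ice_ctx_ele entries describing where each
 * software field lands in the packed hardware queue context. The widths
 * and LSB positions below are illustrative and should be taken from the
 * hardware specification, not from this example.
 */
static const struct ice_ctx_ele ice_example_rlan_ctx_info[] = {
	/*		      Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,	13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,	8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,	57,	32),
	{ 0 }
};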

/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
	ICE_RLAN_RX_HSPLIT_0_NO_SPLIT		= 0,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_L2		= 1,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_IP		= 2,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP	= 4,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP		= 8,
};

/* for hsplit_1 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_1 {
	ICE_RLAN_RX_HSPLIT_1_NO_SPLIT		= 0,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_L2		= 1,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS	= 2,
};

/* TX Descriptor */
struct ice_tx_desc {
	__le64 buf_addr; /* Address of descriptor's data buf */
	__le64 cmd_type_offset_bsz;
};

enum ice_tx_desc_dtype_value {
	ICE_TX_DESC_DTYPE_DATA		= 0x0,
	ICE_TX_DESC_DTYPE_CTX		= 0x1,
	/* DESC_DONE - HW has completed write-back of descriptor */
	ICE_TX_DESC_DTYPE_DESC_DONE	= 0xF,
};

#define ICE_TXD_QW1_CMD_S	4
#define ICE_TXD_QW1_CMD_M	(0xFFFUL << ICE_TXD_QW1_CMD_S)

enum ice_tx_desc_cmd_bits {
	ICE_TX_DESC_CMD_EOP		= 0x0001,
	ICE_TX_DESC_CMD_RS		= 0x0002,
	ICE_TX_DESC_CMD_IL2TAG1		= 0x0008,
	ICE_TX_DESC_CMD_IIPT_IPV6	= 0x0020, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4	= 0x0040, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4_CSUM	= 0x0060, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_TCP	= 0x0100, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_UDP	= 0x0300, /* 2 BITS */
};

#define ICE_TXD_QW1_OFFSET_S	16
#define ICE_TXD_QW1_OFFSET_M	(0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)

enum ice_tx_desc_len_fields {
	/* Note: These are predefined bit offsets */
	ICE_TX_DESC_LEN_MACLEN_S	= 0, /* 7 BITS */
	ICE_TX_DESC_LEN_IPLEN_S		= 7, /* 7 BITS */
	ICE_TX_DESC_LEN_L4_LEN_S	= 14 /* 4 BITS */
};

#define ICE_TXD_QW1_MACLEN_M	(0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
#define ICE_TXD_QW1_IPLEN_M	(0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
#define ICE_TXD_QW1_L4LEN_M	(0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)

/* Tx descriptor field limits in bytes */
#define ICE_TXD_MACLEN_MAX	((ICE_TXD_QW1_MACLEN_M >> \
				  ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
#define ICE_TXD_IPLEN_MAX	((ICE_TXD_QW1_IPLEN_M >> \
				  ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
#define ICE_TXD_L4LEN_MAX	((ICE_TXD_QW1_L4LEN_M >> \
				  ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)

#define ICE_TXD_QW1_TX_BUF_SZ_S	34
#define ICE_TXD_QW1_L2TAG1_S	48
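
/* A minimal sketch, not in the original header, of how the Tx data
 * descriptor's cmd_type_offset_bsz quadword is typically assembled from
 * the shifts above: dtype in the low four bits, then command, offsets,
 * buffer size and L2 tag 1. The helper name is hypothetical.
 */
static inline __le64
ice_example_build_tx_qw1(u64 td_cmd, u64 td_offset, u64 size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   (size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}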

/* Context descriptors */
struct ice_tx_ctx_desc {
	__le32 tunneling_params;
	__le16 l2tag2;
	__le16 rsvd;
	__le64 qw1;
};

#define ICE_TXD_CTX_QW1_CMD_S	4
#define ICE_TXD_CTX_QW1_CMD_M	(0x7FUL << ICE_TXD_CTX_QW1_CMD_S)

#define ICE_TXD_CTX_QW1_TSO_LEN_S	30
#define ICE_TXD_CTX_QW1_TSO_LEN_M	\
			(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)

#define ICE_TXD_CTX_QW1_MSS_S	50

enum ice_tx_ctx_desc_cmd_bits {
	ICE_TX_CTX_DESC_TSO		= 0x01,
	ICE_TX_CTX_DESC_TSYN		= 0x02,
	ICE_TX_CTX_DESC_IL2TAG2		= 0x04,
	ICE_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
	ICE_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
	ICE_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
	ICE_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
	ICE_TX_CTX_DESC_SWTCH_VSI	= 0x30,
	ICE_TX_CTX_DESC_RESERVED	= 0x40
};
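
/* Sketch only, not part of the original header: packing a TSO context
 * descriptor's qw1 from the command bits, total TSO payload length and
 * MSS using the shifts defined above. The helper name is hypothetical.
 */
static inline __le64
ice_example_build_tso_ctx_qw1(u64 cd_cmd, u64 tso_len, u64 mss)
{
	u64 qw1 = ICE_TX_DESC_DTYPE_CTX;

	qw1 |= (cd_cmd << ICE_TXD_CTX_QW1_CMD_S) & ICE_TXD_CTX_QW1_CMD_M;
	qw1 |= (tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) & ICE_TXD_CTX_QW1_TSO_LEN_M;
	qw1 |= mss << ICE_TXD_CTX_QW1_MSS_S;

	return cpu_to_le64(qw1);
}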

#define ICE_LAN_TXQ_MAX_QGRPS	127
#define ICE_LAN_TXQ_MAX_QDIS	1023

/* Tx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S	7
	u64 base;		/* base is defined in 128-byte units */
	u8 port_num;
	u16 cgd_num;		/* bigger than needed, see above for reason */
	u8 pf_num;
	u16 vmvf_num;
	u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ	1
#define ICE_TLAN_CTX_VMVF_TYPE_PF	2
	u16 src_vsi;
	u8 tsyn_ena;
	u8 alt_vlan;
	u16 cpuid;		/* bigger than needed, see above for reason */
	u8 wb_mode;
	u8 tphrd_desc;
	u8 tphrd;
	u8 tphwr_desc;
	u16 cmpq_id;
	u16 qnum_in_func;
	u8 itr_notification_mode;
	u8 adjust_prof_id;
	u32 qlen;		/* bigger than needed, see above for reason */
	u8 quanta_prof_idx;
	u8 tso_ena;
	u16 tso_qnum;
	u8 legacy_int;
	u8 drop_ena;
	u8 cache_prof_idx;
	u8 pkt_shaper_prof_idx;
	u8 int_q_state;		/* width not needed - internal do not write */
};

/* macro to make the table lines short */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		ICE_RX_PTYPE_##OUTER_FRAG, \
		ICE_RX_PTYPE_TUNNEL_##T, \
		ICE_RX_PTYPE_TUNNEL_END_##TE, \
		ICE_RX_PTYPE_##TEF, \
		ICE_RX_PTYPE_INNER_PROT_##I, \
		ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF	ICE_RX_PTYPE_NOT_FRAG

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
	/* L2 Packet types */
	ICE_PTT_UNUSED_ENTRY(0),
	ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
};

static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
	return ice_ptype_lkup[ptype];
}
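
/* Example usage, not in the original header: decoding a hardware ptype and
 * checking whether it was recognized and carries an inner TCP segment. The
 * helper name is hypothetical, and the caller is assumed to pass a ptype
 * covered by ice_ptype_lkup[].
 */
static inline bool ice_example_ptype_is_tcp(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	return decoded.known &&
	       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP;
}
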
#endif /* _ICE_LAN_TX_RX_H_ */