1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3
4 #include "ice_common.h"
5 #include "ice_flow.h"
6
7 /* Describe properties of a protocol header field */
8 struct ice_flow_field_info {
9 enum ice_flow_seg_hdr hdr;
10 s16 off; /* Offset from start of a protocol header, in bits */
11 u16 size; /* Size of the field in bits */
12 u16 mask; /* 16-bit mask for field */
13 };
14
15 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
16 .hdr = _hdr, \
17 .off = (_offset_bytes) * BITS_PER_BYTE, \
18 .size = (_size_bytes) * BITS_PER_BYTE, \
19 .mask = 0, \
20 }
21
22 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
23 .hdr = _hdr, \
24 .off = (_offset_bytes) * BITS_PER_BYTE, \
25 .size = (_size_bytes) * BITS_PER_BYTE, \
26 .mask = _mask, \
27 }
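/* For example, ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16))
 * describes a 16-bit field starting at bit offset 0 of the TCP header with
 * no mask applied, i.e. { .hdr = ICE_FLOW_SEG_HDR_TCP, .off = 0, .size = 16,
 * .mask = 0 }.
 */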
28
29 /* Table containing properties of supported protocol header fields */
30 static const
31 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
32 /* Ether */
33 /* ICE_FLOW_FIELD_IDX_ETH_DA */
34 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
35 /* ICE_FLOW_FIELD_IDX_ETH_SA */
36 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
37 /* ICE_FLOW_FIELD_IDX_S_VLAN */
38 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
39 /* ICE_FLOW_FIELD_IDX_C_VLAN */
40 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
41 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
42 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
43 /* IPv4 / IPv6 */
44 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
45 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc),
46 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
47 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0),
48 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
49 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00),
50 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
51 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff),
52 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
53 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff),
54 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
55 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00),
56 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
57 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
58 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
59 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
60 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
62 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
64 /* Transport */
65 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
66 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
67 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
68 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
69 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
70 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
71 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
72 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
73 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
74 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
75 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
76 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
77 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
78 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
79 /* ARP */
80 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
81 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
82 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
83 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
84 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
85 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
86 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
87 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
88 /* ICE_FLOW_FIELD_IDX_ARP_OP */
89 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
90 /* ICMP */
91 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
93 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
95 /* GRE */
96 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
97 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
98 sizeof_field(struct gre_full_hdr, key)),
99 /* GTP */
100 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)),
102 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
103 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)),
104 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
105 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)),
106 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
107 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
108 0x3f00),
109 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
111 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
112 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
113 /* PPPoE */
114 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
116 /* PFCP */
117 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
118 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)),
119 /* L2TPv3 */
120 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)),
122 /* ESP */
123 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
124 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)),
125 /* AH */
126 /* ICE_FLOW_FIELD_IDX_AH_SPI */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
128 /* NAT_T_ESP */
129 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
130 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
131 };
132
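/* Each ice_ptypes_* table below is a packet type (PTYPE) bitmap: a set bit N
 * means PTYPE N carries the header in question. ice_flow_proc_seg_hdrs()
 * starts from an all-ones working set and ANDs (or ANDNOTs) the relevant
 * tables into it to narrow down the PTYPEs a flow profile applies to.
 */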
133 /* Bitmaps indicating relevant packet types for a particular protocol header
134 *
135 * Packet types for packets with an Outer/First/Single MAC header
136 */
137 static const u32 ice_ptypes_mac_ofos[] = {
138 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
139 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
140 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
141 0x00000000, 0x00000000, 0x00000000, 0x00000000,
142 0x00000000, 0x00000000, 0x00000000, 0x00000000,
143 0x00000000, 0x00000000, 0x00000000, 0x00000000,
144 0x00000000, 0x00000000, 0x00000000, 0x00000000,
145 0x00000000, 0x00000000, 0x00000000, 0x00000000,
146 };
147
148 /* Packet types for packets with an Innermost/Last MAC VLAN header */
149 static const u32 ice_ptypes_macvlan_il[] = {
150 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
151 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
152 0x00000000, 0x00000000, 0x00000000, 0x00000000,
153 0x00000000, 0x00000000, 0x00000000, 0x00000000,
154 0x00000000, 0x00000000, 0x00000000, 0x00000000,
155 0x00000000, 0x00000000, 0x00000000, 0x00000000,
156 0x00000000, 0x00000000, 0x00000000, 0x00000000,
157 0x00000000, 0x00000000, 0x00000000, 0x00000000,
158 };
159
160 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
161 * include IPv4 other PTYPEs
162 */
163 static const u32 ice_ptypes_ipv4_ofos[] = {
164 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
165 0x00000000, 0x00000155, 0x00000000, 0x00000000,
166 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
167 0x00000000, 0x00000000, 0x00000000, 0x00000000,
168 0x00000000, 0x00000000, 0x00000000, 0x00000000,
169 0x00000000, 0x00000000, 0x00000000, 0x00000000,
170 0x00000000, 0x00000000, 0x00000000, 0x00000000,
171 0x00000000, 0x00000000, 0x00000000, 0x00000000,
172 };
173
174 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
175 * IPv4 other PTYPEs
176 */
177 static const u32 ice_ptypes_ipv4_ofos_all[] = {
178 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
179 0x00000000, 0x00000155, 0x00000000, 0x00000000,
180 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
182 0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 0x00000000, 0x00000000, 0x00000000, 0x00000000,
184 0x00000000, 0x00000000, 0x00000000, 0x00000000,
185 0x00000000, 0x00000000, 0x00000000, 0x00000000,
186 };
187
188 /* Packet types for packets with an Innermost/Last IPv4 header */
189 static const u32 ice_ptypes_ipv4_il[] = {
190 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
191 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
192 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
197 0x00000000, 0x00000000, 0x00000000, 0x00000000,
198 };
199
200 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
201 * include IPv6 other PTYPEs
202 */
203 static const u32 ice_ptypes_ipv6_ofos[] = {
204 0x00000000, 0x00000000, 0x77000000, 0x10002000,
205 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
206 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 0x00000000, 0x00000000, 0x00000000, 0x00000000,
210 0x00000000, 0x00000000, 0x00000000, 0x00000000,
211 0x00000000, 0x00000000, 0x00000000, 0x00000000,
212 };
213
214 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
215 * IPv6 other PTYPEs
216 */
217 static const u32 ice_ptypes_ipv6_ofos_all[] = {
218 0x00000000, 0x00000000, 0x77000000, 0x10002000,
219 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
220 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
221 0x00000000, 0x00000000, 0x00000000, 0x00000000,
222 0x00000000, 0x00000000, 0x00000000, 0x00000000,
223 0x00000000, 0x00000000, 0x00000000, 0x00000000,
224 0x00000000, 0x00000000, 0x00000000, 0x00000000,
225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
226 };
227
228 /* Packet types for packets with an Innermost/Last IPv6 header */
229 static const u32 ice_ptypes_ipv6_il[] = {
230 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
231 0x00000770, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
233 0x00000000, 0x00000000, 0x00000000, 0x00000000,
234 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 0x00000000, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 };
239
240 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
241 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
242 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 0x00000000, 0x00000000, 0x00000000, 0x00000000,
250 };
251
252 /* Packet types for packets with an Outermost/First ARP header */
253 static const u32 ice_ptypes_arp_of[] = {
254 0x00000800, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 0x00000000, 0x00000000, 0x00000000, 0x00000000,
260 0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 0x00000000, 0x00000000, 0x00000000, 0x00000000,
262 };
263
264 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
265 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
266 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
267 0x00000008, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 };
275
276 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
277 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
278 0x00000000, 0x00000000, 0x43000000, 0x10002000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 };
287
288 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
289 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
290 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
291 0x00000430, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 };
299
300 /* UDP Packet types for non-tunneled packets or tunneled
301 * packets with inner UDP.
302 */
303 static const u32 ice_ptypes_udp_il[] = {
304 0x81000000, 0x20204040, 0x04000010, 0x80810102,
305 0x00000040, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00410000, 0x90842000, 0x00000007,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 };
313
314 /* Packet types for packets with an Innermost/Last TCP header */
315 static const u32 ice_ptypes_tcp_il[] = {
316 0x04000000, 0x80810102, 0x10000040, 0x02040408,
317 0x00000102, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00820000, 0x21084000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 };
325
326 /* Packet types for packets with an Innermost/Last SCTP header */
327 static const u32 ice_ptypes_sctp_il[] = {
328 0x08000000, 0x01020204, 0x20000081, 0x04080810,
329 0x00000204, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x01040000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 };
337
338 /* Packet types for packets with an Outermost/First ICMP header */
339 static const u32 ice_ptypes_icmp_of[] = {
340 0x10000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 };
349
350 /* Packet types for packets with an Innermost/Last ICMP header */
351 static const u32 ice_ptypes_icmp_il[] = {
352 0x00000000, 0x02040408, 0x40000102, 0x08101020,
353 0x00000408, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x42108000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 };
361
362 /* Packet types for packets with an Outermost/First GRE header */
363 static const u32 ice_ptypes_gre_of[] = {
364 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
365 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
368 0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 };
373
374 /* Packet types for packets with an Innermost/Last MAC header */
375 static const u32 ice_ptypes_mac_il[] = {
376 0x00000000, 0x00000000, 0x00000000, 0x00000000,
377 0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00000000, 0x00000000, 0x00000000,
380 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 };
385
386 /* Packet types for GTPC */
387 static const u32 ice_ptypes_gtpc[] = {
388 0x00000000, 0x00000000, 0x00000000, 0x00000000,
389 0x00000000, 0x00000000, 0x00000000, 0x00000000,
390 0x00000000, 0x00000000, 0x00000180, 0x00000000,
391 0x00000000, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 0x00000000, 0x00000000, 0x00000000, 0x00000000,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 };
397
398 /* Packet types for GTPC with TEID */
399 static const u32 ice_ptypes_gtpc_tid[] = {
400 0x00000000, 0x00000000, 0x00000000, 0x00000000,
401 0x00000000, 0x00000000, 0x00000000, 0x00000000,
402 0x00000000, 0x00000000, 0x00000060, 0x00000000,
403 0x00000000, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x00000000, 0x00000000, 0x00000000,
405 0x00000000, 0x00000000, 0x00000000, 0x00000000,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 };
409
410 /* PTYPE attributes for GTPU packets with an extension header */
411 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
412 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
413 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
414 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
415 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
416 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
417 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
418 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
419 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
420 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
421 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
422 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
423 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
424 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
425 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
426 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
427 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
428 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
429 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
430 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
431 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
432 };
433
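/* PTYPE attributes for downlink GTPU packets */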
434 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
435 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
436 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
437 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
438 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
439 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
440 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
441 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
442 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
443 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
444 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
445 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
446 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
447 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
448 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
449 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
450 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
451 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
452 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
453 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
454 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
455 };
456
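/* PTYPE attributes for uplink GTPU packets */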
457 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
458 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
459 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
460 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
461 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
462 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
463 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
464 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
465 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
466 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
467 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
468 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
469 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
470 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
471 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
472 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
473 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
474 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
475 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
476 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
477 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
478 };
479
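/* Packet types for GTPU */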
480 static const u32 ice_ptypes_gtpu[] = {
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
485 0x00000000, 0x00000000, 0x00000000, 0x00000000,
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 };
490
491 /* Packet types for PPPoE */
492 static const u32 ice_ptypes_pppoe[] = {
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
497 0x00000000, 0x00000000, 0x00000000, 0x00000000,
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000000, 0x00000000,
501 };
502
503 /* Packet types for packets with PFCP NODE header */
504 static const u32 ice_ptypes_pfcp_node[] = {
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 0x00000000, 0x00000000, 0x00000000, 0x00000000,
507 0x00000000, 0x00000000, 0x80000000, 0x00000002,
508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
509 0x00000000, 0x00000000, 0x00000000, 0x00000000,
510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
512 0x00000000, 0x00000000, 0x00000000, 0x00000000,
513 };
514
515 /* Packet types for packets with PFCP SESSION header */
516 static const u32 ice_ptypes_pfcp_session[] = {
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 0x00000000, 0x00000000, 0x00000000, 0x00000005,
520 0x00000000, 0x00000000, 0x00000000, 0x00000000,
521 0x00000000, 0x00000000, 0x00000000, 0x00000000,
522 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 0x00000000, 0x00000000, 0x00000000, 0x00000000,
524 0x00000000, 0x00000000, 0x00000000, 0x00000000,
525 };
526
527 /* Packet types for L2TPv3 */
528 static const u32 ice_ptypes_l2tpv3[] = {
529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
530 0x00000000, 0x00000000, 0x00000000, 0x00000000,
531 0x00000000, 0x00000000, 0x00000000, 0x00000300,
532 0x00000000, 0x00000000, 0x00000000, 0x00000000,
533 0x00000000, 0x00000000, 0x00000000, 0x00000000,
534 0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 0x00000000, 0x00000000, 0x00000000, 0x00000000,
536 0x00000000, 0x00000000, 0x00000000, 0x00000000,
537 };
538
539 /* Packet types for ESP */
540 static const u32 ice_ptypes_esp[] = {
541 0x00000000, 0x00000000, 0x00000000, 0x00000000,
542 0x00000000, 0x00000003, 0x00000000, 0x00000000,
543 0x00000000, 0x00000000, 0x00000000, 0x00000000,
544 0x00000000, 0x00000000, 0x00000000, 0x00000000,
545 0x00000000, 0x00000000, 0x00000000, 0x00000000,
546 0x00000000, 0x00000000, 0x00000000, 0x00000000,
547 0x00000000, 0x00000000, 0x00000000, 0x00000000,
548 0x00000000, 0x00000000, 0x00000000, 0x00000000,
549 };
550
551 /* Packet types for AH */
552 static const u32 ice_ptypes_ah[] = {
553 0x00000000, 0x00000000, 0x00000000, 0x00000000,
554 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
555 0x00000000, 0x00000000, 0x00000000, 0x00000000,
556 0x00000000, 0x00000000, 0x00000000, 0x00000000,
557 0x00000000, 0x00000000, 0x00000000, 0x00000000,
558 0x00000000, 0x00000000, 0x00000000, 0x00000000,
559 0x00000000, 0x00000000, 0x00000000, 0x00000000,
560 0x00000000, 0x00000000, 0x00000000, 0x00000000,
561 };
562
563 /* Packet types for packets with NAT_T ESP header */
564 static const u32 ice_ptypes_nat_t_esp[] = {
565 0x00000000, 0x00000000, 0x00000000, 0x00000000,
566 0x00000000, 0x00000030, 0x00000000, 0x00000000,
567 0x00000000, 0x00000000, 0x00000000, 0x00000000,
568 0x00000000, 0x00000000, 0x00000000, 0x00000000,
569 0x00000000, 0x00000000, 0x00000000, 0x00000000,
570 0x00000000, 0x00000000, 0x00000000, 0x00000000,
571 0x00000000, 0x00000000, 0x00000000, 0x00000000,
572 0x00000000, 0x00000000, 0x00000000, 0x00000000,
573 };
574
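/* Packet types for non-IP packets with an Outer/First/Single MAC header */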
575 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
576 0x00000846, 0x00000000, 0x00000000, 0x00000000,
577 0x00000000, 0x00000000, 0x00000000, 0x00000000,
578 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
579 0x00000000, 0x00000000, 0x00000000, 0x00000000,
580 0x00000000, 0x00000000, 0x00000000, 0x00000000,
581 0x00000000, 0x00000000, 0x00000000, 0x00000000,
582 0x00000000, 0x00000000, 0x00000000, 0x00000000,
583 0x00000000, 0x00000000, 0x00000000, 0x00000000,
584 };
585
586 /* Manage parameters and info. used during the creation of a flow profile */
587 struct ice_flow_prof_params {
588 enum ice_block blk;
589 u16 entry_length; /* # of bytes formatted entry will require */
590 u8 es_cnt;
591 struct ice_flow_prof *prof;
592
593 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
594 * This will give us the direction flags.
595 */
596 struct ice_fv_word es[ICE_MAX_FV_WORDS];
597 /* attributes can be used to add attributes to a particular PTYPE */
598 const struct ice_ptype_attributes *attr;
599 u16 attr_cnt;
600
601 u16 mask[ICE_MAX_FV_WORDS];
602 DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
603 };
604
605 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
606 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
607 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
608 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
609 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
610 ICE_FLOW_SEG_HDR_NAT_T_ESP)
611
612 #define ICE_FLOW_SEG_HDRS_L2_MASK \
613 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
614 #define ICE_FLOW_SEG_HDRS_L3_MASK \
615 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
616 #define ICE_FLOW_SEG_HDRS_L4_MASK \
617 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
618 ICE_FLOW_SEG_HDR_SCTP)
619 /* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
620 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
621 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
622
623 /**
624 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
625 * @segs: array of one or more packet segments that describe the flow
626 * @segs_cnt: number of packet segments provided
627 */
628 static enum ice_status
629 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
630 {
631 u8 i;
632
633 for (i = 0; i < segs_cnt; i++) {
634 /* Multiple L3 headers */
635 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
636 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
637 return ICE_ERR_PARAM;
638
639 /* Multiple L4 headers */
640 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
641 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
642 return ICE_ERR_PARAM;
643 }
644
645 return 0;
646 }
647
648 /* Sizes of fixed known protocol headers without header options */
649 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
650 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
651 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
652 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
653 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
654 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
655 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
656 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
657 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
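/* For example, ice_flow_calc_seg_sz() below sizes a segment with VLAN + IPv6
 * + UDP headers as 16 + 40 + 8 = 64 bytes of fixed header data.
 */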
658
659 /**
660 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
661 * @params: information about the flow to be processed
662 * @seg: index of packet segment whose header size is to be determined
663 */
664 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
665 {
666 u16 sz;
667
668 /* L2 headers */
669 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
670 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
671
672 /* L3 headers */
673 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
674 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
675 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
676 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
677 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
678 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
679 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
680 /* An L3 header is required if L4 is specified */
681 return 0;
682
683 /* L4 headers */
684 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
685 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
686 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
687 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
688 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
689 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
690 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
691 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
692
693 return sz;
694 }
695
696 /**
697 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
698 * @params: information about the flow to be processed
699 *
700 * This function identifies the packet types associated with the protocol
701 * headers being present in packet segments of the specified flow profile.
702 */
703 static enum ice_status
704 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
705 {
706 struct ice_flow_prof *prof;
707 u8 i;
708
709 memset(params->ptypes, 0xff, sizeof(params->ptypes));
710
711 prof = params->prof;
712
713 for (i = 0; i < params->prof->segs_cnt; i++) {
714 const unsigned long *src;
715 u32 hdrs;
716
717 hdrs = prof->segs[i].hdrs;
718
719 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
720 src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
721 (const unsigned long *)ice_ptypes_mac_il;
722 bitmap_and(params->ptypes, params->ptypes, src,
723 ICE_FLOW_PTYPE_MAX);
724 }
725
726 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
727 src = (const unsigned long *)ice_ptypes_macvlan_il;
728 bitmap_and(params->ptypes, params->ptypes, src,
729 ICE_FLOW_PTYPE_MAX);
730 }
731
732 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
733 bitmap_and(params->ptypes, params->ptypes,
734 (const unsigned long *)ice_ptypes_arp_of,
735 ICE_FLOW_PTYPE_MAX);
736 }
737
738 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
739 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
740 src = i ? (const unsigned long *)ice_ptypes_ipv4_il :
741 (const unsigned long *)ice_ptypes_ipv4_ofos_all;
742 bitmap_and(params->ptypes, params->ptypes, src,
743 ICE_FLOW_PTYPE_MAX);
744 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
745 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
746 src = i ? (const unsigned long *)ice_ptypes_ipv6_il :
747 (const unsigned long *)ice_ptypes_ipv6_ofos_all;
748 bitmap_and(params->ptypes, params->ptypes, src,
749 ICE_FLOW_PTYPE_MAX);
750 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
751 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
752 src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos_no_l4 :
753 (const unsigned long *)ice_ptypes_ipv4_il_no_l4;
754 bitmap_and(params->ptypes, params->ptypes, src,
755 ICE_FLOW_PTYPE_MAX);
756 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
757 src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
758 (const unsigned long *)ice_ptypes_ipv4_il;
759 bitmap_and(params->ptypes, params->ptypes, src,
760 ICE_FLOW_PTYPE_MAX);
761 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
762 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
763 src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos_no_l4 :
764 (const unsigned long *)ice_ptypes_ipv6_il_no_l4;
765 bitmap_and(params->ptypes, params->ptypes, src,
766 ICE_FLOW_PTYPE_MAX);
767 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
768 src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
769 (const unsigned long *)ice_ptypes_ipv6_il;
770 bitmap_and(params->ptypes, params->ptypes, src,
771 ICE_FLOW_PTYPE_MAX);
772 }
773
774 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
775 src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos;
776 bitmap_and(params->ptypes, params->ptypes, src,
777 ICE_FLOW_PTYPE_MAX);
778 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
779 src = (const unsigned long *)ice_ptypes_pppoe;
780 bitmap_and(params->ptypes, params->ptypes, src,
781 ICE_FLOW_PTYPE_MAX);
782 } else {
783 src = (const unsigned long *)ice_ptypes_pppoe;
784 bitmap_andnot(params->ptypes, params->ptypes, src,
785 ICE_FLOW_PTYPE_MAX);
786 }
787
788 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
789 src = (const unsigned long *)ice_ptypes_udp_il;
790 bitmap_and(params->ptypes, params->ptypes, src,
791 ICE_FLOW_PTYPE_MAX);
792 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
793 bitmap_and(params->ptypes, params->ptypes,
794 (const unsigned long *)ice_ptypes_tcp_il,
795 ICE_FLOW_PTYPE_MAX);
796 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
797 src = (const unsigned long *)ice_ptypes_sctp_il;
798 bitmap_and(params->ptypes, params->ptypes, src,
799 ICE_FLOW_PTYPE_MAX);
800 }
801
802 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
803 src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
804 (const unsigned long *)ice_ptypes_icmp_il;
805 bitmap_and(params->ptypes, params->ptypes, src,
806 ICE_FLOW_PTYPE_MAX);
807 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
808 if (!i) {
809 src = (const unsigned long *)ice_ptypes_gre_of;
810 bitmap_and(params->ptypes, params->ptypes,
811 src, ICE_FLOW_PTYPE_MAX);
812 }
813 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
814 src = (const unsigned long *)ice_ptypes_gtpc;
815 bitmap_and(params->ptypes, params->ptypes, src,
816 ICE_FLOW_PTYPE_MAX);
817 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
818 src = (const unsigned long *)ice_ptypes_gtpc_tid;
819 bitmap_and(params->ptypes, params->ptypes, src,
820 ICE_FLOW_PTYPE_MAX);
821 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
822 src = (const unsigned long *)ice_ptypes_gtpu;
823 bitmap_and(params->ptypes, params->ptypes, src,
824 ICE_FLOW_PTYPE_MAX);
825
826 /* Attributes for GTP packet with downlink */
827 params->attr = ice_attr_gtpu_down;
828 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
829 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
830 src = (const unsigned long *)ice_ptypes_gtpu;
831 bitmap_and(params->ptypes, params->ptypes, src,
832 ICE_FLOW_PTYPE_MAX);
833
834 /* Attributes for GTP packet with uplink */
835 params->attr = ice_attr_gtpu_up;
836 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
837 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
838 src = (const unsigned long *)ice_ptypes_gtpu;
839 bitmap_and(params->ptypes, params->ptypes, src,
840 ICE_FLOW_PTYPE_MAX);
841
842 /* Attributes for GTP packet with Extension Header */
843 params->attr = ice_attr_gtpu_eh;
844 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
845 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
846 src = (const unsigned long *)ice_ptypes_gtpu;
847 bitmap_and(params->ptypes, params->ptypes, src,
848 ICE_FLOW_PTYPE_MAX);
849 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
850 src = (const unsigned long *)ice_ptypes_l2tpv3;
851 bitmap_and(params->ptypes, params->ptypes, src,
852 ICE_FLOW_PTYPE_MAX);
853 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
854 src = (const unsigned long *)ice_ptypes_esp;
855 bitmap_and(params->ptypes, params->ptypes, src,
856 ICE_FLOW_PTYPE_MAX);
857 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
858 src = (const unsigned long *)ice_ptypes_ah;
859 bitmap_and(params->ptypes, params->ptypes, src,
860 ICE_FLOW_PTYPE_MAX);
861 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
862 src = (const unsigned long *)ice_ptypes_nat_t_esp;
863 bitmap_and(params->ptypes, params->ptypes, src,
864 ICE_FLOW_PTYPE_MAX);
865 }
866
867 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
868 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
869 src = (const unsigned long *)ice_ptypes_pfcp_node;
870 else
871 src = (const unsigned long *)ice_ptypes_pfcp_session;
872
873 bitmap_and(params->ptypes, params->ptypes, src,
874 ICE_FLOW_PTYPE_MAX);
875 } else {
876 src = (const unsigned long *)ice_ptypes_pfcp_node;
877 bitmap_andnot(params->ptypes, params->ptypes, src,
878 ICE_FLOW_PTYPE_MAX);
879
880 src = (const unsigned long *)ice_ptypes_pfcp_session;
881 bitmap_andnot(params->ptypes, params->ptypes, src,
882 ICE_FLOW_PTYPE_MAX);
883 }
884 }
885
886 return 0;
887 }
888
889 /**
890 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
891 * @hw: pointer to the HW struct
892 * @params: information about the flow to be processed
893 * @seg: packet segment index of the field to be extracted
894 * @fld: ID of field to be extracted
895 * @match: bit mask of all fields being matched in this segment
896 *
897 * This function determines the protocol ID, offset, and size of the given
898 * field. It then allocates one or more extraction sequence entries for the
899 * given field and fills the entries with protocol ID and offset information.
900 */
901 static enum ice_status
902 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
903 u8 seg, enum ice_flow_field fld, u64 match)
904 {
905 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
906 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
907 u8 fv_words = hw->blk[params->blk].es.fvw;
908 struct ice_flow_fld_info *flds;
909 u16 cnt, ese_bits, i;
910 u16 sib_mask = 0;
911 u16 mask;
912 u16 off;
913
914 flds = params->prof->segs[seg].fields;
915
916 switch (fld) {
917 case ICE_FLOW_FIELD_IDX_ETH_DA:
918 case ICE_FLOW_FIELD_IDX_ETH_SA:
919 case ICE_FLOW_FIELD_IDX_S_VLAN:
920 case ICE_FLOW_FIELD_IDX_C_VLAN:
921 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
922 break;
923 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
924 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
925 break;
926 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
927 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
928 break;
929 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
930 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
931 break;
932 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
933 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
934 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
935
936 /* TTL and PROT share the same extraction seq. entry.
937 * Each is considered a sibling to the other in terms of sharing
938 * the same extraction sequence entry.
939 */
940 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
941 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
942 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
943 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
944
945 /* If the sibling field is also included, that field's
946 * mask needs to be included.
947 */
948 if (match & BIT(sib))
949 sib_mask = ice_flds_info[sib].mask;
950 break;
951 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
952 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
953 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
954
955 /* TTL and PROT share the same extraction seq. entry.
956 * Each is considered a sibling to the other in terms of sharing
957 * the same extraction sequence entry.
958 */
959 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
960 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
961 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
962 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
963
964 /* If the sibling field is also included, that field's
965 * mask needs to be included.
966 */
967 if (match & BIT(sib))
968 sib_mask = ice_flds_info[sib].mask;
969 break;
970 case ICE_FLOW_FIELD_IDX_IPV4_SA:
971 case ICE_FLOW_FIELD_IDX_IPV4_DA:
972 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
973 break;
974 case ICE_FLOW_FIELD_IDX_IPV6_SA:
975 case ICE_FLOW_FIELD_IDX_IPV6_DA:
976 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
977 break;
978 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
979 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
980 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
981 prot_id = ICE_PROT_TCP_IL;
982 break;
983 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
984 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
985 prot_id = ICE_PROT_UDP_IL_OR_S;
986 break;
987 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
988 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
989 prot_id = ICE_PROT_SCTP_IL;
990 break;
991 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
992 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
993 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
994 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
995 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
996 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
997 /* GTP is accessed through UDP OF protocol */
998 prot_id = ICE_PROT_UDP_OF;
999 break;
1000 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1001 prot_id = ICE_PROT_PPPOE;
1002 break;
1003 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1004 prot_id = ICE_PROT_UDP_IL_OR_S;
1005 break;
1006 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1007 prot_id = ICE_PROT_L2TPV3;
1008 break;
1009 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1010 prot_id = ICE_PROT_ESP_F;
1011 break;
1012 case ICE_FLOW_FIELD_IDX_AH_SPI:
1013 prot_id = ICE_PROT_ESP_2;
1014 break;
1015 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1016 prot_id = ICE_PROT_UDP_IL_OR_S;
1017 break;
1018 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1019 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1020 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1021 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1022 case ICE_FLOW_FIELD_IDX_ARP_OP:
1023 prot_id = ICE_PROT_ARP_OF;
1024 break;
1025 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1026 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1027 /* ICMP type and code share the same extraction seq. entry */
1028 prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
1029 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1030 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1031 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1032 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1033 break;
1034 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1035 prot_id = ICE_PROT_GRE_OF;
1036 break;
1037 default:
1038 return ICE_ERR_NOT_IMPL;
1039 }
1040
1041 /* Each extraction sequence entry is a word in size, and extracts a
1042 * word-aligned offset from a protocol header.
1043 */
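/* Example: assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes (16-bit entries), the
 * TCP flags field at bit offset 104 gets a word-aligned byte offset of
 * (104 / 16) * 2 = 12 and a displacement of 104 % 16 = 8 bits.
 */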
1044 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1045
1046 flds[fld].xtrct.prot_id = prot_id;
1047 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1048 ICE_FLOW_FV_EXTRACT_SZ;
1049 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1050 flds[fld].xtrct.idx = params->es_cnt;
1051 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1052
1053 /* Adjust the next field-entry index after accommodating the number of
1054 * entries this field consumes
1055 */
1056 cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
1057 ese_bits);
1058
1059 /* Fill in the extraction sequence entries needed for this field */
1060 off = flds[fld].xtrct.off;
1061 mask = flds[fld].xtrct.mask;
1062 for (i = 0; i < cnt; i++) {
1063 /* Only consume an extraction sequence entry if there is no
1064 * sibling field associated with this field or the sibling entry
1065 * already extracts the word shared with this field.
1066 */
1067 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1068 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1069 flds[sib].xtrct.off != off) {
1070 u8 idx;
1071
1072 /* Make sure the number of extraction sequence entries required
1073 * does not exceed the block's capability
1074 */
1075 if (params->es_cnt >= fv_words)
1076 return ICE_ERR_MAX_LIMIT;
1077
1078 /* some blocks require a reversed field vector layout */
1079 if (hw->blk[params->blk].es.reverse)
1080 idx = fv_words - params->es_cnt - 1;
1081 else
1082 idx = params->es_cnt;
1083
1084 params->es[idx].prot_id = prot_id;
1085 params->es[idx].off = off;
1086 params->mask[idx] = mask | sib_mask;
1087 params->es_cnt++;
1088 }
1089
1090 off += ICE_FLOW_FV_EXTRACT_SZ;
1091 }
1092
1093 return 0;
1094 }
1095
1096 /**
1097 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1098 * @hw: pointer to the HW struct
1099 * @params: information about the flow to be processed
1100 * @seg: index of packet segment whose raw fields are to be extracted
1101 */
1102 static enum ice_status
1103 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1104 u8 seg)
1105 {
1106 u16 fv_words;
1107 u16 hdrs_sz;
1108 u8 i;
1109
1110 if (!params->prof->segs[seg].raws_cnt)
1111 return 0;
1112
1113 if (params->prof->segs[seg].raws_cnt >
1114 ARRAY_SIZE(params->prof->segs[seg].raws))
1115 return ICE_ERR_MAX_LIMIT;
1116
1117 /* Offsets within the segment headers are not supported */
1118 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1119 if (!hdrs_sz)
1120 return ICE_ERR_PARAM;
1121
1122 fv_words = hw->blk[params->blk].es.fvw;
1123
1124 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1125 struct ice_flow_seg_fld_raw *raw;
1126 u16 off, cnt, j;
1127
1128 raw = &params->prof->segs[seg].raws[i];
1129
1130 /* Storing extraction information */
1131 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1132 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1133 ICE_FLOW_FV_EXTRACT_SZ;
1134 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1135 BITS_PER_BYTE;
1136 raw->info.xtrct.idx = params->es_cnt;
1137
1138 /* Determine the number of field vector entries this raw field
1139 * consumes.
1140 */
1141 cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
1142 (raw->info.src.last * BITS_PER_BYTE),
1143 (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
1144 off = raw->info.xtrct.off;
1145 for (j = 0; j < cnt; j++) {
1146 u16 idx;
1147
1148 /* Make sure the number of extraction sequence entries required
1149 * does not exceed the block's capability
1150 */
1151 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1152 params->es_cnt >= ICE_MAX_FV_WORDS)
1153 return ICE_ERR_MAX_LIMIT;
1154
1155 /* some blocks require a reversed field vector layout */
1156 if (hw->blk[params->blk].es.reverse)
1157 idx = fv_words - params->es_cnt - 1;
1158 else
1159 idx = params->es_cnt;
1160
1161 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1162 params->es[idx].off = off;
1163 params->es_cnt++;
1164 off += ICE_FLOW_FV_EXTRACT_SZ;
1165 }
1166 }
1167
1168 return 0;
1169 }
1170
1171 /**
1172 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1173 * @hw: pointer to the HW struct
1174 * @params: information about the flow to be processed
1175 *
1176 * This function iterates through all matched fields in the given segments, and
1177 * creates an extraction sequence for the fields.
1178 */
1179 static enum ice_status
1180 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1181 struct ice_flow_prof_params *params)
1182 {
1183 struct ice_flow_prof *prof = params->prof;
1184 enum ice_status status = 0;
1185 u8 i;
1186
1187 for (i = 0; i < prof->segs_cnt; i++) {
1188 u64 match = params->prof->segs[i].match;
1189 enum ice_flow_field j;
1190
1191 for_each_set_bit(j, (unsigned long *)&match,
1192 ICE_FLOW_FIELD_IDX_MAX) {
1193 status = ice_flow_xtract_fld(hw, params, i, j, match);
1194 if (status)
1195 return status;
1196 clear_bit(j, (unsigned long *)&match);
1197 }
1198
1199 /* Process raw matching bytes */
1200 status = ice_flow_xtract_raws(hw, params, i);
1201 if (status)
1202 return status;
1203 }
1204
1205 return status;
1206 }
1207
1208 /**
1209 * ice_flow_proc_segs - process all packet segments associated with a profile
1210 * @hw: pointer to the HW struct
1211 * @params: information about the flow to be processed
1212 */
1213 static enum ice_status
1214 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1215 {
1216 enum ice_status status;
1217
1218 status = ice_flow_proc_seg_hdrs(params);
1219 if (status)
1220 return status;
1221
1222 status = ice_flow_create_xtrct_seq(hw, params);
1223 if (status)
1224 return status;
1225
1226 switch (params->blk) {
1227 case ICE_BLK_FD:
1228 case ICE_BLK_RSS:
1229 status = 0;
1230 break;
1231 default:
1232 return ICE_ERR_NOT_IMPL;
1233 }
1234
1235 return status;
1236 }
1237
1238 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1239 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1240 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1241
1242 /**
1243 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1244 * @hw: pointer to the HW struct
1245 * @blk: classification stage
1246 * @dir: flow direction
1247 * @segs: array of one or more packet segments that describe the flow
1248 * @segs_cnt: number of packet segments provided
1249 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1250 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1251 */
1252 static struct ice_flow_prof *
1253 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1254 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1255 u8 segs_cnt, u16 vsi_handle, u32 conds)
1256 {
1257 struct ice_flow_prof *p, *prof = NULL;
1258
1259 mutex_lock(&hw->fl_profs_locks[blk]);
1260 list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
1261 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1262 segs_cnt && segs_cnt == p->segs_cnt) {
1263 u8 i;
1264
1265 /* Check for profile-VSI association if specified */
1266 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1267 ice_is_vsi_valid(hw, vsi_handle) &&
1268 !test_bit(vsi_handle, p->vsis))
1269 continue;
1270
1271 /* Protocol headers must be checked. Matched fields are
1272 * checked if specified.
1273 */
1274 for (i = 0; i < segs_cnt; i++)
1275 if (segs[i].hdrs != p->segs[i].hdrs ||
1276 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1277 segs[i].match != p->segs[i].match))
1278 break;
1279
1280 /* A match is found if all segments are matched */
1281 if (i == segs_cnt) {
1282 prof = p;
1283 break;
1284 }
1285 }
1286 mutex_unlock(&hw->fl_profs_locks[blk]);
1287
1288 return prof;
1289 }
1290
1291 /**
1292 * ice_flow_find_prof_id - Look up a profile with given profile ID
1293 * @hw: pointer to the HW struct
1294 * @blk: classification stage
1295 * @prof_id: unique ID to identify this flow profile
1296 */
1297 static struct ice_flow_prof *
1298 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1299 {
1300 struct ice_flow_prof *p;
1301
1302 list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
1303 if (p->id == prof_id)
1304 return p;
1305
1306 return NULL;
1307 }
1308
1309 /**
1310 * ice_dealloc_flow_entry - Deallocate flow entry memory
1311 * @hw: pointer to the HW struct
1312 * @entry: flow entry to be removed
1313 */
1314 static void
1315 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1316 {
1317 if (!entry)
1318 return;
1319
1320 if (entry->entry)
1321 devm_kfree(ice_hw_to_dev(hw), entry->entry);
1322
1323 devm_kfree(ice_hw_to_dev(hw), entry);
1324 }
1325
1326 /**
1327 * ice_flow_rem_entry_sync - Remove a flow entry
1328 * @hw: pointer to the HW struct
1329 * @blk: classification stage
1330 * @entry: flow entry to be removed
1331 */
1332 static enum ice_status
1333 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
1334 struct ice_flow_entry *entry)
1335 {
1336 if (!entry)
1337 return ICE_ERR_BAD_PTR;
1338
1339 list_del(&entry->l_entry);
1340
1341 ice_dealloc_flow_entry(hw, entry);
1342
1343 return 0;
1344 }
1345
1346 /**
1347 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1348 * @hw: pointer to the HW struct
1349 * @blk: classification stage
1350 * @dir: flow direction
1351 * @prof_id: unique ID to identify this flow profile
1352 * @segs: array of one or more packet segments that describe the flow
1353 * @segs_cnt: number of packet segments provided
1354 * @prof: stores the returned flow profile added
1355 *
1356 * Assumption: the caller has acquired the lock to the profile list
1357 */
1358 static enum ice_status
1359 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1360 enum ice_flow_dir dir, u64 prof_id,
1361 struct ice_flow_seg_info *segs, u8 segs_cnt,
1362 struct ice_flow_prof **prof)
1363 {
1364 struct ice_flow_prof_params *params;
1365 enum ice_status status;
1366 u8 i;
1367
1368 if (!prof)
1369 return ICE_ERR_BAD_PTR;
1370
1371 params = kzalloc(sizeof(*params), GFP_KERNEL);
1372 if (!params)
1373 return ICE_ERR_NO_MEMORY;
1374
1375 params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
1376 GFP_KERNEL);
1377 if (!params->prof) {
1378 status = ICE_ERR_NO_MEMORY;
1379 goto free_params;
1380 }
1381
1382 /* initialize extraction sequence to all invalid (0xff) */
1383 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1384 params->es[i].prot_id = ICE_PROT_INVALID;
1385 params->es[i].off = ICE_FV_OFFSET_INVAL;
1386 }
1387
1388 params->blk = blk;
1389 params->prof->id = prof_id;
1390 params->prof->dir = dir;
1391 params->prof->segs_cnt = segs_cnt;
1392
1393 /* Make a copy of the segments that need to be persistent in the flow
1394 * profile instance
1395 */
1396 for (i = 0; i < segs_cnt; i++)
1397 memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));
1398
1399 status = ice_flow_proc_segs(hw, params);
1400 if (status) {
1401 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1402 goto out;
1403 }
1404
1405 /* Add a HW profile for this flow profile */
1406 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1407 params->attr, params->attr_cnt, params->es,
1408 params->mask);
1409 if (status) {
1410 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1411 goto out;
1412 }
1413
1414 INIT_LIST_HEAD(&params->prof->entries);
1415 mutex_init(&params->prof->entries_lock);
1416 *prof = params->prof;
1417
1418 out:
1419 if (status)
1420 devm_kfree(ice_hw_to_dev(hw), params->prof);
1421 free_params:
1422 kfree(params);
1423
1424 return status;
1425 }
1426
1427 /**
1428 * ice_flow_rem_prof_sync - remove a flow profile
1429 * @hw: pointer to the hardware structure
1430 * @blk: classification stage
1431 * @prof: pointer to flow profile to remove
1432 *
1433 * Assumption: the caller has acquired the lock to the profile list
1434 */
1435 static enum ice_status
1436 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1437 struct ice_flow_prof *prof)
1438 {
1439 enum ice_status status;
1440
1441 /* Remove all remaining flow entries before removing the flow profile */
1442 if (!list_empty(&prof->entries)) {
1443 struct ice_flow_entry *e, *t;
1444
1445 mutex_lock(&prof->entries_lock);
1446
1447 list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
1448 status = ice_flow_rem_entry_sync(hw, blk, e);
1449 if (status)
1450 break;
1451 }
1452
1453 mutex_unlock(&prof->entries_lock);
1454 }
1455
1456 /* Remove all hardware profiles associated with this flow profile */
1457 status = ice_rem_prof(hw, blk, prof->id);
1458 if (!status) {
1459 list_del(&prof->l_entry);
1460 mutex_destroy(&prof->entries_lock);
1461 devm_kfree(ice_hw_to_dev(hw), prof);
1462 }
1463
1464 return status;
1465 }
1466
1467 /**
1468 * ice_flow_assoc_prof - associate a VSI with a flow profile
1469 * @hw: pointer to the hardware structure
1470 * @blk: classification stage
1471 * @prof: pointer to flow profile
1472 * @vsi_handle: software VSI handle
1473 *
1474 * Assumption: the caller has acquired the lock to the profile list
1475 * and the software VSI handle has been validated
1476 */
1477 static enum ice_status
1478 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1479 struct ice_flow_prof *prof, u16 vsi_handle)
1480 {
1481 enum ice_status status = 0;
1482
1483 if (!test_bit(vsi_handle, prof->vsis)) {
1484 status = ice_add_prof_id_flow(hw, blk,
1485 ice_get_hw_vsi_num(hw,
1486 vsi_handle),
1487 prof->id);
1488 if (!status)
1489 set_bit(vsi_handle, prof->vsis);
1490 else
1491 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
1492 status);
1493 }
1494
1495 return status;
1496 }
1497
1498 /**
1499 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1500 * @hw: pointer to the hardware structure
1501 * @blk: classification stage
1502 * @prof: pointer to flow profile
1503 * @vsi_handle: software VSI handle
1504 *
1505 * Assumption: the caller has acquired the lock to the profile list
1506 * and the software VSI handle has been validated
1507 */
1508 static enum ice_status
1509 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1510 struct ice_flow_prof *prof, u16 vsi_handle)
1511 {
1512 enum ice_status status = 0;
1513
1514 if (test_bit(vsi_handle, prof->vsis)) {
1515 status = ice_rem_prof_id_flow(hw, blk,
1516 ice_get_hw_vsi_num(hw,
1517 vsi_handle),
1518 prof->id);
1519 if (!status)
1520 clear_bit(vsi_handle, prof->vsis);
1521 else
1522 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
1523 status);
1524 }
1525
1526 return status;
1527 }
1528
1529 /**
1530 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1531 * @hw: pointer to the HW struct
1532 * @blk: classification stage
1533 * @dir: flow direction
1534 * @prof_id: unique ID to identify this flow profile
1535 * @segs: array of one or more packet segments that describe the flow
1536 * @segs_cnt: number of packet segments provided
1537 * @prof: stores the returned flow profile added
1538 */
1539 enum ice_status
1540 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1541 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1542 struct ice_flow_prof **prof)
1543 {
1544 enum ice_status status;
1545
1546 if (segs_cnt > ICE_FLOW_SEG_MAX)
1547 return ICE_ERR_MAX_LIMIT;
1548
1549 if (!segs_cnt)
1550 return ICE_ERR_PARAM;
1551
1552 if (!segs)
1553 return ICE_ERR_BAD_PTR;
1554
1555 status = ice_flow_val_hdrs(segs, segs_cnt);
1556 if (status)
1557 return status;
1558
1559 mutex_lock(&hw->fl_profs_locks[blk]);
1560
1561 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1562 prof);
1563 if (!status)
1564 list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
1565
1566 mutex_unlock(&hw->fl_profs_locks[blk]);
1567
1568 return status;
1569 }
1570
1571 /**
1572 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1573 * @hw: pointer to the HW struct
1574 * @blk: the block for which the flow profile is to be removed
1575 * @prof_id: unique ID of the flow profile to be removed
1576 */
1577 enum ice_status
1578 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1579 {
1580 struct ice_flow_prof *prof;
1581 enum ice_status status;
1582
1583 mutex_lock(&hw->fl_profs_locks[blk]);
1584
1585 prof = ice_flow_find_prof_id(hw, blk, prof_id);
1586 if (!prof) {
1587 status = ICE_ERR_DOES_NOT_EXIST;
1588 goto out;
1589 }
1590
1591 /* prof becomes invalid after the call */
1592 status = ice_flow_rem_prof_sync(hw, blk, prof);
1593
1594 out:
1595 mutex_unlock(&hw->fl_profs_locks[blk]);
1596
1597 return status;
1598 }
1599
1600 /**
1601 * ice_flow_add_entry - Add a flow entry
1602 * @hw: pointer to the HW struct
1603 * @blk: classification stage
1604 * @prof_id: ID of the profile to add a new flow entry to
1605 * @entry_id: unique ID to identify this flow entry
1606 * @vsi_handle: software VSI handle for the flow entry
1607 * @prio: priority of the flow entry
1608 * @data: pointer to a data buffer containing flow entry's match values/masks
1609 * @entry_h: pointer to buffer that receives the new flow entry's handle
1610 */
1611 enum ice_status
1612 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1613 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
1614 void *data, u64 *entry_h)
1615 {
1616 struct ice_flow_entry *e = NULL;
1617 struct ice_flow_prof *prof;
1618 enum ice_status status;
1619
1620 /* No flow entry data is expected for RSS */
1621 if (!entry_h || (!data && blk != ICE_BLK_RSS))
1622 return ICE_ERR_BAD_PTR;
1623
1624 if (!ice_is_vsi_valid(hw, vsi_handle))
1625 return ICE_ERR_PARAM;
1626
1627 mutex_lock(&hw->fl_profs_locks[blk]);
1628
1629 prof = ice_flow_find_prof_id(hw, blk, prof_id);
1630 if (!prof) {
1631 status = ICE_ERR_DOES_NOT_EXIST;
1632 } else {
1633 /* Allocate memory for the entry being added and associate
1634 * the VSI to the found flow profile
1635 */
1636 e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
1637 if (!e)
1638 status = ICE_ERR_NO_MEMORY;
1639 else
1640 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1641 }
1642
1643 mutex_unlock(&hw->fl_profs_locks[blk]);
1644 if (status)
1645 goto out;
1646
1647 e->id = entry_id;
1648 e->vsi_handle = vsi_handle;
1649 e->prof = prof;
1650 e->priority = prio;
1651
1652 switch (blk) {
1653 case ICE_BLK_FD:
1654 case ICE_BLK_RSS:
1655 break;
1656 default:
1657 status = ICE_ERR_NOT_IMPL;
1658 goto out;
1659 }
1660
1661 mutex_lock(&prof->entries_lock);
1662 list_add(&e->l_entry, &prof->entries);
1663 mutex_unlock(&prof->entries_lock);
1664
1665 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
1666
1667 out:
1668 if (status && e) {
1669 if (e->entry)
1670 devm_kfree(ice_hw_to_dev(hw), e->entry);
1671 devm_kfree(ice_hw_to_dev(hw), e);
1672 }
1673
1674 return status;
1675 }
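
/* Illustrative usage sketch (guarded out, not compiled into the driver):
 * one hypothetical way a caller could build a single-segment Flow
 * Director profile and attach an entry to it.  The field index, priority
 * constant, IDs and input-buffer layout below are assumptions made only
 * for illustration.
 */
#if 0
static enum ice_status
ice_flow_example_add_tcp_entry(struct ice_hw *hw, u16 vsi_handle,
			       u64 prof_id, u64 entry_id, void *match_buf)
{
	struct ice_flow_seg_info seg = {};
	struct ice_flow_prof *prof;
	enum ice_status status;
	u64 entry_h;

	/* Match the TCP source port: value at byte 0 of the caller's
	 * buffer, mask at byte 2, no upper bound (exact match).
	 */
	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
			 0, 2, ICE_FLOW_FLD_OFF_INVAL, false);

	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
				   &seg, 1, &prof);
	if (status)
		return status;

	return ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id,
				  vsi_handle, ICE_FLOW_PRIO_NORMAL,
				  match_buf, &entry_h);
}
#endif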
1676
1677 /**
1678 * ice_flow_rem_entry - Remove a flow entry
1679 * @hw: pointer to the HW struct
1680 * @blk: classification stage
1681 * @entry_h: handle to the flow entry to be removed
1682 */
1683 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
1684 u64 entry_h)
1685 {
1686 struct ice_flow_entry *entry;
1687 struct ice_flow_prof *prof;
1688 enum ice_status status = 0;
1689
1690 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
1691 return ICE_ERR_PARAM;
1692
1693 entry = ICE_FLOW_ENTRY_PTR(entry_h);
1694
1695 /* Retain the pointer to the flow profile as the entry will be freed */
1696 prof = entry->prof;
1697
1698 if (prof) {
1699 mutex_lock(&prof->entries_lock);
1700 status = ice_flow_rem_entry_sync(hw, blk, entry);
1701 mutex_unlock(&prof->entries_lock);
1702 }
1703
1704 return status;
1705 }
1706
1707 /**
1708 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
1709 * @seg: packet segment the field being set belongs to
1710 * @fld: field to be set
1711 * @field_type: type of the field
1712 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1713 * entry's input buffer
1714 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1715 * input buffer
1716 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1717 * entry's input buffer
1718 *
1719 * This helper function stores information of a field being matched, including
1720 * the type of the field and the locations of the value to match, the mask, and
1721 * the upper-bound value in the start of the input buffer for a flow entry.
1722 * This function should only be used for fixed-size data structures.
1723 *
1724 * This function also opportunistically determines the protocol headers to be
1725 * present based on the fields being set. Some fields cannot be used alone to
1726 * determine the protocol headers present. Sometimes, fields for particular
1727 * protocol headers are not matched. In those cases, the protocol headers
1728 * must be explicitly set.
1729 */
1730 static void
1731 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1732 enum ice_flow_fld_match_type field_type, u16 val_loc,
1733 u16 mask_loc, u16 last_loc)
1734 {
1735 u64 bit = BIT_ULL(fld);
1736
1737 seg->match |= bit;
1738 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1739 seg->range |= bit;
1740
1741 seg->fields[fld].type = field_type;
1742 seg->fields[fld].src.val = val_loc;
1743 seg->fields[fld].src.mask = mask_loc;
1744 seg->fields[fld].src.last = last_loc;
1745
1746 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1747 }
1748
1749 /**
1750 * ice_flow_set_fld - specifies locations of field from entry's input buffer
1751 * @seg: packet segment the field being set belongs to
1752 * @fld: field to be set
1753 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1754 * entry's input buffer
1755 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1756 * input buffer
1757 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1758 * entry's input buffer
1759 * @range: indicate if field being matched is to be in a range
1760 *
1761 * This function specifies the locations, in the form of byte offsets from the
1762 * start of the input buffer for a flow entry, from where the value to match,
1763 * the mask value, and upper value can be extracted. These locations are then
1764 * stored in the flow profile. When adding a flow entry associated with the
1765 * flow profile, these locations will be used to quickly extract the values and
1766 * create the content of a match entry. This function should only be used for
1767 * fixed-size data structures.
1768 */
1769 void
1770 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1771 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1772 {
1773 enum ice_flow_fld_match_type t = range ?
1774 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1775
1776 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1777 }
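
/* Illustrative sketch: the same helper with range matching enabled.  The
 * byte offsets assume a hypothetical input buffer that holds the lower
 * bound of the destination port at offset 0 and the upper bound at
 * offset 2; the mask location is left invalid:
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
 *			 0, ICE_FLOW_FLD_OFF_INVAL, 2, true);
 */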
1778
1779 /**
1780 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1781 * @seg: packet segment the field being set belongs to
1782 * @off: offset of the raw field from the beginning of the segment in bytes
1783 * @len: length of the raw pattern to be matched
1784 * @val_loc: location of the value to match from entry's input buffer
1785 * @mask_loc: location of mask value from entry's input buffer
1786 *
1787 * This function specifies the offset of the raw field to be matched from the
1788 * beginning of the specified packet segment, and the locations, in the form of
1789 * byte offsets from the start of the input buffer for a flow entry, from where
1790 * the value to match and the mask value are to be extracted. These locations are
1791 * then stored in the flow profile. When adding flow entries to the associated
1792 * flow profile, these locations can be used to quickly extract the values to
1793 * create the content of a match entry. This function should only be used for
1794 * fixed-size data structures.
1795 */
1796 void
1797 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1798 u16 val_loc, u16 mask_loc)
1799 {
1800 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1801 seg->raws[seg->raws_cnt].off = off;
1802 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1803 seg->raws[seg->raws_cnt].info.src.val = val_loc;
1804 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1805 /* The "last" field is used to store the length of the field */
1806 seg->raws[seg->raws_cnt].info.src.last = len;
1807 }
1808
1809 /* Overflows of "raws" will be handled as an error condition later in
1810 * the flow when this information is processed.
1811 */
1812 seg->raws_cnt++;
1813 }
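
/* Illustrative sketch: matching a 4-byte raw pattern 64 bytes into the
 * segment, with the pattern value and its mask read from hypothetical
 * byte offsets 0 and 4 of the entry's input buffer:
 *
 *	ice_flow_add_fld_raw(&seg, 64, 4, 0, 4);
 */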
1814
1815 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
1816 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
1817
1818 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
1819 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
1820
1821 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
1822 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1823
1824 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
1825 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
1826 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
1827 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1828
1829 /**
1830 * ice_flow_set_rss_seg_info - setup packet segments for RSS
1831 * @segs: pointer to the flow field segment(s)
1832 * @hash_fields: fields to be hashed on for the segment(s)
1833 * @flow_hdr: protocol header fields within a packet segment
1834 *
1835 * Helper function to extract fields from hash bitmap and use flow
1836 * header value to set flow field segment for further use in flow
1837 * profile entry or removal.
1838 */
1839 static enum ice_status
1840 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1841 u32 flow_hdr)
1842 {
1843 u64 val;
1844 u8 i;
1845
1846 for_each_set_bit(i, (unsigned long *)&hash_fields,
1847 ICE_FLOW_FIELD_IDX_MAX)
1848 ice_flow_set_fld(segs, (enum ice_flow_field)i,
1849 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1850 ICE_FLOW_FLD_OFF_INVAL, false);
1851
1852 ICE_FLOW_SET_HDRS(segs, flow_hdr);
1853
1854 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
1855 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
1856 return ICE_ERR_PARAM;
1857
1858 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1859 if (val && !is_power_of_2(val))
1860 return ICE_ERR_CFG;
1861
1862 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1863 if (val && !is_power_of_2(val))
1864 return ICE_ERR_CFG;
1865
1866 return 0;
1867 }
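
/* Worked example of the checks above (illustrative): a flow_hdr of
 * ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP passes, since exactly one
 * L3 and one L4 bit end up set.  ICE_FLOW_SEG_HDR_TCP |
 * ICE_FLOW_SEG_HDR_UDP leaves two L4 bits set, so the second
 * is_power_of_2() test fails and ICE_ERR_CFG is returned.
 */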
1868
1869 /**
1870 * ice_rem_vsi_rss_list - remove VSI from RSS list
1871 * @hw: pointer to the hardware structure
1872 * @vsi_handle: software VSI handle
1873 *
1874 * Remove the VSI from all RSS configurations in the list.
1875 */
1876 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1877 {
1878 struct ice_rss_cfg *r, *tmp;
1879
1880 if (list_empty(&hw->rss_list_head))
1881 return;
1882
1883 mutex_lock(&hw->rss_locks);
1884 list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1885 if (test_and_clear_bit(vsi_handle, r->vsis))
1886 if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1887 list_del(&r->l_entry);
1888 devm_kfree(ice_hw_to_dev(hw), r);
1889 }
1890 mutex_unlock(&hw->rss_locks);
1891 }
1892
1893 /**
1894 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1895 * @hw: pointer to the hardware structure
1896 * @vsi_handle: software VSI handle
1897 *
1898 * This function will iterate through all flow profiles and disassociate
1899 * the VSI from each profile it is associated with. If a flow profile then
1900 * has no VSIs it will be removed.
1901 */
1902 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1903 {
1904 const enum ice_block blk = ICE_BLK_RSS;
1905 struct ice_flow_prof *p, *t;
1906 enum ice_status status = 0;
1907
1908 if (!ice_is_vsi_valid(hw, vsi_handle))
1909 return ICE_ERR_PARAM;
1910
1911 if (list_empty(&hw->fl_profs[blk]))
1912 return 0;
1913
1914 mutex_lock(&hw->rss_locks);
1915 list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
1916 if (test_bit(vsi_handle, p->vsis)) {
1917 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
1918 if (status)
1919 break;
1920
1921 if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
1922 status = ice_flow_rem_prof(hw, blk, p->id);
1923 if (status)
1924 break;
1925 }
1926 }
1927 mutex_unlock(&hw->rss_locks);
1928
1929 return status;
1930 }
1931
1932 /**
1933 * ice_rem_rss_list - remove RSS configuration from list
1934 * @hw: pointer to the hardware structure
1935 * @vsi_handle: software VSI handle
1936 * @prof: pointer to flow profile
1937 *
1938 * Assumption: lock has already been acquired for RSS list
1939 */
1940 static void
1941 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1942 {
1943 struct ice_rss_cfg *r, *tmp;
1944
1945 /* Search for RSS hash fields associated to the VSI that match the
1946 * hash configurations associated to the flow profile. If found
1947 * remove from the RSS entry list of the VSI context and delete entry.
1948 */
1949 list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1950 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1951 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1952 clear_bit(vsi_handle, r->vsis);
1953 if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1954 list_del(&r->l_entry);
1955 devm_kfree(ice_hw_to_dev(hw), r);
1956 }
1957 return;
1958 }
1959 }
1960
1961 /**
1962 * ice_add_rss_list - add RSS configuration to list
1963 * @hw: pointer to the hardware structure
1964 * @vsi_handle: software VSI handle
1965 * @prof: pointer to flow profile
1966 *
1967 * Assumption: lock has already been acquired for RSS list
1968 */
1969 static enum ice_status
1970 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1971 {
1972 struct ice_rss_cfg *r, *rss_cfg;
1973
1974 list_for_each_entry(r, &hw->rss_list_head, l_entry)
1975 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1976 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1977 set_bit(vsi_handle, r->vsis);
1978 return 0;
1979 }
1980
1981 rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1982 GFP_KERNEL);
1983 if (!rss_cfg)
1984 return ICE_ERR_NO_MEMORY;
1985
1986 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1987 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1988 set_bit(vsi_handle, rss_cfg->vsis);
1989
1990 list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1991
1992 return 0;
1993 }
1994
1995 #define ICE_FLOW_PROF_HASH_S 0
1996 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
1997 #define ICE_FLOW_PROF_HDR_S 32
1998 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
1999 #define ICE_FLOW_PROF_ENCAP_S 63
2000 #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
2001
2002 #define ICE_RSS_OUTER_HEADERS 1
2003 #define ICE_RSS_INNER_HEADERS 2
2004
2005 /* Flow profile ID format:
2006 * [0:31] - Packet match fields
2007 * [32:62] - Protocol header
2008 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
2009 */
2010 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
2011 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
2012 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
2013 ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)))
2014
2015 /**
2016 * ice_add_rss_cfg_sync - add an RSS configuration
2017 * @hw: pointer to the hardware structure
2018 * @vsi_handle: software VSI handle
2019 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
2020 * @addl_hdrs: protocol header fields
2021 * @segs_cnt: packet segment count
2022 *
2023 * Assumption: lock has already been acquired for RSS list
2024 */
2025 static enum ice_status
2026 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2027 u32 addl_hdrs, u8 segs_cnt)
2028 {
2029 const enum ice_block blk = ICE_BLK_RSS;
2030 struct ice_flow_prof *prof = NULL;
2031 struct ice_flow_seg_info *segs;
2032 enum ice_status status;
2033
2034 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
2035 return ICE_ERR_PARAM;
2036
2037 segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
2038 if (!segs)
2039 return ICE_ERR_NO_MEMORY;
2040
2041 /* Construct the packet segment info from the hashed fields */
2042 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
2043 addl_hdrs);
2044 if (status)
2045 goto exit;
2046
2047 /* Search for a flow profile that has matching headers, hash fields
2048 * and has the input VSI associated to it. If found, no further
2049 * operations required and exit.
2050 */
2051 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2052 vsi_handle,
2053 ICE_FLOW_FIND_PROF_CHK_FLDS |
2054 ICE_FLOW_FIND_PROF_CHK_VSI);
2055 if (prof)
2056 goto exit;
2057
2058 /* Check if a flow profile exists with the same protocol headers and
2059 * associated with the input VSI. If so disassociate the VSI from
2060 * this profile. The VSI will be added to a new profile created with
2061 * the protocol header and new hash field configuration.
2062 */
2063 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2064 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
2065 if (prof) {
2066 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
2067 if (!status)
2068 ice_rem_rss_list(hw, vsi_handle, prof);
2069 else
2070 goto exit;
2071
2072 /* Remove profile if it has no VSIs associated */
2073 if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
2074 status = ice_flow_rem_prof(hw, blk, prof->id);
2075 if (status)
2076 goto exit;
2077 }
2078 }
2079
2080 /* Search for a profile that has same match fields only. If this
2081 * exists then associate the VSI to this profile.
2082 */
2083 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2084 vsi_handle,
2085 ICE_FLOW_FIND_PROF_CHK_FLDS);
2086 if (prof) {
2087 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2088 if (!status)
2089 status = ice_add_rss_list(hw, vsi_handle, prof);
2090 goto exit;
2091 }
2092
2093 /* Create a new flow profile with generated profile and packet
2094 * segment information.
2095 */
2096 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
2097 ICE_FLOW_GEN_PROFID(hashed_flds,
2098 segs[segs_cnt - 1].hdrs,
2099 segs_cnt),
2100 segs, segs_cnt, &prof);
2101 if (status)
2102 goto exit;
2103
2104 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2105 /* If association to a new flow profile failed then this profile can
2106 * be removed.
2107 */
2108 if (status) {
2109 ice_flow_rem_prof(hw, blk, prof->id);
2110 goto exit;
2111 }
2112
2113 status = ice_add_rss_list(hw, vsi_handle, prof);
2114
2115 exit:
2116 kfree(segs);
2117 return status;
2118 }
2119
2120 /**
2121 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
2122 * @hw: pointer to the hardware structure
2123 * @vsi_handle: software VSI handle
2124 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
2125 * @addl_hdrs: protocol header fields
2126 *
2127 * This function will generate a flow profile based on the input fields to
2128 * hash on and the flow type, and will use the VSI number to add a flow
2129 * entry to the profile.
2130 */
2131 enum ice_status
2132 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2133 u32 addl_hdrs)
2134 {
2135 enum ice_status status;
2136
2137 if (hashed_flds == ICE_HASH_INVALID ||
2138 !ice_is_vsi_valid(hw, vsi_handle))
2139 return ICE_ERR_PARAM;
2140
2141 mutex_lock(&hw->rss_locks);
2142 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
2143 ICE_RSS_OUTER_HEADERS);
2144 if (!status)
2145 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
2146 addl_hdrs, ICE_RSS_INNER_HEADERS);
2147 mutex_unlock(&hw->rss_locks);
2148
2149 return status;
2150 }
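
/* Illustrative sketch: enabling 4-tuple RSS for IPv4/TCP traffic on an
 * already-validated VSI handle might look like:
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle,
 *				 ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
 *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 */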
2151
2152 /**
2153 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
2154 * @hw: pointer to the hardware structure
2155 * @vsi_handle: software VSI handle
2156 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
2157 * @addl_hdrs: Protocol header fields within a packet segment
2158 * @segs_cnt: packet segment count
2159 *
2160 * Assumption: lock has already been acquired for RSS list
2161 */
2162 static enum ice_status
2163 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2164 u32 addl_hdrs, u8 segs_cnt)
2165 {
2166 const enum ice_block blk = ICE_BLK_RSS;
2167 struct ice_flow_seg_info *segs;
2168 struct ice_flow_prof *prof;
2169 enum ice_status status;
2170
2171 segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
2172 if (!segs)
2173 return ICE_ERR_NO_MEMORY;
2174
2175 /* Construct the packet segment info from the hashed fields */
2176 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
2177 addl_hdrs);
2178 if (status)
2179 goto out;
2180
2181 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2182 vsi_handle,
2183 ICE_FLOW_FIND_PROF_CHK_FLDS);
2184 if (!prof) {
2185 status = ICE_ERR_DOES_NOT_EXIST;
2186 goto out;
2187 }
2188
2189 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
2190 if (status)
2191 goto out;
2192
2193 /* Remove RSS configuration from VSI context before deleting
2194 * the flow profile.
2195 */
2196 ice_rem_rss_list(hw, vsi_handle, prof);
2197
2198 if (bitmap_empty(prof->vsis, ICE_MAX_VSI))
2199 status = ice_flow_rem_prof(hw, blk, prof->id);
2200
2201 out:
2202 kfree(segs);
2203 return status;
2204 }
2205
2206 /**
2207 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
2208 * @hw: pointer to the hardware structure
2209 * @vsi_handle: software VSI handle
2210 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
2211 * @addl_hdrs: Protocol header fields within a packet segment
2212 *
2213 * This function will lookup the flow profile based on the input
2214 * hash field bitmap, iterate through the profile entry list of
2215 * that profile and find entry associated with input VSI to be
2216 * removed. Calls are made to underlying flow APIs which will in
2217 * turn build or update buffers for RSS XLT1 section.
2218 */
2219 enum ice_status __maybe_unused
2220 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2221 u32 addl_hdrs)
2222 {
2223 enum ice_status status;
2224
2225 if (hashed_flds == ICE_HASH_INVALID ||
2226 !ice_is_vsi_valid(hw, vsi_handle))
2227 return ICE_ERR_PARAM;
2228
2229 mutex_lock(&hw->rss_locks);
2230 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
2231 ICE_RSS_OUTER_HEADERS);
2232 if (!status)
2233 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
2234 addl_hdrs, ICE_RSS_INNER_HEADERS);
2235 mutex_unlock(&hw->rss_locks);
2236
2237 return status;
2238 }
2239
2240 /* Mapping of AVF hash bit fields to an L3-L4 hash combination.
2241 * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
2242 * hash, convert them to their appropriate flow L3, L4 values.
2243 */
2244 #define ICE_FLOW_AVF_RSS_IPV4_MASKS \
2245 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
2246 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
2247 #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
2248 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
2249 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
2250 #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
2251 (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
2252 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
2253 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
2254 #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
2255 (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
2256 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
2257
2258 #define ICE_FLOW_AVF_RSS_IPV6_MASKS \
2259 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
2260 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
2261 #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
2262 (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
2263 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
2264 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
2265 #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
2266 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
2267 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
2268 #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
2269 (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
2270 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
2271
2272 /**
2273 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
2274 * @hw: pointer to the hardware structure
2275 * @vsi_handle: software VSI handle
2276 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
2277 *
2278 * This function will take the hash bitmap provided by the AVF driver via a
2279 * message, convert it to ICE-compatible values, and configure RSS flow
2280 * profiles.
2281 */
2282 enum ice_status
2283 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
2284 {
2285 enum ice_status status = 0;
2286 u64 hash_flds;
2287
2288 if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
2289 !ice_is_vsi_valid(hw, vsi_handle))
2290 return ICE_ERR_PARAM;
2291
2292 /* Make sure no unsupported bits are specified */
2293 if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
2294 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
2295 return ICE_ERR_CFG;
2296
2297 hash_flds = avf_hash;
2298
2299 /* Always create an L3 RSS configuration for any L4 RSS configuration */
2300 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
2301 hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
2302
2303 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
2304 hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
2305
2306 /* Create the corresponding RSS configuration for each valid hash bit */
2307 while (hash_flds) {
2308 u64 rss_hash = ICE_HASH_INVALID;
2309
2310 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
2311 if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
2312 rss_hash = ICE_FLOW_HASH_IPV4;
2313 hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
2314 } else if (hash_flds &
2315 ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
2316 rss_hash = ICE_FLOW_HASH_IPV4 |
2317 ICE_FLOW_HASH_TCP_PORT;
2318 hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
2319 } else if (hash_flds &
2320 ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
2321 rss_hash = ICE_FLOW_HASH_IPV4 |
2322 ICE_FLOW_HASH_UDP_PORT;
2323 hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
2324 } else if (hash_flds &
2325 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
2326 rss_hash = ICE_FLOW_HASH_IPV4 |
2327 ICE_FLOW_HASH_SCTP_PORT;
2328 hash_flds &=
2329 ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
2330 }
2331 } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
2332 if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
2333 rss_hash = ICE_FLOW_HASH_IPV6;
2334 hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
2335 } else if (hash_flds &
2336 ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
2337 rss_hash = ICE_FLOW_HASH_IPV6 |
2338 ICE_FLOW_HASH_TCP_PORT;
2339 hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
2340 } else if (hash_flds &
2341 ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
2342 rss_hash = ICE_FLOW_HASH_IPV6 |
2343 ICE_FLOW_HASH_UDP_PORT;
2344 hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
2345 } else if (hash_flds &
2346 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
2347 rss_hash = ICE_FLOW_HASH_IPV6 |
2348 ICE_FLOW_HASH_SCTP_PORT;
2349 hash_flds &=
2350 ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
2351 }
2352 }
2353
2354 if (rss_hash == ICE_HASH_INVALID)
2355 return ICE_ERR_OUT_OF_RANGE;
2356
2357 status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
2358 ICE_FLOW_SEG_HDR_NONE);
2359 if (status)
2360 break;
2361 }
2362
2363 return status;
2364 }
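
/* Worked example of the loop above (illustrative): if the AVF driver
 * requests only BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP), the L3 bits of
 * ICE_FLOW_AVF_RSS_IPV4_MASKS are OR'd in first, so two configurations
 * are programmed: ICE_FLOW_HASH_IPV4, then ICE_FLOW_HASH_IPV4 |
 * ICE_FLOW_HASH_TCP_PORT.
 */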
2365
2366 /**
2367 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
2368 * @hw: pointer to the hardware structure
2369 * @vsi_handle: software VSI handle
2370 */
2371 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
2372 {
2373 enum ice_status status = 0;
2374 struct ice_rss_cfg *r;
2375
2376 if (!ice_is_vsi_valid(hw, vsi_handle))
2377 return ICE_ERR_PARAM;
2378
2379 mutex_lock(&hw->rss_locks);
2380 list_for_each_entry(r, &hw->rss_list_head, l_entry) {
2381 if (test_bit(vsi_handle, r->vsis)) {
2382 status = ice_add_rss_cfg_sync(hw, vsi_handle,
2383 r->hashed_flds,
2384 r->packet_hdr,
2385 ICE_RSS_OUTER_HEADERS);
2386 if (status)
2387 break;
2388 status = ice_add_rss_cfg_sync(hw, vsi_handle,
2389 r->hashed_flds,
2390 r->packet_hdr,
2391 ICE_RSS_INNER_HEADERS);
2392 if (status)
2393 break;
2394 }
2395 }
2396 mutex_unlock(&hw->rss_locks);
2397
2398 return status;
2399 }
2400
2401 /**
2402 * ice_get_rss_cfg - returns hashed fields for the given header types
2403 * @hw: pointer to the hardware structure
2404 * @vsi_handle: software VSI handle
2405 * @hdrs: protocol header type
2406 *
2407 * This function will return the match fields of the first instance of a flow
2408 * profile having the given header types and containing the input VSI.
2409 */
2410 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
2411 {
2412 u64 rss_hash = ICE_HASH_INVALID;
2413 struct ice_rss_cfg *r;
2414
2415 /* verify that the protocol header is non-zero and the VSI is valid */
2416 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
2417 return ICE_HASH_INVALID;
2418
2419 mutex_lock(&hw->rss_locks);
2420 list_for_each_entry(r, &hw->rss_list_head, l_entry)
2421 if (test_bit(vsi_handle, r->vsis) &&
2422 r->packet_hdr == hdrs) {
2423 rss_hash = r->hashed_flds;
2424 break;
2425 }
2426 mutex_unlock(&hw->rss_locks);
2427
2428 return rss_hash;
2429 }
2430