/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/fs_ethtool.h"

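/* A flow table dedicated to ethtool steering, together with the number
 * of rules currently installed in it. The table is created on demand by
 * the first rule and destroyed when the last rule is removed.
 */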
struct mlx5e_ethtool_table {
	struct mlx5_flow_table *ft;
	int num_rules;
};

#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4

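/* Per-netdev ethtool steering state: one table per possible number of
 * match tuples for L3/L4 flows and for L2 flows, plus the list of all
 * installed rules, kept sorted by location.
 */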
struct mlx5e_ethtool_steering {
	struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
	struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
	struct list_head rules;
	int tot_num_rules;
};

static int flow_type_to_traffic_type(u32 flow_type);

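/* Strip the ethtool extension flags, leaving the base flow type. */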
static u32 flow_type_mask(u32 flow_type)
{
	return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}

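/* Bookkeeping for one installed classification rule: the original
 * ethtool spec, the hardware rule handle, the table holding it and,
 * for FLOW_RSS rules, a reference on the RSS context it points to.
 */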
struct mlx5e_ethtool_rule {
	struct list_head list;
	struct ethtool_rx_flow_spec flow_spec;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_rss *rss;
};

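/* Drop one rule reference on the table; destroy the hardware table when
 * the last rule is gone.
 */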
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
	if (!--eth_ft->num_rules) {
		mlx5_destroy_flow_table(eth_ft->ft);
		eth_ft->ft = NULL;
	}
}

#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS 10
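
/* Return the flow table that should hold a rule with @num_tuples match
 * fields, creating it on first use. Rules with more tuples (more
 * specific matches) land in lower-numbered priorities, so they are
 * evaluated before less specific ones.
 */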
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
						  struct ethtool_rx_flow_spec *fs,
						  int num_tuples)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int max_tuples;
	int table_size;
	int prio;

	switch (flow_type_mask(fs->flow_type)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &ethtool->l3_l4_ft[prio];
		break;
	case ETHER_FLOW:
		max_tuples = ETHTOOL_NUM_L2_FTS;
		prio = max_tuples - num_tuples;
		eth_ft = &ethtool->l2_ft[prio];
		prio += MLX5E_ETHTOOL_L2_PRIO;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	eth_ft->num_rules++;
	if (eth_ft->ft)
		return eth_ft;

	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
			   MLX5E_ETHTOOL_NUM_ENTRIES);

	ft_attr.prio = prio;
	ft_attr.max_fte = table_size;
	ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ERR_CAST(ft);

	eth_ft->ft = ft;
	return eth_ft;
}

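/* AND the value bytes with the mask bytes in place, so that bits the
 * mask does not cover can never influence the match value.
 */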
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val++)
		*val &= *mask;
}

#define MLX5E_FTE_SET(header_p, fld, v)  \
	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)

#define MLX5E_FTE_ADDR_OF(header_p, fld) \
	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)

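/* The set_*() helpers below write one protocol layer of the match into
 * the criteria (headers_c) and value (headers_v) blocks of an FTE. A
 * zero mask leaves the corresponding field unmatched.
 */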
static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
	if (ip4src_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_v, sizeof(ip4src_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_m, sizeof(ip4src_m));
	}
	if (ip4dst_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_v, sizeof(ip4dst_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_m, sizeof(ip4dst_m));
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}

static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);

	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_m, ip6_sz);
	}
	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_m, ip6_sz);
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}

static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
	}
	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}

static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
	}

	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}

static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;

	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
		l3_mask->ip4dst, l3_val->ip4dst);

	if (l3_mask->proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
	}
}

static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;

	set_ip6(headers_c, headers_v, l3_mask->ip6src,
		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);

	if (l3_mask->l4_proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
	}
}

static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	struct ethhdr *eth_val = &fs->h_u.ether_spec;

	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}

static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}

static void
set_dmac(void *headers_c, void *headers_v,
	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}

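/* Translate an ethtool flow spec into mlx5 match criteria and match
 * value, including the FLOW_EXT VLAN and FLOW_MAC_EXT destination MAC
 * extensions.
 */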
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = flow_type_mask(fs->flow_type);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}

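/* Insert the rule into the steering list, keeping it sorted by
 * ascending location.
 */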
static void add_rule_to_list(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *rule)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct list_head *head = &ethtool->rules;
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &ethtool->rules, list) {
		if (iter->flow_spec.location > rule->flow_spec.location)
			break;
		head = &iter->list;
	}
	ethtool->tot_num_rules++;
	list_add(&rule->list, head);
}

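/* Return true if no outer-header match criteria bit is set. */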
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

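/* Resolve the destination TIR for the rule: the indicated RSS context
 * for FLOW_RSS rules (taking a reference on it), otherwise the direct
 * TIR of the ring selected by ring_cookie.
 */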
static int flow_get_tirn(struct mlx5e_priv *priv,
			 struct mlx5e_ethtool_rule *eth_rule,
			 struct ethtool_rx_flow_spec *fs,
			 u32 rss_context, u32 *tirn)
{
	if (fs->flow_type & FLOW_RSS) {
		struct mlx5e_packet_merge_param pkt_merge_param;
		struct mlx5e_rss *rss;
		u32 flow_type;
		int err;
		int tt;

		rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context);
		if (!rss)
			return -ENOENT;

		flow_type = flow_type_mask(fs->flow_type);
		tt = flow_type_to_traffic_type(flow_type);
		if (tt < 0)
			return -EINVAL;

		pkt_merge_param = priv->channels.params.packet_merge;
		err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
		if (err)
			return err;
		eth_rule->rss = rss;
		mlx5e_rss_refcnt_inc(eth_rule->rss);
	} else {
		*tirn = mlx5e_rx_res_get_tirn_direct(priv->rx_res, fs->ring_cookie);
	}

	return 0;
}

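/* Build the match spec and destination for the rule and install it in
 * the flow table. A ring_cookie of RX_CLS_FLOW_DISC installs a drop
 * rule instead of a forward-to-TIR rule.
 */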
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5e_ethtool_rule *eth_rule,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value, fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num);
		if (err)
			goto free;

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	spec->match_criteria_enable = !outer_header_zero(spec->match_criteria);
	spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}

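/* Tear down one rule: remove it from hardware, release its RSS
 * reference, unlink it from the list and drop its table reference.
 */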
static void del_ethtool_rule(struct mlx5e_flow_steering *fs,
			     struct mlx5e_ethtool_rule *eth_rule)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);

	if (eth_rule->rule)
		mlx5_del_flow_rules(eth_rule->rule);
	if (eth_rule->rss)
		mlx5e_rss_refcnt_dec(eth_rule->rss);
	list_del(&eth_rule->list);
	ethtool->tot_num_rules--;
	put_flow_table(eth_rule->eth_ft);
	kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
						    int location)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &ethtool->rules, list) {
		if (iter->flow_spec.location == location)
			return iter;
	}
	return NULL;
}

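/* Allocate a fresh rule for @location. If a rule already exists there,
 * it is deleted first, which gives ETHTOOL_SRXCLSRLINS its replace
 * semantics.
 */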
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
						   int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	eth_rule = find_ethtool_rule(priv, location);
	if (eth_rule)
		del_ethtool_rule(priv->fs, eth_rule);

	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
	if (!eth_rule)
		return ERR_PTR(-ENOMEM);

	add_rule_to_list(priv, eth_rule);
	return eth_rule;
}

#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) (field == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

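/* Each validate_*() helper rejects fields the hardware cannot match on
 * and returns the number of match tuples the flow consumes, which in
 * turn selects the flow table priority.
 */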
static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	int ntuples = 0;

	if (!is_zero_ether_addr(eth_mask->h_dest))
		ntuples++;
	if (!is_zero_ether_addr(eth_mask->h_source))
		ntuples++;
	if (eth_mask->h_proto)
		ntuples++;
	return ntuples;
}

static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	int ntuples = 0;

	if (l4_mask->tos)
		return -EINVAL;

	if (l4_mask->ip4src)
		ntuples++;
	if (l4_mask->ip4dst)
		ntuples++;
	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;
	if (l3_mask->ip4src)
		ntuples++;
	if (l3_mask->ip4dst)
		ntuples++;
	if (l3_mask->proto)
		ntuples++;
	/* Flow is IPv4 */
	return ++ntuples;
}

static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tclass)
		return -EINVAL;
	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
		ntuples++;
	if (l3_mask->l4_proto)
		ntuples++;
	/* Flow is IPv6 */
	return ++ntuples;
}

static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	int ntuples = 0;

	if (l4_mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
		ntuples++;

	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	if (fs->m_ext.vlan_tci &&
	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
		return -EINVAL;

	return 1;
}

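/* Validate the whole flow spec, including extensions, and return its
 * total tuple count (> 0) or a negative errno.
 */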
static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC)
		if (fs->ring_cookie >= priv->channels.params.num_channels)
			return -EINVAL;

	switch (flow_type_mask(fs->flow_type)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -ENOTSUPP;
	}
	if (fs->flow_type & FLOW_EXT) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}

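/* Install or replace the classification rule at fs->location: validate
 * the spec, pick (or create) the right flow table, allocate the rule
 * bookkeeping and program the hardware.
 */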
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		return num_tuples;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;

	rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	del_ethtool_rule(priv->fs, eth_rule);

	return err;
}

static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;
	int err = 0;

	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	eth_rule = find_ethtool_rule(priv, location);
	if (!eth_rule) {
		err = -ENOENT;
		goto out;
	}

	del_ethtool_rule(priv->fs, eth_rule);
out:
	return err;
}

static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &ethtool->rules, list) {
		int index;

		if (eth_rule->flow_spec.location != location)
			continue;
		if (!info)
			return 0;
		info->fs = eth_rule->flow_spec;
		if (!eth_rule->rss)
			return 0;
		index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss);
		if (index < 0)
			return index;
		info->rss_context = index;
		return 0;
	}

	return -ENOENT;
}

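/* Fill @rule_locs with the locations of all installed rules. A NULL
 * info pointer makes mlx5e_ethtool_get_flow() act as a pure existence
 * check, so -ENOENT here just means "no rule at this location" and the
 * scan continues.
 */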
static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, NULL, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}

int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
{
	*ethtool = kvzalloc(sizeof(**ethtool), GFP_KERNEL);
	if (!*ethtool)
		return -ENOMEM;
	return 0;
}

void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool)
{
	kvfree(ethtool);
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
		del_ethtool_rule(fs, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);

	INIT_LIST_HEAD(&ethtool->rules);
}

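/* Map an ethtool flow type onto the mlx5 traffic type used for RSS TIR
 * selection and hash-field configuration.
 */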
static int flow_type_to_traffic_type(u32 flow_type)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		return MLX5_TT_IPV4_TCP;
	case TCP_V6_FLOW:
		return MLX5_TT_IPV6_TCP;
	case UDP_V4_FLOW:
		return MLX5_TT_IPV4_UDP;
	case UDP_V6_FLOW:
		return MLX5_TT_IPV6_UDP;
	case AH_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_AH;
	case AH_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_AH;
	case ESP_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_ESP;
	case ESP_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_ESP;
	case IPV4_FLOW:
		return MLX5_TT_IPV4;
	case IPV6_FLOW:
		return MLX5_TT_IPV6;
	default:
		return -EINVAL;
	}
}

static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	u8 rx_hash_field = 0;
	int err;
	int tt;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt < 0)
		return tt;

	/* RSS does not support anything other than hashing to queues
	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
	 * port.
	 */
	if (nfc->flow_type != TCP_V4_FLOW &&
	    nfc->flow_type != TCP_V6_FLOW &&
	    nfc->flow_type != UDP_V4_FLOW &&
	    nfc->flow_type != UDP_V6_FLOW)
		return -EOPNOTSUPP;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EOPNOTSUPP;

	if (nfc->data & RXH_IP_SRC)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
	if (nfc->data & RXH_IP_DST)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
	if (nfc->data & RXH_L4_B_0_1)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
	if (nfc->data & RXH_L4_B_2_3)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	mutex_lock(&priv->state_lock);
	err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	u32 hash_field = 0;
	int tt;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt < 0)
		return tt;

	hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt);
	nfc->data = 0;

	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
		nfc->data |= RXH_IP_SRC;
	if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
		nfc->data |= RXH_IP_DST;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
		nfc->data |= RXH_L4_B_0_1;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
		nfc->data |= RXH_L4_B_2_3;

	return 0;
}

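/* Dispatch ethtool set_rxnfc commands: rule insert/replace, rule delete
 * and RSS hash-field configuration.
 */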
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
		break;
	case ETHTOOL_SRXFH:
		err = mlx5e_set_rss_hash_opt(priv, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

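/* Dispatch ethtool get_rxnfc commands: rule count, single rule lookup,
 * full rule dump and RSS hash-field query.
 */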
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = ethtool->tot_num_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = mlx5e_get_rss_hash_opt(priv, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}