/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/fs_ethtool.h"

struct mlx5e_ethtool_table {
	struct mlx5_flow_table *ft;
	int num_rules;
};

#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4

struct mlx5e_ethtool_steering {
	struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
	struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
	struct list_head rules;
	int tot_num_rules;
};

static int flow_type_to_traffic_type(u32 flow_type);

static u32 flow_type_mask(u32 flow_type)
{
	return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}

struct mlx5e_ethtool_rule {
	struct list_head list;
	struct ethtool_rx_flow_spec flow_spec;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_rss *rss;
};

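/* Drop a reference on the flow table; destroy it when the last rule is gone. */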
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
	if (!--eth_ft->num_rules) {
		mlx5_destroy_flow_table(eth_ft->ft);
		eth_ft->ft = NULL;
	}
}

#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS 10
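/*
 * Look up (or lazily create) the flow table matching this rule's flow type
 * and tuple count. Rules with more match tuples land in a table with a lower
 * prio value, so more specific rules are evaluated first.
 */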
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
						  struct ethtool_rx_flow_spec *fs,
						  int num_tuples)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int max_tuples;
	int table_size;
	int prio;

	switch (flow_type_mask(fs->flow_type)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &ethtool->l3_l4_ft[prio];
		break;
	case ETHER_FLOW:
		max_tuples = ETHTOOL_NUM_L2_FTS;
		prio = max_tuples - num_tuples;
		eth_ft = &ethtool->l2_ft[prio];
		prio += MLX5E_ETHTOOL_L2_PRIO;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	eth_ft->num_rules++;
	if (eth_ft->ft)
		return eth_ft;

	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
			   MLX5E_ETHTOOL_NUM_ENTRIES);

	ft_attr.prio = prio;
	ft_attr.max_fte = table_size;
	ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return (void *)ft;

	eth_ft->ft = ft;
	return eth_ft;
}

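/* Mask the value bytes in place so only bits covered by the mask remain set. */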
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val++)
		*val = *mask & *val;
}

#define MLX5E_FTE_SET(header_p, fld, v)  \
	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)

#define MLX5E_FTE_ADDR_OF(header_p, fld) \
	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)

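/*
 * The set_* helpers below fill the FTE match criteria (headers_c) and match
 * value (headers_v) for one header layer. A field is written only when its
 * ethtool mask is non-zero.
 */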
static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
	if (ip4src_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_v, sizeof(ip4src_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_m, sizeof(ip4src_m));
	}
	if (ip4dst_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_v, sizeof(ip4dst_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_m, sizeof(ip4dst_m));
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}

static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);

	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_m, ip6_sz);
	}
	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_m, ip6_sz);
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}

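/* Match L4 source/destination ports and pin ip_protocol to TCP (UDP below). */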
static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
	}
	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}

static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
	}

	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}

static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;

	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
		l3_mask->ip4dst, l3_val->ip4dst);

	if (l3_mask->proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
	}
}

static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;

	set_ip6(headers_c, headers_v, l3_mask->ip6src,
		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);

	if (l3_mask->l4_proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
	}
}

static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	struct ethhdr *eth_val = &fs->h_u.ether_spec;

	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}

static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}

static void
set_dmac(void *headers_c, void *headers_v,
	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}

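/*
 * Translate an ethtool flow spec into mlx5 match criteria and match values
 * in the outer headers of the FTE match parameter.
 */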
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = flow_type_mask(fs->flow_type);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}

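/* Insert the rule, keeping the list sorted by ascending ethtool location. */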
static void add_rule_to_list(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *rule)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct list_head *head = &ethtool->rules;
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &ethtool->rules, list) {
		if (iter->flow_spec.location > rule->flow_spec.location)
			break;
		head = &iter->list;
	}
	ethtool->tot_num_rules++;
	list_add(&rule->list, head);
}

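/* Return true if no outer-header match criteria bit is set. */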
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

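/*
 * Resolve the destination TIR number: from the RSS context named by the rule
 * (FLOW_RSS), or directly from the ring given in ring_cookie.
 */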
static int flow_get_tirn(struct mlx5e_priv *priv,
			 struct mlx5e_ethtool_rule *eth_rule,
			 struct ethtool_rx_flow_spec *fs,
			 u32 rss_context, u32 *tirn)
{
	if (fs->flow_type & FLOW_RSS) {
		struct mlx5e_packet_merge_param pkt_merge_param;
		struct mlx5e_rss *rss;
		u32 flow_type;
		int err;
		int tt;

		rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context);
		if (!rss)
			return -ENOENT;

		flow_type = flow_type_mask(fs->flow_type);
		tt = flow_type_to_traffic_type(flow_type);
		if (tt < 0)
			return -EINVAL;

		pkt_merge_param = priv->channels.params.packet_merge;
		err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
		if (err)
			return err;
		eth_rule->rss = rss;
		mlx5e_rss_refcnt_inc(eth_rule->rss);
	} else {
		*tirn = mlx5e_rx_res_get_tirn_direct(priv->rx_res, fs->ring_cookie);
	}

	return 0;
}

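/* Build the mlx5 flow rule for an ethtool spec: drop, or forward to a TIR. */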
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5e_ethtool_rule *eth_rule,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value, fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num);
		if (err)
			goto free;

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
	spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}

static void del_ethtool_rule(struct mlx5e_flow_steering *fs,
			     struct mlx5e_ethtool_rule *eth_rule)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);

	if (eth_rule->rule)
		mlx5_del_flow_rules(eth_rule->rule);
	if (eth_rule->rss)
		mlx5e_rss_refcnt_dec(eth_rule->rss);
	list_del(&eth_rule->list);
	ethtool->tot_num_rules--;
	put_flow_table(eth_rule->eth_ft);
	kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
						    int location)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &ethtool->rules, list) {
		if (iter->flow_spec.location == location)
			return iter;
	}
	return NULL;
}

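/* Replace semantics: delete any rule already at this location, then allocate
 * a fresh one and link it into the sorted rule list.
 */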
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
						   int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	eth_rule = find_ethtool_rule(priv, location);
	if (eth_rule)
		del_ethtool_rule(priv->fs, eth_rule);

	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
	if (!eth_rule)
		return ERR_PTR(-ENOMEM);

	add_rule_to_list(priv, eth_rule);
	return eth_rule;
}

#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) ((field) == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

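/*
 * The validate_* helpers sanity-check the mask fields of each flow type and
 * return the number of match tuples the rule consumes, which selects the
 * flow table (and priority) the rule lands in.
 */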
static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	int ntuples = 0;

	if (!is_zero_ether_addr(eth_mask->h_dest))
		ntuples++;
	if (!is_zero_ether_addr(eth_mask->h_source))
		ntuples++;
	if (eth_mask->h_proto)
		ntuples++;
	return ntuples;
}

static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	int ntuples = 0;

	if (l4_mask->tos)
		return -EINVAL;

	if (l4_mask->ip4src)
		ntuples++;
	if (l4_mask->ip4dst)
		ntuples++;
	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;
	if (l3_mask->ip4src)
		ntuples++;
	if (l3_mask->ip4dst)
		ntuples++;
	if (l3_mask->proto)
		ntuples++;
	/* Flow is IPv4 */
	return ++ntuples;
}

static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tclass)
		return -EINVAL;
	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
		ntuples++;
	if (l3_mask->l4_proto)
		ntuples++;
	/* Flow is IPv6 */
	return ++ntuples;
}

static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	int ntuples = 0;

	if (l4_mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
		ntuples++;

	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	if (fs->m_ext.vlan_tci &&
	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
		return -EINVAL;

	return 1;
}

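/* Validate the whole spec and return its total tuple count (> 0) or an errno. */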
static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC)
		if (fs->ring_cookie >= priv->channels.params.num_channels)
			return -EINVAL;

	switch (flow_type_mask(fs->flow_type)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fs->flow_type & FLOW_EXT) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}

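/*
 * Handler for ETHTOOL_SRXCLSRLINS: validate the spec, grab the matching flow
 * table, and install the rule, replacing any rule already at the same
 * location.
 */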
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		return num_tuples < 0 ? num_tuples : -EINVAL;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;

	rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	del_ethtool_rule(priv->fs, eth_rule);

	return err;
}

static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;
	int err = 0;

	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	eth_rule = find_ethtool_rule(priv, location);
	if (!eth_rule) {
		err = -ENOENT;
		goto out;
	}

	del_ethtool_rule(priv->fs, eth_rule);
out:
	return err;
}

static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &ethtool->rules, list) {
		int index;

		if (eth_rule->flow_spec.location != location)
			continue;
		if (!info)
			return 0;
		info->fs = eth_rule->flow_spec;
		if (!eth_rule->rss)
			return 0;
		index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss);
		if (index < 0)
			return index;
		info->rss_context = index;
		return 0;
	}

	return -ENOENT;
}

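/*
 * Walk the possible locations and report the ones holding a rule; used by
 * ETHTOOL_GRXCLSRLALL. info->rule_cnt bounds the output array.
 */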
static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, NULL, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}

int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
{
	*ethtool = kvzalloc(sizeof(**ethtool), GFP_KERNEL);
	if (!*ethtool)
		return -ENOMEM;
	return 0;
}

void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool)
{
	kvfree(ethtool);
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
		del_ethtool_rule(fs, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);

	INIT_LIST_HEAD(&ethtool->rules);
}

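/* Map an ethtool flow type to the mlx5 traffic type used for RSS TIRs. */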
static int flow_type_to_traffic_type(u32 flow_type)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		return MLX5_TT_IPV4_TCP;
	case TCP_V6_FLOW:
		return MLX5_TT_IPV6_TCP;
	case UDP_V4_FLOW:
		return MLX5_TT_IPV4_UDP;
	case UDP_V6_FLOW:
		return MLX5_TT_IPV6_UDP;
	case AH_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_AH;
	case AH_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_AH;
	case ESP_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_ESP;
	case ESP_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_ESP;
	case IPV4_FLOW:
		return MLX5_TT_IPV4;
	case IPV6_FLOW:
		return MLX5_TT_IPV6;
	default:
		return -EINVAL;
	}
}

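/*
 * Handler for ETHTOOL_SRXFH: program which packet fields feed the RSS hash
 * for one traffic type. Only the 4-tuple fields of TCP/UDP over IPv4/IPv6
 * can be toggled.
 */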
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	u8 rx_hash_field = 0;
	u32 flow_type = 0;
	u32 rss_idx = 0;
	int err;
	int tt;

	if (nfc->flow_type & FLOW_RSS)
		rss_idx = nfc->rss_context;

	flow_type = flow_type_mask(nfc->flow_type);
	tt = flow_type_to_traffic_type(flow_type);
	if (tt < 0)
		return tt;

	/* RSS does not support anything other than hashing to queues
	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
	 * port.
	 */
	if (flow_type != TCP_V4_FLOW &&
	    flow_type != TCP_V6_FLOW &&
	    flow_type != UDP_V4_FLOW &&
	    flow_type != UDP_V6_FLOW)
		return -EOPNOTSUPP;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EOPNOTSUPP;

	if (nfc->data & RXH_IP_SRC)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
	if (nfc->data & RXH_IP_DST)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
	if (nfc->data & RXH_L4_B_0_1)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
	if (nfc->data & RXH_L4_B_2_3)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	mutex_lock(&priv->state_lock);
	err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, rss_idx, tt, rx_hash_field);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	int hash_field = 0;
	u32 flow_type = 0;
	u32 rss_idx = 0;
	int tt;

	if (nfc->flow_type & FLOW_RSS)
		rss_idx = nfc->rss_context;

	flow_type = flow_type_mask(nfc->flow_type);
	tt = flow_type_to_traffic_type(flow_type);
	if (tt < 0)
		return tt;

	hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, rss_idx, tt);
	if (hash_field < 0)
		return hash_field;

	nfc->data = 0;

	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
		nfc->data |= RXH_IP_SRC;
	if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
		nfc->data |= RXH_IP_DST;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
		nfc->data |= RXH_L4_B_0_1;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
		nfc->data |= RXH_L4_B_2_3;

	return 0;
}

int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
		break;
	case ETHTOOL_SRXFH:
		err = mlx5e_set_rss_hash_opt(priv, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = ethtool->tot_num_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = mlx5e_get_rss_hash_opt(priv, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}