/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"

struct mlx5e_ethtool_rule {
	struct list_head             list;
	struct ethtool_rx_flow_spec  flow_spec;
	struct mlx5_flow_handle      *rule;
	struct mlx5e_ethtool_table   *eth_ft;
};

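/* Drop one rule's reference on its ethtool flow table; the hardware table
 * is destroyed once the last rule using it has been removed.
 */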
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
	if (!--eth_ft->num_rules) {
		mlx5_destroy_flow_table(eth_ft->ft);
		eth_ft->ft = NULL;
	}
}

#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS  10
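/* Find (or lazily create) the ethtool flow table for this flow spec.
 * Tables are indexed by tuple count: specs with more match tuples get a
 * lower priority number, so the most specific tables are consulted first
 * during the steering lookup.
 */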
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
						  struct ethtool_rx_flow_spec *fs,
						  int num_tuples)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int max_tuples;
	int table_size;
	int prio;

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
		break;
	case ETHER_FLOW:
		max_tuples = ETHTOOL_NUM_L2_FTS;
		prio = max_tuples - num_tuples;
		eth_ft = &priv->fs.ethtool.l2_ft[prio];
		prio += MLX5E_ETHTOOL_L2_PRIO;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	eth_ft->num_rules++;
	if (eth_ft->ft)
		return eth_ft;

	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
			   MLX5E_ETHTOOL_NUM_ENTRIES);
	ft = mlx5_create_auto_grouped_flow_table(ns, prio,
						 table_size,
						 MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
	if (IS_ERR(ft))
		return (void *)ft;

	eth_ft->ft = ft;
	return eth_ft;
}

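/* Clear value bits the user left unmasked, byte by byte, so only the masked
 * portion of the spec is programmed into the device.
 */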
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val++)
		*val = *mask & *val;
}

#define MLX5E_FTE_SET(header_p, fld, v)  \
	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)

#define MLX5E_FTE_ADDR_OF(header_p, fld) \
	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)

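/* Write IPv4 source/destination match fields. validate_tcpudp4() and
 * validate_ip4() only accept all-ones address masks, so the criteria bytes
 * are set to 0xff directly.
 */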
static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
	if (ip4src_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_v, sizeof(ip4src_v));
		memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       0xff, sizeof(ip4src_m));
	}
	if (ip4dst_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_v, sizeof(ip4dst_v));
		memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       0xff, sizeof(ip4dst_m));
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}

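/* Write IPv6 source/destination match fields. Unlike IPv4, the user mask is
 * copied verbatim, since partial (prefix) masks are accepted for IPv6.
 */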
static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);

	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_m, ip6_sz);
	}
	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_m, ip6_sz);
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}

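/* Match on TCP source/destination ports and pin the IP protocol to TCP. */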
static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
	}
	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}

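/* Match on UDP source/destination ports and pin the IP protocol to UDP. */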
static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
	}

	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}

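/* The parse_*() helpers below translate one ethtool flow-spec layout into
 * mlx5 outer-header match criteria (headers_c) and values (headers_v).
 */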
static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.tcp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.udp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_val  = &fs->h_u.usr_ip4_spec;

	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
		l3_mask->ip4dst, l3_val->ip4dst);

	if (l3_mask->proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
	}
}

static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val  = &fs->h_u.usr_ip6_spec;

	set_ip6(headers_c, headers_v, l3_mask->ip6src,
		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);

	if (l3_mask->l4_proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
	}
}

static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.tcp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.udp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	struct ethhdr *eth_val = &fs->h_u.ether_spec;

	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}

static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}

static void
set_dmac(void *headers_c, void *headers_v,
	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}

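/* Fill the mlx5 match parameters from an ethtool flow spec, including the
 * FLOW_EXT VLAN-ID and FLOW_MAC_EXT destination-MAC extensions.
 */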
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}

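/* Link the rule into the ethtool rules list, kept sorted by ascending
 * location.
 */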
static void add_rule_to_list(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *rule)
{
	struct mlx5e_ethtool_rule *iter;
	struct list_head *head = &priv->fs.ethtool.rules;

	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
		if (iter->flow_spec.location > rule->flow_spec.location)
			break;
		head = &iter->list;
	}
	priv->fs.ethtool.tot_num_rules++;
	list_add(&rule->list, head);
}

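/* Return true if no outer-header criteria are set, i.e. the rule would
 * match every packet.
 */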
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

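/* Translate an ethtool flow spec into an mlx5 rule and install it in @ft.
 * The destination is either a drop action (RX_CLS_FLOW_DISC) or the TIR of
 * the RX ring selected by the ring cookie.
 */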
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs)
{
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_handle *rule;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value,
			     fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
	flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}

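/* Remove the hardware rule (if one was installed), unlink the bookkeeping
 * entry and drop its flow-table reference.
 */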
static void del_ethtool_rule(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *eth_rule)
{
	if (eth_rule->rule)
		mlx5_del_flow_rules(eth_rule->rule);
	list_del(&eth_rule->list);
	priv->fs.ethtool.tot_num_rules--;
	put_flow_table(eth_rule->eth_ft);
	kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
						    int location)
{
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
		if (iter->flow_spec.location == location)
			return iter;
	}
	return NULL;
}

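/* Allocate a rule entry for @location, first deleting any rule already
 * installed there (ethtool rule insertion has replace semantics).
 */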
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
						   int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	eth_rule = find_ethtool_rule(priv, location);
	if (eth_rule)
		del_ethtool_rule(priv, eth_rule);

	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
	if (!eth_rule)
		return ERR_PTR(-ENOMEM);

	add_rule_to_list(priv, eth_rule);
	return eth_rule;
}

#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) ((field) == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

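/* Each validate_*() helper returns the number of match tuples consumed by
 * the spec, or -EINVAL for masks the hardware path does not support.
 */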
static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	int ntuples = 0;

	if (!is_zero_ether_addr(eth_mask->h_dest))
		ntuples++;
	if (!is_zero_ether_addr(eth_mask->h_source))
		ntuples++;
	if (eth_mask->h_proto)
		ntuples++;
	return ntuples;
}

static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	int ntuples = 0;

	if (l4_mask->tos)
		return -EINVAL;

	if (l4_mask->ip4src) {
		if (!all_ones(l4_mask->ip4src))
			return -EINVAL;
		ntuples++;
	}
	if (l4_mask->ip4dst) {
		if (!all_ones(l4_mask->ip4dst))
			return -EINVAL;
		ntuples++;
	}
	if (l4_mask->psrc) {
		if (!all_ones(l4_mask->psrc))
			return -EINVAL;
		ntuples++;
	}
	if (l4_mask->pdst) {
		if (!all_ones(l4_mask->pdst))
			return -EINVAL;
		ntuples++;
	}
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;
	if (l3_mask->ip4src) {
		if (!all_ones(l3_mask->ip4src))
			return -EINVAL;
		ntuples++;
	}
	if (l3_mask->ip4dst) {
		if (!all_ones(l3_mask->ip4dst))
			return -EINVAL;
		ntuples++;
	}
	if (l3_mask->proto)
		ntuples++;
	/* Flow is IPv4 */
	return ++ntuples;
}

static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tclass)
		return -EINVAL;
	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
		ntuples++;
	if (l3_mask->l4_proto)
		ntuples++;
	/* Flow is IPv6 */
	return ++ntuples;
}

static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	int ntuples = 0;

	if (l4_mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
		ntuples++;

	if (l4_mask->psrc) {
		if (!all_ones(l4_mask->psrc))
			return -EINVAL;
		ntuples++;
	}
	if (l4_mask->pdst) {
		if (!all_ones(l4_mask->pdst))
			return -EINVAL;
		ntuples++;
	}
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	if (fs->m_ext.vlan_tci &&
	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
		return -EINVAL;

	return 1;
}

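/* Validate location, ring cookie and per-flow-type masks. Returns the total
 * tuple count on success (the caller rejects zero) or a negative errno.
 */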
static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	if (fs->ring_cookie >= priv->channels.params.num_channels &&
	    fs->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fs->flow_type & FLOW_EXT) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}

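/* Insert or replace the rule at fs->location: validate the spec, take a
 * reference on a flow table sized by tuple count, then program the rule.
 */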
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		/* Don't report success when validation found zero tuples. */
		return num_tuples < 0 ? num_tuples : -EINVAL;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;
	if (!eth_ft->ft) {
		err = -EINVAL;
		goto del_ethtool_rule;
	}
	rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	del_ethtool_rule(priv, eth_rule);

	return err;
}

static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;
	int err = 0;

	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	eth_rule = find_ethtool_rule(priv, location);
	if (!eth_rule) {
		err = -ENOENT;
		goto out;
	}

	del_ethtool_rule(priv, eth_rule);
out:
	return err;
}

static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
		if (eth_rule->flow_spec.location == location) {
			info->fs = eth_rule->flow_spec;
			return 0;
		}
	}

	return -ENOENT;
}

static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, info, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
{
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
		del_ethtool_rule(priv, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}

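/* ethtool entry point for inserting/deleting RX classification rules. As a
 * usage sketch (standard ethtool ntuple syntax, not part of this file), a
 * rule steering TCP dst-port 80 to RX ring 2 could be added with:
 *
 *   ethtool -N <ifname> flow-type tcp4 dst-port 80 action 2 loc 0
 *
 * and removed again with:
 *
 *   ethtool -N <ifname> delete 0
 */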
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int mlx5e_get_rxnfc(struct net_device *dev,
		    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = priv->channels.params.num_channels;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}