/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
#include <net/bareudp.h>
#include "en/tc_tun.h"
#include "en_tc.h"
#include "rep/tc.h"
#include "rep/neigh.h"

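/* Map a tunnel net_device to the matching mlx5e tunnel type handler
 * (VXLAN, GENEVE, GRE or MPLS-over-UDP). Returns NULL for device types
 * that are not offload-capable tunnels.
 */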
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return &vxlan_tunnel;
	else if (netif_is_geneve(tunnel_dev))
		return &geneve_tunnel;
	else if (netif_is_gretap(tunnel_dev) ||
		 netif_is_ip6gretap(tunnel_dev))
		return &gre_tunnel;
	else if (netif_is_bareudp(tunnel_dev))
		return &mplsoudp_tunnel;
	else
		return NULL;
}

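/* Resolve the device the encapsulated packet is routed through
 * (*route_dev) and the e-switch device it is forwarded to (*out_dev).
 * The uplink is used when the egress device is not on the same HW
 * e-switch, is behind a LAG master, or is a VLAN device; unsupported
 * combinations return -EOPNOTSUPP.
 */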
static int get_route_and_out_devs(struct mlx5e_priv *priv,
				  struct net_device *dev,
				  struct net_device **route_dev,
				  struct net_device **out_dev)
{
	struct net_device *uplink_dev, *uplink_upper, *real_dev;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool dst_is_lag_dev;

	real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
	uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);

	rcu_read_lock();
	uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
	/* mlx5_lag_is_sriov() is a blocking function which can't be called
	 * while holding the RCU read lock, so take a reference on the
	 * net_device and drop the lock before checking it.
	 */
	if (uplink_upper)
		dev_hold(uplink_upper);
	rcu_read_unlock();

	dst_is_lag_dev = (uplink_upper &&
			  netif_is_lag_master(uplink_upper) &&
			  real_dev == uplink_upper &&
			  mlx5_lag_is_sriov(priv->mdev));
	if (uplink_upper)
		dev_put(uplink_upper);

	/* if the egress device isn't on the same HW e-switch or
	 * it's a LAG device, use the uplink
	 */
	*route_dev = dev;
	if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
	    dst_is_lag_dev || is_vlan_dev(*route_dev))
		*out_dev = uplink_dev;
	else if (mlx5e_eswitch_rep(dev) &&
		 mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
		*out_dev = *route_dev;
	else
		return -EOPNOTSUPP;

	if (!(mlx5e_eswitch_rep(*out_dev) &&
	      mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
		return -EOPNOTSUPP;

	if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev)
		return -EOPNOTSUPP;

	return 0;
}

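/* Perform an IPv4 FIB lookup for the tunnel destination, resolve the
 * route/out devices and the next-hop neighbour, and default the TTL
 * from the route when the caller did not specify one. On success a
 * reference is held on both *route_dev and *out_n; release them with
 * mlx5e_route_lookup_ipv4_put().
 */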
static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
				       struct net_device *mirred_dev,
				       struct net_device **out_dev,
				       struct net_device **route_dev,
				       struct flowi4 *fl4,
				       struct neighbour **out_n,
				       u8 *out_ttl)
{
	struct neighbour *n;
	struct rtable *rt;

#if IS_ENABLED(CONFIG_INET)
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *uplink_dev;
	int ret;

	if (mlx5_lag_is_multipath(mdev)) {
		struct mlx5_eswitch *esw = mdev->priv.eswitch;

		uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
		fl4->flowi4_oif = uplink_dev->ifindex;
	}

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}
#else
	return -EOPNOTSUPP;
#endif

	ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
	if (ret < 0) {
		ip_rt_put(rt);
		return ret;
	}
	dev_hold(*route_dev);

	if (!(*out_ttl))
		*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n) {
		dev_put(*route_dev);
		return -ENOMEM;
	}

	*out_n = n;
	return 0;
}

static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
					struct neighbour *n)
{
	neigh_release(n);
	dev_put(route_dev);
}

static const char *mlx5e_netdev_kind(struct net_device *dev)
{
	if (dev->rtnl_link_ops)
		return dev->rtnl_link_ops->kind;
	else
		return "unknown";
}

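/* Delegate building the tunnel header that follows the IP header to the
 * tunnel type's generate_ip_tun_hdr() callback, which also sets the IP
 * protocol/next-header field for the encapsulation.
 */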
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
				      struct mlx5e_encap_entry *e)
{
	if (!e->tunnel) {
		pr_warn("mlx5: Cannot generate tunnel header for this tunnel\n");
		return -EOPNOTSUPP;
	}

	return e->tunnel->generate_ip_tun_hdr(buf, ip_proto, e);
}

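/* Write the outer Ethernet header (plus a VLAN tag when the route device
 * is a VLAN device) into buf and return a pointer to where the IP header
 * should start.
 */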
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
			     struct mlx5e_encap_entry *e,
			     u16 proto)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	char *ip;

	ether_addr_copy(eth->h_dest, e->h_dest);
	ether_addr_copy(eth->h_source, dev->dev_addr);
	if (is_vlan_dev(dev)) {
		struct vlan_hdr *vlan = (struct vlan_hdr *)
					((char *)eth + ETH_HLEN);
		ip = (char *)vlan + VLAN_HLEN;
		eth->h_proto = vlan_dev_vlan_proto(dev);
		vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
		vlan->h_vlan_encapsulated_proto = htons(proto);
	} else {
		eth->h_proto = htons(proto);
		ip = (char *)eth + ETH_HLEN;
	}

	return ip;
}

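/* Build the IPv4 encapsulation header for an encap entry: resolve the
 * route and neighbour, allocate and fill the Ethernet, IPv4 and tunnel
 * headers, attach the entry to the neigh update machinery, and register
 * the packet reformat with the FW once the neighbour is valid. If the
 * neighbour is not yet valid, completion is left to the neigh update
 * event.
 */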
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
				    struct net_device *mirred_dev,
				    struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	const struct ip_tunnel_key *tun_key = &e->tun_info->key;
	struct net_device *out_dev, *route_dev;
	struct flowi4 fl4 = {};
	struct neighbour *n;
	int ipv4_encap_size;
	char *encap_header;
	u8 nud_state, ttl;
	struct iphdr *ip;
	int err;

	/* add the IP fields */
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;
	ttl = tun_key->ttl;

	err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
					  &fl4, &n, &ttl);
	if (err)
		return err;

	ipv4_encap_size =
		(is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
		sizeof(struct iphdr) +
		e->tunnel->calc_hlen(e);

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		err = -EOPNOTSUPP;
		goto release_neigh;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header) {
		err = -ENOMEM;
		goto release_neigh;
	}

	/* used by mlx5e_detach_encap to look up the corresponding entry
	 * in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;
	e->route_dev_ifindex = route_dev->ifindex;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state, so that if we get a notification about the
	 * neigh changing its validity state we will find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	/* add ethernet header */
	ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
					     ETH_P_IP);

	/* add ip header */
	ip->tos = tun_key->tos;
	ip->version = 0x4;
	ip->ihl = 0x5;
	ip->ttl = ttl;
	ip->daddr = fl4.daddr;
	ip->saddr = fl4.saddr;

	/* add tunneling protocol header */
	err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
					 &ip->protocol, e);
	if (err)
		goto destroy_neigh_entry;

	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		/* the encap entry will be made valid on neigh update event
		 * and not used before that.
		 */
		goto release_neigh;
	}
	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     e->reformat_type,
						     ipv4_encap_size, encap_header,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		err = PTR_ERR(e->pkt_reformat);
		goto destroy_neigh_entry;
	}

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	mlx5e_route_lookup_ipv4_put(route_dev, n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
release_neigh:
	mlx5e_route_lookup_ipv4_put(route_dev, n);
	return err;
}

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
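/* IPv6 counterpart of mlx5e_route_lookup_ipv4_get(): look up the route
 * and next-hop neighbour for the tunnel destination, resolve the
 * route/out devices, and default the hop limit from the route when not
 * given. References held on *route_dev and *out_n are released with
 * mlx5e_route_lookup_ipv6_put().
 */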
static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
				       struct net_device *mirred_dev,
				       struct net_device **out_dev,
				       struct net_device **route_dev,
				       struct flowi6 *fl6,
				       struct neighbour **out_n,
				       u8 *out_ttl)
{
	struct dst_entry *dst;
	struct neighbour *n;

	int ret;

	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
					      NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	if (!(*out_ttl))
		*out_ttl = ip6_dst_hoplimit(dst);

	ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
	if (ret < 0) {
		dst_release(dst);
		return ret;
	}

	dev_hold(*route_dev);
	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n) {
		dev_put(*route_dev);
		return -ENOMEM;
	}

	*out_n = n;
	return 0;
}

static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
					struct neighbour *n)
{
	neigh_release(n);
	dev_put(route_dev);
}

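/* IPv6 counterpart of mlx5e_tc_tun_create_header_ipv4(): build the outer
 * Ethernet, IPv6 and tunnel headers for the encap entry and register the
 * packet reformat with the FW once the neighbour is valid.
 */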
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
				    struct net_device *mirred_dev,
				    struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	const struct ip_tunnel_key *tun_key = &e->tun_info->key;
	struct net_device *out_dev, *route_dev;
	struct flowi6 fl6 = {};
	struct ipv6hdr *ip6h;
	struct neighbour *n = NULL;
	int ipv6_encap_size;
	char *encap_header;
	u8 nud_state, ttl;
	int err;

	ttl = tun_key->ttl;

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
					  &fl6, &n, &ttl);
	if (err)
		return err;

	ipv6_encap_size =
		(is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
		sizeof(struct ipv6hdr) +
		e->tunnel->calc_hlen(e);

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		err = -EOPNOTSUPP;
		goto release_neigh;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header) {
		err = -ENOMEM;
		goto release_neigh;
	}

	/* used by mlx5e_detach_encap to look up the corresponding entry
	 * in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;
	e->route_dev_ifindex = route_dev->ifindex;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state, so that if we get a notification about the
	 * neigh changing its validity state we will find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	/* add ethernet header */
	ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
						 ETH_P_IPV6);

	/* add ip header */
	ip6_flow_hdr(ip6h, tun_key->tos, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->hop_limit = ttl;
	ip6h->daddr = fl6.daddr;
	ip6h->saddr = fl6.saddr;

	/* add tunneling protocol header */
	err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
					 &ip6h->nexthdr, e);
	if (err)
		goto destroy_neigh_entry;

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		/* the encap entry will be made valid on neigh update event
		 * and not used before that.
		 */
		goto release_neigh;
	}

	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     e->reformat_type,
						     ipv6_encap_size, encap_header,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		err = PTR_ERR(e->pkt_reformat);
		goto destroy_neigh_entry;
	}

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	mlx5e_route_lookup_ipv6_put(route_dev, n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
release_neigh:
	mlx5e_route_lookup_ipv6_put(route_dev, n);
	return err;
}
#endif

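/* Return true if the given net_device is a tunnel type we know how to
 * offload and the tunnel implementation reports that this mlx5e device
 * can offload it.
 */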
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
				    struct net_device *netdev)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(netdev);

	if (tunnel && tunnel->can_offload(priv))
		return true;
	else
		return false;
}

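/* Initialize the encap attributes of an encap entry (reformat type etc.)
 * according to the tunnel type of tunnel_dev; fails with -EOPNOTSUPP for
 * unsupported tunnel devices.
 */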
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
				 struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e,
				 struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(tunnel_dev);

	if (!tunnel) {
		e->reformat_type = -1;
		return -EOPNOTSUPP;
	}

	return tunnel->init_encap_attr(tunnel_dev, priv, e, extack);
}

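/* Translate the tunnel (decap) portion of a TC flower match into the
 * mlx5 flow spec: tunnel-specific UDP ports and keys, outer IP
 * addresses, TOS/TTL, plus the implicit DMAC and "no fragments" matches
 * required for decap offload.
 */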
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
		       struct mlx5e_priv *priv,
		       struct mlx5_flow_spec *spec,
		       struct flow_cls_offload *f,
		       u8 *match_level)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	struct netlink_ext_ack *extack = f->common.extack;
	int err = 0;

	if (!tunnel) {
		netdev_warn(priv->netdev,
			    "decapsulation offload is not supported for %s net device\n",
			    mlx5e_netdev_kind(filter_dev));
		err = -EOPNOTSUPP;
		goto out;
	}

	*match_level = tunnel->match_level;

	if (tunnel->parse_udp_ports) {
		err = tunnel->parse_udp_ports(priv, spec, f,
					      headers_c, headers_v);
		if (err)
			goto out;
	}

	if (tunnel->parse_tunnel) {
		err = tunnel->parse_tunnel(priv, spec, f,
					   headers_c, headers_v);
		if (err)
			goto out;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_basic key_basic = {};
		struct flow_dissector_key_basic mask_basic = {
			.n_proto = htons(0xFFFF),
		};
		struct flow_match_basic match_basic = {
			.key = &key_basic, .mask = &mask_basic,
		};
		struct flow_match_control match;
		u16 addr_type;

		flow_rule_match_enc_control(rule, &match);
		addr_type = match.key->addr_type;

		/* For tunnels, addr_type uses the same key IDs as for non-tunnel matches */
		if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			struct flow_match_ipv4_addrs match;

			flow_rule_match_enc_ipv4_addrs(rule, &match);
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4,
				 ntohl(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4,
				 ntohl(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
				 ntohl(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
				 ntohl(match.key->dst));

			key_basic.n_proto = htons(ETH_P_IP);
			mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true,
					       headers_c, headers_v);
		} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
			struct flow_match_ipv6_addrs match;

			flow_rule_match_enc_ipv6_addrs(rule, &match);
			memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					    src_ipv4_src_ipv6.ipv6_layout.ipv6),
			       &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
								   ipv6));
			memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					    src_ipv4_src_ipv6.ipv6_layout.ipv6),
			       &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
								  ipv6));

			memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			       &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
								   ipv6));
			memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			       &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
								  ipv6));

			key_basic.n_proto = htons(ETH_P_IPV6);
			mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true,
					       headers_c, headers_v);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB
			(priv->mdev,
			 ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;

out:
	return err;
}

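/* Common UDP tunnel port parsing: require a fully-masked enc_dst_port
 * match and translate the UDP protocol and port matches into the flow
 * spec. Matching the UDP source port is allowed even though the outer
 * source port is generated by the HW on encap.
 */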
int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
				 struct mlx5_flow_spec *spec,
				 struct flow_cls_offload *f,
				 void *headers_c,
				 void *headers_v)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_ports enc_ports;

	/* Full udp dst port must be given */

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "UDP tunnel decap filter must include enc_dst_port condition");
		netdev_warn(priv->netdev,
			    "UDP tunnel decap filter must include enc_dst_port condition\n");
		return -EOPNOTSUPP;
	}

	flow_rule_match_enc_ports(rule, &enc_ports);

	if (memchr_inv(&enc_ports.mask->dst, 0xff,
		       sizeof(enc_ports.mask->dst))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "UDP tunnel decap filter must match enc_dst_port fully");
		netdev_warn(priv->netdev,
			    "UDP tunnel decap filter must match enc_dst_port fully\n");
		return -EOPNOTSUPP;
	}

	/* match on UDP protocol and dst port number */

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
		 ntohs(enc_ports.mask->dst));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
		 ntohs(enc_ports.key->dst));

	/* UDP src port on outer header is generated by HW,
	 * so it is probably a bad idea to request matching it.
	 * Nonetheless, it is allowed.
	 */

	MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
		 ntohs(enc_ports.mask->src));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
		 ntohs(enc_ports.key->src));

	return 0;
}