1 /*
2 * Linux NET3: GRE over IP protocol decoder.
3 *
4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_vlan.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ip_tunnels.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50 #include <net/dst_metadata.h>
51 #include <net/erspan.h>
52
53 /*
54 Problems & solutions
55 --------------------
56
57 1. The most important issue is detecting local dead loops.
58 They would cause complete host lockup in transmit, which
59 would be "resolved" by stack overflow or, if queueing is enabled,
60 by infinite looping in net_bh.
61
62 We cannot track such dead loops during route installation;
63 it is an infeasible task. The most general solution would be
64 to keep an skb->encapsulation counter (a sort of local ttl)
65 and silently drop the packet when it expires. It is a good
66 solution, but it supposes maintaining a new variable in ALL
67 skbs, even if no tunneling is used.
68
69 Current solution: xmit_recursion breaks dead loops. This is a percpu
70 counter, since once we enter the first ndo_xmit(), cpu migration is
71 forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
72
73 2. Networking dead loops would not kill routers, but would really
74 kill the network. The IP hop limit plays the role of "t->recursion" here,
75 if we copy it from the packet being encapsulated to the upper header.
76 It is a very good solution, but it introduces two problems:
77
78 - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
79 do not work over tunnels.
80 - traceroute does not work. I planned to relay ICMP from the tunnel,
81 so that this problem would be solved and traceroute output
82 would be even more informative. This idea appeared to be wrong:
83 only Linux complies with rfc1812 now (yes, guys, Linux is the only
84 true router now :-)); all routers (at least in my neighbourhood)
85 return only 8 bytes of payload. That is the end of it.
86
87 Hence, if we want OSPF to work or traceroute to say something reasonable,
88 we should search for another solution.
89
90 One of them is to parse the packet, trying to detect an inner encapsulation
91 made by our node. It is difficult or even impossible, especially
92 taking fragmentation into account. In short, ttl is not a solution at all.
93
94 Current solution: The solution was UNEXPECTEDLY SIMPLE.
95 We force the DF flag on tunnels with a preconfigured hop limit,
96 that is ALL. :-) Well, it does not remove the problem completely,
97 but exponential growth of network traffic is changed to linear
98 (branches that exceed the pmtu are pruned) and the tunnel mtu
99 rapidly degrades to a value <68, where looping stops.
100 Yes, it is not good if there is a router in the loop
101 which does not force DF, even when encapsulated packets have DF set.
102 But it is not our problem! Nobody could accuse us; we did
103 all that we could. Even if it is your gated that injected
104 the fatal route to the network, even if it were you who configured
105 the fatal static route: you are innocent. :-)
106
107 Alexey Kuznetsov.
108 */
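/* A minimal sketch of the xmit_recursion guard described above. It is
 * illustrative only: the real counter lives in the core transmit path
 * (net/core/dev.c), and the exact names and limit below are assumptions,
 * not part of this file's API.
 *
 *	static DEFINE_PER_CPU(int, xmit_recursion);
 *	#define RECURSION_LIMIT 10
 *
 *	if (unlikely(__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)) {
 *		kfree_skb(skb);		// break the local dead loop
 *		return NETDEV_TX_OK;
 *	}
 *	__this_cpu_inc(xmit_recursion);
 *	rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
 *	__this_cpu_dec(xmit_recursion);
 */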
109
110 static bool log_ecn_error = true;
111 module_param(log_ecn_error, bool, 0644);
112 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
113
114 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
115 static int ipgre_tunnel_init(struct net_device *dev);
116 static void erspan_build_header(struct sk_buff *skb,
117 u32 id, u32 index,
118 bool truncate, bool is_ipv4);
119
120 static unsigned int ipgre_net_id __read_mostly;
121 static unsigned int gre_tap_net_id __read_mostly;
122 static unsigned int erspan_net_id __read_mostly;
123
124 static void ipgre_err(struct sk_buff *skb, u32 info,
125 const struct tnl_ptk_info *tpi)
126 {
127
128 /* All the routers (except for Linux) return only
129 8 bytes of packet payload. It means that precise relaying of
130 ICMP in the real Internet is absolutely infeasible.
131
132 Moreover, Cisco "wise men" put the GRE key in the third word
133 of the GRE header. That makes it impossible to maintain even
134 soft state for keyed GRE tunnels with checksum enabled. Tell
135 them "thank you".
136
137 Well, I wonder: rfc1812 was written by a Cisco employee.
138 Why the hell do these idiots break standards established
139 by themselves???
140 */
141 struct net *net = dev_net(skb->dev);
142 struct ip_tunnel_net *itn;
143 const struct iphdr *iph;
144 const int type = icmp_hdr(skb)->type;
145 const int code = icmp_hdr(skb)->code;
146 unsigned int data_len = 0;
147 struct ip_tunnel *t;
148
149 switch (type) {
150 default:
151 case ICMP_PARAMETERPROB:
152 return;
153
154 case ICMP_DEST_UNREACH:
155 switch (code) {
156 case ICMP_SR_FAILED:
157 case ICMP_PORT_UNREACH:
158 /* Impossible event. */
159 return;
160 default:
161 /* All others are translated to HOST_UNREACH.
162 rfc2003 contains "deep thoughts" about NET_UNREACH,
163 I believe they are just ether pollution. --ANK
164 */
165 break;
166 }
167 break;
168
169 case ICMP_TIME_EXCEEDED:
170 if (code != ICMP_EXC_TTL)
171 return;
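/* The RFC 4884 length field read just below counts 32-bit words,
 * hence the multiply by 4 to convert to bytes.
 */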
172 data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
173 break;
174
175 case ICMP_REDIRECT:
176 break;
177 }
178
179 if (tpi->proto == htons(ETH_P_TEB))
180 itn = net_generic(net, gre_tap_net_id);
181 else if (tpi->proto == htons(ETH_P_ERSPAN) ||
182 tpi->proto == htons(ETH_P_ERSPAN2))
183 itn = net_generic(net, erspan_net_id);
184 else
185 itn = net_generic(net, ipgre_net_id);
186
187 iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
188 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
189 iph->daddr, iph->saddr, tpi->key);
190
191 if (!t)
192 return;
193
194 #if IS_ENABLED(CONFIG_IPV6)
195 if (tpi->proto == htons(ETH_P_IPV6) &&
196 !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
197 type, data_len))
198 return;
199 #endif
200
201 if (t->parms.iph.daddr == 0 ||
202 ipv4_is_multicast(t->parms.iph.daddr))
203 return;
204
205 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
206 return;
207
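/* Throttling bookkeeping: keep counting errors that arrive within
 * IPTUNNEL_ERR_TIMEO of each other, otherwise restart the count.
 */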
208 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
209 t->err_count++;
210 else
211 t->err_count = 1;
212 t->err_time = jiffies;
213 }
214
215 static void gre_err(struct sk_buff *skb, u32 info)
216 {
217 /* See the comment in ipgre_err() above: routers return only 8 bytes
218 * of ICMP payload and Cisco puts the GRE key in the third word of
219 * the header, so precise ICMP relaying is infeasible.
220 */
230
231 const struct iphdr *iph = (struct iphdr *)skb->data;
232 const int type = icmp_hdr(skb)->type;
233 const int code = icmp_hdr(skb)->code;
234 struct tnl_ptk_info tpi;
235 bool csum_err = false;
236
237 if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
238 iph->ihl * 4) < 0) {
239 if (!csum_err) /* ignore csum errors. */
240 return;
241 }
242
243 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
244 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
245 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
246 return;
247 }
248 if (type == ICMP_REDIRECT) {
249 ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
250 IPPROTO_GRE, 0);
251 return;
252 }
253
254 ipgre_err(skb, info, &tpi);
255 }
256
257 static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
258 int gre_hdr_len)
259 {
260 struct net *net = dev_net(skb->dev);
261 struct metadata_dst *tun_dst = NULL;
262 struct erspan_base_hdr *ershdr;
263 struct erspan_metadata *pkt_md;
264 struct ip_tunnel_net *itn;
265 struct ip_tunnel *tunnel;
266 const struct iphdr *iph;
267 struct erspan_md2 *md2;
268 int ver;
269 int len;
270
271 itn = net_generic(net, erspan_net_id);
272 len = gre_hdr_len + sizeof(*ershdr);
273
274 /* Check the base header length */
275 if (unlikely(!pskb_may_pull(skb, len)))
276 return PACKET_REJECT;
277
278 iph = ip_hdr(skb);
279 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
280 ver = ershdr->ver;
281
282 /* The original GRE header does not have a key field,
283 * so use the ERSPAN 10-bit session ID as the key.
284 */
285 tpi->key = cpu_to_be32(get_session_id(ershdr));
286 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
287 tpi->flags | TUNNEL_KEY,
288 iph->saddr, iph->daddr, tpi->key);
289
290 if (tunnel) {
291 len = gre_hdr_len + erspan_hdr_len(ver);
292 if (unlikely(!pskb_may_pull(skb, len)))
293 return PACKET_REJECT;
294
295 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
296 pkt_md = (struct erspan_metadata *)(ershdr + 1);
297
298 if (__iptunnel_pull_header(skb,
299 len,
300 htons(ETH_P_TEB),
301 false, false) < 0)
302 goto drop;
303
304 if (tunnel->collect_md) {
305 struct ip_tunnel_info *info;
306 struct erspan_metadata *md;
307 __be64 tun_id;
308 __be16 flags;
309
310 tpi->flags |= TUNNEL_KEY;
311 flags = tpi->flags;
312 tun_id = key32_to_tunnel_id(tpi->key);
313
314 tun_dst = ip_tun_rx_dst(skb, flags,
315 tun_id, sizeof(*md));
316 if (!tun_dst)
317 return PACKET_REJECT;
318
319 md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
320 md->version = ver;
321 md2 = &md->u.md2;
322 memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
323 ERSPAN_V2_MDSIZE);
324
325 info = &tun_dst->u.tun_info;
326 info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
327 info->options_len = sizeof(*md);
328 }
329
330 skb_reset_mac_header(skb);
331 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
332 return PACKET_RCVD;
333 }
334 return PACKET_REJECT;
335
336 drop:
337 kfree_skb(skb);
338 return PACKET_RCVD;
339 }
340
341 static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
342 struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
343 {
344 struct metadata_dst *tun_dst = NULL;
345 const struct iphdr *iph;
346 struct ip_tunnel *tunnel;
347
348 iph = ip_hdr(skb);
349 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
350 iph->saddr, iph->daddr, tpi->key);
351
352 if (tunnel) {
353 if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
354 raw_proto, false) < 0)
355 goto drop;
356
357 if (tunnel->dev->type != ARPHRD_NONE)
358 skb_pop_mac_header(skb);
359 else
360 skb_reset_mac_header(skb);
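/* collect_md means the tunnel is flow based: per-packet metadata is
 * attached to the skb for an external control plane (e.g. OVS)
 * instead of being taken from the device configuration.
 */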
361 if (tunnel->collect_md) {
362 __be16 flags;
363 __be64 tun_id;
364
365 flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
366 tun_id = key32_to_tunnel_id(tpi->key);
367 tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
368 if (!tun_dst)
369 return PACKET_REJECT;
370 }
371
372 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
373 return PACKET_RCVD;
374 }
375 return PACKET_NEXT;
376
377 drop:
378 kfree_skb(skb);
379 return PACKET_RCVD;
380 }
381
382 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
383 int hdr_len)
384 {
385 struct net *net = dev_net(skb->dev);
386 struct ip_tunnel_net *itn;
387 int res;
388
389 if (tpi->proto == htons(ETH_P_TEB))
390 itn = net_generic(net, gre_tap_net_id);
391 else
392 itn = net_generic(net, ipgre_net_id);
393
394 res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
395 if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
396 /* ipgre tunnels in collect metadata mode should also
397 * receive ETH_P_TEB traffic.
398 */
399 itn = net_generic(net, ipgre_net_id);
400 res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
401 }
402 return res;
403 }
404
405 static int gre_rcv(struct sk_buff *skb)
406 {
407 struct tnl_ptk_info tpi;
408 bool csum_err = false;
409 int hdr_len;
410
411 #ifdef CONFIG_NET_IPGRE_BROADCAST
412 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
413 /* Looped back packet, drop it! */
414 if (rt_is_output_route(skb_rtable(skb)))
415 goto drop;
416 }
417 #endif
418
419 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
420 if (hdr_len < 0)
421 goto drop;
422
423 if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
424 tpi.proto == htons(ETH_P_ERSPAN2))) {
425 if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
426 return 0;
427 goto out;
428 }
429
430 if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
431 return 0;
432
433 out:
434 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
435 drop:
436 kfree_skb(skb);
437 return 0;
438 }
439
440 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
441 const struct iphdr *tnl_params,
442 __be16 proto)
443 {
444 struct ip_tunnel *tunnel = netdev_priv(dev);
445
446 if (tunnel->parms.o_flags & TUNNEL_SEQ)
447 tunnel->o_seqno++;
448
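/* Per RFC 2890 the optional fields follow the 4-byte base header in a
 * fixed order: checksum (+reserved), key, then sequence number;
 * gre_build_header() lays them out accordingly.
 */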
449 /* Push GRE header. */
450 gre_build_header(skb, tunnel->tun_hlen,
451 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
452 htonl(tunnel->o_seqno));
453
454 ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
455 }
456
457 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
458 {
459 return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
460 }
461
462 static struct rtable *gre_get_rt(struct sk_buff *skb,
463 struct net_device *dev,
464 struct flowi4 *fl,
465 const struct ip_tunnel_key *key)
466 {
467 struct net *net = dev_net(dev);
468
469 memset(fl, 0, sizeof(*fl));
470 fl->daddr = key->u.ipv4.dst;
471 fl->saddr = key->u.ipv4.src;
472 fl->flowi4_tos = RT_TOS(key->tos);
473 fl->flowi4_mark = skb->mark;
474 fl->flowi4_proto = IPPROTO_GRE;
475
476 return ip_route_output_key(net, fl);
477 }
478
479 static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
480 struct net_device *dev,
481 struct flowi4 *fl,
482 int tunnel_hlen)
483 {
484 struct ip_tunnel_info *tun_info;
485 const struct ip_tunnel_key *key;
486 struct rtable *rt = NULL;
487 int min_headroom;
488 bool use_cache;
489 int err;
490
491 tun_info = skb_tunnel_info(skb);
492 key = &tun_info->key;
493 use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
494
495 if (use_cache)
496 rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
497 if (!rt) {
498 rt = gre_get_rt(skb, dev, fl, key);
499 if (IS_ERR(rt))
500 goto err_free_skb;
501 if (use_cache)
502 dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
503 fl->saddr);
504 }
505
506 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
507 + tunnel_hlen + sizeof(struct iphdr);
508 if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
509 int head_delta = SKB_DATA_ALIGN(min_headroom -
510 skb_headroom(skb) +
511 16);
512 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
513 0, GFP_ATOMIC);
514 if (unlikely(err))
515 goto err_free_rt;
516 }
517 return rt;
518
519 err_free_rt:
520 ip_rt_put(rt);
521 err_free_skb:
522 kfree_skb(skb);
523 dev->stats.tx_dropped++;
524 return NULL;
525 }
526
527 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
528 __be16 proto)
529 {
530 struct ip_tunnel *tunnel = netdev_priv(dev);
531 struct ip_tunnel_info *tun_info;
532 const struct ip_tunnel_key *key;
533 struct rtable *rt = NULL;
534 struct flowi4 fl;
535 int tunnel_hlen;
536 __be16 df, flags;
537
538 tun_info = skb_tunnel_info(skb);
539 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
540 ip_tunnel_info_af(tun_info) != AF_INET))
541 goto err_free_skb;
542
543 key = &tun_info->key;
544 tunnel_hlen = gre_calc_hlen(key->tun_flags);
545
546 rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
547 if (!rt)
548 return;
549
550 /* Push Tunnel header. */
551 if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
552 goto err_free_rt;
553
554 flags = tun_info->key.tun_flags &
555 (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
556 gre_build_header(skb, tunnel_hlen, flags, proto,
557 tunnel_id_to_key32(tun_info->key.tun_id),
558 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
559
560 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
561
562 iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
563 key->tos, key->ttl, df, false);
564 return;
565
566 err_free_rt:
567 ip_rt_put(rt);
568 err_free_skb:
569 kfree_skb(skb);
570 dev->stats.tx_dropped++;
571 }
572
573 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
574 __be16 proto)
575 {
576 struct ip_tunnel *tunnel = netdev_priv(dev);
577 struct ip_tunnel_info *tun_info;
578 const struct ip_tunnel_key *key;
579 struct erspan_metadata *md;
580 struct rtable *rt = NULL;
581 bool truncate = false;
582 struct flowi4 fl;
583 int tunnel_hlen;
584 int version;
585 __be16 df;
586 int nhoff;
587 int thoff;
588
589 tun_info = skb_tunnel_info(skb);
590 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
591 ip_tunnel_info_af(tun_info) != AF_INET))
592 goto err_free_skb;
593
594 key = &tun_info->key;
595 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
596 goto err_free_rt;
597 md = ip_tunnel_info_opts(tun_info);
598 if (!md)
599 goto err_free_rt;
600
601 /* ERSPAN has a fixed 8-byte GRE header: 4-byte base plus 4-byte sequence */
602 version = md->version;
603 tunnel_hlen = 8 + erspan_hdr_len(version);
604
605 rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
606 if (!rt)
607 return;
608
609 if (gre_handle_offloads(skb, false))
610 goto err_free_rt;
611
612 if (skb->len > dev->mtu + dev->hard_header_len) {
613 pskb_trim(skb, dev->mtu + dev->hard_header_len);
614 truncate = true;
615 }
616
617 nhoff = skb_network_header(skb) - skb_mac_header(skb);
618 if (skb->protocol == htons(ETH_P_IP) &&
619 (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
620 truncate = true;
621
622 thoff = skb_transport_header(skb) - skb_mac_header(skb);
623 if (skb->protocol == htons(ETH_P_IPV6) &&
624 (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
625 truncate = true;
626
627 if (version == 1) {
628 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
629 ntohl(md->u.index), truncate, true);
630 } else if (version == 2) {
631 erspan_build_header_v2(skb,
632 ntohl(tunnel_id_to_key32(key->tun_id)),
633 md->u.md2.dir,
634 get_hwid(&md->u.md2),
635 truncate, true);
636 } else {
637 goto err_free_rt;
638 }
639
640 gre_build_header(skb, 8, TUNNEL_SEQ,
641 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
642
643 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
644
645 iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
646 key->tos, key->ttl, df, false);
647 return;
648
649 err_free_rt:
650 ip_rt_put(rt);
651 err_free_skb:
652 kfree_skb(skb);
653 dev->stats.tx_dropped++;
654 }
655
656 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
657 {
658 struct ip_tunnel_info *info = skb_tunnel_info(skb);
659 struct rtable *rt;
660 struct flowi4 fl4;
661
662 if (ip_tunnel_info_af(info) != AF_INET)
663 return -EINVAL;
664
665 rt = gre_get_rt(skb, dev, &fl4, &info->key);
666 if (IS_ERR(rt))
667 return PTR_ERR(rt);
668
669 ip_rt_put(rt);
670 info->key.u.ipv4.src = fl4.saddr;
671 return 0;
672 }
673
674 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
675 struct net_device *dev)
676 {
677 struct ip_tunnel *tunnel = netdev_priv(dev);
678 const struct iphdr *tnl_params;
679
680 if (tunnel->collect_md) {
681 gre_fb_xmit(skb, dev, skb->protocol);
682 return NETDEV_TX_OK;
683 }
684
685 if (dev->header_ops) {
686 /* Need space for new headers */
687 if (skb_cow_head(skb, dev->needed_headroom -
688 (tunnel->hlen + sizeof(struct iphdr))))
689 goto free_skb;
690
691 tnl_params = (const struct iphdr *)skb->data;
692
693 /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
694 * to gre header.
695 */
696 skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
697 skb_reset_mac_header(skb);
698 } else {
699 if (skb_cow_head(skb, dev->needed_headroom))
700 goto free_skb;
701
702 tnl_params = &tunnel->parms.iph;
703 }
704
705 if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
706 goto free_skb;
707
708 __gre_xmit(skb, dev, tnl_params, skb->protocol);
709 return NETDEV_TX_OK;
710
711 free_skb:
712 kfree_skb(skb);
713 dev->stats.tx_dropped++;
714 return NETDEV_TX_OK;
715 }
716
717 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
718 struct net_device *dev)
719 {
720 struct ip_tunnel *tunnel = netdev_priv(dev);
721 bool truncate = false;
722
723 if (tunnel->collect_md) {
724 erspan_fb_xmit(skb, dev, skb->protocol);
725 return NETDEV_TX_OK;
726 }
727
728 if (gre_handle_offloads(skb, false))
729 goto free_skb;
730
731 if (skb_cow_head(skb, dev->needed_headroom))
732 goto free_skb;
733
734 if (skb->len > dev->mtu + dev->hard_header_len) {
735 pskb_trim(skb, dev->mtu + dev->hard_header_len);
736 truncate = true;
737 }
738
739 /* Push ERSPAN header */
740 if (tunnel->erspan_ver == 1)
741 erspan_build_header(skb, ntohl(tunnel->parms.o_key),
742 tunnel->index,
743 truncate, true);
744 else if (tunnel->erspan_ver == 2)
745 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
746 tunnel->dir, tunnel->hwid,
747 truncate, true);
748 else
749 goto free_skb;
750
751 tunnel->parms.o_flags &= ~TUNNEL_KEY;
752 __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
753 return NETDEV_TX_OK;
754
755 free_skb:
756 kfree_skb(skb);
757 dev->stats.tx_dropped++;
758 return NETDEV_TX_OK;
759 }
760
761 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
762 struct net_device *dev)
763 {
764 struct ip_tunnel *tunnel = netdev_priv(dev);
765
766 if (tunnel->collect_md) {
767 gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
768 return NETDEV_TX_OK;
769 }
770
771 if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
772 goto free_skb;
773
774 if (skb_cow_head(skb, dev->needed_headroom))
775 goto free_skb;
776
777 __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
778 return NETDEV_TX_OK;
779
780 free_skb:
781 kfree_skb(skb);
782 dev->stats.tx_dropped++;
783 return NETDEV_TX_OK;
784 }
785
786 static void ipgre_link_update(struct net_device *dev, bool set_mtu)
787 {
788 struct ip_tunnel *tunnel = netdev_priv(dev);
789 int len;
790
791 len = tunnel->tun_hlen;
792 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
793 len = tunnel->tun_hlen - len;
794 tunnel->hlen = tunnel->hlen + len;
795
796 dev->needed_headroom = dev->needed_headroom + len;
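/* 68 bytes is the minimum IPv4 MTU required by RFC 791; never let the
 * tunnel MTU degrade below it.
 */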
797 if (set_mtu)
798 dev->mtu = max_t(int, dev->mtu - len, 68);
799
800 if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
801 if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
802 tunnel->encap.type == TUNNEL_ENCAP_NONE) {
803 dev->features |= NETIF_F_GSO_SOFTWARE;
804 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
805 } else {
806 dev->features &= ~NETIF_F_GSO_SOFTWARE;
807 dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
808 }
809 dev->features |= NETIF_F_LLTX;
810 } else {
811 dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
812 dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
813 }
814 }
815
816 static int ipgre_tunnel_ioctl(struct net_device *dev,
817 struct ifreq *ifr, int cmd)
818 {
819 struct ip_tunnel_parm p;
820 int err;
821
822 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
823 return -EFAULT;
824
825 if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
826 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
827 p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
828 ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
829 return -EINVAL;
830 }
831
832 p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
833 p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
834
835 err = ip_tunnel_ioctl(dev, &p, cmd);
836 if (err)
837 return err;
838
839 if (cmd == SIOCCHGTUNNEL) {
840 struct ip_tunnel *t = netdev_priv(dev);
841
842 t->parms.i_flags = p.i_flags;
843 t->parms.o_flags = p.o_flags;
844
845 if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
846 ipgre_link_update(dev, true);
847 }
848
849 p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
850 p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
851
852 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
853 return -EFAULT;
854
855 return 0;
856 }
857
858 /* Nice toy. Unfortunately, useless in real life :-)
859 It allows one to construct a virtual multiprotocol broadcast "LAN"
860 over the Internet, provided multicast routing is tuned.
861
862
863 I have no idea whether this bicycle was invented before me,
864 so I had to set ARPHRD_IPGRE to a random value.
865 I have the impression that Cisco could have made something similar,
866 but this feature is apparently missing in IOS<=11.2(8).
867
868 I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
869 with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
870
871 ping -t 255 224.66.66.66
872
873 If nobody answers, mbone does not work.
874
875 ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
876 ip addr add 10.66.66.<somewhat>/24 dev Universe
877 ifconfig Universe up
878 ifconfig Universe add fe80::<Your_real_addr>/10
879 ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
880 ftp 10.66.66.66
881 ...
882 ftp fec0:6666:6666::193.233.7.65
883 ...
884 */
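/* For reference, the ifconfig invocations above correspond roughly to
 * the following iproute2 commands (an illustrative sketch, using the
 * same addresses as above):
 *
 *	ip link set Universe up
 *	ip addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */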
885 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
886 unsigned short type,
887 const void *daddr, const void *saddr, unsigned int len)
888 {
889 struct ip_tunnel *t = netdev_priv(dev);
890 struct iphdr *iph;
891 struct gre_base_hdr *greh;
892
893 iph = skb_push(skb, t->hlen + sizeof(*iph));
894 greh = (struct gre_base_hdr *)(iph+1);
895 greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
896 greh->protocol = htons(type);
897
898 memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
899
900 /* Set the source hardware address. */
901 if (saddr)
902 memcpy(&iph->saddr, saddr, 4);
903 if (daddr)
904 memcpy(&iph->daddr, daddr, 4);
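/* header_ops->create convention: return the number of bytes pushed,
 * or its negative if the header cannot be completed yet because the
 * destination is still unknown.
 */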
905 if (iph->daddr)
906 return t->hlen + sizeof(*iph);
907
908 return -(t->hlen + sizeof(*iph));
909 }
910
911 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
912 {
913 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
914 memcpy(haddr, &iph->saddr, 4);
915 return 4;
916 }
917
918 static const struct header_ops ipgre_header_ops = {
919 .create = ipgre_header,
920 .parse = ipgre_header_parse,
921 };
922
923 #ifdef CONFIG_NET_IPGRE_BROADCAST
924 static int ipgre_open(struct net_device *dev)
925 {
926 struct ip_tunnel *t = netdev_priv(dev);
927
928 if (ipv4_is_multicast(t->parms.iph.daddr)) {
929 struct flowi4 fl4;
930 struct rtable *rt;
931
932 rt = ip_route_output_gre(t->net, &fl4,
933 t->parms.iph.daddr,
934 t->parms.iph.saddr,
935 t->parms.o_key,
936 RT_TOS(t->parms.iph.tos),
937 t->parms.link);
938 if (IS_ERR(rt))
939 return -EADDRNOTAVAIL;
940 dev = rt->dst.dev;
941 ip_rt_put(rt);
942 if (!__in_dev_get_rtnl(dev))
943 return -EADDRNOTAVAIL;
944 t->mlink = dev->ifindex;
945 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
946 }
947 return 0;
948 }
949
950 static int ipgre_close(struct net_device *dev)
951 {
952 struct ip_tunnel *t = netdev_priv(dev);
953
954 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
955 struct in_device *in_dev;
956 in_dev = inetdev_by_index(t->net, t->mlink);
957 if (in_dev)
958 ip_mc_dec_group(in_dev, t->parms.iph.daddr);
959 }
960 return 0;
961 }
962 #endif
963
964 static const struct net_device_ops ipgre_netdev_ops = {
965 .ndo_init = ipgre_tunnel_init,
966 .ndo_uninit = ip_tunnel_uninit,
967 #ifdef CONFIG_NET_IPGRE_BROADCAST
968 .ndo_open = ipgre_open,
969 .ndo_stop = ipgre_close,
970 #endif
971 .ndo_start_xmit = ipgre_xmit,
972 .ndo_do_ioctl = ipgre_tunnel_ioctl,
973 .ndo_change_mtu = ip_tunnel_change_mtu,
974 .ndo_get_stats64 = ip_tunnel_get_stats64,
975 .ndo_get_iflink = ip_tunnel_get_iflink,
976 };
977
978 #define GRE_FEATURES (NETIF_F_SG | \
979 NETIF_F_FRAGLIST | \
980 NETIF_F_HIGHDMA | \
981 NETIF_F_HW_CSUM)
982
983 static void ipgre_tunnel_setup(struct net_device *dev)
984 {
985 dev->netdev_ops = &ipgre_netdev_ops;
986 dev->type = ARPHRD_IPGRE;
987 ip_tunnel_setup(dev, ipgre_net_id);
988 }
989
990 static void __gre_tunnel_init(struct net_device *dev)
991 {
992 struct ip_tunnel *tunnel;
993
994 tunnel = netdev_priv(dev);
995 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
996 tunnel->parms.iph.protocol = IPPROTO_GRE;
997
998 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
999
1000 dev->features |= GRE_FEATURES;
1001 dev->hw_features |= GRE_FEATURES;
1002
1003 if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
1004 /* TCP offload with GRE SEQ is not supported, nor
1005 * can we support 2 levels of outer headers requiring
1006 * an update.
1007 */
1008 if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
1009 (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
1010 dev->features |= NETIF_F_GSO_SOFTWARE;
1011 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1012 }
1013
1014 /* Can use a lockless transmit, unless we generate
1015 * output sequences
1016 */
1017 dev->features |= NETIF_F_LLTX;
1018 }
1019 }
1020
1021 static int ipgre_tunnel_init(struct net_device *dev)
1022 {
1023 struct ip_tunnel *tunnel = netdev_priv(dev);
1024 struct iphdr *iph = &tunnel->parms.iph;
1025
1026 __gre_tunnel_init(dev);
1027
1028 memcpy(dev->dev_addr, &iph->saddr, 4);
1029 memcpy(dev->broadcast, &iph->daddr, 4);
1030
1031 dev->flags = IFF_NOARP;
1032 netif_keep_dst(dev);
1033 dev->addr_len = 4;
1034
1035 if (iph->daddr && !tunnel->collect_md) {
1036 #ifdef CONFIG_NET_IPGRE_BROADCAST
1037 if (ipv4_is_multicast(iph->daddr)) {
1038 if (!iph->saddr)
1039 return -EINVAL;
1040 dev->flags = IFF_BROADCAST;
1041 dev->header_ops = &ipgre_header_ops;
1042 }
1043 #endif
1044 } else if (!tunnel->collect_md) {
1045 dev->header_ops = &ipgre_header_ops;
1046 }
1047
1048 return ip_tunnel_init(dev);
1049 }
1050
1051 static const struct gre_protocol ipgre_protocol = {
1052 .handler = gre_rcv,
1053 .err_handler = gre_err,
1054 };
1055
1056 static int __net_init ipgre_init_net(struct net *net)
1057 {
1058 return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1059 }
1060
1061 static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
1062 {
1063 ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
1064 }
1065
1066 static struct pernet_operations ipgre_net_ops = {
1067 .init = ipgre_init_net,
1068 .exit_batch = ipgre_exit_batch_net,
1069 .id = &ipgre_net_id,
1070 .size = sizeof(struct ip_tunnel_net),
1071 };
1072
1073 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1074 struct netlink_ext_ack *extack)
1075 {
1076 __be16 flags;
1077
1078 if (!data)
1079 return 0;
1080
1081 flags = 0;
1082 if (data[IFLA_GRE_IFLAGS])
1083 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1084 if (data[IFLA_GRE_OFLAGS])
1085 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1086 if (flags & (GRE_VERSION|GRE_ROUTING))
1087 return -EINVAL;
1088
1089 if (data[IFLA_GRE_COLLECT_METADATA] &&
1090 data[IFLA_GRE_ENCAP_TYPE] &&
1091 nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1092 return -EINVAL;
1093
1094 return 0;
1095 }
1096
1097 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1098 struct netlink_ext_ack *extack)
1099 {
1100 __be32 daddr;
1101
1102 if (tb[IFLA_ADDRESS]) {
1103 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1104 return -EINVAL;
1105 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1106 return -EADDRNOTAVAIL;
1107 }
1108
1109 if (!data)
1110 goto out;
1111
1112 if (data[IFLA_GRE_REMOTE]) {
1113 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1114 if (!daddr)
1115 return -EINVAL;
1116 }
1117
1118 out:
1119 return ipgre_tunnel_validate(tb, data, extack);
1120 }
1121
1122 static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1123 struct netlink_ext_ack *extack)
1124 {
1125 __be16 flags = 0;
1126 int ret;
1127
1128 if (!data)
1129 return 0;
1130
1131 ret = ipgre_tap_validate(tb, data, extack);
1132 if (ret)
1133 return ret;
1134
1135 /* ERSPAN should only have the GRE sequence and key flags */
1136 if (data[IFLA_GRE_OFLAGS])
1137 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1138 if (data[IFLA_GRE_IFLAGS])
1139 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1140 if (!data[IFLA_GRE_COLLECT_METADATA] &&
1141 flags != (GRE_SEQ | GRE_KEY))
1142 return -EINVAL;
1143
1144 /* The ERSPAN session ID is only 10 bits. Since we reuse the
1145 * 32-bit key field as the ID, check its range.
1146 */
1147 if (data[IFLA_GRE_IKEY] &&
1148 (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1149 return -EINVAL;
1150
1151 if (data[IFLA_GRE_OKEY] &&
1152 (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1153 return -EINVAL;
1154
1155 return 0;
1156 }
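/* An illustrative erspan link that satisfies the checks above
 * (iproute2 syntax per ip-link(8); a sketch, not the only valid form):
 *
 *	ip link add dev ers1 type erspan seq key 100 \
 *		local 10.0.0.1 remote 10.0.0.2 \
 *		erspan_ver 1 erspan 123
 *
 * "seq key" supplies exactly GRE_SEQ | GRE_KEY, and both the session
 * ID (100) and the index (123) fit their 10- and 20-bit masks.
 */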
1157
1158 static int ipgre_netlink_parms(struct net_device *dev,
1159 struct nlattr *data[],
1160 struct nlattr *tb[],
1161 struct ip_tunnel_parm *parms,
1162 __u32 *fwmark)
1163 {
1164 struct ip_tunnel *t = netdev_priv(dev);
1165
1166 memset(parms, 0, sizeof(*parms));
1167
1168 parms->iph.protocol = IPPROTO_GRE;
1169
1170 if (!data)
1171 return 0;
1172
1173 if (data[IFLA_GRE_LINK])
1174 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1175
1176 if (data[IFLA_GRE_IFLAGS])
1177 parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1178
1179 if (data[IFLA_GRE_OFLAGS])
1180 parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1181
1182 if (data[IFLA_GRE_IKEY])
1183 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1184
1185 if (data[IFLA_GRE_OKEY])
1186 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1187
1188 if (data[IFLA_GRE_LOCAL])
1189 parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1190
1191 if (data[IFLA_GRE_REMOTE])
1192 parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1193
1194 if (data[IFLA_GRE_TTL])
1195 parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1196
1197 if (data[IFLA_GRE_TOS])
1198 parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1199
1200 if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1201 if (t->ignore_df)
1202 return -EINVAL;
1203 parms->iph.frag_off = htons(IP_DF);
1204 }
1205
1206 if (data[IFLA_GRE_COLLECT_METADATA]) {
1207 t->collect_md = true;
1208 if (dev->type == ARPHRD_IPGRE)
1209 dev->type = ARPHRD_NONE;
1210 }
1211
1212 if (data[IFLA_GRE_IGNORE_DF]) {
1213 if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1214 && (parms->iph.frag_off & htons(IP_DF)))
1215 return -EINVAL;
1216 t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1217 }
1218
1219 if (data[IFLA_GRE_FWMARK])
1220 *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1221
1222 if (data[IFLA_GRE_ERSPAN_VER]) {
1223 t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1224
1225 if (t->erspan_ver != 1 && t->erspan_ver != 2)
1226 return -EINVAL;
1227 }
1228
1229 if (t->erspan_ver == 1) {
1230 if (data[IFLA_GRE_ERSPAN_INDEX]) {
1231 t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1232 if (t->index & ~INDEX_MASK)
1233 return -EINVAL;
1234 }
1235 } else if (t->erspan_ver == 2) {
1236 if (data[IFLA_GRE_ERSPAN_DIR]) {
1237 t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1238 if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1239 return -EINVAL;
1240 }
1241 if (data[IFLA_GRE_ERSPAN_HWID]) {
1242 t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1243 if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1244 return -EINVAL;
1245 }
1246 }
1247
1248 return 0;
1249 }
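/* Sketch of how the attributes parsed above are typically filled from
 * user space (iproute2 syntax; "key 1234 seq csum" sets both the i_
 * and o_ variants of the flags and keys):
 *
 *	ip link add gre1 type gre local 10.0.0.1 remote 10.0.0.2 \
 *		ttl 64 key 1234 seq csum
 */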
1250
1251 /* This function returns true when ENCAP attributes are present in the nl msg */
1252 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1253 struct ip_tunnel_encap *ipencap)
1254 {
1255 bool ret = false;
1256
1257 memset(ipencap, 0, sizeof(*ipencap));
1258
1259 if (!data)
1260 return ret;
1261
1262 if (data[IFLA_GRE_ENCAP_TYPE]) {
1263 ret = true;
1264 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1265 }
1266
1267 if (data[IFLA_GRE_ENCAP_FLAGS]) {
1268 ret = true;
1269 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1270 }
1271
1272 if (data[IFLA_GRE_ENCAP_SPORT]) {
1273 ret = true;
1274 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1275 }
1276
1277 if (data[IFLA_GRE_ENCAP_DPORT]) {
1278 ret = true;
1279 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1280 }
1281
1282 return ret;
1283 }
1284
1285 static int gre_tap_init(struct net_device *dev)
1286 {
1287 __gre_tunnel_init(dev);
1288 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1289 netif_keep_dst(dev);
1290
1291 return ip_tunnel_init(dev);
1292 }
1293
1294 static const struct net_device_ops gre_tap_netdev_ops = {
1295 .ndo_init = gre_tap_init,
1296 .ndo_uninit = ip_tunnel_uninit,
1297 .ndo_start_xmit = gre_tap_xmit,
1298 .ndo_set_mac_address = eth_mac_addr,
1299 .ndo_validate_addr = eth_validate_addr,
1300 .ndo_change_mtu = ip_tunnel_change_mtu,
1301 .ndo_get_stats64 = ip_tunnel_get_stats64,
1302 .ndo_get_iflink = ip_tunnel_get_iflink,
1303 .ndo_fill_metadata_dst = gre_fill_metadata_dst,
1304 };
1305
1306 static int erspan_tunnel_init(struct net_device *dev)
1307 {
1308 struct ip_tunnel *tunnel = netdev_priv(dev);
1309
1310 tunnel->tun_hlen = 8;
1311 tunnel->parms.iph.protocol = IPPROTO_GRE;
1312 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1313 erspan_hdr_len(tunnel->erspan_ver);
1314
1315 dev->features |= GRE_FEATURES;
1316 dev->hw_features |= GRE_FEATURES;
1317 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1318 netif_keep_dst(dev);
1319
1320 return ip_tunnel_init(dev);
1321 }
1322
1323 static const struct net_device_ops erspan_netdev_ops = {
1324 .ndo_init = erspan_tunnel_init,
1325 .ndo_uninit = ip_tunnel_uninit,
1326 .ndo_start_xmit = erspan_xmit,
1327 .ndo_set_mac_address = eth_mac_addr,
1328 .ndo_validate_addr = eth_validate_addr,
1329 .ndo_change_mtu = ip_tunnel_change_mtu,
1330 .ndo_get_stats64 = ip_tunnel_get_stats64,
1331 .ndo_get_iflink = ip_tunnel_get_iflink,
1332 .ndo_fill_metadata_dst = gre_fill_metadata_dst,
1333 };
1334
1335 static void ipgre_tap_setup(struct net_device *dev)
1336 {
1337 ether_setup(dev);
1338 dev->max_mtu = 0;
1339 dev->netdev_ops = &gre_tap_netdev_ops;
1340 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1341 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1342 ip_tunnel_setup(dev, gre_tap_net_id);
1343 }
1344
1345 bool is_gretap_dev(const struct net_device *dev)
1346 {
1347 return dev->netdev_ops == &gre_tap_netdev_ops;
1348 }
1349 EXPORT_SYMBOL_GPL(is_gretap_dev);
1350
1351 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1352 struct nlattr *tb[], struct nlattr *data[],
1353 struct netlink_ext_ack *extack)
1354 {
1355 struct ip_tunnel_parm p;
1356 struct ip_tunnel_encap ipencap;
1357 __u32 fwmark = 0;
1358 int err;
1359
1360 if (ipgre_netlink_encap_parms(data, &ipencap)) {
1361 struct ip_tunnel *t = netdev_priv(dev);
1362 err = ip_tunnel_encap_setup(t, &ipencap);
1363
1364 if (err < 0)
1365 return err;
1366 }
1367
1368 err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1369 if (err < 0)
1370 return err;
1371 return ip_tunnel_newlink(dev, tb, &p, fwmark);
1372 }
1373
1374 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1375 struct nlattr *data[],
1376 struct netlink_ext_ack *extack)
1377 {
1378 struct ip_tunnel *t = netdev_priv(dev);
1379 struct ip_tunnel_encap ipencap;
1380 __u32 fwmark = t->fwmark;
1381 struct ip_tunnel_parm p;
1382 int err;
1383
1384 if (ipgre_netlink_encap_parms(data, &ipencap)) {
1385 err = ip_tunnel_encap_setup(t, &ipencap);
1386
1387 if (err < 0)
1388 return err;
1389 }
1390
1391 err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1392 if (err < 0)
1393 return err;
1394
1395 err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1396 if (err < 0)
1397 return err;
1398
1399 t->parms.i_flags = p.i_flags;
1400 t->parms.o_flags = p.o_flags;
1401
1402 if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
1403 ipgre_link_update(dev, !tb[IFLA_MTU]);
1404
1405 return 0;
1406 }
1407
1408 static size_t ipgre_get_size(const struct net_device *dev)
1409 {
1410 return
1411 /* IFLA_GRE_LINK */
1412 nla_total_size(4) +
1413 /* IFLA_GRE_IFLAGS */
1414 nla_total_size(2) +
1415 /* IFLA_GRE_OFLAGS */
1416 nla_total_size(2) +
1417 /* IFLA_GRE_IKEY */
1418 nla_total_size(4) +
1419 /* IFLA_GRE_OKEY */
1420 nla_total_size(4) +
1421 /* IFLA_GRE_LOCAL */
1422 nla_total_size(4) +
1423 /* IFLA_GRE_REMOTE */
1424 nla_total_size(4) +
1425 /* IFLA_GRE_TTL */
1426 nla_total_size(1) +
1427 /* IFLA_GRE_TOS */
1428 nla_total_size(1) +
1429 /* IFLA_GRE_PMTUDISC */
1430 nla_total_size(1) +
1431 /* IFLA_GRE_ENCAP_TYPE */
1432 nla_total_size(2) +
1433 /* IFLA_GRE_ENCAP_FLAGS */
1434 nla_total_size(2) +
1435 /* IFLA_GRE_ENCAP_SPORT */
1436 nla_total_size(2) +
1437 /* IFLA_GRE_ENCAP_DPORT */
1438 nla_total_size(2) +
1439 /* IFLA_GRE_COLLECT_METADATA */
1440 nla_total_size(0) +
1441 /* IFLA_GRE_IGNORE_DF */
1442 nla_total_size(1) +
1443 /* IFLA_GRE_FWMARK */
1444 nla_total_size(4) +
1445 /* IFLA_GRE_ERSPAN_INDEX */
1446 nla_total_size(4) +
1447 /* IFLA_GRE_ERSPAN_VER */
1448 nla_total_size(1) +
1449 /* IFLA_GRE_ERSPAN_DIR */
1450 nla_total_size(1) +
1451 /* IFLA_GRE_ERSPAN_HWID */
1452 nla_total_size(2) +
1453 0;
1454 }
1455
1456 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1457 {
1458 struct ip_tunnel *t = netdev_priv(dev);
1459 struct ip_tunnel_parm *p = &t->parms;
1460
1461 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1462 nla_put_be16(skb, IFLA_GRE_IFLAGS,
1463 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1464 nla_put_be16(skb, IFLA_GRE_OFLAGS,
1465 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
1466 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1467 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1468 nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1469 nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1470 nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1471 nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1472 nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1473 !!(p->iph.frag_off & htons(IP_DF))) ||
1474 nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1475 goto nla_put_failure;
1476
1477 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1478 t->encap.type) ||
1479 nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1480 t->encap.sport) ||
1481 nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1482 t->encap.dport) ||
1483 nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1484 t->encap.flags))
1485 goto nla_put_failure;
1486
1487 if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1488 goto nla_put_failure;
1489
1490 if (t->collect_md) {
1491 if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1492 goto nla_put_failure;
1493 }
1494
1495 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1496 goto nla_put_failure;
1497
1498 if (t->erspan_ver == 1) {
1499 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1500 goto nla_put_failure;
1501 } else if (t->erspan_ver == 2) {
1502 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1503 goto nla_put_failure;
1504 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1505 goto nla_put_failure;
1506 }
1507
1508 return 0;
1509
1510 nla_put_failure:
1511 return -EMSGSIZE;
1512 }
1513
1514 static void erspan_setup(struct net_device *dev)
1515 {
1516 struct ip_tunnel *t = netdev_priv(dev);
1517
1518 ether_setup(dev);
1519 dev->netdev_ops = &erspan_netdev_ops;
1520 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1521 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1522 ip_tunnel_setup(dev, erspan_net_id);
1523 t->erspan_ver = 1;
1524 }
1525
1526 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1527 [IFLA_GRE_LINK] = { .type = NLA_U32 },
1528 [IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
1529 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
1530 [IFLA_GRE_IKEY] = { .type = NLA_U32 },
1531 [IFLA_GRE_OKEY] = { .type = NLA_U32 },
1532 [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1533 [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1534 [IFLA_GRE_TTL] = { .type = NLA_U8 },
1535 [IFLA_GRE_TOS] = { .type = NLA_U8 },
1536 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
1537 [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
1538 [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
1539 [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
1540 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
1541 [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
1542 [IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 },
1543 [IFLA_GRE_FWMARK] = { .type = NLA_U32 },
1544 [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
1545 [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
1546 [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
1547 [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
1548 };
1549
1550 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1551 .kind = "gre",
1552 .maxtype = IFLA_GRE_MAX,
1553 .policy = ipgre_policy,
1554 .priv_size = sizeof(struct ip_tunnel),
1555 .setup = ipgre_tunnel_setup,
1556 .validate = ipgre_tunnel_validate,
1557 .newlink = ipgre_newlink,
1558 .changelink = ipgre_changelink,
1559 .dellink = ip_tunnel_dellink,
1560 .get_size = ipgre_get_size,
1561 .fill_info = ipgre_fill_info,
1562 .get_link_net = ip_tunnel_get_link_net,
1563 };
1564
1565 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1566 .kind = "gretap",
1567 .maxtype = IFLA_GRE_MAX,
1568 .policy = ipgre_policy,
1569 .priv_size = sizeof(struct ip_tunnel),
1570 .setup = ipgre_tap_setup,
1571 .validate = ipgre_tap_validate,
1572 .newlink = ipgre_newlink,
1573 .changelink = ipgre_changelink,
1574 .dellink = ip_tunnel_dellink,
1575 .get_size = ipgre_get_size,
1576 .fill_info = ipgre_fill_info,
1577 .get_link_net = ip_tunnel_get_link_net,
1578 };
1579
1580 static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1581 .kind = "erspan",
1582 .maxtype = IFLA_GRE_MAX,
1583 .policy = ipgre_policy,
1584 .priv_size = sizeof(struct ip_tunnel),
1585 .setup = erspan_setup,
1586 .validate = erspan_validate,
1587 .newlink = ipgre_newlink,
1588 .changelink = ipgre_changelink,
1589 .dellink = ip_tunnel_dellink,
1590 .get_size = ipgre_get_size,
1591 .fill_info = ipgre_fill_info,
1592 .get_link_net = ip_tunnel_get_link_net,
1593 };
1594
1595 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1596 u8 name_assign_type)
1597 {
1598 struct nlattr *tb[IFLA_MAX + 1];
1599 struct net_device *dev;
1600 LIST_HEAD(list_kill);
1601 struct ip_tunnel *t;
1602 int err;
1603
1604 memset(&tb, 0, sizeof(tb));
1605
1606 dev = rtnl_create_link(net, name, name_assign_type,
1607 &ipgre_tap_ops, tb);
1608 if (IS_ERR(dev))
1609 return dev;
1610
1611 /* Configure flow-based GRE device. */
1612 t = netdev_priv(dev);
1613 t->collect_md = true;
1614
1615 err = ipgre_newlink(net, dev, tb, NULL, NULL);
1616 if (err < 0) {
1617 free_netdev(dev);
1618 return ERR_PTR(err);
1619 }
1620
1621 /* openvswitch users expect packet sizes to be unrestricted,
1622 * so set the largest MTU we can.
1623 */
1624 err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1625 if (err)
1626 goto out;
1627
1628 err = rtnl_configure_link(dev, NULL);
1629 if (err < 0)
1630 goto out;
1631
1632 return dev;
1633 out:
1634 ip_tunnel_dellink(dev, &list_kill);
1635 unregister_netdevice_many(&list_kill);
1636 return ERR_PTR(err);
1637 }
1638 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1639
1640 static int __net_init ipgre_tap_init_net(struct net *net)
1641 {
1642 return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1643 }
1644
1645 static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
1646 {
1647 ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
1648 }
1649
1650 static struct pernet_operations ipgre_tap_net_ops = {
1651 .init = ipgre_tap_init_net,
1652 .exit_batch = ipgre_tap_exit_batch_net,
1653 .id = &gre_tap_net_id,
1654 .size = sizeof(struct ip_tunnel_net),
1655 };
1656
1657 static int __net_init erspan_init_net(struct net *net)
1658 {
1659 return ip_tunnel_init_net(net, erspan_net_id,
1660 &erspan_link_ops, "erspan0");
1661 }
1662
1663 static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
1664 {
1665 ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
1666 }
1667
1668 static struct pernet_operations erspan_net_ops = {
1669 .init = erspan_init_net,
1670 .exit_batch = erspan_exit_batch_net,
1671 .id = &erspan_net_id,
1672 .size = sizeof(struct ip_tunnel_net),
1673 };
1674
1675 static int __init ipgre_init(void)
1676 {
1677 int err;
1678
1679 pr_info("GRE over IPv4 tunneling driver\n");
1680
1681 err = register_pernet_device(&ipgre_net_ops);
1682 if (err < 0)
1683 return err;
1684
1685 err = register_pernet_device(&ipgre_tap_net_ops);
1686 if (err < 0)
1687 goto pnet_tap_failed;
1688
1689 err = register_pernet_device(&erspan_net_ops);
1690 if (err < 0)
1691 goto pnet_erspan_failed;
1692
1693 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1694 if (err < 0) {
1695 pr_info("%s: can't add protocol\n", __func__);
1696 goto add_proto_failed;
1697 }
1698
1699 err = rtnl_link_register(&ipgre_link_ops);
1700 if (err < 0)
1701 goto rtnl_link_failed;
1702
1703 err = rtnl_link_register(&ipgre_tap_ops);
1704 if (err < 0)
1705 goto tap_ops_failed;
1706
1707 err = rtnl_link_register(&erspan_link_ops);
1708 if (err < 0)
1709 goto erspan_link_failed;
1710
1711 return 0;
1712
1713 erspan_link_failed:
1714 rtnl_link_unregister(&ipgre_tap_ops);
1715 tap_ops_failed:
1716 rtnl_link_unregister(&ipgre_link_ops);
1717 rtnl_link_failed:
1718 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1719 add_proto_failed:
1720 unregister_pernet_device(&erspan_net_ops);
1721 pnet_erspan_failed:
1722 unregister_pernet_device(&ipgre_tap_net_ops);
1723 pnet_tap_failed:
1724 unregister_pernet_device(&ipgre_net_ops);
1725 return err;
1726 }
1727
1728 static void __exit ipgre_fini(void)
1729 {
1730 rtnl_link_unregister(&ipgre_tap_ops);
1731 rtnl_link_unregister(&ipgre_link_ops);
1732 rtnl_link_unregister(&erspan_link_ops);
1733 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1734 unregister_pernet_device(&ipgre_tap_net_ops);
1735 unregister_pernet_device(&ipgre_net_ops);
1736 unregister_pernet_device(&erspan_net_ops);
1737 }
1738
1739 module_init(ipgre_init);
1740 module_exit(ipgre_fini);
1741 MODULE_LICENSE("GPL");
1742 MODULE_ALIAS_RTNL_LINK("gre");
1743 MODULE_ALIAS_RTNL_LINK("gretap");
1744 MODULE_ALIAS_RTNL_LINK("erspan");
1745 MODULE_ALIAS_NETDEV("gre0");
1746 MODULE_ALIAS_NETDEV("gretap0");
1747 MODULE_ALIAS_NETDEV("erspan0");
1748