// SPDX-License-Identifier: GPL-2.0-or-later
/* L2TPv3 IP encapsulation support for IPv6
 *
 * Copyright (c) 2012 Katalix Systems Ltd
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>

#include "l2tp_core.h"

struct l2tp_ip6_sock {
        /* inet_sock has to be the first member of l2tp_ip6_sock */
        struct inet_sock        inet;

        u32                     conn_id;
        u32                     peer_conn_id;

        /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
         * inet6_sk_generic
         */
        struct ipv6_pinfo       inet6;
};

static DEFINE_RWLOCK(l2tp_ip6_lock);
static struct hlist_head l2tp_ip6_table;
static struct hlist_head l2tp_ip6_bind_table;

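/* The socket is allocated as a struct l2tp_ip6_sock (see .obj_size in
 * l2tp_ip6_prot below) and inet_sock is its first member, so a plain cast
 * from struct sock is valid here.
 */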
static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
{
        return (struct l2tp_ip6_sock *)sk;
}

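/* Find a bound socket in l2tp_ip6_bind_table matching the given namespace,
 * local/remote address (either side may be wildcarded), interface and tunnel
 * connection ID. Called with l2tp_ip6_lock held.
 */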
static struct sock *__l2tp_ip6_bind_lookup(const struct net *net,
                                           const struct in6_addr *laddr,
                                           const struct in6_addr *raddr,
                                           int dif, u32 tunnel_id)
{
        struct sock *sk;

        sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
                const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
                const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
                const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
                int bound_dev_if;

                if (!net_eq(sock_net(sk), net))
                        continue;

                bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
                if (bound_dev_if && dif && bound_dev_if != dif)
                        continue;

                if (sk_laddr && !ipv6_addr_any(sk_laddr) &&
                    !ipv6_addr_any(laddr) && !ipv6_addr_equal(sk_laddr, laddr))
                        continue;

                if (!ipv6_addr_any(sk_raddr) && raddr &&
                    !ipv6_addr_any(raddr) && !ipv6_addr_equal(sk_raddr, raddr))
                        continue;

                if (l2tp->conn_id != tunnel_id)
                        continue;

                goto found;
        }

        sk = NULL;
found:
        return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |            Length             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
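/* The first 32 bits of the payload therefore select the receive path below:
 * zero means a control message queued to the userspace socket bound to the
 * tunnel, anything else is a session ID for the data path.
 */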
static int l2tp_ip6_recv(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
        unsigned char *ptr, *optr;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel = NULL;
        struct ipv6hdr *iph;

        if (!pskb_may_pull(skb, 4))
                goto discard;

        /* Point to L2TP header */
        optr = skb->data;
        ptr = skb->data;
        session_id = ntohl(*((__be32 *)ptr));
        ptr += 4;

        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
         * the session_id. If it is 0, the packet is an L2TP control
         * frame and the session_id value can be discarded.
         */
        if (session_id == 0) {
                __skb_pull(skb, 4);
                goto pass_up;
        }

        /* Ok, this is a data packet. Lookup the session. */
        session = l2tp_session_get(net, session_id);
        if (!session)
                goto discard;

        tunnel = session->tunnel;
        if (!tunnel)
                goto discard_sess;

        if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
                goto discard_sess;

        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
        l2tp_session_dec_refcount(session);

        return 0;

pass_up:
        /* Get the tunnel_id from the L2TP header */
        if (!pskb_may_pull(skb, 12))
                goto discard;

        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;

        tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
        iph = ipv6_hdr(skb);

        read_lock_bh(&l2tp_ip6_lock);
        sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
                                    inet6_iif(skb), tunnel_id);
        if (!sk) {
                read_unlock_bh(&l2tp_ip6_lock);
                goto discard;
        }
        sock_hold(sk);
        read_unlock_bh(&l2tp_ip6_lock);

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;

        nf_reset_ct(skb);

        return sk_receive_skb(sk, skb, 1);

discard_sess:
        l2tp_session_dec_refcount(session);
        goto discard;

discard_put:
        sock_put(sk);

discard:
        kfree_skb(skb);
        return 0;
}

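/* Sockets start life in l2tp_ip6_table; a successful bind() moves them to
 * l2tp_ip6_bind_table (see l2tp_ip6_bind below).
 */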
static int l2tp_ip6_hash(struct sock *sk)
{
        if (sk_unhashed(sk)) {
                write_lock_bh(&l2tp_ip6_lock);
                sk_add_node(sk, &l2tp_ip6_table);
                write_unlock_bh(&l2tp_ip6_lock);
        }
        return 0;
}

static void l2tp_ip6_unhash(struct sock *sk)
{
        if (sk_unhashed(sk))
                return;
        write_lock_bh(&l2tp_ip6_lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);
}

static int l2tp_ip6_open(struct sock *sk)
{
        /* Prevent autobind. We don't have ports. */
        inet_sk(sk)->inet_num = IPPROTO_L2TP;

        l2tp_ip6_hash(sk);
        return 0;
}

static void l2tp_ip6_close(struct sock *sk, long timeout)
{
        write_lock_bh(&l2tp_ip6_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);

        sk_common_release(sk);
}

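/* Flush any queued-but-unsent frames and tear down the tunnel (if one was
 * created on this socket) before the generic IPv6 socket destruction runs.
 */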
static void l2tp_ip6_destroy_sock(struct sock *sk)
{
        struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

        lock_sock(sk);
        ip6_flush_pending_frames(sk);
        release_sock(sk);

        if (tunnel)
                l2tp_tunnel_delete(tunnel);

        inet6_destroy_sock(sk);
}

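/* Bind the socket to a local IPv6 address and an L2TP connection ID, then
 * move it from the unbound table to l2tp_ip6_bind_table so incoming control
 * frames can find it.
 */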
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr;
        struct net *net = sock_net(sk);
        __be32 v4addr = 0;
        int bound_dev_if;
        int addr_type;
        int err;

        if (addr->l2tp_family != AF_INET6)
                return -EINVAL;
        if (addr_len < sizeof(*addr))
                return -EINVAL;

        addr_type = ipv6_addr_type(&addr->l2tp_addr);

        /* l2tp_ip6 sockets are IPv6 only */
        if (addr_type == IPV6_ADDR_MAPPED)
                return -EADDRNOTAVAIL;

        /* L2TP is point-to-point, not multicast */
        if (addr_type & IPV6_ADDR_MULTICAST)
                return -EADDRNOTAVAIL;

        lock_sock(sk);

        err = -EINVAL;
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out_unlock;

        if (sk->sk_state != TCP_CLOSE)
                goto out_unlock;

        bound_dev_if = sk->sk_bound_dev_if;

        /* Check if the address belongs to the host. */
        rcu_read_lock();
        if (addr_type != IPV6_ADDR_ANY) {
                struct net_device *dev = NULL;

                if (addr_type & IPV6_ADDR_LINKLOCAL) {
                        if (addr->l2tp_scope_id)
                                bound_dev_if = addr->l2tp_scope_id;

                        /* Binding to link-local address requires an
                         * interface.
                         */
                        if (!bound_dev_if)
                                goto out_unlock_rcu;

                        err = -ENODEV;
                        dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
                        if (!dev)
                                goto out_unlock_rcu;
                }

                /* The ipv4 address of the socket is invalid here. Only the
                 * unspecified and mapped addresses have a v4 equivalent.
                 */
                v4addr = LOOPBACK4_IPV6;
                err = -EADDRNOTAVAIL;
                if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
                        goto out_unlock_rcu;
        }
        rcu_read_unlock();

        write_lock_bh(&l2tp_ip6_lock);
        if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
                                   addr->l2tp_conn_id)) {
                write_unlock_bh(&l2tp_ip6_lock);
                err = -EADDRINUSE;
                goto out_unlock;
        }

        inet->inet_saddr = v4addr;
        inet->inet_rcv_saddr = v4addr;
        sk->sk_bound_dev_if = bound_dev_if;
        sk->sk_v6_rcv_saddr = addr->l2tp_addr;
        np->saddr = addr->l2tp_addr;

        l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;

        sk_add_bind_node(sk, &l2tp_ip6_bind_table);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);

        sock_reset_flag(sk, SOCK_ZAPPED);
        release_sock(sk);
        return 0;

out_unlock_rcu:
        rcu_read_unlock();
out_unlock:
        release_sock(sk);

        return err;
}

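/* Connect the socket to a peer address and record the peer connection ID.
 * There is no autobind, so bind() must have been called first; the socket is
 * then rehashed in the bind table with its new remote address.
 */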
static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
                            int addr_len)
{
        struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
        struct in6_addr *daddr;
        int addr_type;
        int rc;

        if (addr_len < sizeof(*lsa))
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EINVAL;

        addr_type = ipv6_addr_type(&usin->sin6_addr);
        if (addr_type & IPV6_ADDR_MULTICAST)
                return -EINVAL;

        if (addr_type & IPV6_ADDR_MAPPED) {
                daddr = &usin->sin6_addr;
                if (ipv4_is_multicast(daddr->s6_addr32[3]))
                        return -EINVAL;
        }

        lock_sock(sk);

        /* Must bind first - autobinding does not work */
        if (sock_flag(sk, SOCK_ZAPPED)) {
                rc = -EINVAL;
                goto out_sk;
        }

        rc = __ip6_datagram_connect(sk, uaddr, addr_len);
        if (rc < 0)
                goto out_sk;

        l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

        write_lock_bh(&l2tp_ip6_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_add_bind_node(sk, &l2tp_ip6_bind_table);
        write_unlock_bh(&l2tp_ip6_lock);

out_sk:
        release_sock(sk);

        return rc;
}

static int l2tp_ip6_disconnect(struct sock *sk, int flags)
{
        if (sock_flag(sk, SOCK_ZAPPED))
                return 0;

        return __udp_disconnect(sk, flags);
}

static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
                            int peer)
{
        struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
        struct sock *sk = sock->sk;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);

        lsa->l2tp_family = AF_INET6;
        lsa->l2tp_flowinfo = 0;
        lsa->l2tp_scope_id = 0;
        lsa->l2tp_unused = 0;
        if (peer) {
                if (!lsk->peer_conn_id)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
                lsa->l2tp_addr = sk->sk_v6_daddr;
                if (np->sndflow)
                        lsa->l2tp_flowinfo = np->flow_label;
        } else {
                if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                        lsa->l2tp_addr = np->saddr;
                else
                        lsa->l2tp_addr = sk->sk_v6_rcv_saddr;

                lsa->l2tp_conn_id = lsk->conn_id;
        }
        if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
                lsa->l2tp_scope_id = READ_ONCE(sk->sk_bound_dev_if);
        return sizeof(*lsa);
}

static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
        int rc;

        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
                goto drop;

        return 0;

drop:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return -1;
}

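/* Write the four zero bytes that mark a control message at the transport
 * header of the queued frame before handing the queue to the IPv6 output
 * path.
 */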
static int l2tp_ip6_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb;
        __be32 *transhdr = NULL;
        int err = 0;

        skb = skb_peek(&sk->sk_write_queue);
        if (!skb)
                goto out;

        transhdr = (__be32 *)skb_transport_header(skb);
        *transhdr = 0;

        err = ip6_push_pending_frames(sk);

out:
        return err;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
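/* For illustration only (userspace sketch, not part of this module): a
 * control socket for the path below might be created and wired up roughly
 * like this, assuming <linux/l2tp.h> provides struct sockaddr_l2tpip6 and
 * the local/peer addresses and tunnel IDs are placeholders:
 *
 *      int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);
 *      struct sockaddr_l2tpip6 sa = {
 *              .l2tp_family  = AF_INET6,
 *              .l2tp_addr    = local_ipv6_addr,
 *              .l2tp_conn_id = local_tunnel_id,
 *      };
 *      bind(fd, (struct sockaddr *)&sa, sizeof(sa));   // required, no autobind
 *      sa.l2tp_addr    = peer_ipv6_addr;
 *      sa.l2tp_conn_id = peer_tunnel_id;
 *      connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *      send(fd, ctrl_msg, ctrl_len, 0);                // -> l2tp_ip6_sendmsg()
 */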
static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct ipv6_txoptions opt_space;
        DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
        struct in6_addr *daddr, *final_p, final;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt_to_free = NULL;
        struct ipv6_txoptions *opt = NULL;
        struct ip6_flowlabel *flowlabel = NULL;
        struct dst_entry *dst = NULL;
        struct flowi6 fl6;
        struct ipcm6_cookie ipc6;
        int addr_len = msg->msg_namelen;
        int transhdrlen = 4; /* zero session-id */
        int ulen;
        int err;

        /* Rough check on arithmetic overflow,
         * better check is made in ip6_append_data().
         */
        if (len > INT_MAX - transhdrlen)
                return -EMSGSIZE;
        ulen = len + transhdrlen;

        /* Mirror BSD error message compatibility */
        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* Get and verify the address */
        memset(&fl6, 0, sizeof(fl6));

        fl6.flowi6_mark = sk->sk_mark;
        fl6.flowi6_uid = sk->sk_uid;

        ipcm6_init(&ipc6);

        if (lsa) {
                if (addr_len < SIN6_LEN_RFC2133)
                        return -EINVAL;

                if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
                        return -EAFNOSUPPORT;

                daddr = &lsa->l2tp_addr;
                if (np->sndflow) {
                        fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
                        if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
                                flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                                if (IS_ERR(flowlabel))
                                        return -EINVAL;
                        }
                }

                /* Otherwise it will be difficult to maintain
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
                    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
                        daddr = &sk->sk_v6_daddr;

                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    lsa->l2tp_scope_id &&
                    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
                        fl6.flowi6_oif = lsa->l2tp_scope_id;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;

                daddr = &sk->sk_v6_daddr;
                fl6.flowlabel = np->flow_label;
        }

        if (fl6.flowi6_oif == 0)
                fl6.flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

        if (msg->msg_controllen) {
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
                ipc6.opt = opt;

                err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
                }
                if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (IS_ERR(flowlabel))
                                return -EINVAL;
                }
                if (!(opt->opt_nflen | opt->opt_flen))
                        opt = NULL;
        }

        if (!opt) {
                opt = txopt_get(np);
                opt_to_free = opt;
        }
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
        ipc6.opt = opt;

        fl6.flowi6_proto = sk->sk_protocol;
        if (!ipv6_addr_any(daddr))
                fl6.daddr = *daddr;
        else
                fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
        if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
                fl6.saddr = np->saddr;

        final_p = fl6_update_dst(&fl6, opt, &final);

        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
                fl6.flowi6_oif = np->mcast_oif;
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;

        security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

        if (ipc6.tclass < 0)
                ipc6.tclass = np->tclass;

        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

        dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto out;
        }

        if (ipc6.hlimit < 0)
                ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

        if (ipc6.dontfrag < 0)
                ipc6.dontfrag = np->dontfrag;

        if (msg->msg_flags & MSG_CONFIRM)
                goto do_confirm;

back_from_confirm:
        lock_sock(sk);
        err = ip6_append_data(sk, ip_generic_getfrag, msg,
                              ulen, transhdrlen, &ipc6,
                              &fl6, (struct rt6_info *)dst,
                              msg->msg_flags);
        if (err)
                ip6_flush_pending_frames(sk);
        else if (!(msg->msg_flags & MSG_MORE))
                err = l2tp_ip6_push_pending_frames(sk);
        release_sock(sk);
done:
        dst_release(dst);
out:
        fl6_sock_release(flowlabel);
        txopt_put(opt_to_free);

        return err < 0 ? err : len;

do_confirm:
        if (msg->msg_flags & MSG_PROBE)
                dst_confirm_neigh(dst, &fl6.daddr);
        if (!(msg->msg_flags & MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
        goto done;
}

static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                            int flags, int *addr_len)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
        size_t copied = 0;
        int err = -EOPNOTSUPP;
        struct sk_buff *skb;

        if (flags & MSG_OOB)
                goto out;

        if (flags & MSG_ERRQUEUE)
                return ipv6_recv_error(sk, msg, len, addr_len);

        skb = skb_recv_datagram(sk, flags, &err);
        if (!skb)
                goto out;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto done;

        sock_recv_timestamp(msg, sk, skb);

        /* Copy the address. */
        if (lsa) {
                lsa->l2tp_family = AF_INET6;
                lsa->l2tp_unused = 0;
                lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
                lsa->l2tp_flowinfo = 0;
                lsa->l2tp_scope_id = 0;
                lsa->l2tp_conn_id = 0;
                if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
                        lsa->l2tp_scope_id = inet6_iif(skb);
                *addr_len = sizeof(*lsa);
        }

        if (np->rxopt.all)
                ip6_datagram_recv_ctl(sk, msg, skb);

        if (flags & MSG_TRUNC)
                copied = skb->len;
done:
        skb_free_datagram(sk, skb);
out:
        return err ? err : copied;
}

static struct proto l2tp_ip6_prot = {
        .name           = "L2TP/IPv6",
        .owner          = THIS_MODULE,
        .init           = l2tp_ip6_open,
        .close          = l2tp_ip6_close,
        .bind           = l2tp_ip6_bind,
        .connect        = l2tp_ip6_connect,
        .disconnect     = l2tp_ip6_disconnect,
        .ioctl          = l2tp_ioctl,
        .destroy        = l2tp_ip6_destroy_sock,
        .setsockopt     = ipv6_setsockopt,
        .getsockopt     = ipv6_getsockopt,
        .sendmsg        = l2tp_ip6_sendmsg,
        .recvmsg        = l2tp_ip6_recvmsg,
        .backlog_rcv    = l2tp_ip6_backlog_recv,
        .hash           = l2tp_ip6_hash,
        .unhash         = l2tp_ip6_unhash,
        .obj_size       = sizeof(struct l2tp_ip6_sock),
};

static const struct proto_ops l2tp_ip6_ops = {
        .family         = PF_INET6,
        .owner          = THIS_MODULE,
        .release        = inet6_release,
        .bind           = inet6_bind,
        .connect        = inet_dgram_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = l2tp_ip6_getname,
        .poll           = datagram_poll,
        .ioctl          = inet6_ioctl,
        .gettstamp      = sock_gettstamp,
        .listen         = sock_no_listen,
        .shutdown       = inet_shutdown,
        .setsockopt     = sock_common_setsockopt,
        .getsockopt     = sock_common_getsockopt,
        .sendmsg        = inet_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = inet6_compat_ioctl,
#endif
};

static struct inet_protosw l2tp_ip6_protosw = {
        .type           = SOCK_DGRAM,
        .protocol       = IPPROTO_L2TP,
        .prot           = &l2tp_ip6_prot,
        .ops            = &l2tp_ip6_ops,
};

static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
        .handler        = l2tp_ip6_recv,
};

static int __init l2tp_ip6_init(void)
{
        int err;

        pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");

        err = proto_register(&l2tp_ip6_prot, 1);
        if (err != 0)
                goto out;

        err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
        if (err)
                goto out1;

        inet6_register_protosw(&l2tp_ip6_protosw);
        return 0;

out1:
        proto_unregister(&l2tp_ip6_prot);
out:
        return err;
}

static void __exit l2tp_ip6_exit(void)
{
        inet6_unregister_protosw(&l2tp_ip6_protosw);
        inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
        proto_unregister(&l2tp_ip6_prot);
}

module_init(l2tp_ip6_init);
module_exit(l2tp_ip6_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
MODULE_VERSION("1.0");

/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
MODULE_ALIAS_NET_PF_PROTO(PF_INET6, IPPROTO_L2TP);