// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}
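
/*
 * Worked example for the hash above: for a v4-mapped local address such as
 * ::ffff:192.0.2.1, laddr->s6_addr32[3] is exactly the IPv4 address, so
 * lhash is cheap to extract, while the foreign address is jhashed in full
 * with a boot-time random secret.  udp6_lib_lookup2() below feeds the
 * result to reuseport_select_sock(), roughly:
 *
 *	hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
 *	result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr));
 *
 * so a SO_REUSEPORT group sees a consistent, per-netns, unpredictable
 * spread of 4-tuples across its sockets.
 */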

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}
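
/*
 * Scoring example: a socket bound to the packet's destination address and
 * port but otherwise unconnected scores 1 (device match only); a fully
 * connected socket whose remote address and port also match scores 3, with
 * a further +1 when the packet arrives on the socket's last incoming CPU.
 * A mismatch on any checked field disqualifies the socket outright
 * (score -1) rather than merely lowering the score.
 */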

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			if (sk->sk_reuseport &&
			    sk->sk_state != TCP_ESTABLISHED) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);

				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result && !reuseport_has_conns(sk, false))
					return result;
			}
			result = sk;
			badness = score;
		}
	}
	return result;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!result) {
		hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
		slot2 = hash2 & udptable->mask;

		hslot2 = &udptable->hash2[slot2];

		result = udp6_lib_lookup2(net, saddr, sport,
					  &in6addr_any, hnum, dif, sdif,
					  hslot2, skb);
	}
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
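
/*
 * The lookup is two-pass: first the slot for the exact destination address
 * and port, then, on a miss, the slot for (in6addr_any, port), where
 * wildcard-bound sockets were hashed by udp_v6_get_port().  For example, a
 * datagram to [2001:db8::1]:53 can be claimed by a socket bound to that
 * address in the first pass, else it falls through to one bound to [::]:53.
 */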

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
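
/*
 * Illustrative caller of udp6_lib_lookup() (a sketch, not taken from an
 * in-tree user): the lookup itself takes the socket reference, so the
 * socket stays valid after rcu_read_unlock() and the caller drops the
 * reference when done.
 *
 *	rcu_read_lock();
 *	sk = udp6_lib_lookup(net, &iph->saddr, uh->source,
 *			     &iph->daddr, uh->dest, inet6_iif(skb));
 *	rcu_read_unlock();
 *	if (sk) {
 *		... use sk ...
 *		sock_put(sk);
 *	}
 */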

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						(struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
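
/*
 * Userspace view of the MSG_TRUNC handling above (illustrative sketch):
 * when the caller passes MSG_TRUNC, the return value is the full on-wire
 * datagram length even if only part of it was copied, and msg_flags gets
 * MSG_TRUNC whenever the buffer was too small.
 *
 *	char buf[64];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(fd, &mh, MSG_TRUNC);
 *	if (mh.msg_flags & MSG_TRUNC)
 *		... n is the real datagram size, buf holds 64 bytes ...
 */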

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints; in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}
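
/*
 * The np->recverr branch above corresponds to the IPV6_RECVERR socket
 * option.  Illustrative userspace sketch: with the option set, errors
 * queued by ipv6_icmp_error() are drained through the error queue handled
 * at the top of udpv6_recvmsg().
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
 *	...
 *	n = recvmsg(fd, &mh, MSG_ERRQUEUE);
 *	(a cmsg of level IPPROTO_IPV6 / type IPV6_RECVERR carries a
 *	 struct sock_extended_err describing the ICMPv6 error)
 */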

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
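
/*
 * Sketch of an ->encap_rcv() handler honouring the contract documented
 * above (hypothetical module; my_tunnel_parse() and IPPROTO_EXAMPLE are
 * placeholders, not in-tree symbols):
 *
 *	static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		switch (my_tunnel_parse(skb)) {
 *		case NOT_OURS:
 *			return 1;			(deliver as plain UDP)
 *		case CONSUMED:
 *			return 0;			(handled or dropped here)
 *		case DECAPSULATED:
 *			return -IPPROTO_EXAMPLE;	(resubmit as that proto)
 *		}
 *	}
 */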

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent the BPF program called below from accessing
	 * bytes that are outside the bound specified by the user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr:	source address
 *	@daddr:	destination address
 *	@len:	length of the UDP datagram (header plus payload)
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
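
/*
 * Checksum note for the fragmented path above: UDP checksums are 16-bit
 * one's-complement sums, so they can be accumulated piecewise.  The body
 * sum of the head skb and of each frag_list member are folded together
 * with csum_add(), then csum_ipv6_magic() adds the IPv6 pseudo-header
 * (saddr, daddr, length, next-header) and complements the result.  A
 * computed value of 0 is transmitted as CSUM_MANGLED_0 (0xffff) because a
 * zero checksum field in UDP over IPv6 means "no checksum" and is
 * normally invalid (RFC 6935 covers the tunnel-only exception).
 */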

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}
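
/*
 * The cork->gso_size branch above implements UDP segmentation offload
 * (UDP_SEGMENT).  Illustrative userspace sketch: one large write is cut
 * into gso_size-sized datagrams on the way out (the last one may be
 * shorter), subject to the checks above (no disabled TX checksums, at
 * most UDP_MAX_SEGMENTS segments, no UDP-Lite, no xfrm):
 *
 *	int gso = 1400;
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	send(fd, buf, 1400 * 8, 0);	(emits 8 UDP datagrams)
 *
 * The same value can be supplied per call via a UDP_SEGMENT cmsg, which
 * udp_cmsg_send() parses into ipc6.gso_size in udpv6_sendmsg() below.
 */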

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = up->gso_size;
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = sk->sk_mark;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	 * better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = ipc6.sockc.mark;
	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote the IPv6-only address
				 * to an IPv4-mapped one, which is currently
				 * not supported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
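
/*
 * Corking example for the pending/MSG_MORE logic above (illustrative
 * userspace sketch): MSG_MORE (or the UDP_CORK socket option) makes the
 * first send take the slow path and set up->pending = AF_INET6; later
 * sends append to the corked frame, which goes out as a single UDP
 * datagram once the cork is released.
 *
 *	send(fd, hdr, hdrlen, MSG_MORE);	(corks, appends header)
 *	send(fd, payload, paylen, 0);		(appends, then pushes)
 */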

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udpv6_encap_needed_key);
	}

	inet6_destroy_sock(sk);
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol udpv6_protocol = {
	.early_demux		=	udp_v6_early_demux,
	.early_demux_handler	=	udp_v6_early_demux,
	.handler		=	udpv6_rcv,
	.err_handler		=	udpv6_err,
	.flags			=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udpv6_setsockopt,
	.compat_getsockopt	= compat_udpv6_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}