/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *			:       add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/bpf-cgroup.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
#include <net/lwtunnel.h>

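/* Finish transmission of a single packet: loop a copy of multicast
 * packets back to local listeners when required, honour lwtunnel
 * redirects, then resolve the nexthop neighbour and hand the skb to
 * the link layer via neigh_output().
 */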
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_is_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

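/* Post-routing work before ip6_finish_output2(): run the cgroup BPF
 * egress hook, re-route packets that picked up an xfrm policy after
 * SNAT, and fragment packets that exceed the path MTU (or that must
 * be fragmented because of dst_allfrag()/frag_max_size).
 */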
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif

	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(net, sk, skb, ip6_finish_output2);
	else
		return ip6_finish_output2(net, sk, skb);
}

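/* IPv6 dst_output() entry point: drop the packet if IPv6 is disabled
 * on the egress device, otherwise run the NF_INET_POST_ROUTING hook
 * unless the skb was already rerouted (IP6SKB_REROUTED).
 */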
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
{
	if (!np->autoflowlabel_set)
		return ip6_default_np_autolabel(net);
	else
		return np->autoflowlabel;
}

/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note : the socket lock is not held for SYNACK packets, but the packet
 * and socket may still be modified by calls to skb_set_owner_w() and
 * ipv6_local_error(), which use proper atomic operations or spinlocks.
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (!skb2) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			if (skb->sk)
				skb_set_owner_w(skb2, skb->sk);
			consume_skb(skb);
			skb = skb2;
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
					     &fl6->saddr);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
				ip6_autoflowlabel(net, np), fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);

		/* if egress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_out((struct sock *)sk, skb);
		if (unlikely(!skb))
			return 0;

		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dst->dev,
			       dst_output);
	}

	skb->dev = dst->dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);

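/* Deliver a Router Alert packet to every raw socket that registered
 * for this alert value via IPV6_ROUTER_ALERT.  Returns 1 when at
 * least one socket consumed the skb, 0 otherwise.
 */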
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

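/* Decide what to do with a packet destined to a proxied (NDP proxy)
 * address: 1 = deliver to the input path (unicast neighbour
 * discovery), 0 = forward as usual, -1 = drop.
 */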
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* A unicast neighbour discovery message destined
			 * to the proxied address must be passed to the
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);

	return dst_output(net, sk, skb);
}

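/* Decide whether a packet being forwarded exceeds @mtu and therefore
 * needs an ICMPV6_PKT_TOOBIG reply; GSO packets are checked against
 * the size of the segments they will produce.
 */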
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

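/* Forward a packet on behalf of another node: validate the hop limit,
 * honour Router Alert and NDP proxy handling, emit redirects when the
 * packet leaves through the interface it arrived on, enforce the path
 * MTU and finally pass the packet to the NF_INET_FORWARD hook.
 */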
int ip6_forward(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (unlikely(skb->sk))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT do any processing on RA packets; we push them
	 *	to user level AS IS, without any WARRANTY that the
	 *	application will be able to interpret them. The reason
	 *	is that we cannot make anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP we cannot do anything either.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (IP6CB(skb)->iif == dst->dev->ifindex &&
	    opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	The incoming and outgoing devices are the same;
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

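/* Propagate per-packet metadata (dst, device, priority, marks,
 * tc index, netfilter and security state) from the original skb
 * to a freshly built fragment.
 */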
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}

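/* Fragment an IPv6 packet and feed the fragments to @output.  The
 * fast path reuses an existing frag_list when its geometry already
 * matches the fragment size; otherwise the slow path copies the
 * payload into newly allocated fragments.
 */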
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto fail;
	hlen = err;
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb was not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			err = -ENOMEM;
			goto fail;
		}
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);

		__skb_pull(skb, hlen);
		fh = __skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one has gone down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = __skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		u8 *fragnexthdr_offset;

		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		fragnexthdr_offset = skb_network_header(frag);
		fragnexthdr_offset += prevhdr - skb_network_header(skb);
		*fragnexthdr_offset = NEXTHDR_FRAGMENT;

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

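/* Nonzero when the cached route can no longer be assumed valid for
 * @fl_addr: the (host) route key does not match it and neither does
 * the cached destination address.
 */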
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the unconnected case is not
	 * very simple. Take into account that we do not support routing
	 * by source, TOS, and MSG_DONTROUTE	--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If the route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may
	 *    check its validity using the saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save the whole address now
	 *    (because the main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	   (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

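/* Core of the dst lookup: select a source address when the flow left
 * it unspecified, perform the actual route lookup, and (with
 * optimistic DAD) fall back to the default router's dst entry while
 * our source address is still optimistic.
 */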
static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;
	int flags = 0;

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
		struct fib6_info *from;
		struct rt6_info *rt;
		bool had_dst = *dst != NULL;

		if (!had_dst)
			*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;

		rcu_read_lock();
		from = rt ? rcu_dereference(rt->from) : NULL;
		err = ip6_route_get_saddr(net, from, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		rcu_read_unlock();

		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if (!had_dst && (*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}

		if (fl6->flowi6_oif)
			flags |= RT6_LOOKUP_F_IFACE;
	}

	if (!*dst)
		*dst = ip6_route_output_flags(net, sk, fl6, flags);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif
	if (ipv6_addr_v4mapped(&fl6->saddr) &&
	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
		err = -EAFNOSUPPORT;
		goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;

	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@net: network namespace to perform the lookup in
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@connected: whether @sk is connected or not
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	In addition, for a connected socket, cache the dst in the socket
 *	if the current cache is not valid.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool connected)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);

	dst = ip6_sk_dst_check(sk, dst, fl6);
	if (dst)
		return dst;

	dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
	if (connected && !IS_ERR(dst))
		ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

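/* Recompute *mtu and *maxfraglen when a new fragment is started:
 * only the first fragment has to reserve the dst header space.
 */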
static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not the first; the header
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}

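/* Initialise the cork for a corked send: duplicate the tx options so
 * they outlive the caller, pin the route, and capture hop limit,
 * traffic class and the MTU that later appends will honour.
 */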
static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;
	struct ipv6_txoptions *opt = ipc6->opt;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
		if (unlikely(!v6_cork->opt))
			return -ENOBUFS;

		v6_cork->opt->tot_len = sizeof(*opt);
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = ipc6->hlimit;
	v6_cork->tclass = ipc6->tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < IPV6_MIN_MTU)
		return -EINVAL;
	cork->base.fragsize = mtu;
	cork->base.gso_size = ipc6->gso_size;
	cork->base.tx_flags = 0;
	sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);

	if (dst_allfrag(xfrm_dst_path(&rt->dst)))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	cork->base.transmit_time = ipc6->sockc.transmit_time;

	return 0;
}

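/* Append data to the queue of pending skbs, growing the tail skb or
 * allocating new ones sized to the (fragmentation-aware) MTU.
 * Checksum offload (CHECKSUM_PARTIAL) is only used when the packet
 * will not be fragmented and carries no extension headers.
 */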
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;
	unsigned int wmem_alloc_delta = 0;
	bool paged;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	paged = !!cork->gso_size;
	mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
	orig_mtu = mtu;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
	 * within the first fragment
	 */
	if (headersize + transhdrlen > mtu)
		goto emsgsize;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
		ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length <= mtu - headersize &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			unsigned int pagedlen = 0;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else if (!paged)
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment; the
				 * trailer space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy < 0) {
				err = -EINVAL;
				goto error;
			}
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen + hh_len,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			if (copy > 0 &&
			    getfrag(from, data + transhdrlen, offset,
				    copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}

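/*
 *	ip6_append_data - queue data for transmission on @sk
 *
 *	Data is pulled in via @getfrag and buffered on the socket's
 *	write queue; the first call per packet sets up the cork.
 *	Callers (e.g. raw sockets and corked UDP) later emit the packet
 *	with ip6_push_pending_frames() or discard it with
 *	ip6_flush_pending_frames().  A typical (illustrative) sequence:
 *
 *		err = ip6_append_data(sk, getfrag, msg, len, 0,
 *				      &ipc6, &fl6, rt, msg->msg_flags);
 *		if (!err)
 *			err = ip6_push_pending_frames(sk);
 *		else
 *			ip6_flush_pending_frames(sk);
 */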
int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
				     ipc6, rt, fl6);
		if (err)
			return err;

		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, ipc6);
}
EXPORT_SYMBOL_GPL(ip6_append_data);

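/* Release cork state: free the duplicated tx options and drop the
 * route reference taken by ip6_setup_cork().
 */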
static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}

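/* Coalesce the pending queue into a single packet: chain the queued
 * skbs onto the first skb's frag_list, push extension headers and the
 * IPv6 header, and release the cork.
 */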
struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					ip6_autoflowlabel(net, np), fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb->tstamp = cork->base.transmit_time;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}

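/* Transmit a packet built by __ip6_make_skb() via ip6_local_out(),
 * mapping qdisc return codes to errnos with net_xmit_errno().
 */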
int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

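/* Build a single packet from the socket's pending write queue and
 * send it.
 */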
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}

void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);

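/* Non-corked fast path: build the complete packet on a private queue
 * in one call, without touching the socket's write queue.
 */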
struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     struct inet_cork_full *cork)
{
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->base.flags = 0;
	cork->base.addr = 0;
	cork->base.opt = NULL;
	cork->base.dst = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6);
	if (err) {
		ip6_cork_release(cork, &v6_cork);
		return ERR_PTR(err);
	}
	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, ipc6);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, cork, &v6_cork);
}