// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
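
/*
 * Editorial sketch (not part of the original file): any caller that mutates
 * a header field after the checksum was computed must recompute it, e.g.:
 *
 *	iph->ttl--;
 *	ip_send_check(iph);	// checksum covers only the IP header
 *
 * (The forwarding path instead uses ip_decrease_ttl() from <net/ip.h>,
 * which updates the checksum incrementally; the full recompute above is
 * the simple, equivalent form.)
 */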

int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
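
/*
 * Editorial note: nf_hook() returns 1 when the LOCAL_OUT hook chain accepted
 * the packet without queueing or stealing it, in which case the caller must
 * continue transmission itself -- hence the "err == 1" check above before
 * calling dst_output().
 */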

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}
140
141 /*
142 * Add an ip header to a skbuff and send it out.
143 *
144 */
ip_build_and_send_pkt(struct sk_buff * skb,const struct sock * sk,__be32 saddr,__be32 daddr,struct ip_options_rcu * opt,u8 tos)145 int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
146 __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
147 u8 tos)
148 {
149 struct inet_sock *inet = inet_sk(sk);
150 struct rtable *rt = skb_rtable(skb);
151 struct net *net = sock_net(sk);
152 struct iphdr *iph;
153
154 /* Build the IP header. */
155 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
156 skb_reset_network_header(skb);
157 iph = ip_hdr(skb);
158 iph->version = 4;
159 iph->ihl = 5;
160 iph->tos = tos;
161 iph->ttl = ip_select_ttl(inet, &rt->dst);
162 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
163 iph->saddr = saddr;
164 iph->protocol = sk->sk_protocol;
165 if (ip_dont_fragment(sk, &rt->dst)) {
166 iph->frag_off = htons(IP_DF);
167 iph->id = 0;
168 } else {
169 iph->frag_off = 0;
170 __ip_select_ident(net, iph, 1);
171 }
172
173 if (opt && opt->opt.optlen) {
174 iph->ihl += opt->opt.optlen>>2;
175 ip_options_build(skb, &opt->opt, daddr, rt, 0);
176 }
177
178 skb->priority = sk->sk_priority;
179 if (!skb->mark)
180 skb->mark = sk->sk_mark;
181
182 /* Send it out. */
183 return ip_local_out(net, skb->sk, skb);
184 }
185 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
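
/*
 * Illustrative sketch (assumption, not from this file): the classic caller
 * is TCP's SYN-ACK path, which hands over a fully routed skb, roughly:
 *
 *	// in a tcp_v4_send_synack()-style function, rt already attached:
 *	err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 *				    ireq->ir_rmt_addr,
 *				    rcu_dereference(ireq->ireq_opt), tos);
 *
 * The ireq field names follow struct inet_request_sock and are shown for
 * orientation only.
 */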

static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		skb = skb_expand_head(skb, hh_len);
		if (!skb)
			return -ENOMEM;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 * - Forwarding of a TCP GRO skb, when DF flag is not set.
	 * - Forwarding of an skb that arrived on a virtualization interface
	 *   (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *   stack.
	 * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *   interface with a smaller MTU.
	 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *   bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *   insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
	}

	return ret;
}

static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || IPCB(skb)->frag_max_size)
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}
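
/*
 * Editorial note: the GNU "a ? : b" above returns the transmit error when
 * __ip_finish_output() fails, and otherwise propagates NET_XMIT_CN, so the
 * caller still sees the congestion-notification verdict from the BPF cgroup
 * egress program even though the packet was transmitted.
 */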

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		fallthrough;
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb(skb);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags & RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back to us after forwarding; ip_mr_input
		   would drop them anyway.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags & RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
EXPORT_SYMBOL(ip_output);
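
/*
 * Editorial note: NF_HOOK_COND() invokes the POST_ROUTING hook only when its
 * final condition argument is true; packets already rerouted by netfilter
 * (IPSKB_REROUTED) skip the hook here and go straight to ip_finish_output().
 */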

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));

	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;
}
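
/*
 * Editorial note: the BUILD_BUG_ON() asserts at compile time that saddr and
 * daddr are adjacent in struct flowi4 (they are likewise adjacent in struct
 * iphdr), which lets the compiler merge the two 32-bit stores into a single
 * 64-bit move. A sketch of the explicit equivalent:
 *
 *	memcpy(&iph->saddr, &fl4->saddr, 2 * sizeof(__be32));
 */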

/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until the route appears or the
		 * connection times itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* The transport layer has already set up its own header. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}
EXPORT_SYMBOL(ip_queue_xmit);
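
/*
 * Illustrative sketch (assumption, not from this file): a connected
 * transport such as TCP transmits each segment through this entry point,
 * roughly:
 *
 *	// in an __tcp_transmit_skb()-style path:
 *	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
 *
 * where .queue_xmit is ip_queue_xmit for IPv4 sockets.
 */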

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);

void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu, bool DF,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->DF = DF;
	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);
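
/*
 * Illustrative sketch (editorial, mirroring the slow path of
 * ip_do_fragment() below): initialise the state once, then pull fully
 * formed fragments until the payload is exhausted:
 *
 *	struct ip_frag_state state;
 *	struct sk_buff *frag;
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);
 *	while (state.left > 0) {
 *		frag = ip_frag_next(skb, &state);
 *		if (IS_ERR(frag))
 *			break;
 *		err = output(net, sk, frag);
 *	}
 */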

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left) {
		len &= ~7;
	}

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons(state->offset >> 3);
	if (state->DF)
		iph->frag_off |= htons(IP_DF);

	/*
	 *	Added AC : If we are fragmenting a fragment that's not the
	 *		   last fragment then keep the MF bit set on each
	 *		   fragment we produce.
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);
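
/*
 * Worked example (editorial): with an egress MTU of 1500 and a 20-byte
 * header, each fragment can carry mtu - hlen = 1480 payload bytes, already
 * a multiple of 8. A 4000-byte payload therefore yields fragments with
 * payload/offset pairs of 1480/0, 1480/1480 and 1040/2960; the header
 * stores each offset divided by 8 (0, 185, 370) and sets IP_MF on all but
 * the last fragment.
 */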

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit
 *	in a single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	ktime_t tstamp = skb->tstamp;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In such a case, fall
	 * back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		if (iter.frag)
			ip_options_fragment(iter.frag);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (iter.frag) {
				IPCB(iter.frag)->flags = IPCB(skb)->flags;
				ip_fraglist_prepare(skb, &iter);
			}

			skb->tstamp = tstamp;
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
		     &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb2->tstamp = tstamp;
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
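
/*
 * Illustrative usage (assumption, based on the datagram paths that consume
 * this helper): UDP passes its struct msghdr as the opaque "from" cookie
 * when appending payload, along the lines of:
 *
 *	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, ulen,
 *			     sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 */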

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we want
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;

	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
		uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
		if (!uarg)
			return -ENOBUFS;
		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
		if (rt->dst.dev->features & NETIF_F_SG &&
		    csummode == CHECKSUM_PARTIAL) {
			paged = true;
		} else {
			uarg->zerocopy = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);
		}
	}

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chain of skbs,
	 * each of which is an IP fragment ready for sending to the network
	 * once an appropriate IP header is added.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen, alloc_extra;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			alloc_extra = hh_len + 15;
			alloc_extra += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloc_extra += rt->dst.trailer_len;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else if (!paged &&
				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
				  !(rt->dst.dev->features & NETIF_F_SG)))
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += alloc_extra;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk, alloclen,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (!uarg || !uarg->zerocopy) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	net_zcopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}

	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);

	if (!inetdev_valid_mtu(cork->fragsize))
		return -ENETUNREACH;

	cork->gso_size = ipc->gso_size;

	cork->dst = &rt->dst;
	/* We stole this route, caller should not release it. */
	*rtp = NULL;

	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->mark = ipc->sockc.mark;
	cork->priority = ipc->priority;
	cork->transmit_time = ipc->sockc.transmit_time;
	cork->tx_flags = 0;
	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP; other transport protocols - e.g. raw sockets - can
 *	potentially use this interface as well.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags & MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}
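
/*
 * Illustrative flow (editorial sketch): a corked send accumulates pieces
 * and flushes them as one datagram:
 *
 *	err = ip_append_data(sk, fl4, getfrag, msg, len,
 *			     transhdrlen, &ipc, &rt, msg->msg_flags);
 *	if (!err && !(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, fl4);	// build + transmit
 *	else if (err)
 *		ip_flush_pending_frames(sk);		// drop queued pieces
 */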

ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags & MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features & NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;

	while (size > 0) {
		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;

		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		refcount_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push it out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
	skb->mark = cork->mark;
	skb->tstamp = cork->transmit_time;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags)
{
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->flags = 0;
	cork->addr = 0;
	cork->opt = NULL;
	err = ip_setup_cork(sk, cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, cork);
}
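
/*
 * Editorial note: ip_make_skb() is the single-shot counterpart of the
 * ip_append_data()/ip_push_pending_frames() pair -- the cork lives on the
 * caller's stack instead of on the socket, so no pending state is left
 * behind. UDP's uncorked send path is the typical user, passing the result
 * on to ip_send_skb() (cited from memory of the UDP path).
 */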

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr + offset, to, len);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipcm_init(&ipc);
	ipc.addr = daddr;
	ipc.sockc.transmit_time = transmit_time;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;

	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	ipc.sockc.mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}