1 /*
2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
6 * cluster of servers.
7 *
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19 * and others.
20 *
21 * Changes:
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
24 *
25 */
26
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
45
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54
55 #include <net/ip_vs.h>
56
57
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71 EXPORT_SYMBOL(ip_vs_new_conn_out);
72
73 static unsigned int ip_vs_net_id __read_mostly;
74 /* netns cnt used for uniqueness */
75 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
76
77 /* ID used in ICMP lookups */
78 #define icmp_id(icmph) (((icmph)->un).echo.id)
79 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
80
81 const char *ip_vs_proto_name(unsigned int proto)
82 {
83 static char buf[20];
84
85 switch (proto) {
86 case IPPROTO_IP:
87 return "IP";
88 case IPPROTO_UDP:
89 return "UDP";
90 case IPPROTO_TCP:
91 return "TCP";
92 case IPPROTO_SCTP:
93 return "SCTP";
94 case IPPROTO_ICMP:
95 return "ICMP";
96 #ifdef CONFIG_IP_VS_IPV6
97 case IPPROTO_ICMPV6:
98 return "ICMPv6";
99 #endif
100 default:
101 sprintf(buf, "IP_%u", proto);
102 return buf;
103 }
104 }
105
106 void ip_vs_init_hash_table(struct list_head *table, int rows)
107 {
108 while (--rows >= 0)
109 INIT_LIST_HEAD(&table[rows]);
110 }
111
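/*
 * Account an incoming packet to the real server, its virtual service and
 * the per-netns totals. All three are per-CPU counters updated under
 * their own u64_stats seqcount, with BHs disabled.
 */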
112 static inline void
113 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
114 {
115 struct ip_vs_dest *dest = cp->dest;
116 struct netns_ipvs *ipvs = cp->ipvs;
117
118 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
119 struct ip_vs_cpu_stats *s;
120 struct ip_vs_service *svc;
121
122 local_bh_disable();
123
124 s = this_cpu_ptr(dest->stats.cpustats);
125 u64_stats_update_begin(&s->syncp);
126 s->cnt.inpkts++;
127 s->cnt.inbytes += skb->len;
128 u64_stats_update_end(&s->syncp);
129
130 svc = rcu_dereference(dest->svc);
131 s = this_cpu_ptr(svc->stats.cpustats);
132 u64_stats_update_begin(&s->syncp);
133 s->cnt.inpkts++;
134 s->cnt.inbytes += skb->len;
135 u64_stats_update_end(&s->syncp);
136
137 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
138 u64_stats_update_begin(&s->syncp);
139 s->cnt.inpkts++;
140 s->cnt.inbytes += skb->len;
141 u64_stats_update_end(&s->syncp);
142
143 local_bh_enable();
144 }
145 }
146
147
148 static inline void
149 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
150 {
151 struct ip_vs_dest *dest = cp->dest;
152 struct netns_ipvs *ipvs = cp->ipvs;
153
154 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
155 struct ip_vs_cpu_stats *s;
156 struct ip_vs_service *svc;
157
158 local_bh_disable();
159
160 s = this_cpu_ptr(dest->stats.cpustats);
161 u64_stats_update_begin(&s->syncp);
162 s->cnt.outpkts++;
163 s->cnt.outbytes += skb->len;
164 u64_stats_update_end(&s->syncp);
165
166 svc = rcu_dereference(dest->svc);
167 s = this_cpu_ptr(svc->stats.cpustats);
168 u64_stats_update_begin(&s->syncp);
169 s->cnt.outpkts++;
170 s->cnt.outbytes += skb->len;
171 u64_stats_update_end(&s->syncp);
172
173 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
174 u64_stats_update_begin(&s->syncp);
175 s->cnt.outpkts++;
176 s->cnt.outbytes += skb->len;
177 u64_stats_update_end(&s->syncp);
178
179 local_bh_enable();
180 }
181 }
182
183
184 static inline void
185 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
186 {
187 struct netns_ipvs *ipvs = svc->ipvs;
188 struct ip_vs_cpu_stats *s;
189
190 local_bh_disable();
191
192 s = this_cpu_ptr(cp->dest->stats.cpustats);
193 u64_stats_update_begin(&s->syncp);
194 s->cnt.conns++;
195 u64_stats_update_end(&s->syncp);
196
197 s = this_cpu_ptr(svc->stats.cpustats);
198 u64_stats_update_begin(&s->syncp);
199 s->cnt.conns++;
200 u64_stats_update_end(&s->syncp);
201
202 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
203 u64_stats_update_begin(&s->syncp);
204 s->cnt.conns++;
205 u64_stats_update_end(&s->syncp);
206
207 local_bh_enable();
208 }
209
210
211 static inline void
212 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
213 const struct sk_buff *skb,
214 struct ip_vs_proto_data *pd)
215 {
216 if (likely(pd->pp->state_transition))
217 pd->pp->state_transition(cp, direction, skb, pd);
218 }
219
220 static inline int
221 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
222 struct sk_buff *skb, int protocol,
223 const union nf_inet_addr *caddr, __be16 cport,
224 const union nf_inet_addr *vaddr, __be16 vport,
225 struct ip_vs_conn_param *p)
226 {
227 ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
228 vport, p);
229 p->pe = rcu_dereference(svc->pe);
230 if (p->pe && p->pe->fill_param)
231 return p->pe->fill_param(p, skb);
232
233 return 0;
234 }
235
236 /*
237 * IPVS persistent scheduling function
238 * It creates a connection entry according to its template if one exists,
239 * or selects a server and creates a connection entry plus a template.
240 * Locking: we are svc user (svc->refcnt), so we hold all dests too
241 * Protocols supported: TCP, UDP
242 */
243 static struct ip_vs_conn *
244 ip_vs_sched_persist(struct ip_vs_service *svc,
245 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
246 int *ignored, struct ip_vs_iphdr *iph)
247 {
248 struct ip_vs_conn *cp = NULL;
249 struct ip_vs_dest *dest;
250 struct ip_vs_conn *ct;
251 __be16 dport = 0; /* destination port to forward */
252 unsigned int flags;
253 struct ip_vs_conn_param param;
254 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
255 union nf_inet_addr snet; /* source network of the client,
256 after masking */
257 const union nf_inet_addr *src_addr, *dst_addr;
258
259 if (likely(!ip_vs_iph_inverse(iph))) {
260 src_addr = &iph->saddr;
261 dst_addr = &iph->daddr;
262 } else {
263 src_addr = &iph->daddr;
264 dst_addr = &iph->saddr;
265 }
266
267
268 /* Mask saddr with the netmask to adjust template granularity */
269 #ifdef CONFIG_IP_VS_IPV6
270 if (svc->af == AF_INET6)
271 ipv6_addr_prefix(&snet.in6, &src_addr->in6,
272 (__force __u32) svc->netmask);
273 else
274 #endif
275 snet.ip = src_addr->ip & svc->netmask;
276
277 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
278 "mnet %s\n",
279 IP_VS_DBG_ADDR(svc->af, src_addr), ntohs(src_port),
280 IP_VS_DBG_ADDR(svc->af, dst_addr), ntohs(dst_port),
281 IP_VS_DBG_ADDR(svc->af, &snet));
282
283 /*
284 * As far as we know, FTP is a rather complicated network protocol, and
285 * it uses a control connection plus separate data connections. For active
286 * FTP, the FTP server initiates the data connection to the client, usually
287 * from source port 20. For passive FTP, the FTP server tells the client
288 * which port it passively listens on, and the client opens the data
289 * connection. In tunneling or direct routing mode, the load balancer only
290 * sees the client-to-server half of the connection, so the port
291 * number is unknown to the load balancer. So, a conn template like
292 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
293 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
294 * is created for other persistent services.
295 */
296 {
297 int protocol = iph->protocol;
298 const union nf_inet_addr *vaddr = dst_addr;
299 __be16 vport = 0;
300
301 if (dst_port == svc->port) {
302 /* non-FTP template:
303 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
304 * FTP template:
305 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
306 */
307 if (svc->port != FTPPORT)
308 vport = dst_port;
309 } else {
310 /* Note: persistent fwmark-based services and
311 * persistent port zero service are handled here.
312 * fwmark template:
313 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
314 * port zero template:
315 * <protocol,caddr,0,vaddr,0,daddr,0>
316 */
317 if (svc->fwmark) {
318 protocol = IPPROTO_IP;
319 vaddr = &fwmark;
320 }
321 }
322 /* return *ignored = -1 so NF_DROP can be used */
323 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
324 vaddr, vport, &param) < 0) {
325 *ignored = -1;
326 return NULL;
327 }
328 }
329
330 /* Check if a template already exists */
331 ct = ip_vs_ct_in_get(&param);
332 if (!ct || !ip_vs_check_template(ct, NULL)) {
333 struct ip_vs_scheduler *sched;
334
335 /*
336 * No template found or the dest of the connection
337 * template is not available.
338 * return *ignored=0 i.e. ICMP and NF_DROP
339 */
340 sched = rcu_dereference(svc->scheduler);
341 if (sched) {
342 /* read svc->sched_data after svc->scheduler */
343 smp_rmb();
344 dest = sched->schedule(svc, skb, iph);
345 } else {
346 dest = NULL;
347 }
348 if (!dest) {
349 IP_VS_DBG(1, "p-schedule: no dest found.\n");
350 kfree(param.pe_data);
351 *ignored = 0;
352 return NULL;
353 }
354
355 if (dst_port == svc->port && svc->port != FTPPORT)
356 dport = dest->port;
357
358 /* Create a template
359 * This adds param.pe_data to the template,
360 * and thus param.pe_data will be destroyed
361 * when the template expires */
362 ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
363 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
364 if (ct == NULL) {
365 kfree(param.pe_data);
366 *ignored = -1;
367 return NULL;
368 }
369
370 ct->timeout = svc->timeout;
371 } else {
372 /* set destination with the found template */
373 dest = ct->dest;
374 kfree(param.pe_data);
375 }
376
377 dport = dst_port;
378 if (dport == svc->port && dest->port)
379 dport = dest->port;
380
381 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
382 && iph->protocol == IPPROTO_UDP) ?
383 IP_VS_CONN_F_ONE_PACKET : 0;
384
385 /*
386 * Create a new connection according to the template
387 */
388 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, src_addr,
389 src_port, dst_addr, dst_port, &param);
390
391 cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
392 skb->mark);
393 if (cp == NULL) {
394 ip_vs_conn_put(ct);
395 *ignored = -1;
396 return NULL;
397 }
398
399 /*
400 * Add its control
401 */
402 ip_vs_control_add(cp, ct);
403 ip_vs_conn_put(ct);
404
405 ip_vs_conn_stats(cp, svc);
406 return cp;
407 }
408
409
410 /*
411 * IPVS main scheduling function
412 * It selects a server according to the virtual service, and
413 * creates a connection entry.
414 * Protocols supported: TCP, UDP
415 *
416 * Usage of *ignored
417 *
418 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
419 * svc/scheduler decides that this packet should be accepted with
420 * NF_ACCEPT because it must not be scheduled.
421 *
422 * 0 : scheduler cannot find a destination, so try bypass or
423 * return ICMP and then NF_DROP (ip_vs_leave).
424 *
425 * -1 : scheduler tried to schedule but a fatal error occurred, e.g.
426 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
427 * failure such as missing Call-ID, ENOMEM on skb_linearize
428 * or pe_data. In this case we should return NF_DROP without
429 * any attempts to send ICMP with ip_vs_leave.
430 */
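/*
 * Rough caller sketch (not a definitive contract): the protocol
 * conn_schedule handlers are expected to consume *ignored along
 * these lines:
 *
 *	cp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
 *	if (!cp && ignored <= 0) {
 *		*verdict = ignored ? NF_DROP : ip_vs_leave(svc, skb, pd, iph);
 *		return 0;
 *	}
 */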
431 struct ip_vs_conn *
432 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
433 struct ip_vs_proto_data *pd, int *ignored,
434 struct ip_vs_iphdr *iph)
435 {
436 struct ip_vs_protocol *pp = pd->pp;
437 struct ip_vs_conn *cp = NULL;
438 struct ip_vs_scheduler *sched;
439 struct ip_vs_dest *dest;
440 __be16 _ports[2], *pptr, cport, vport;
441 const void *caddr, *vaddr;
442 unsigned int flags;
443
444 *ignored = 1;
445 /*
446 * For IPv6 fragments, only the first fragment gets here.
447 */
448 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
449 if (pptr == NULL)
450 return NULL;
451
452 if (likely(!ip_vs_iph_inverse(iph))) {
453 cport = pptr[0];
454 caddr = &iph->saddr;
455 vport = pptr[1];
456 vaddr = &iph->daddr;
457 } else {
458 cport = pptr[1];
459 caddr = &iph->daddr;
460 vport = pptr[0];
461 vaddr = &iph->saddr;
462 }
463
464 /*
465 * FTPDATA needs this check when using local real server.
466 * Never schedule Active FTPDATA connections from real server.
467 * For LVS-NAT they must be already created. For other methods
468 * with persistence the connection is created on SYN+ACK.
469 */
470 if (cport == FTPDATA) {
471 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
472 "Not scheduling FTPDATA");
473 return NULL;
474 }
475
476 /*
477 * Do not schedule replies from local real server.
478 */
479 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
480 iph->hdr_flags ^= IP_VS_HDR_INVERSE;
481 cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph);
482 iph->hdr_flags ^= IP_VS_HDR_INVERSE;
483
484 if (cp) {
485 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
486 "Not scheduling reply for existing"
487 " connection");
488 __ip_vs_conn_put(cp);
489 return NULL;
490 }
491 }
492
493 /*
494 * Persistent service
495 */
496 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
497 return ip_vs_sched_persist(svc, skb, cport, vport, ignored,
498 iph);
499
500 *ignored = 0;
501
502 /*
503 * Non-persistent service
504 */
505 if (!svc->fwmark && vport != svc->port) {
506 if (!svc->port)
507 pr_err("Schedule: port zero only supported "
508 "in persistent services, "
509 "check your ipvs configuration\n");
510 return NULL;
511 }
512
513 sched = rcu_dereference(svc->scheduler);
514 if (sched) {
515 /* read svc->sched_data after svc->scheduler */
516 smp_rmb();
517 dest = sched->schedule(svc, skb, iph);
518 } else {
519 dest = NULL;
520 }
521 if (dest == NULL) {
522 IP_VS_DBG(1, "Schedule: no dest found.\n");
523 return NULL;
524 }
525
526 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
527 && iph->protocol == IPPROTO_UDP) ?
528 IP_VS_CONN_F_ONE_PACKET : 0;
529
530 /*
531 * Create a connection entry.
532 */
533 {
534 struct ip_vs_conn_param p;
535
536 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
537 caddr, cport, vaddr, vport, &p);
538 cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
539 dest->port ? dest->port : vport,
540 flags, dest, skb->mark);
541 if (!cp) {
542 *ignored = -1;
543 return NULL;
544 }
545 }
546
547 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
548 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
549 ip_vs_fwd_tag(cp),
550 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
551 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
552 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
553 cp->flags, refcount_read(&cp->refcnt));
554
555 ip_vs_conn_stats(cp, svc);
556 return cp;
557 }
558
559 static inline int ip_vs_addr_is_unicast(struct net *net, int af,
560 union nf_inet_addr *addr)
561 {
562 #ifdef CONFIG_IP_VS_IPV6
563 if (af == AF_INET6)
564 return ipv6_addr_type(&addr->in6) & IPV6_ADDR_UNICAST;
565 #endif
566 return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
567 }
568
569 /*
570 * Pass or drop the packet.
571 * Called by ip_vs_in, when the virtual service is available but
572 * no destination is available for a new connection.
573 */
574 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
575 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
576 {
577 __be16 _ports[2], *pptr, dport;
578 struct netns_ipvs *ipvs = svc->ipvs;
579 struct net *net = ipvs->net;
580
581 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
582 if (!pptr)
583 return NF_DROP;
584 dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
585
586 /* If it is a fwmark-based service, the cache_bypass sysctl is on
587 and the destination is a non-local unicast address, then create
588 a cache_bypass connection entry */
589 if (sysctl_cache_bypass(ipvs) && svc->fwmark &&
590 !(iph->hdr_flags & (IP_VS_HDR_INVERSE | IP_VS_HDR_ICMP)) &&
591 ip_vs_addr_is_unicast(net, svc->af, &iph->daddr)) {
592 int ret;
593 struct ip_vs_conn *cp;
594 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
595 iph->protocol == IPPROTO_UDP) ?
596 IP_VS_CONN_F_ONE_PACKET : 0;
597 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
598
599 /* create a new connection entry */
600 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
601 {
602 struct ip_vs_conn_param p;
603 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
604 &iph->saddr, pptr[0],
605 &iph->daddr, pptr[1], &p);
606 cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
607 IP_VS_CONN_F_BYPASS | flags,
608 NULL, skb->mark);
609 if (!cp)
610 return NF_DROP;
611 }
612
613 /* statistics */
614 ip_vs_in_stats(cp, skb);
615
616 /* set state */
617 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
618
619 /* transmit the first SYN packet */
620 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
621 /* do not touch skb anymore */
622
623 if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
624 atomic_inc(&cp->control->in_pkts);
625 else
626 atomic_inc(&cp->in_pkts);
627 ip_vs_conn_put(cp);
628 return ret;
629 }
630
631 /*
632 * When a virtual FTP service is present, packets destined
633 * for other services on the VIP may get here (except services
634 * listed in the ipvs table); pass them along, because it is
635 * not IPVS's job to decide to drop the packets.
636 */
637 if (svc->port == FTPPORT && dport != FTPPORT)
638 return NF_ACCEPT;
639
640 if (unlikely(ip_vs_iph_icmp(iph)))
641 return NF_DROP;
642
643 /*
644 * Notify the client that the destination is unreachable, and
645 * release the socket buffer.
646 * Since we are at the IP layer, the TCP socket is not actually
647 * created and a TCP RST packet cannot be sent; instead,
648 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
649 */
650 #ifdef CONFIG_IP_VS_IPV6
651 if (svc->af == AF_INET6) {
652 if (!skb->dev)
653 skb->dev = net->loopback_dev;
654 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
655 } else
656 #endif
657 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
658
659 return NF_DROP;
660 }
661
662 #ifdef CONFIG_SYSCTL
663
664 static int sysctl_snat_reroute(struct netns_ipvs *ipvs)
665 {
666 return ipvs->sysctl_snat_reroute;
667 }
668
669 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs)
670 {
671 return ipvs->sysctl_nat_icmp_send;
672 }
673
674 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
675 {
676 return ipvs->sysctl_expire_nodest_conn;
677 }
678
679 #else
680
681 static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; }
682 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; }
683 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
684
685 #endif
686
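/*
 * Complete and fold the checksum over the skb data starting at @offset;
 * callers treat a non-zero result as a checksum failure.
 */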
687 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
688 {
689 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
690 }
691
692 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
693 {
694 if (NF_INET_LOCAL_IN == hooknum)
695 return IP_DEFRAG_VS_IN;
696 if (NF_INET_FORWARD == hooknum)
697 return IP_DEFRAG_VS_FWD;
698 return IP_DEFRAG_VS_OUT;
699 }
700
701 static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs,
702 struct sk_buff *skb, u_int32_t user)
703 {
704 int err;
705
706 local_bh_disable();
707 err = ip_defrag(ipvs->net, skb, user);
708 local_bh_enable();
709 if (!err)
710 ip_send_check(ip_hdr(skb));
711
712 return err;
713 }
714
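/*
 * Re-route a reply after SNAT so that policy routing sees the new source
 * address. Returns 0 when rerouting succeeded or was not needed, 1 on a
 * route lookup failure (the caller drops the packet).
 */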
715 static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
716 struct sk_buff *skb, unsigned int hooknum)
717 {
718 if (!sysctl_snat_reroute(ipvs))
719 return 0;
720 /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
721 if (NF_INET_LOCAL_IN == hooknum)
722 return 0;
723 #ifdef CONFIG_IP_VS_IPV6
724 if (af == AF_INET6) {
725 struct dst_entry *dst = skb_dst(skb);
726
727 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
728 ip6_route_me_harder(ipvs->net, skb) != 0)
729 return 1;
730 } else
731 #endif
732 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
733 ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
734 return 1;
735
736 return 0;
737 }
738
739 /*
740 * Packet has been made sufficiently writable in caller
741 * - inout: 1=in->out, 0=out->in
742 */
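/*
 * Layout being mangled here: outer IP header | ICMP header | embedded IP
 * header (ciph) | embedded transport ports.
 */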
743 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
744 struct ip_vs_conn *cp, int inout)
745 {
746 struct iphdr *iph = ip_hdr(skb);
747 unsigned int icmp_offset = iph->ihl*4;
748 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
749 icmp_offset);
750 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
751
752 if (inout) {
753 iph->saddr = cp->vaddr.ip;
754 ip_send_check(iph);
755 ciph->daddr = cp->vaddr.ip;
756 ip_send_check(ciph);
757 } else {
758 iph->daddr = cp->daddr.ip;
759 ip_send_check(iph);
760 ciph->saddr = cp->daddr.ip;
761 ip_send_check(ciph);
762 }
763
764 /* the TCP/UDP/SCTP port */
765 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
766 IPPROTO_SCTP == ciph->protocol) {
767 __be16 *ports = (void *)ciph + ciph->ihl*4;
768
769 if (inout)
770 ports[1] = cp->vport;
771 else
772 ports[0] = cp->dport;
773 }
774
775 /* And finally the ICMP checksum */
776 icmph->checksum = 0;
777 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
778 skb->ip_summed = CHECKSUM_UNNECESSARY;
779
780 if (inout)
781 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
782 "Forwarding altered outgoing ICMP");
783 else
784 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
785 "Forwarding altered incoming ICMP");
786 }
787
788 #ifdef CONFIG_IP_VS_IPV6
789 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
790 struct ip_vs_conn *cp, int inout)
791 {
792 struct ipv6hdr *iph = ipv6_hdr(skb);
793 unsigned int icmp_offset = 0;
794 unsigned int offs = 0; /* header offset*/
795 int protocol;
796 struct icmp6hdr *icmph;
797 struct ipv6hdr *ciph;
798 unsigned short fragoffs;
799
800 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
801 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
802 offs = icmp_offset + sizeof(struct icmp6hdr);
803 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
804
805 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
806
807 if (inout) {
808 iph->saddr = cp->vaddr.in6;
809 ciph->daddr = cp->vaddr.in6;
810 } else {
811 iph->daddr = cp->daddr.in6;
812 ciph->saddr = cp->daddr.in6;
813 }
814
815 /* the TCP/UDP/SCTP port */
816 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
817 IPPROTO_SCTP == protocol)) {
818 __be16 *ports = (void *)(skb_network_header(skb) + offs);
819
820 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
821 ntohs(inout ? ports[1] : ports[0]),
822 ntohs(inout ? cp->vport : cp->dport));
823 if (inout)
824 ports[1] = cp->vport;
825 else
826 ports[0] = cp->dport;
827 }
828
829 /* And finally the ICMP checksum */
830 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
831 skb->len - icmp_offset,
832 IPPROTO_ICMPV6, 0);
833 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
834 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
835 skb->ip_summed = CHECKSUM_PARTIAL;
836
837 if (inout)
838 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
839 (void *)ciph - (void *)iph,
840 "Forwarding altered outgoing ICMPv6");
841 else
842 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
843 (void *)ciph - (void *)iph,
844 "Forwarding altered incoming ICMPv6");
845 }
846 #endif
847
848 /* Handle relevant response ICMP messages - forward to the right
849 * destination host.
850 */
851 static int handle_response_icmp(int af, struct sk_buff *skb,
852 union nf_inet_addr *snet,
853 __u8 protocol, struct ip_vs_conn *cp,
854 struct ip_vs_protocol *pp,
855 unsigned int offset, unsigned int ihl,
856 unsigned int hooknum)
857 {
858 unsigned int verdict = NF_DROP;
859
860 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
861 goto ignore_cp;
862
863 /* Ensure the checksum is correct */
864 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
865 /* Failed checksum! */
866 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
867 IP_VS_DBG_ADDR(af, snet));
868 goto out;
869 }
870
871 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
872 IPPROTO_SCTP == protocol)
873 offset += 2 * sizeof(__u16);
874 if (!skb_make_writable(skb, offset))
875 goto out;
876
877 #ifdef CONFIG_IP_VS_IPV6
878 if (af == AF_INET6)
879 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
880 else
881 #endif
882 ip_vs_nat_icmp(skb, pp, cp, 1);
883
884 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
885 goto out;
886
887 /* do the statistics and put it back */
888 ip_vs_out_stats(cp, skb);
889
890 skb->ipvs_property = 1;
891 if (!(cp->flags & IP_VS_CONN_F_NFCT))
892 ip_vs_notrack(skb);
893 else
894 ip_vs_update_conntrack(skb, cp, 0);
895
896 ignore_cp:
897 verdict = NF_ACCEPT;
898
899 out:
900 __ip_vs_conn_put(cp);
901
902 return verdict;
903 }
904
905 /*
906 * Handle ICMP messages in the inside-to-outside direction (outgoing).
907 * Find any that might be relevant, check against existing connections.
908 * Currently handles error types - unreachable, quench, ttl exceeded.
909 */
910 static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
911 int *related, unsigned int hooknum)
912 {
913 struct iphdr *iph;
914 struct icmphdr _icmph, *ic;
915 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
916 struct ip_vs_iphdr ciph;
917 struct ip_vs_conn *cp;
918 struct ip_vs_protocol *pp;
919 unsigned int offset, ihl;
920 union nf_inet_addr snet;
921
922 *related = 1;
923
924 /* reassemble IP fragments */
925 if (ip_is_fragment(ip_hdr(skb))) {
926 if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
927 return NF_STOLEN;
928 }
929
930 iph = ip_hdr(skb);
931 offset = ihl = iph->ihl * 4;
932 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
933 if (ic == NULL)
934 return NF_DROP;
935
936 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
937 ic->type, ntohs(icmp_id(ic)),
938 &iph->saddr, &iph->daddr);
939
940 /*
941 * Work through seeing if this is for us.
942 * These checks are supposed to be in an order that means easy
943 * things are checked first to speed up processing.... however
944 * this means that some packets will manage to get a long way
945 * down this stack and then be rejected, but that's life.
946 */
947 if ((ic->type != ICMP_DEST_UNREACH) &&
948 (ic->type != ICMP_SOURCE_QUENCH) &&
949 (ic->type != ICMP_TIME_EXCEEDED)) {
950 *related = 0;
951 return NF_ACCEPT;
952 }
953
954 /* Now find the contained IP header */
955 offset += sizeof(_icmph);
956 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
957 if (cih == NULL)
958 return NF_ACCEPT; /* The packet looks wrong, ignore */
959
960 pp = ip_vs_proto_get(cih->protocol);
961 if (!pp)
962 return NF_ACCEPT;
963
964 /* Is the embedded protocol header present? */
965 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
966 pp->dont_defrag))
967 return NF_ACCEPT;
968
969 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
970 "Checking outgoing ICMP for");
971
972 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
973
974 /* The embedded headers contain source and dest in reverse order */
975 cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph);
976 if (!cp)
977 return NF_ACCEPT;
978
979 snet.ip = iph->saddr;
980 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
981 pp, ciph.len, ihl, hooknum);
982 }
983
984 #ifdef CONFIG_IP_VS_IPV6
985 static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
986 int *related, unsigned int hooknum,
987 struct ip_vs_iphdr *ipvsh)
988 {
989 struct icmp6hdr _icmph, *ic;
990 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
991 struct ip_vs_conn *cp;
992 struct ip_vs_protocol *pp;
993 union nf_inet_addr snet;
994 unsigned int offset;
995
996 *related = 1;
997 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph);
998 if (ic == NULL)
999 return NF_DROP;
1000
1001 /*
1002 * Work through seeing if this is for us.
1003 * These checks are supposed to be in an order that means easy
1004 * things are checked first to speed up processing.... however
1005 * this means that some packets will manage to get a long way
1006 * down this stack and then be rejected, but that's life.
1007 */
1008 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1009 *related = 0;
1010 return NF_ACCEPT;
1011 }
1012 /* A fragment header before the ICMP header tells us that
1013 * this is not an error message, since error messages can't be fragmented.
1014 */
1015 if (ipvsh->flags & IP6_FH_F_FRAG)
1016 return NF_DROP;
1017
1018 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1019 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1020 &ipvsh->saddr, &ipvsh->daddr);
1021
1022 if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, ipvsh->len + sizeof(_icmph),
1023 true, &ciph))
1024 return NF_ACCEPT; /* The packet looks wrong, ignore */
1025
1026 pp = ip_vs_proto_get(ciph.protocol);
1027 if (!pp)
1028 return NF_ACCEPT;
1029
1030 /* The embedded headers contain source and dest in reverse order */
1031 cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph);
1032 if (!cp)
1033 return NF_ACCEPT;
1034
1035 snet.in6 = ciph.saddr.in6;
1036 offset = ciph.len;
1037 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
1038 pp, offset, sizeof(struct ipv6hdr),
1039 hooknum);
1040 }
1041 #endif
1042
1043 /*
1044 * Check if the SCTP chunk is an ABORT chunk
1045 */
1046 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
1047 {
1048 struct sctp_chunkhdr *sch, schunk;
1049 sch = skb_header_pointer(skb, nh_len + sizeof(struct sctphdr),
1050 sizeof(schunk), &schunk);
1051 if (sch == NULL)
1052 return 0;
1053 if (sch->type == SCTP_CID_ABORT)
1054 return 1;
1055 return 0;
1056 }
1057
1058 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1059 {
1060 struct tcphdr _tcph, *th;
1061
1062 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1063 if (th == NULL)
1064 return 0;
1065 return th->rst;
1066 }
1067
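/* A packet starts a new connection if it is a TCP SYN or an SCTP INIT chunk. */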
1068 static inline bool is_new_conn(const struct sk_buff *skb,
1069 struct ip_vs_iphdr *iph)
1070 {
1071 switch (iph->protocol) {
1072 case IPPROTO_TCP: {
1073 struct tcphdr _tcph, *th;
1074
1075 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1076 if (th == NULL)
1077 return false;
1078 return th->syn;
1079 }
1080 case IPPROTO_SCTP: {
1081 struct sctp_chunkhdr *sch, schunk;
1082
1083 sch = skb_header_pointer(skb, iph->len + sizeof(struct sctphdr),
1084 sizeof(schunk), &schunk);
1085 if (sch == NULL)
1086 return false;
1087 return sch->type == SCTP_CID_INIT;
1088 }
1089 default:
1090 return false;
1091 }
1092 }
1093
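/*
 * With conn_reuse_mode enabled, decide whether an existing connection in
 * an expired-like state may be replaced by a new one: never for controlled
 * connections, only for TCP in TIME_WAIT/CLOSE (or FIN_WAIT with no output
 * seen, when the 2 bit of conn_reuse_mode is set) and for SCTP in CLOSED.
 */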
1094 static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
1095 int conn_reuse_mode)
1096 {
1097 /* Controlled (FTP DATA or persistence)? */
1098 if (cp->control)
1099 return false;
1100
1101 switch (cp->protocol) {
1102 case IPPROTO_TCP:
1103 return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
1104 (cp->state == IP_VS_TCP_S_CLOSE) ||
1105 ((conn_reuse_mode & 2) &&
1106 (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
1107 (cp->flags & IP_VS_CONN_F_NOOUTPUT));
1108 case IPPROTO_SCTP:
1109 return cp->state == IP_VS_SCTP_S_CLOSED;
1110 default:
1111 return false;
1112 }
1113 }
1114
1115 /* Generic function to create new connections for outgoing RS packets
1116 *
1117 * Pre-requisites for successful connection creation:
1118 * 1) Virtual Service is NOT fwmark based:
1119 * In fwmark-VS actual vaddr and vport are unknown to IPVS
1120 * 2) Real Server and Virtual Service were NOT configured without port:
1121 * This is to allow matching different VSs to the same RS ip-addr
1122 */
1123 struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
1124 struct ip_vs_dest *dest,
1125 struct sk_buff *skb,
1126 const struct ip_vs_iphdr *iph,
1127 __be16 dport,
1128 __be16 cport)
1129 {
1130 struct ip_vs_conn_param param;
1131 struct ip_vs_conn *ct = NULL, *cp = NULL;
1132 const union nf_inet_addr *vaddr, *daddr, *caddr;
1133 union nf_inet_addr snet;
1134 __be16 vport;
1135 unsigned int flags;
1136
1137 EnterFunction(12);
1138 vaddr = &svc->addr;
1139 vport = svc->port;
1140 daddr = &iph->saddr;
1141 caddr = &iph->daddr;
1142
1143 /* check pre-requisites are satisfied */
1144 if (svc->fwmark)
1145 return NULL;
1146 if (!vport || !dport)
1147 return NULL;
1148
1149 /* for persistent service first create connection template */
1150 if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
1151 /* apply netmask the same way ingress-side does */
1152 #ifdef CONFIG_IP_VS_IPV6
1153 if (svc->af == AF_INET6)
1154 ipv6_addr_prefix(&snet.in6, &caddr->in6,
1155 (__force __u32)svc->netmask);
1156 else
1157 #endif
1158 snet.ip = caddr->ip & svc->netmask;
1159 /* fill params and create template if not existent */
1160 if (ip_vs_conn_fill_param_persist(svc, skb, iph->protocol,
1161 &snet, 0, vaddr,
1162 vport, &param) < 0)
1163 return NULL;
1164 ct = ip_vs_ct_in_get(&param);
1165 /* check if template exists and points to the same dest */
1166 if (!ct || !ip_vs_check_template(ct, dest)) {
1167 ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
1168 IP_VS_CONN_F_TEMPLATE, dest, 0);
1169 if (!ct) {
1170 kfree(param.pe_data);
1171 return NULL;
1172 }
1173 ct->timeout = svc->timeout;
1174 } else {
1175 kfree(param.pe_data);
1176 }
1177 }
1178
1179 /* connection flags */
1180 flags = ((svc->flags & IP_VS_SVC_F_ONEPACKET) &&
1181 iph->protocol == IPPROTO_UDP) ? IP_VS_CONN_F_ONE_PACKET : 0;
1182 /* create connection */
1183 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
1184 caddr, cport, vaddr, vport, &param);
1185 cp = ip_vs_conn_new(&param, dest->af, daddr, dport, flags, dest, 0);
1186 if (!cp) {
1187 if (ct)
1188 ip_vs_conn_put(ct);
1189 return NULL;
1190 }
1191 if (ct) {
1192 ip_vs_control_add(cp, ct);
1193 ip_vs_conn_put(ct);
1194 }
1195 ip_vs_conn_stats(cp, svc);
1196
1197 /* return connection (will be used to handle outgoing packet) */
1198 IP_VS_DBG_BUF(6, "New connection RS-initiated:%c c:%s:%u v:%s:%u "
1199 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
1200 ip_vs_fwd_tag(cp),
1201 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
1202 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
1203 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
1204 cp->flags, refcount_read(&cp->refcnt));
1205 LeaveFunction(12);
1206 return cp;
1207 }
1208
1209 /* Handle outgoing packets which are considered requests initiated by
1210 * real servers, so that subsequent responses from external client can be
1211 * routed to the right real server.
1212 * Used also for outgoing responses in OPS mode.
1213 *
1214 * Connection management is handled by persistent-engine specific callback.
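 * (The SIP persistence engine, for instance, points its conn_out callback
 * at ip_vs_new_conn_out above.)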
1215 */
1216 static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
1217 struct netns_ipvs *ipvs,
1218 int af, struct sk_buff *skb,
1219 const struct ip_vs_iphdr *iph)
1220 {
1221 struct ip_vs_dest *dest;
1222 struct ip_vs_conn *cp = NULL;
1223 __be16 _ports[2], *pptr;
1224
1225 if (hooknum == NF_INET_LOCAL_IN)
1226 return NULL;
1227
1228 pptr = frag_safe_skb_hp(skb, iph->len,
1229 sizeof(_ports), _ports);
1230 if (!pptr)
1231 return NULL;
1232
1233 dest = ip_vs_find_real_service(ipvs, af, iph->protocol,
1234 &iph->saddr, pptr[0]);
1235 if (dest) {
1236 struct ip_vs_service *svc;
1237 struct ip_vs_pe *pe;
1238
1239 svc = rcu_dereference(dest->svc);
1240 if (svc) {
1241 pe = rcu_dereference(svc->pe);
1242 if (pe && pe->conn_out)
1243 cp = pe->conn_out(svc, dest, skb, iph,
1244 pptr[0], pptr[1]);
1245 }
1246 }
1247
1248 return cp;
1249 }
1250
1251 /* Handle response packets: rewrite addresses and send away...
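 * Returns NF_ACCEPT on success, or NF_STOLEN when the skb had to be dropped.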
1252 */
1253 static unsigned int
1254 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1255 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1256 unsigned int hooknum)
1257 {
1258 struct ip_vs_protocol *pp = pd->pp;
1259
1260 IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
1261
1262 if (!skb_make_writable(skb, iph->len))
1263 goto drop;
1264
1265 /* mangle the packet */
1266 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1267 goto drop;
1268
1269 #ifdef CONFIG_IP_VS_IPV6
1270 if (af == AF_INET6)
1271 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1272 else
1273 #endif
1274 {
1275 ip_hdr(skb)->saddr = cp->vaddr.ip;
1276 ip_send_check(ip_hdr(skb));
1277 }
1278
1279 /*
1280 * nf_iterate does not expect change in the skb->dst->dev.
1281 * It looks like it is not fatal to enable this code for hooks
1282 * where our handlers are at the end of the chain list and
1283 * when all next handlers use skb->dst->dev and not outdev.
1284 * It will definitely route properly the inout NAT traffic
1285 * when multiple paths are used.
1286 */
1287
1288 /* For policy routing, packets originating from this
1289 * machine itself may be routed differently to packets
1290 * passing through. We want this packet to be routed as
1291 * if it came from this machine itself. So re-compute
1292 * the routing information.
1293 */
1294 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
1295 goto drop;
1296
1297 IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
1298
1299 ip_vs_out_stats(cp, skb);
1300 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1301 skb->ipvs_property = 1;
1302 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1303 ip_vs_notrack(skb);
1304 else
1305 ip_vs_update_conntrack(skb, cp, 0);
1306 ip_vs_conn_put(cp);
1307
1308 LeaveFunction(11);
1309 return NF_ACCEPT;
1310
1311 drop:
1312 ip_vs_conn_put(cp);
1313 kfree_skb(skb);
1314 LeaveFunction(11);
1315 return NF_STOLEN;
1316 }
1317
1318 /*
1319 * Check if outgoing packet belongs to the established ip_vs_conn.
1320 */
1321 static unsigned int
1322 ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1323 {
1324 struct ip_vs_iphdr iph;
1325 struct ip_vs_protocol *pp;
1326 struct ip_vs_proto_data *pd;
1327 struct ip_vs_conn *cp;
1328 struct sock *sk;
1329
1330 EnterFunction(11);
1331
1332 /* Already marked as IPVS request or reply? */
1333 if (skb->ipvs_property)
1334 return NF_ACCEPT;
1335
1336 sk = skb_to_full_sk(skb);
1337 /* Bad... Do not break raw sockets */
1338 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1339 af == AF_INET)) {
1340
1341 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1342 return NF_ACCEPT;
1343 }
1344
1345 if (unlikely(!skb_dst(skb)))
1346 return NF_ACCEPT;
1347
1348 if (!ipvs->enable)
1349 return NF_ACCEPT;
1350
1351 ip_vs_fill_iph_skb(af, skb, false, &iph);
1352 #ifdef CONFIG_IP_VS_IPV6
1353 if (af == AF_INET6) {
1354 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1355 int related;
1356 int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
1357 hooknum, &iph);
1358
1359 if (related)
1360 return verdict;
1361 }
1362 } else
1363 #endif
1364 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1365 int related;
1366 int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
1367
1368 if (related)
1369 return verdict;
1370 }
1371
1372 pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1373 if (unlikely(!pd))
1374 return NF_ACCEPT;
1375 pp = pd->pp;
1376
1377 /* reassemble IP fragments */
1378 #ifdef CONFIG_IP_VS_IPV6
1379 if (af == AF_INET)
1380 #endif
1381 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1382 if (ip_vs_gather_frags(ipvs, skb,
1383 ip_vs_defrag_user(hooknum)))
1384 return NF_STOLEN;
1385
1386 ip_vs_fill_iph_skb(AF_INET, skb, false, &iph);
1387 }
1388
1389 /*
1390 * Check if the packet belongs to an existing entry
1391 */
1392 cp = pp->conn_out_get(ipvs, af, skb, &iph);
1393
1394 if (likely(cp)) {
1395 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
1396 goto ignore_cp;
1397 return handle_response(af, skb, pd, cp, &iph, hooknum);
1398 }
1399
1400 /* Check for real-server-started requests */
1401 if (atomic_read(&ipvs->conn_out_counter)) {
1402 /* Currently only for UDP:
1403 * connection oriented protocols typically use
1404 * ephemeral ports for outgoing connections, so
1405 * related incoming responses would not match any VS
1406 */
1407 if (pp->protocol == IPPROTO_UDP) {
1408 cp = __ip_vs_rs_conn_out(hooknum, ipvs, af, skb, &iph);
1409 if (likely(cp))
1410 return handle_response(af, skb, pd, cp, &iph,
1411 hooknum);
1412 }
1413 }
1414
1415 if (sysctl_nat_icmp_send(ipvs) &&
1416 (pp->protocol == IPPROTO_TCP ||
1417 pp->protocol == IPPROTO_UDP ||
1418 pp->protocol == IPPROTO_SCTP)) {
1419 __be16 _ports[2], *pptr;
1420
1421 pptr = frag_safe_skb_hp(skb, iph.len,
1422 sizeof(_ports), _ports);
1423 if (pptr == NULL)
1424 return NF_ACCEPT; /* Not for me */
1425 if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
1426 pptr[0])) {
1427 /*
1428 * Notify the real server that there is no
1429 * existing entry, unless the packet is a
1430 * TCP RST or an SCTP ABORT.
1431 */
1432 if ((iph.protocol != IPPROTO_TCP &&
1433 iph.protocol != IPPROTO_SCTP)
1434 || ((iph.protocol == IPPROTO_TCP
1435 && !is_tcp_reset(skb, iph.len))
1436 || (iph.protocol == IPPROTO_SCTP
1437 && !is_sctp_abort(skb,
1438 iph.len)))) {
1439 #ifdef CONFIG_IP_VS_IPV6
1440 if (af == AF_INET6) {
1441 if (!skb->dev)
1442 skb->dev = ipvs->net->loopback_dev;
1443 icmpv6_send(skb,
1444 ICMPV6_DEST_UNREACH,
1445 ICMPV6_PORT_UNREACH,
1446 0);
1447 } else
1448 #endif
1449 icmp_send(skb,
1450 ICMP_DEST_UNREACH,
1451 ICMP_PORT_UNREACH, 0);
1452 return NF_DROP;
1453 }
1454 }
1455 }
1456
1457 out:
1458 IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
1459 "ip_vs_out: packet continues traversal as normal");
1460 return NF_ACCEPT;
1461
1462 ignore_cp:
1463 __ip_vs_conn_put(cp);
1464 goto out;
1465 }
1466
1467 /*
1468 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1469 * used only for VS/NAT.
1470 * Check if packet is reply for established ip_vs_conn.
1471 */
1472 static unsigned int
1473 ip_vs_reply4(void *priv, struct sk_buff *skb,
1474 const struct nf_hook_state *state)
1475 {
1476 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1477 }
1478
1479 /*
1480 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1481 * Check if packet is reply for established ip_vs_conn.
1482 */
1483 static unsigned int
1484 ip_vs_local_reply4(void *priv, struct sk_buff *skb,
1485 const struct nf_hook_state *state)
1486 {
1487 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1488 }
1489
1490 #ifdef CONFIG_IP_VS_IPV6
1491
1492 /*
1493 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1494 * used only for VS/NAT.
1495 * Check if packet is reply for established ip_vs_conn.
1496 */
1497 static unsigned int
1498 ip_vs_reply6(void *priv, struct sk_buff *skb,
1499 const struct nf_hook_state *state)
1500 {
1501 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1502 }
1503
1504 /*
1505 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1506 * Check if packet is reply for established ip_vs_conn.
1507 */
1508 static unsigned int
1509 ip_vs_local_reply6(void *priv, struct sk_buff *skb,
1510 const struct nf_hook_state *state)
1511 {
1512 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1513 }
1514
1515 #endif
1516
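/*
 * Try to schedule the first packet of a new flow via the protocol's
 * conn_schedule handler. Returns 1 when *cpp holds a connection to
 * process, 0 when the caller should return *verdict as-is, roughly:
 *
 *	if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
 *		return v;
 */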
1517 static unsigned int
1518 ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
1519 struct ip_vs_proto_data *pd,
1520 int *verdict, struct ip_vs_conn **cpp,
1521 struct ip_vs_iphdr *iph)
1522 {
1523 struct ip_vs_protocol *pp = pd->pp;
1524
1525 if (!iph->fragoffs) {
1526 /* No (second) fragments need to enter here, as the fragment zero
1527 * replayed by nf_defrag_ipv6 will already have created the cp
1528 */
1529
1530 /* Schedule and create new connection entry into cpp */
1531 if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
1532 return 0;
1533 }
1534
1535 if (unlikely(!*cpp)) {
1536 /* sorry, all this trouble for a no-hit :) */
1537 IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
1538 "ip_vs_in: packet continues traversal as normal");
1539 if (iph->fragoffs) {
1540 /* Fragment that couldn't be mapped to a conn entry
1541 * is missing module nf_defrag_ipv6
1542 */
1543 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1544 IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
1545 "unhandled fragment");
1546 }
1547 *verdict = NF_ACCEPT;
1548 return 0;
1549 }
1550
1551 return 1;
1552 }
1553
1554 /*
1555 * Handle ICMP messages in the outside-to-inside direction (incoming).
1556 * Find any that might be relevant, check against existing connections,
1557 * forward to the right destination host if relevant.
1558 * Currently handles error types - unreachable, quench, ttl exceeded.
1559 */
1560 static int
1561 ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
1562 unsigned int hooknum)
1563 {
1564 struct iphdr *iph;
1565 struct icmphdr _icmph, *ic;
1566 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1567 struct ip_vs_iphdr ciph;
1568 struct ip_vs_conn *cp;
1569 struct ip_vs_protocol *pp;
1570 struct ip_vs_proto_data *pd;
1571 unsigned int offset, offset2, ihl, verdict;
1572 bool ipip, new_cp = false;
1573
1574 *related = 1;
1575
1576 /* reassemble IP fragments */
1577 if (ip_is_fragment(ip_hdr(skb))) {
1578 if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
1579 return NF_STOLEN;
1580 }
1581
1582 iph = ip_hdr(skb);
1583 offset = ihl = iph->ihl * 4;
1584 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1585 if (ic == NULL)
1586 return NF_DROP;
1587
1588 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1589 ic->type, ntohs(icmp_id(ic)),
1590 &iph->saddr, &iph->daddr);
1591
1592 /*
1593 * Work through seeing if this is for us.
1594 * These checks are supposed to be in an order that means easy
1595 * things are checked first to speed up processing.... however
1596 * this means that some packets will manage to get a long way
1597 * down this stack and then be rejected, but that's life.
1598 */
1599 if ((ic->type != ICMP_DEST_UNREACH) &&
1600 (ic->type != ICMP_SOURCE_QUENCH) &&
1601 (ic->type != ICMP_TIME_EXCEEDED)) {
1602 *related = 0;
1603 return NF_ACCEPT;
1604 }
1605
1606 /* Now find the contained IP header */
1607 offset += sizeof(_icmph);
1608 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1609 if (cih == NULL)
1610 return NF_ACCEPT; /* The packet looks wrong, ignore */
1611
1612 /* Special case for errors for IPIP packets */
1613 ipip = false;
1614 if (cih->protocol == IPPROTO_IPIP) {
1615 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1616 return NF_ACCEPT;
1617 /* Error for our IPIP must arrive at LOCAL_IN */
1618 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1619 return NF_ACCEPT;
1620 offset += cih->ihl * 4;
1621 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1622 if (cih == NULL)
1623 return NF_ACCEPT; /* The packet looks wrong, ignore */
1624 ipip = true;
1625 }
1626
1627 pd = ip_vs_proto_data_get(ipvs, cih->protocol);
1628 if (!pd)
1629 return NF_ACCEPT;
1630 pp = pd->pp;
1631
1632 /* Is the embedded protocol header present? */
1633 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1634 pp->dont_defrag))
1635 return NF_ACCEPT;
1636
1637 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1638 "Checking incoming ICMP for");
1639
1640 offset2 = offset;
1641 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !ipip, &ciph);
1642 offset = ciph.len;
1643
1644 /* The embedded headers contain source and dest in reverse order.
1645 * For IPIP this is an error for the request, not for the reply.
1646 */
1647 cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph);
1648
1649 if (!cp) {
1650 int v;
1651
1652 if (!sysctl_schedule_icmp(ipvs))
1653 return NF_ACCEPT;
1654
1655 if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
1656 return v;
1657 new_cp = true;
1658 }
1659
1660 verdict = NF_DROP;
1661
1662 /* Ensure the checksum is correct */
1663 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1664 /* Failed checksum! */
1665 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1666 &iph->saddr);
1667 goto out;
1668 }
1669
1670 if (ipip) {
1671 __be32 info = ic->un.gateway;
1672 __u8 type = ic->type;
1673 __u8 code = ic->code;
1674
1675 /* Update the MTU */
1676 if (ic->type == ICMP_DEST_UNREACH &&
1677 ic->code == ICMP_FRAG_NEEDED) {
1678 struct ip_vs_dest *dest = cp->dest;
1679 u32 mtu = ntohs(ic->un.frag.mtu);
1680 __be16 frag_off = cih->frag_off;
1681
1682 /* Strip outer IP and ICMP, go to IPIP header */
1683 if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
1684 goto ignore_ipip;
1685 offset2 -= ihl + sizeof(_icmph);
1686 skb_reset_network_header(skb);
1687 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1688 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1689 ipv4_update_pmtu(skb, ipvs->net,
1690 mtu, 0, 0, 0, 0);
1691 /* Client uses PMTUD? */
1692 if (!(frag_off & htons(IP_DF)))
1693 goto ignore_ipip;
1694 /* Prefer the resulting PMTU */
1695 if (dest) {
1696 struct ip_vs_dest_dst *dest_dst;
1697
1698 dest_dst = rcu_dereference(dest->dest_dst);
1699 if (dest_dst)
1700 mtu = dst_mtu(dest_dst->dst_cache);
1701 }
1702 if (mtu > 68 + sizeof(struct iphdr))
1703 mtu -= sizeof(struct iphdr);
1704 info = htonl(mtu);
1705 }
1706 /* Strip outer IP, ICMP and IPIP, go to IP header of
1707 * original request.
1708 */
1709 if (pskb_pull(skb, offset2) == NULL)
1710 goto ignore_ipip;
1711 skb_reset_network_header(skb);
1712 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1713 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1714 type, code, ntohl(info));
1715 icmp_send(skb, type, code, info);
1716 /* ICMP can be shorter but anyways, account it */
1717 ip_vs_out_stats(cp, skb);
1718
1719 ignore_ipip:
1720 consume_skb(skb);
1721 verdict = NF_STOLEN;
1722 goto out;
1723 }
1724
1725 /* do the statistics and put it back */
1726 ip_vs_in_stats(cp, skb);
1727 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1728 IPPROTO_SCTP == cih->protocol)
1729 offset += 2 * sizeof(__u16);
1730 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1731
1732 out:
1733 if (likely(!new_cp))
1734 __ip_vs_conn_put(cp);
1735 else
1736 ip_vs_conn_put(cp);
1737
1738 return verdict;
1739 }
1740
1741 #ifdef CONFIG_IP_VS_IPV6
1742 static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
1743 int *related, unsigned int hooknum,
1744 struct ip_vs_iphdr *iph)
1745 {
1746 struct icmp6hdr _icmph, *ic;
1747 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1748 struct ip_vs_conn *cp;
1749 struct ip_vs_protocol *pp;
1750 struct ip_vs_proto_data *pd;
1751 unsigned int offset, verdict;
1752 bool new_cp = false;
1753
1754 *related = 1;
1755
1756 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph);
1757 if (ic == NULL)
1758 return NF_DROP;
1759
1760 /*
1761 * Work through seeing if this is for us.
1762 * These checks are supposed to be in an order that means easy
1763 * things are checked first to speed up processing.... however
1764 * this means that some packets will manage to get a long way
1765 * down this stack and then be rejected, but that's life.
1766 */
1767 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1768 *related = 0;
1769 return NF_ACCEPT;
1770 }
1771 /* A fragment header before the ICMP header tells us that
1772 * this is not an error message, since error messages can't be fragmented.
1773 */
1774 if (iph->flags & IP6_FH_F_FRAG)
1775 return NF_DROP;
1776
1777 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1778 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1779 &iph->saddr, &iph->daddr);
1780
1781 offset = iph->len + sizeof(_icmph);
1782 if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
1783 return NF_ACCEPT;
1784
1785 pd = ip_vs_proto_data_get(ipvs, ciph.protocol);
1786 if (!pd)
1787 return NF_ACCEPT;
1788 pp = pd->pp;
1789
1790 /* Cannot handle fragmented embedded protocol */
1791 if (ciph.fragoffs)
1792 return NF_ACCEPT;
1793
1794 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
1795 "Checking incoming ICMPv6 for");
1796
1797 /* The embedded headers contain source and dest in reverse order
1798 * if not from localhost
1799 */
1800 cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph);
1801
1802 if (!cp) {
1803 int v;
1804
1805 if (!sysctl_schedule_icmp(ipvs))
1806 return NF_ACCEPT;
1807
1808 if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
1809 return v;
1810
1811 new_cp = true;
1812 }
1813
1814 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1815 if ((hooknum == NF_INET_LOCAL_OUT) &&
1816 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1817 verdict = NF_ACCEPT;
1818 goto out;
1819 }
1820
1821 /* do the statistics and put it back */
1822 ip_vs_in_stats(cp, skb);
1823
1824 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1825 offset = ciph.len;
1826 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1827 IPPROTO_SCTP == ciph.protocol)
1828 offset += 2 * sizeof(__u16); /* Also mangle ports */
1829
1830 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
1831
1832 out:
1833 if (likely(!new_cp))
1834 __ip_vs_conn_put(cp);
1835 else
1836 ip_vs_conn_put(cp);
1837
1838 return verdict;
1839 }
1840 #endif
1841
1842
1843 /*
1844 * Check if it's for virtual services, look it up,
1845 * and send it on its way...
1846 */
1847 static unsigned int
1848 ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1849 {
1850 struct ip_vs_iphdr iph;
1851 struct ip_vs_protocol *pp;
1852 struct ip_vs_proto_data *pd;
1853 struct ip_vs_conn *cp;
1854 int ret, pkts;
1855 int conn_reuse_mode;
1856 struct sock *sk;
1857
1858 /* Already marked as IPVS request or reply? */
1859 if (skb->ipvs_property)
1860 return NF_ACCEPT;
1861
1862 /*
1863 * Big tappo:
1864 * - remote client: only PACKET_HOST
1865 * - route: used for struct net when skb->dev is unset
1866 */
1867 if (unlikely((skb->pkt_type != PACKET_HOST &&
1868 hooknum != NF_INET_LOCAL_OUT) ||
1869 !skb_dst(skb))) {
1870 ip_vs_fill_iph_skb(af, skb, false, &iph);
1871 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1872 " ignored in hook %u\n",
1873 skb->pkt_type, iph.protocol,
1874 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1875 return NF_ACCEPT;
1876 }
1877 /* ipvs enabled in this netns ? */
1878 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1879 return NF_ACCEPT;
1880
1881 ip_vs_fill_iph_skb(af, skb, false, &iph);
1882
1883 /* Bad... Do not break raw sockets */
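/* A local IPv4 sender with IP_NODEFRAG set (typically a raw socket
 * that does its own fragmentation) must bypass IPVS entirely.
 */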
1884 sk = skb_to_full_sk(skb);
1885 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1886 af == AF_INET)) {
1887
1888 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1889 return NF_ACCEPT;
1890 }
1891
1892 #ifdef CONFIG_IP_VS_IPV6
1893 if (af == AF_INET6) {
1894 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1895 int related;
1896 int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
1897 hooknum, &iph);
1898
1899 if (related)
1900 return verdict;
1901 }
1902 } else
1903 #endif
1904 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1905 int related;
1906 int verdict = ip_vs_in_icmp(ipvs, skb, &related,
1907 hooknum);
1908
1909 if (related)
1910 return verdict;
1911 }
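/* A "related" ICMP error was fully handled above and its verdict
 * returned; anything else (e.g. an echo request to the VIP) falls
 * through to the normal protocol handling below.
 */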
1912
1913 /* Protocol supported? */
1914 pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1915 if (unlikely(!pd)) {
1916 /* The only way we'll see this packet again is if it's
1917 * encapsulated, so mark it with ipvs_property=1 so we
1918 * skip it if we're ignoring tunneled packets
1919 */
1920 if (sysctl_ignore_tunneled(ipvs))
1921 skb->ipvs_property = 1;
1922
1923 return NF_ACCEPT;
1924 }
1925 pp = pd->pp;
1926 /*
1927 * Check if the packet belongs to an existing connection entry
1928 */
1929 cp = pp->conn_in_get(ipvs, af, skb, &iph);
1930
1931 conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
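/* A packet that looks like the start of a new flow (e.g. a TCP SYN)
 * but matches an existing entry may need rescheduling: either
 * expire_nodest_conn is set and the old destination has weight 0, or
 * conn_reuse_mode asks for a fresh scheduling decision.  A connection
 * that is also tracked by conntrack cannot be rebound transparently,
 * so in that case the packet is dropped and the retransmit starts a
 * new connection.
 */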
1932 if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
1933 bool uses_ct = false, resched = false;
1934
1935 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
1936 unlikely(!atomic_read(&cp->dest->weight))) {
1937 resched = true;
1938 uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
1939 } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
1940 uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
1941 if (!atomic_read(&cp->n_control)) {
1942 resched = true;
1943 } else {
1944 /* Do not reschedule controlling connection
1945 * that uses conntrack while it is still
1946 * referenced by controlled connection(s).
1947 */
1948 resched = !uses_ct;
1949 }
1950 }
1951
1952 if (resched) {
1953 if (!atomic_read(&cp->n_control))
1954 ip_vs_conn_expire_now(cp);
1955 __ip_vs_conn_put(cp);
1956 if (uses_ct)
1957 return NF_DROP;
1958 cp = NULL;
1959 }
1960 }
1961
1962 if (unlikely(!cp)) {
1963 int v;
1964
1965 if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
1966 return v;
1967 }
1968
1969 IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
1970
1971 /* Check the server status */
1972 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1973 /* the destination server is not available */
1974
1975 __u32 flags = cp->flags;
1976
1977 /* when the timer is already started, silently drop the packet. */
1978 if (timer_pending(&cp->timer))
1979 __ip_vs_conn_put(cp);
1980 else
1981 ip_vs_conn_put(cp);
1982
1983 if (sysctl_expire_nodest_conn(ipvs) &&
1984 !(flags & IP_VS_CONN_F_ONE_PACKET)) {
1985 /* try to expire the connection immediately */
1986 ip_vs_conn_expire_now(cp);
1987 }
1988
1989 return NF_DROP;
1990 }
1991
1992 ip_vs_in_stats(cp, skb);
1993 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1994 if (cp->packet_xmit)
1995 ret = cp->packet_xmit(skb, cp, pp, &iph);
1996 /* do not touch skb anymore */
1997 else {
1998 IP_VS_DBG_RL("warning: packet_xmit is null");
1999 ret = NF_ACCEPT;
2000 }
2001
2002 /* Increase its packet counter and check whether it needs
2003 * to be synchronized
2004 *
2005 * Sync the connection if it is about to close, to
2006 * encourage the standby servers to update the connection timeout
2007 *
2008 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
2009 */
2010
2011 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
2012 pkts = sysctl_sync_threshold(ipvs);
2013 else
2014 pkts = atomic_add_return(1, &cp->in_pkts);
2015
2016 if (ipvs->sync_state & IP_VS_STATE_MASTER)
2017 ip_vs_sync_conn(ipvs, cp, pkts);
2018 else if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
2019 /* increment is done inside ip_vs_sync_conn too */
2020 atomic_inc(&cp->control->in_pkts);
2021
2022 ip_vs_conn_put(cp);
2023 return ret;
2024 }
2025
2026 /*
2027 * AF_INET handler in NF_INET_LOCAL_IN chain
2028 * Schedule and forward packets from remote clients
2029 */
2030 static unsigned int
2031 ip_vs_remote_request4(void *priv, struct sk_buff *skb,
2032 const struct nf_hook_state *state)
2033 {
2034 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
2035 }
2036
2037 /*
2038 * AF_INET handler in NF_INET_LOCAL_OUT chain
2039 * Schedule and forward packets from local clients
2040 */
2041 static unsigned int
2042 ip_vs_local_request4(void *priv, struct sk_buff *skb,
2043 const struct nf_hook_state *state)
2044 {
2045 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
2046 }
2047
2048 #ifdef CONFIG_IP_VS_IPV6
2049
2050 /*
2051 * AF_INET6 handler in NF_INET_LOCAL_IN chain
2052 * Schedule and forward packets from remote clients
2053 */
2054 static unsigned int
2055 ip_vs_remote_request6(void *priv, struct sk_buff *skb,
2056 const struct nf_hook_state *state)
2057 {
2058 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
2059 }
2060
2061 /*
2062 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
2063 * Schedule and forward packets from local clients
2064 */
2065 static unsigned int
2066 ip_vs_local_request6(void *priv, struct sk_buff *skb,
2067 const struct nf_hook_state *state)
2068 {
2069 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
2070 }
2071
2072 #endif
2073
2074
2075 /*
2076 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
2077 * related packets destined for 0.0.0.0/0.
2078 * When a fwmark-based virtual service is used, such as a transparent
2079 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
2080 * but ICMP destined for 0.0.0.0/0 cannot be easily marked and
2081 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
2082 * and send them to ip_vs_in_icmp.
2083 */
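/* Illustrative setup (not part of this file): such a cluster might
 * mark web traffic with an fwmark, e.g.
 *	iptables -t mangle -A PREROUTING -p tcp --dport 80 -j MARK --set-mark 1
 *	ipvsadm -A -f 1 -s rr
 * The marked TCP packets reach ip_vs_in, while ICMP errors about them
 * (fragmentation needed, etc.) traverse FORWARD and are caught here.
 */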
2084 static unsigned int
2085 ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
2086 const struct nf_hook_state *state)
2087 {
2088 int r;
2089 struct netns_ipvs *ipvs = net_ipvs(state->net);
2090
2091 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
2092 return NF_ACCEPT;
2093
2094 /* ipvs enabled in this netns ? */
2095 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
2096 return NF_ACCEPT;
2097
2098 return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
2099 }
2100
2101 #ifdef CONFIG_IP_VS_IPV6
2102 static unsigned int
2103 ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
2104 const struct nf_hook_state *state)
2105 {
2106 int r;
2107 struct netns_ipvs *ipvs = net_ipvs(state->net);
2108 struct ip_vs_iphdr iphdr;
2109
2110 ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
2111 if (iphdr.protocol != IPPROTO_ICMPV6)
2112 return NF_ACCEPT;
2113
2114 /* ipvs enabled in this netns ? */
2115 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
2116 return NF_ACCEPT;
2117
2118 return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
2119 }
2120 #endif
2121
2122
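/* Hook priorities are chosen relative to the netfilter NAT priorities:
 * a smaller number runs earlier, so on LOCAL_IN the reply-mangling hook
 * (NF_IP_PRI_NAT_SRC - 2) sees a packet before the scheduling hook
 * (NF_IP_PRI_NAT_SRC - 1), and on LOCAL_OUT both IPVS hooks run just
 * after DNAT (NF_IP_PRI_NAT_DST + 1 and + 2).
 */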
2123 static const struct nf_hook_ops ip_vs_ops[] = {
2124 /* After packet filtering, change source only for VS/NAT */
2125 {
2126 .hook = ip_vs_reply4,
2127 .pf = NFPROTO_IPV4,
2128 .hooknum = NF_INET_LOCAL_IN,
2129 .priority = NF_IP_PRI_NAT_SRC - 2,
2130 },
2131 /* After packet filtering, forward packet through VS/DR, VS/TUN,
2132 * or VS/NAT(change destination), so that filtering rules can be
2133 * applied to IPVS. */
2134 {
2135 .hook = ip_vs_remote_request4,
2136 .pf = NFPROTO_IPV4,
2137 .hooknum = NF_INET_LOCAL_IN,
2138 .priority = NF_IP_PRI_NAT_SRC - 1,
2139 },
2140 /* Before ip_vs_in, change source only for VS/NAT */
2141 {
2142 .hook = ip_vs_local_reply4,
2143 .pf = NFPROTO_IPV4,
2144 .hooknum = NF_INET_LOCAL_OUT,
2145 .priority = NF_IP_PRI_NAT_DST + 1,
2146 },
2147 /* After mangle, schedule and forward local requests */
2148 {
2149 .hook = ip_vs_local_request4,
2150 .pf = NFPROTO_IPV4,
2151 .hooknum = NF_INET_LOCAL_OUT,
2152 .priority = NF_IP_PRI_NAT_DST + 2,
2153 },
2154 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
2155 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
2156 {
2157 .hook = ip_vs_forward_icmp,
2158 .pf = NFPROTO_IPV4,
2159 .hooknum = NF_INET_FORWARD,
2160 .priority = 99,
2161 },
2162 /* After packet filtering, change source only for VS/NAT */
2163 {
2164 .hook = ip_vs_reply4,
2165 .pf = NFPROTO_IPV4,
2166 .hooknum = NF_INET_FORWARD,
2167 .priority = 100,
2168 },
2169 #ifdef CONFIG_IP_VS_IPV6
2170 /* After packet filtering, change source only for VS/NAT */
2171 {
2172 .hook = ip_vs_reply6,
2173 .pf = NFPROTO_IPV6,
2174 .hooknum = NF_INET_LOCAL_IN,
2175 .priority = NF_IP6_PRI_NAT_SRC - 2,
2176 },
2177 /* After packet filtering, forward packet through VS/DR, VS/TUN,
2178 * or VS/NAT(change destination), so that filtering rules can be
2179 * applied to IPVS. */
2180 {
2181 .hook = ip_vs_remote_request6,
2182 .pf = NFPROTO_IPV6,
2183 .hooknum = NF_INET_LOCAL_IN,
2184 .priority = NF_IP6_PRI_NAT_SRC - 1,
2185 },
2186 /* Before ip_vs_in, change source only for VS/NAT */
2187 {
2188 .hook = ip_vs_local_reply6,
2189 .pf = NFPROTO_IPV6,
2190 .hooknum = NF_INET_LOCAL_OUT,
2191 .priority = NF_IP6_PRI_NAT_DST + 1,
2192 },
2193 /* After mangle, schedule and forward local requests */
2194 {
2195 .hook = ip_vs_local_request6,
2196 .pf = NFPROTO_IPV6,
2197 .hooknum = NF_INET_LOCAL_OUT,
2198 .priority = NF_IP6_PRI_NAT_DST + 2,
2199 },
2200 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
2201 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
2202 {
2203 .hook = ip_vs_forward_icmp_v6,
2204 .pf = NFPROTO_IPV6,
2205 .hooknum = NF_INET_FORWARD,
2206 .priority = 99,
2207 },
2208 /* After packet filtering, change source only for VS/NAT */
2209 {
2210 .hook = ip_vs_reply6,
2211 .pf = NFPROTO_IPV6,
2212 .hooknum = NF_INET_FORWARD,
2213 .priority = 100,
2214 },
2215 #endif
2216 };
2217 /*
2218 * Initialize IP Virtual Server per-netns state.
2219 */
2220 static int __net_init __ip_vs_init(struct net *net)
2221 {
2222 struct netns_ipvs *ipvs;
2223 int ret;
2224
2225 ipvs = net_generic(net, ip_vs_net_id);
2226 if (ipvs == NULL)
2227 return -ENOMEM;
2228
2229 /* Hold the beast until a service is registered */
2230 ipvs->enable = 0;
2231 ipvs->net = net;
2232 /* Counters used for creating unique names */
2233 ipvs->gen = atomic_read(&ipvs_netns_cnt);
2234 atomic_inc(&ipvs_netns_cnt);
2235 net->ipvs = ipvs;
2236
2237 if (ip_vs_estimator_net_init(ipvs) < 0)
2238 goto estimator_fail;
2239
2240 if (ip_vs_control_net_init(ipvs) < 0)
2241 goto control_fail;
2242
2243 if (ip_vs_protocol_net_init(ipvs) < 0)
2244 goto protocol_fail;
2245
2246 if (ip_vs_app_net_init(ipvs) < 0)
2247 goto app_fail;
2248
2249 if (ip_vs_conn_net_init(ipvs) < 0)
2250 goto conn_fail;
2251
2252 if (ip_vs_sync_net_init(ipvs) < 0)
2253 goto sync_fail;
2254
2255 ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2256 if (ret < 0)
2257 goto hook_fail;
2258
2259 return 0;
2260 /*
2261 * Error handling
2262 */
2263
2264 hook_fail:
2265 ip_vs_sync_net_cleanup(ipvs);
2266 sync_fail:
2267 ip_vs_conn_net_cleanup(ipvs);
2268 conn_fail:
2269 ip_vs_app_net_cleanup(ipvs);
2270 app_fail:
2271 ip_vs_protocol_net_cleanup(ipvs);
2272 protocol_fail:
2273 ip_vs_control_net_cleanup(ipvs);
2274 control_fail:
2275 ip_vs_estimator_net_cleanup(ipvs);
2276 estimator_fail:
2277 net->ipvs = NULL;
2278 return -ENOMEM;
2279 }
2280
2281 static void __net_exit __ip_vs_cleanup(struct net *net)
2282 {
2283 struct netns_ipvs *ipvs = net_ipvs(net);
2284
2285 nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2286 ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */
2287 ip_vs_conn_net_cleanup(ipvs);
2288 ip_vs_app_net_cleanup(ipvs);
2289 ip_vs_protocol_net_cleanup(ipvs);
2290 ip_vs_control_net_cleanup(ipvs);
2291 ip_vs_estimator_net_cleanup(ipvs);
2292 IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
2293 net->ipvs = NULL;
2294 }
2295
2296 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
2297 {
2298 struct netns_ipvs *ipvs = net_ipvs(net);
2299 EnterFunction(2);
2300 ipvs->enable = 0; /* Disable packet reception */
2301 smp_wmb();
2302 ip_vs_sync_net_cleanup(ipvs);
2303 LeaveFunction(2);
2304 }
2305
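/* Two sets of pernet_operations are used because pernet "device" exit
 * handlers run before the "subsys" ones on netns teardown: this lets
 * __ip_vs_dev_cleanup() stop packet reception and the sync threads
 * while the per-netns state released later by __ip_vs_cleanup() is
 * still valid.
 */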
2306 static struct pernet_operations ipvs_core_ops = {
2307 .init = __ip_vs_init,
2308 .exit = __ip_vs_cleanup,
2309 .id = &ip_vs_net_id,
2310 .size = sizeof(struct netns_ipvs),
2311 };
2312
2313 static struct pernet_operations ipvs_core_dev_ops = {
2314 .exit = __ip_vs_dev_cleanup,
2315 };
2316
2317 /*
2318 * Initialize IP Virtual Server
2319 */
2320 static int __init ip_vs_init(void)
2321 {
2322 int ret;
2323
2324 ret = ip_vs_control_init();
2325 if (ret < 0) {
2326 pr_err("can't setup control.\n");
2327 goto exit;
2328 }
2329
2330 ip_vs_protocol_init();
2331
2332 ret = ip_vs_conn_init();
2333 if (ret < 0) {
2334 pr_err("can't setup connection table.\n");
2335 goto cleanup_protocol;
2336 }
2337
2338 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2339 if (ret < 0)
2340 goto cleanup_conn;
2341
2342 ret = register_pernet_device(&ipvs_core_dev_ops);
2343 if (ret < 0)
2344 goto cleanup_sub;
2345
2346 ret = ip_vs_register_nl_ioctl();
2347 if (ret < 0) {
2348 pr_err("can't register netlink/ioctl.\n");
2349 goto cleanup_dev;
2350 }
2351
2352 pr_info("ipvs loaded.\n");
2353
2354 return ret;
2355
2356 cleanup_dev:
2357 unregister_pernet_device(&ipvs_core_dev_ops);
2358 cleanup_sub:
2359 unregister_pernet_subsys(&ipvs_core_ops);
2360 cleanup_conn:
2361 ip_vs_conn_cleanup();
2362 cleanup_protocol:
2363 ip_vs_protocol_cleanup();
2364 ip_vs_control_cleanup();
2365 exit:
2366 return ret;
2367 }
2368
2369 static void __exit ip_vs_cleanup(void)
2370 {
2371 ip_vs_unregister_nl_ioctl();
2372 unregister_pernet_device(&ipvs_core_dev_ops);
2373 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2374 ip_vs_conn_cleanup();
2375 ip_vs_protocol_cleanup();
2376 ip_vs_control_cleanup();
2377 pr_info("ipvs unloaded.\n");
2378 }
2379
2380 module_init(ip_vs_init);
2381 module_exit(ip_vs_cleanup);
2382 MODULE_LICENSE("GPL");
2383