1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * IPVS An implementation of the IP virtual server support for the
4 * LINUX operating system. IPVS is now implemented as a module
5 * over the Netfilter framework. IPVS can be used to build a
6 * high-performance and highly available server based on a
7 * cluster of servers.
8 *
9 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
10 * Peter Kese <peter.kese@ijs.si>
11 * Julian Anastasov <ja@ssi.bg>
12 *
13 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
14 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
15 * and others.
16 *
17 * Changes:
18 * Paul `Rusty' Russell properly handle non-linear skbs
19 * Harald Welte don't use nfcache
20 */
21
22 #define KMSG_COMPONENT "IPVS"
23 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
24
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/ip.h>
28 #include <linux/tcp.h>
29 #include <linux/sctp.h>
30 #include <linux/icmp.h>
31 #include <linux/slab.h>
32
33 #include <net/ip.h>
34 #include <net/tcp.h>
35 #include <net/udp.h>
36 #include <net/icmp.h> /* for icmp_send */
37 #include <net/gue.h>
38 #include <net/gre.h>
39 #include <net/route.h>
40 #include <net/ip6_checksum.h>
41 #include <net/netns/generic.h> /* net_generic() */
42
43 #include <linux/netfilter.h>
44 #include <linux/netfilter_ipv4.h>
45
46 #ifdef CONFIG_IP_VS_IPV6
47 #include <net/ipv6.h>
48 #include <linux/netfilter_ipv6.h>
49 #include <net/ip6_route.h>
50 #endif
51
52 #include <net/ip_vs.h>
53 #include <linux/indirect_call_wrapper.h>
54
55
56 EXPORT_SYMBOL(register_ip_vs_scheduler);
57 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
58 EXPORT_SYMBOL(ip_vs_proto_name);
59 EXPORT_SYMBOL(ip_vs_conn_new);
60 EXPORT_SYMBOL(ip_vs_conn_in_get);
61 EXPORT_SYMBOL(ip_vs_conn_out_get);
62 #ifdef CONFIG_IP_VS_PROTO_TCP
63 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
64 #endif
65 EXPORT_SYMBOL(ip_vs_conn_put);
66 #ifdef CONFIG_IP_VS_DEBUG
67 EXPORT_SYMBOL(ip_vs_get_debug_level);
68 #endif
69 EXPORT_SYMBOL(ip_vs_new_conn_out);
70
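/* SNAT_CALL() resolves pp->snat_handler through the indirect call
 * wrappers so the common TCP/UDP handlers can be called directly when
 * those protocols are built in, which is meant to avoid the cost of a
 * retpolined indirect call on the fast path.
 */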
71 #if defined(CONFIG_IP_VS_PROTO_TCP) && defined(CONFIG_IP_VS_PROTO_UDP)
72 #define SNAT_CALL(f, ...) \
73 INDIRECT_CALL_2(f, tcp_snat_handler, udp_snat_handler, __VA_ARGS__)
74 #elif defined(CONFIG_IP_VS_PROTO_TCP)
75 #define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, tcp_snat_handler, __VA_ARGS__)
76 #elif defined(CONFIG_IP_VS_PROTO_UDP)
77 #define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, udp_snat_handler, __VA_ARGS__)
78 #else
79 #define SNAT_CALL(f, ...) f(__VA_ARGS__)
80 #endif
81
82 static unsigned int ip_vs_net_id __read_mostly;
83 /* netns cnt used for uniqueness */
84 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
85
86 /* ID used in ICMP lookups */
87 #define icmp_id(icmph) (((icmph)->un).echo.id)
88 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
89
90 const char *ip_vs_proto_name(unsigned int proto)
91 {
92 static char buf[20];
93
94 switch (proto) {
95 case IPPROTO_IP:
96 return "IP";
97 case IPPROTO_UDP:
98 return "UDP";
99 case IPPROTO_TCP:
100 return "TCP";
101 case IPPROTO_SCTP:
102 return "SCTP";
103 case IPPROTO_ICMP:
104 return "ICMP";
105 #ifdef CONFIG_IP_VS_IPV6
106 case IPPROTO_ICMPV6:
107 return "ICMPv6";
108 #endif
109 default:
110 sprintf(buf, "IP_%u", proto);
111 return buf;
112 }
113 }
114
115 void ip_vs_init_hash_table(struct list_head *table, int rows)
116 {
117 while (--rows >= 0)
118 INIT_LIST_HEAD(&table[rows]);
119 }
120
121 static inline void
122 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
123 {
124 struct ip_vs_dest *dest = cp->dest;
125 struct netns_ipvs *ipvs = cp->ipvs;
126
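/* Account the incoming packet on three per-CPU counters: the real
 * server, its virtual service and the per-netns totals.
 */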
127 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
128 struct ip_vs_cpu_stats *s;
129 struct ip_vs_service *svc;
130
131 local_bh_disable();
132
133 s = this_cpu_ptr(dest->stats.cpustats);
134 u64_stats_update_begin(&s->syncp);
135 s->cnt.inpkts++;
136 s->cnt.inbytes += skb->len;
137 u64_stats_update_end(&s->syncp);
138
139 svc = rcu_dereference(dest->svc);
140 s = this_cpu_ptr(svc->stats.cpustats);
141 u64_stats_update_begin(&s->syncp);
142 s->cnt.inpkts++;
143 s->cnt.inbytes += skb->len;
144 u64_stats_update_end(&s->syncp);
145
146 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
147 u64_stats_update_begin(&s->syncp);
148 s->cnt.inpkts++;
149 s->cnt.inbytes += skb->len;
150 u64_stats_update_end(&s->syncp);
151
152 local_bh_enable();
153 }
154 }
155
156
157 static inline void
158 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
159 {
160 struct ip_vs_dest *dest = cp->dest;
161 struct netns_ipvs *ipvs = cp->ipvs;
162
163 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
164 struct ip_vs_cpu_stats *s;
165 struct ip_vs_service *svc;
166
167 local_bh_disable();
168
169 s = this_cpu_ptr(dest->stats.cpustats);
170 u64_stats_update_begin(&s->syncp);
171 s->cnt.outpkts++;
172 s->cnt.outbytes += skb->len;
173 u64_stats_update_end(&s->syncp);
174
175 svc = rcu_dereference(dest->svc);
176 s = this_cpu_ptr(svc->stats.cpustats);
177 u64_stats_update_begin(&s->syncp);
178 s->cnt.outpkts++;
179 s->cnt.outbytes += skb->len;
180 u64_stats_update_end(&s->syncp);
181
182 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
183 u64_stats_update_begin(&s->syncp);
184 s->cnt.outpkts++;
185 s->cnt.outbytes += skb->len;
186 u64_stats_update_end(&s->syncp);
187
188 local_bh_enable();
189 }
190 }
191
192
193 static inline void
194 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
195 {
196 struct netns_ipvs *ipvs = svc->ipvs;
197 struct ip_vs_cpu_stats *s;
198
199 local_bh_disable();
200
201 s = this_cpu_ptr(cp->dest->stats.cpustats);
202 u64_stats_update_begin(&s->syncp);
203 s->cnt.conns++;
204 u64_stats_update_end(&s->syncp);
205
206 s = this_cpu_ptr(svc->stats.cpustats);
207 u64_stats_update_begin(&s->syncp);
208 s->cnt.conns++;
209 u64_stats_update_end(&s->syncp);
210
211 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
212 u64_stats_update_begin(&s->syncp);
213 s->cnt.conns++;
214 u64_stats_update_end(&s->syncp);
215
216 local_bh_enable();
217 }
218
219
220 static inline void
221 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
222 const struct sk_buff *skb,
223 struct ip_vs_proto_data *pd)
224 {
225 if (likely(pd->pp->state_transition))
226 pd->pp->state_transition(cp, direction, skb, pd);
227 }
228
229 static inline int
230 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
231 struct sk_buff *skb, int protocol,
232 const union nf_inet_addr *caddr, __be16 cport,
233 const union nf_inet_addr *vaddr, __be16 vport,
234 struct ip_vs_conn_param *p)
235 {
236 ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
237 vport, p);
238 p->pe = rcu_dereference(svc->pe);
239 if (p->pe && p->pe->fill_param)
240 return p->pe->fill_param(p, skb);
241
242 return 0;
243 }
244
245 /*
246 * IPVS persistent scheduling function
247 * It creates a connection entry according to its template if one exists,
248 * or selects a server and creates a connection entry plus a template.
249 * Locking: we are svc user (svc->refcnt), so we hold all dests too
250 * Protocols supported: TCP, UDP
251 */
252 static struct ip_vs_conn *
253 ip_vs_sched_persist(struct ip_vs_service *svc,
254 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
255 int *ignored, struct ip_vs_iphdr *iph)
256 {
257 struct ip_vs_conn *cp = NULL;
258 struct ip_vs_dest *dest;
259 struct ip_vs_conn *ct;
260 __be16 dport = 0; /* destination port to forward */
261 unsigned int flags;
262 struct ip_vs_conn_param param;
263 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
264 union nf_inet_addr snet; /* source network of the client,
265 after masking */
266 const union nf_inet_addr *src_addr, *dst_addr;
267
268 if (likely(!ip_vs_iph_inverse(iph))) {
269 src_addr = &iph->saddr;
270 dst_addr = &iph->daddr;
271 } else {
272 src_addr = &iph->daddr;
273 dst_addr = &iph->saddr;
274 }
275
276
277 /* Mask saddr with the netmask to adjust template granularity */
278 #ifdef CONFIG_IP_VS_IPV6
279 if (svc->af == AF_INET6)
280 ipv6_addr_prefix(&snet.in6, &src_addr->in6,
281 (__force __u32) svc->netmask);
282 else
283 #endif
284 snet.ip = src_addr->ip & svc->netmask;
285
286 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
287 "mnet %s\n",
288 IP_VS_DBG_ADDR(svc->af, src_addr), ntohs(src_port),
289 IP_VS_DBG_ADDR(svc->af, dst_addr), ntohs(dst_port),
290 IP_VS_DBG_ADDR(svc->af, &snet));
291
292 /*
293 * As far as we know, FTP is a very complicated network protocol, and
294 * it uses a control connection and data connections. For active FTP,
295 * the FTP server initializes the data connection to the client; its
296 * source port is often 20. For passive FTP, the FTP server tells the
297 * client the port it passively listens on, and the client issues the
298 * data connection. In the tunneling or direct routing mode, the load
299 * balancer is on the client-to-server half of the connection, and the
300 * port number is unknown to the load balancer. So, a conn template like
301 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
302 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
303 * is created for other persistent services.
304 */
305 {
306 int protocol = iph->protocol;
307 const union nf_inet_addr *vaddr = dst_addr;
308 __be16 vport = 0;
309
310 if (dst_port == svc->port) {
311 /* non-FTP template:
312 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
313 * FTP template:
314 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
315 */
316 if (svc->port != FTPPORT)
317 vport = dst_port;
318 } else {
319 /* Note: persistent fwmark-based services and
320 * persistent port zero service are handled here.
321 * fwmark template:
322 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
323 * port zero template:
324 * <protocol,caddr,0,vaddr,0,daddr,0>
325 */
326 if (svc->fwmark) {
327 protocol = IPPROTO_IP;
328 vaddr = &fwmark;
329 }
330 }
331 /* return *ignored = -1 so NF_DROP can be used */
332 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
333 vaddr, vport, &param) < 0) {
334 *ignored = -1;
335 return NULL;
336 }
337 }
338
339 /* Check if a template already exists */
340 ct = ip_vs_ct_in_get(&param);
341 if (!ct || !ip_vs_check_template(ct, NULL)) {
342 struct ip_vs_scheduler *sched;
343
344 /*
345 * No template found or the dest of the connection
346 * template is not available.
347 * return *ignored=0 i.e. ICMP and NF_DROP
348 */
349 sched = rcu_dereference(svc->scheduler);
350 if (sched) {
351 /* read svc->sched_data after svc->scheduler */
352 smp_rmb();
353 dest = sched->schedule(svc, skb, iph);
354 } else {
355 dest = NULL;
356 }
357 if (!dest) {
358 IP_VS_DBG(1, "p-schedule: no dest found.\n");
359 kfree(param.pe_data);
360 *ignored = 0;
361 return NULL;
362 }
363
364 if (dst_port == svc->port && svc->port != FTPPORT)
365 dport = dest->port;
366
367 /* Create a template
368 * This adds param.pe_data to the template,
369 * and thus param.pe_data will be destroyed
370 * when the template expires */
371 ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
372 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
373 if (ct == NULL) {
374 kfree(param.pe_data);
375 *ignored = -1;
376 return NULL;
377 }
378
379 ct->timeout = svc->timeout;
380 } else {
381 /* set destination with the found template */
382 dest = ct->dest;
383 kfree(param.pe_data);
384 }
385
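/* Destination port for the new connection: when the packet is addressed
 * to the service port and the real server has a port configured, forward
 * to the server's port; otherwise keep the original destination port.
 */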
386 dport = dst_port;
387 if (dport == svc->port && dest->port)
388 dport = dest->port;
389
390 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
391 && iph->protocol == IPPROTO_UDP) ?
392 IP_VS_CONN_F_ONE_PACKET : 0;
393
394 /*
395 * Create a new connection according to the template
396 */
397 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, src_addr,
398 src_port, dst_addr, dst_port, &param);
399
400 cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
401 skb->mark);
402 if (cp == NULL) {
403 ip_vs_conn_put(ct);
404 *ignored = -1;
405 return NULL;
406 }
407
408 /*
409 * Add its control
410 */
411 ip_vs_control_add(cp, ct);
412 ip_vs_conn_put(ct);
413
414 ip_vs_conn_stats(cp, svc);
415 return cp;
416 }
417
418
419 /*
420 * IPVS main scheduling function
421 * It selects a server according to the virtual service, and
422 * creates a connection entry.
423 * Protocols supported: TCP, UDP
424 *
425 * Usage of *ignored
426 *
427 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
428 * svc/scheduler decides that this packet should be accepted with
429 * NF_ACCEPT because it must not be scheduled.
430 *
431 * 0 : scheduler can not find destination, so try bypass or
432 * return ICMP and then NF_DROP (ip_vs_leave).
433 *
434 * -1 : scheduler tried to schedule but fatal error occurred, eg.
435 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
436 * failure such as missing Call-ID, ENOMEM on skb_linearize
437 * or pe_data. In this case we should return NF_DROP without
438 * any attempts to send ICMP with ip_vs_leave.
439 */
440 struct ip_vs_conn *
441 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
442 struct ip_vs_proto_data *pd, int *ignored,
443 struct ip_vs_iphdr *iph)
444 {
445 struct ip_vs_protocol *pp = pd->pp;
446 struct ip_vs_conn *cp = NULL;
447 struct ip_vs_scheduler *sched;
448 struct ip_vs_dest *dest;
449 __be16 _ports[2], *pptr, cport, vport;
450 const void *caddr, *vaddr;
451 unsigned int flags;
452
453 *ignored = 1;
454 /*
455 * IPv6 frags, only the first hit here.
456 */
457 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
458 if (pptr == NULL)
459 return NULL;
460
461 if (likely(!ip_vs_iph_inverse(iph))) {
462 cport = pptr[0];
463 caddr = &iph->saddr;
464 vport = pptr[1];
465 vaddr = &iph->daddr;
466 } else {
467 cport = pptr[1];
468 caddr = &iph->daddr;
469 vport = pptr[0];
470 vaddr = &iph->saddr;
471 }
472
473 /*
474 * FTPDATA needs this check when using local real server.
475 * Never schedule Active FTPDATA connections from real server.
476 * For LVS-NAT they must be already created. For other methods
477 * with persistence the connection is created on SYN+ACK.
478 */
479 if (cport == FTPDATA) {
480 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
481 "Not scheduling FTPDATA");
482 return NULL;
483 }
484
485 /*
486 * Do not schedule replies from local real server.
487 */
488 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
489 iph->hdr_flags ^= IP_VS_HDR_INVERSE;
490 cp = INDIRECT_CALL_1(pp->conn_in_get,
491 ip_vs_conn_in_get_proto, svc->ipvs,
492 svc->af, skb, iph);
493 iph->hdr_flags ^= IP_VS_HDR_INVERSE;
494
495 if (cp) {
496 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
497 "Not scheduling reply for existing"
498 " connection");
499 __ip_vs_conn_put(cp);
500 return NULL;
501 }
502 }
503
504 /*
505 * Persistent service
506 */
507 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
508 return ip_vs_sched_persist(svc, skb, cport, vport, ignored,
509 iph);
510
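/* Non-persistent path: *ignored == 0 lets the caller fall back to
 * ip_vs_leave() (bypass or ICMP error) when no destination is found.
 */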
511 *ignored = 0;
512
513 /*
514 * Non-persistent service
515 */
516 if (!svc->fwmark && vport != svc->port) {
517 if (!svc->port)
518 pr_err("Schedule: port zero only supported "
519 "in persistent services, "
520 "check your ipvs configuration\n");
521 return NULL;
522 }
523
524 sched = rcu_dereference(svc->scheduler);
525 if (sched) {
526 /* read svc->sched_data after svc->scheduler */
527 smp_rmb();
528 dest = sched->schedule(svc, skb, iph);
529 } else {
530 dest = NULL;
531 }
532 if (dest == NULL) {
533 IP_VS_DBG(1, "Schedule: no dest found.\n");
534 return NULL;
535 }
536
537 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
538 && iph->protocol == IPPROTO_UDP) ?
539 IP_VS_CONN_F_ONE_PACKET : 0;
540
541 /*
542 * Create a connection entry.
543 */
544 {
545 struct ip_vs_conn_param p;
546
547 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
548 caddr, cport, vaddr, vport, &p);
549 cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
550 dest->port ? dest->port : vport,
551 flags, dest, skb->mark);
552 if (!cp) {
553 *ignored = -1;
554 return NULL;
555 }
556 }
557
558 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
559 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
560 ip_vs_fwd_tag(cp),
561 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
562 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
563 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
564 cp->flags, refcount_read(&cp->refcnt));
565
566 ip_vs_conn_stats(cp, svc);
567 return cp;
568 }
569
570 static inline int ip_vs_addr_is_unicast(struct net *net, int af,
571 union nf_inet_addr *addr)
572 {
573 #ifdef CONFIG_IP_VS_IPV6
574 if (af == AF_INET6)
575 return ipv6_addr_type(&addr->in6) & IPV6_ADDR_UNICAST;
576 #endif
577 return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
578 }
579
580 /*
581 * Pass or drop the packet.
582 * Called by ip_vs_in, when the virtual service is available but
583 * no destination is available for a new connection.
584 */
585 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
586 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
587 {
588 __be16 _ports[2], *pptr, dport;
589 struct netns_ipvs *ipvs = svc->ipvs;
590 struct net *net = ipvs->net;
591
592 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
593 if (!pptr)
594 return NF_DROP;
595 dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
596
597 /* If it is a fwmark-based service, the cache_bypass sysctl is enabled
598 and the destination is a non-local unicast address, then create
599 a cache_bypass connection entry */
600 if (sysctl_cache_bypass(ipvs) && svc->fwmark &&
601 !(iph->hdr_flags & (IP_VS_HDR_INVERSE | IP_VS_HDR_ICMP)) &&
602 ip_vs_addr_is_unicast(net, svc->af, &iph->daddr)) {
603 int ret;
604 struct ip_vs_conn *cp;
605 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
606 iph->protocol == IPPROTO_UDP) ?
607 IP_VS_CONN_F_ONE_PACKET : 0;
608 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
609
610 /* create a new connection entry */
611 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
612 {
613 struct ip_vs_conn_param p;
614 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
615 &iph->saddr, pptr[0],
616 &iph->daddr, pptr[1], &p);
617 cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
618 IP_VS_CONN_F_BYPASS | flags,
619 NULL, skb->mark);
620 if (!cp)
621 return NF_DROP;
622 }
623
624 /* statistics */
625 ip_vs_in_stats(cp, skb);
626
627 /* set state */
628 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
629
630 /* transmit the first SYN packet */
631 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
632 /* do not touch skb anymore */
633
634 if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
635 atomic_inc(&cp->control->in_pkts);
636 else
637 atomic_inc(&cp->in_pkts);
638 ip_vs_conn_put(cp);
639 return ret;
640 }
641
642 /*
643 * When a virtual FTP service is present, packets destined
644 * for other services on the VIP may get here (except services
645 * listed in the ipvs table); pass those packets along, because
646 * it is not IPVS's job to decide to drop them.
647 */
648 if (svc->port == FTPPORT && dport != FTPPORT)
649 return NF_ACCEPT;
650
651 if (unlikely(ip_vs_iph_icmp(iph)))
652 return NF_DROP;
653
654 /*
655 * Notify the client that the destination is unreachable, and
656 * release the socket buffer.
657 * Since we are in the IP layer, no TCP socket has actually been
658 * created and a TCP RST cannot be sent; instead, ICMP_PORT_UNREACH
659 * is sent here whether the packet is TCP or UDP. --WZ
660 */
661 #ifdef CONFIG_IP_VS_IPV6
662 if (svc->af == AF_INET6) {
663 if (!skb->dev)
664 skb->dev = net->loopback_dev;
665 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
666 } else
667 #endif
668 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
669
670 return NF_DROP;
671 }
672
673 #ifdef CONFIG_SYSCTL
674
675 static int sysctl_snat_reroute(struct netns_ipvs *ipvs)
676 {
677 return ipvs->sysctl_snat_reroute;
678 }
679
680 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs)
681 {
682 return ipvs->sysctl_nat_icmp_send;
683 }
684
685 #else
686
687 static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; }
688 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; }
689
690 #endif
691
692 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
693 {
694 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
695 }
696
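/* Map the netfilter hook to the corresponding IP defrag user
 * (LOCAL_IN, FORWARD or OUT) so reassembly is tracked per hook.
 */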
697 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
698 {
699 if (NF_INET_LOCAL_IN == hooknum)
700 return IP_DEFRAG_VS_IN;
701 if (NF_INET_FORWARD == hooknum)
702 return IP_DEFRAG_VS_FWD;
703 return IP_DEFRAG_VS_OUT;
704 }
705
706 static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs,
707 struct sk_buff *skb, u_int32_t user)
708 {
709 int err;
710
711 local_bh_disable();
712 err = ip_defrag(ipvs->net, skb, user);
713 local_bh_enable();
714 if (!err)
715 ip_send_check(ip_hdr(skb));
716
717 return err;
718 }
719
720 static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
721 struct sk_buff *skb, unsigned int hooknum)
722 {
723 if (!sysctl_snat_reroute(ipvs))
724 return 0;
725 /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
726 if (NF_INET_LOCAL_IN == hooknum)
727 return 0;
728 #ifdef CONFIG_IP_VS_IPV6
729 if (af == AF_INET6) {
730 struct dst_entry *dst = skb_dst(skb);
731
732 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
733 ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0)
734 return 1;
735 } else
736 #endif
737 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
738 ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0)
739 return 1;
740
741 return 0;
742 }
743
744 /*
745 * Packet has been made sufficiently writable in caller
746 * - inout: 1=in->out, 0=out->in
747 */
748 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
749 struct ip_vs_conn *cp, int inout)
750 {
751 struct iphdr *iph = ip_hdr(skb);
752 unsigned int icmp_offset = iph->ihl*4;
753 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
754 icmp_offset);
755 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
756
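/* Rewrite both the outer IP header and the IP header embedded in the
 * ICMP payload so the error refers to the translated addresses.
 */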
757 if (inout) {
758 iph->saddr = cp->vaddr.ip;
759 ip_send_check(iph);
760 ciph->daddr = cp->vaddr.ip;
761 ip_send_check(ciph);
762 } else {
763 iph->daddr = cp->daddr.ip;
764 ip_send_check(iph);
765 ciph->saddr = cp->daddr.ip;
766 ip_send_check(ciph);
767 }
768
769 /* the TCP/UDP/SCTP port */
770 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
771 IPPROTO_SCTP == ciph->protocol) {
772 __be16 *ports = (void *)ciph + ciph->ihl*4;
773
774 if (inout)
775 ports[1] = cp->vport;
776 else
777 ports[0] = cp->dport;
778 }
779
780 /* And finally the ICMP checksum */
781 icmph->checksum = 0;
782 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
783 skb->ip_summed = CHECKSUM_UNNECESSARY;
784
785 if (inout)
786 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
787 "Forwarding altered outgoing ICMP");
788 else
789 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
790 "Forwarding altered incoming ICMP");
791 }
792
793 #ifdef CONFIG_IP_VS_IPV6
794 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
795 struct ip_vs_conn *cp, int inout)
796 {
797 struct ipv6hdr *iph = ipv6_hdr(skb);
798 unsigned int icmp_offset = 0;
799 unsigned int offs = 0; /* header offset*/
800 int protocol;
801 struct icmp6hdr *icmph;
802 struct ipv6hdr *ciph;
803 unsigned short fragoffs;
804
805 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
806 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
807 offs = icmp_offset + sizeof(struct icmp6hdr);
808 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
809
810 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
811
812 if (inout) {
813 iph->saddr = cp->vaddr.in6;
814 ciph->daddr = cp->vaddr.in6;
815 } else {
816 iph->daddr = cp->daddr.in6;
817 ciph->saddr = cp->daddr.in6;
818 }
819
820 /* the TCP/UDP/SCTP port */
821 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
822 IPPROTO_SCTP == protocol)) {
823 __be16 *ports = (void *)(skb_network_header(skb) + offs);
824
825 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
826 ntohs(inout ? ports[1] : ports[0]),
827 ntohs(inout ? cp->vport : cp->dport));
828 if (inout)
829 ports[1] = cp->vport;
830 else
831 ports[0] = cp->dport;
832 }
833
834 /* And finally the ICMP checksum */
835 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
836 skb->len - icmp_offset,
837 IPPROTO_ICMPV6, 0);
838 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
839 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
840 skb->ip_summed = CHECKSUM_PARTIAL;
841
842 if (inout)
843 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
844 (void *)ciph - (void *)iph,
845 "Forwarding altered outgoing ICMPv6");
846 else
847 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
848 (void *)ciph - (void *)iph,
849 "Forwarding altered incoming ICMPv6");
850 }
851 #endif
852
853 /* Handle relevant response ICMP messages - forward to the right
854 * destination host.
855 */
856 static int handle_response_icmp(int af, struct sk_buff *skb,
857 union nf_inet_addr *snet,
858 __u8 protocol, struct ip_vs_conn *cp,
859 struct ip_vs_protocol *pp,
860 unsigned int offset, unsigned int ihl,
861 unsigned int hooknum)
862 {
863 unsigned int verdict = NF_DROP;
864
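/* Only LVS-NAT (masquerading) connections need the embedded ICMP
 * payload rewritten; other forwarding methods skip straight to the
 * accounting below and pass the reply on.
 */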
865 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
866 goto after_nat;
867
868 /* Ensure the checksum is correct */
869 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
870 /* Failed checksum! */
871 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
872 IP_VS_DBG_ADDR(af, snet));
873 goto out;
874 }
875
876 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
877 IPPROTO_SCTP == protocol)
878 offset += 2 * sizeof(__u16);
879 if (skb_ensure_writable(skb, offset))
880 goto out;
881
882 #ifdef CONFIG_IP_VS_IPV6
883 if (af == AF_INET6)
884 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
885 else
886 #endif
887 ip_vs_nat_icmp(skb, pp, cp, 1);
888
889 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
890 goto out;
891
892 after_nat:
893 /* do the statistics and put it back */
894 ip_vs_out_stats(cp, skb);
895
896 skb->ipvs_property = 1;
897 if (!(cp->flags & IP_VS_CONN_F_NFCT))
898 ip_vs_notrack(skb);
899 else
900 ip_vs_update_conntrack(skb, cp, 0);
901 verdict = NF_ACCEPT;
902
903 out:
904 __ip_vs_conn_put(cp);
905
906 return verdict;
907 }
908
909 /*
910 * Handle ICMP messages in the inside-to-outside direction (outgoing).
911 * Find any that might be relevant, check against existing connections.
912 * Currently handles error types - unreachable, quench, ttl exceeded.
913 */
914 static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
915 int *related, unsigned int hooknum)
916 {
917 struct iphdr *iph;
918 struct icmphdr _icmph, *ic;
919 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
920 struct ip_vs_iphdr ciph;
921 struct ip_vs_conn *cp;
922 struct ip_vs_protocol *pp;
923 unsigned int offset, ihl;
924 union nf_inet_addr snet;
925
926 *related = 1;
927
928 /* reassemble IP fragments */
929 if (ip_is_fragment(ip_hdr(skb))) {
930 if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
931 return NF_STOLEN;
932 }
933
934 iph = ip_hdr(skb);
935 offset = ihl = iph->ihl * 4;
936 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
937 if (ic == NULL)
938 return NF_DROP;
939
940 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
941 ic->type, ntohs(icmp_id(ic)),
942 &iph->saddr, &iph->daddr);
943
944 /*
945 * Work through seeing if this is for us.
946 * These checks are supposed to be in an order that means easy
947 * things are checked first to speed up processing.... however
948 * this means that some packets will manage to get a long way
949 * down this stack and then be rejected, but that's life.
950 */
951 if ((ic->type != ICMP_DEST_UNREACH) &&
952 (ic->type != ICMP_SOURCE_QUENCH) &&
953 (ic->type != ICMP_TIME_EXCEEDED)) {
954 *related = 0;
955 return NF_ACCEPT;
956 }
957
958 /* Now find the contained IP header */
959 offset += sizeof(_icmph);
960 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
961 if (cih == NULL)
962 return NF_ACCEPT; /* The packet looks wrong, ignore */
963
964 pp = ip_vs_proto_get(cih->protocol);
965 if (!pp)
966 return NF_ACCEPT;
967
968 /* Is the embedded protocol header present? */
969 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
970 pp->dont_defrag))
971 return NF_ACCEPT;
972
973 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
974 "Checking outgoing ICMP for");
975
976 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
977
978 /* The embedded headers contain source and dest in reverse order */
979 cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
980 ipvs, AF_INET, skb, &ciph);
981 if (!cp)
982 return NF_ACCEPT;
983
984 snet.ip = iph->saddr;
985 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
986 pp, ciph.len, ihl, hooknum);
987 }
988
989 #ifdef CONFIG_IP_VS_IPV6
990 static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
991 int *related, unsigned int hooknum,
992 struct ip_vs_iphdr *ipvsh)
993 {
994 struct icmp6hdr _icmph, *ic;
995 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
996 struct ip_vs_conn *cp;
997 struct ip_vs_protocol *pp;
998 union nf_inet_addr snet;
999 unsigned int offset;
1000
1001 *related = 1;
1002 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph);
1003 if (ic == NULL)
1004 return NF_DROP;
1005
1006 /*
1007 * Work through seeing if this is for us.
1008 * These checks are supposed to be in an order that means easy
1009 * things are checked first to speed up processing.... however
1010 * this means that some packets will manage to get a long way
1011 * down this stack and then be rejected, but that's life.
1012 */
1013 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1014 *related = 0;
1015 return NF_ACCEPT;
1016 }
1017 /* A fragment header before the ICMP header tells us that
1018 * this is not an error message, since those cannot be fragmented.
1019 */
1020 if (ipvsh->flags & IP6_FH_F_FRAG)
1021 return NF_DROP;
1022
1023 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1024 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1025 &ipvsh->saddr, &ipvsh->daddr);
1026
1027 if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, ipvsh->len + sizeof(_icmph),
1028 true, &ciph))
1029 return NF_ACCEPT; /* The packet looks wrong, ignore */
1030
1031 pp = ip_vs_proto_get(ciph.protocol);
1032 if (!pp)
1033 return NF_ACCEPT;
1034
1035 /* The embedded headers contain source and dest in reverse order */
1036 cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
1037 ipvs, AF_INET6, skb, &ciph);
1038 if (!cp)
1039 return NF_ACCEPT;
1040
1041 snet.in6 = ciph.saddr.in6;
1042 offset = ciph.len;
1043 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
1044 pp, offset, sizeof(struct ipv6hdr),
1045 hooknum);
1046 }
1047 #endif
1048
1049 /*
1050 * Check if the SCTP chunk is an ABORT chunk
1051 */
1052 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
1053 {
1054 struct sctp_chunkhdr *sch, schunk;
1055 sch = skb_header_pointer(skb, nh_len + sizeof(struct sctphdr),
1056 sizeof(schunk), &schunk);
1057 if (sch == NULL)
1058 return 0;
1059 if (sch->type == SCTP_CID_ABORT)
1060 return 1;
1061 return 0;
1062 }
1063
1064 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1065 {
1066 struct tcphdr _tcph, *th;
1067
1068 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1069 if (th == NULL)
1070 return 0;
1071 return th->rst;
1072 }
1073
1074 static inline bool is_new_conn(const struct sk_buff *skb,
1075 struct ip_vs_iphdr *iph)
1076 {
1077 switch (iph->protocol) {
1078 case IPPROTO_TCP: {
1079 struct tcphdr _tcph, *th;
1080
1081 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1082 if (th == NULL)
1083 return false;
1084 return th->syn;
1085 }
1086 case IPPROTO_SCTP: {
1087 struct sctp_chunkhdr *sch, schunk;
1088
1089 sch = skb_header_pointer(skb, iph->len + sizeof(struct sctphdr),
1090 sizeof(schunk), &schunk);
1091 if (sch == NULL)
1092 return false;
1093 return sch->type == SCTP_CID_INIT;
1094 }
1095 default:
1096 return false;
1097 }
1098 }
1099
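/* Decide whether an existing connection in a terminal state may be
 * replaced by a fresh one when conn_reuse_mode is enabled; with bit
 * 0x2 set, TCP connections left in FIN_WAIT without output packets
 * also qualify.
 */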
1100 static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
1101 int conn_reuse_mode)
1102 {
1103 /* Controlled (FTP DATA or persistence)? */
1104 if (cp->control)
1105 return false;
1106
1107 switch (cp->protocol) {
1108 case IPPROTO_TCP:
1109 return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
1110 (cp->state == IP_VS_TCP_S_CLOSE) ||
1111 ((conn_reuse_mode & 2) &&
1112 (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
1113 (cp->flags & IP_VS_CONN_F_NOOUTPUT));
1114 case IPPROTO_SCTP:
1115 return cp->state == IP_VS_SCTP_S_CLOSED;
1116 default:
1117 return false;
1118 }
1119 }
1120
1121 /* Generic function to create new connections for outgoing RS packets
1122 *
1123 * Pre-requisites for successful connection creation:
1124 * 1) Virtual Service is NOT fwmark based:
1125 * In fwmark-VS actual vaddr and vport are unknown to IPVS
1126 * 2) Real Server and Virtual Service were NOT configured without port:
1127 * This is to allow match of different VS to the same RS ip-addr
1128 */
1129 struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
1130 struct ip_vs_dest *dest,
1131 struct sk_buff *skb,
1132 const struct ip_vs_iphdr *iph,
1133 __be16 dport,
1134 __be16 cport)
1135 {
1136 struct ip_vs_conn_param param;
1137 struct ip_vs_conn *ct = NULL, *cp = NULL;
1138 const union nf_inet_addr *vaddr, *daddr, *caddr;
1139 union nf_inet_addr snet;
1140 __be16 vport;
1141 unsigned int flags;
1142
1143 EnterFunction(12);
1144 vaddr = &svc->addr;
1145 vport = svc->port;
1146 daddr = &iph->saddr;
1147 caddr = &iph->daddr;
1148
1149 /* check pre-requisites are satisfied */
1150 if (svc->fwmark)
1151 return NULL;
1152 if (!vport || !dport)
1153 return NULL;
1154
1155 /* for persistent service first create connection template */
1156 if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
1157 /* apply netmask the same way ingress-side does */
1158 #ifdef CONFIG_IP_VS_IPV6
1159 if (svc->af == AF_INET6)
1160 ipv6_addr_prefix(&snet.in6, &caddr->in6,
1161 (__force __u32)svc->netmask);
1162 else
1163 #endif
1164 snet.ip = caddr->ip & svc->netmask;
1165 /* fill params and create template if not existent */
1166 if (ip_vs_conn_fill_param_persist(svc, skb, iph->protocol,
1167 &snet, 0, vaddr,
1168 vport, &param) < 0)
1169 return NULL;
1170 ct = ip_vs_ct_in_get(&param);
1171 /* check if template exists and points to the same dest */
1172 if (!ct || !ip_vs_check_template(ct, dest)) {
1173 ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
1174 IP_VS_CONN_F_TEMPLATE, dest, 0);
1175 if (!ct) {
1176 kfree(param.pe_data);
1177 return NULL;
1178 }
1179 ct->timeout = svc->timeout;
1180 } else {
1181 kfree(param.pe_data);
1182 }
1183 }
1184
1185 /* connection flags */
1186 flags = ((svc->flags & IP_VS_SVC_F_ONEPACKET) &&
1187 iph->protocol == IPPROTO_UDP) ? IP_VS_CONN_F_ONE_PACKET : 0;
1188 /* create connection */
1189 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
1190 caddr, cport, vaddr, vport, &param);
1191 cp = ip_vs_conn_new(&param, dest->af, daddr, dport, flags, dest, 0);
1192 if (!cp) {
1193 if (ct)
1194 ip_vs_conn_put(ct);
1195 return NULL;
1196 }
1197 if (ct) {
1198 ip_vs_control_add(cp, ct);
1199 ip_vs_conn_put(ct);
1200 }
1201 ip_vs_conn_stats(cp, svc);
1202
1203 /* return connection (will be used to handle outgoing packet) */
1204 IP_VS_DBG_BUF(6, "New connection RS-initiated:%c c:%s:%u v:%s:%u "
1205 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
1206 ip_vs_fwd_tag(cp),
1207 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
1208 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
1209 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
1210 cp->flags, refcount_read(&cp->refcnt));
1211 LeaveFunction(12);
1212 return cp;
1213 }
1214
1215 /* Handle outgoing packets which are considered requests initiated by
1216 * real servers, so that subsequent responses from external client can be
1217 * routed to the right real server.
1218 * Used also for outgoing responses in OPS mode.
1219 *
1220 * Connection management is handled by persistent-engine specific callback.
1221 */
1222 static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
1223 struct netns_ipvs *ipvs,
1224 int af, struct sk_buff *skb,
1225 const struct ip_vs_iphdr *iph)
1226 {
1227 struct ip_vs_dest *dest;
1228 struct ip_vs_conn *cp = NULL;
1229 __be16 _ports[2], *pptr;
1230
1231 if (hooknum == NF_INET_LOCAL_IN)
1232 return NULL;
1233
1234 pptr = frag_safe_skb_hp(skb, iph->len,
1235 sizeof(_ports), _ports);
1236 if (!pptr)
1237 return NULL;
1238
1239 dest = ip_vs_find_real_service(ipvs, af, iph->protocol,
1240 &iph->saddr, pptr[0]);
1241 if (dest) {
1242 struct ip_vs_service *svc;
1243 struct ip_vs_pe *pe;
1244
1245 svc = rcu_dereference(dest->svc);
1246 if (svc) {
1247 pe = rcu_dereference(svc->pe);
1248 if (pe && pe->conn_out)
1249 cp = pe->conn_out(svc, dest, skb, iph,
1250 pptr[0], pptr[1]);
1251 }
1252 }
1253
1254 return cp;
1255 }
1256
1257 /* Handle response packets: rewrite addresses and send away...
1258 */
1259 static unsigned int
1260 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1261 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1262 unsigned int hooknum)
1263 {
1264 struct ip_vs_protocol *pp = pd->pp;
1265
1266 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
1267 goto after_nat;
1268
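/* LVS-NAT reply: mangle the transport header via the protocol's
 * snat_handler and rewrite the source address to the VIP.
 */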
1269 IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
1270
1271 if (skb_ensure_writable(skb, iph->len))
1272 goto drop;
1273
1274 /* mangle the packet */
1275 if (pp->snat_handler &&
1276 !SNAT_CALL(pp->snat_handler, skb, pp, cp, iph))
1277 goto drop;
1278
1279 #ifdef CONFIG_IP_VS_IPV6
1280 if (af == AF_INET6)
1281 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1282 else
1283 #endif
1284 {
1285 ip_hdr(skb)->saddr = cp->vaddr.ip;
1286 ip_send_check(ip_hdr(skb));
1287 }
1288
1289 /*
1290 * nf_iterate does not expect change in the skb->dst->dev.
1291 * It looks like it is not fatal to enable this code for hooks
1292 * where our handlers are at the end of the chain list and
1293 * when all next handlers use skb->dst->dev and not outdev.
1294 * It will definitely route properly the inout NAT traffic
1295 * when multiple paths are used.
1296 */
1297
1298 /* For policy routing, packets originating from this
1299 * machine itself may be routed differently to packets
1300 * passing through. We want this packet to be routed as
1301 * if it came from this machine itself. So re-compute
1302 * the routing information.
1303 */
1304 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
1305 goto drop;
1306
1307 IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
1308
1309 after_nat:
1310 ip_vs_out_stats(cp, skb);
1311 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1312 skb->ipvs_property = 1;
1313 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1314 ip_vs_notrack(skb);
1315 else
1316 ip_vs_update_conntrack(skb, cp, 0);
1317 ip_vs_conn_put(cp);
1318
1319 LeaveFunction(11);
1320 return NF_ACCEPT;
1321
1322 drop:
1323 ip_vs_conn_put(cp);
1324 kfree_skb(skb);
1325 LeaveFunction(11);
1326 return NF_STOLEN;
1327 }
1328
1329 /*
1330 * Check if outgoing packet belongs to the established ip_vs_conn.
1331 */
1332 static unsigned int
1333 ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1334 {
1335 struct ip_vs_iphdr iph;
1336 struct ip_vs_protocol *pp;
1337 struct ip_vs_proto_data *pd;
1338 struct ip_vs_conn *cp;
1339 struct sock *sk;
1340
1341 EnterFunction(11);
1342
1343 /* Already marked as IPVS request or reply? */
1344 if (skb->ipvs_property)
1345 return NF_ACCEPT;
1346
1347 sk = skb_to_full_sk(skb);
1348 /* Bad... Do not break raw sockets */
1349 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1350 af == AF_INET)) {
1351
1352 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1353 return NF_ACCEPT;
1354 }
1355
1356 if (unlikely(!skb_dst(skb)))
1357 return NF_ACCEPT;
1358
1359 if (!ipvs->enable)
1360 return NF_ACCEPT;
1361
1362 ip_vs_fill_iph_skb(af, skb, false, &iph);
1363 #ifdef CONFIG_IP_VS_IPV6
1364 if (af == AF_INET6) {
1365 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1366 int related;
1367 int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
1368 hooknum, &iph);
1369
1370 if (related)
1371 return verdict;
1372 }
1373 } else
1374 #endif
1375 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1376 int related;
1377 int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
1378
1379 if (related)
1380 return verdict;
1381 }
1382
1383 pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1384 if (unlikely(!pd))
1385 return NF_ACCEPT;
1386 pp = pd->pp;
1387
1388 /* reassemble IP fragments */
1389 #ifdef CONFIG_IP_VS_IPV6
1390 if (af == AF_INET)
1391 #endif
1392 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1393 if (ip_vs_gather_frags(ipvs, skb,
1394 ip_vs_defrag_user(hooknum)))
1395 return NF_STOLEN;
1396
1397 ip_vs_fill_iph_skb(AF_INET, skb, false, &iph);
1398 }
1399
1400 /*
1401 * Check if the packet belongs to an existing entry
1402 */
1403 cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
1404 ipvs, af, skb, &iph);
1405
1406 if (likely(cp))
1407 return handle_response(af, skb, pd, cp, &iph, hooknum);
1408
1409 /* Check for real-server-started requests */
1410 if (atomic_read(&ipvs->conn_out_counter)) {
1411 /* Currently only for UDP:
1412 * connection oriented protocols typically use
1413 * ephemeral ports for outgoing connections, so
1414 * related incoming responses would not match any VS
1415 */
1416 if (pp->protocol == IPPROTO_UDP) {
1417 cp = __ip_vs_rs_conn_out(hooknum, ipvs, af, skb, &iph);
1418 if (likely(cp))
1419 return handle_response(af, skb, pd, cp, &iph,
1420 hooknum);
1421 }
1422 }
1423
1424 if (sysctl_nat_icmp_send(ipvs) &&
1425 (pp->protocol == IPPROTO_TCP ||
1426 pp->protocol == IPPROTO_UDP ||
1427 pp->protocol == IPPROTO_SCTP)) {
1428 __be16 _ports[2], *pptr;
1429
1430 pptr = frag_safe_skb_hp(skb, iph.len,
1431 sizeof(_ports), _ports);
1432 if (pptr == NULL)
1433 return NF_ACCEPT; /* Not for me */
1434 if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
1435 pptr[0])) {
1436 /*
1437 * Notify the real server: there is no
1438 * existing entry if it is not RST
1439 * packet or not TCP packet.
1440 */
1441 if ((iph.protocol != IPPROTO_TCP &&
1442 iph.protocol != IPPROTO_SCTP)
1443 || ((iph.protocol == IPPROTO_TCP
1444 && !is_tcp_reset(skb, iph.len))
1445 || (iph.protocol == IPPROTO_SCTP
1446 && !is_sctp_abort(skb,
1447 iph.len)))) {
1448 #ifdef CONFIG_IP_VS_IPV6
1449 if (af == AF_INET6) {
1450 if (!skb->dev)
1451 skb->dev = ipvs->net->loopback_dev;
1452 icmpv6_send(skb,
1453 ICMPV6_DEST_UNREACH,
1454 ICMPV6_PORT_UNREACH,
1455 0);
1456 } else
1457 #endif
1458 icmp_send(skb,
1459 ICMP_DEST_UNREACH,
1460 ICMP_PORT_UNREACH, 0);
1461 return NF_DROP;
1462 }
1463 }
1464 }
1465
1466 IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
1467 "ip_vs_out: packet continues traversal as normal");
1468 return NF_ACCEPT;
1469 }
1470
1471 /*
1472 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1473 * used only for VS/NAT.
1474 * Check if packet is reply for established ip_vs_conn.
1475 */
1476 static unsigned int
1477 ip_vs_reply4(void *priv, struct sk_buff *skb,
1478 const struct nf_hook_state *state)
1479 {
1480 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1481 }
1482
1483 /*
1484 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1485 * Check if packet is reply for established ip_vs_conn.
1486 */
1487 static unsigned int
1488 ip_vs_local_reply4(void *priv, struct sk_buff *skb,
1489 const struct nf_hook_state *state)
1490 {
1491 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1492 }
1493
1494 #ifdef CONFIG_IP_VS_IPV6
1495
1496 /*
1497 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1498 * used only for VS/NAT.
1499 * Check if packet is reply for established ip_vs_conn.
1500 */
1501 static unsigned int
1502 ip_vs_reply6(void *priv, struct sk_buff *skb,
1503 const struct nf_hook_state *state)
1504 {
1505 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1506 }
1507
1508 /*
1509 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1510 * Check if packet is reply for established ip_vs_conn.
1511 */
1512 static unsigned int
1513 ip_vs_local_reply6(void *priv, struct sk_buff *skb,
1514 const struct nf_hook_state *state)
1515 {
1516 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1517 }
1518
1519 #endif
1520
1521 static unsigned int
1522 ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
1523 struct ip_vs_proto_data *pd,
1524 int *verdict, struct ip_vs_conn **cpp,
1525 struct ip_vs_iphdr *iph)
1526 {
1527 struct ip_vs_protocol *pp = pd->pp;
1528
1529 if (!iph->fragoffs) {
1530 /* No (second) fragments need to enter here, as nf_defrag_ipv6
1531 * replayed fragment zero will already have created the cp
1532 */
1533
1534 /* Schedule and create new connection entry into cpp */
1535 if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
1536 return 0;
1537 }
1538
1539 if (unlikely(!*cpp)) {
1540 /* sorry, all this trouble for a no-hit :) */
1541 IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
1542 "ip_vs_in: packet continues traversal as normal");
1543
1544 /* Fragment couldn't be mapped to a conn entry */
1545 if (iph->fragoffs)
1546 IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
1547 "unhandled fragment");
1548
1549 *verdict = NF_ACCEPT;
1550 return 0;
1551 }
1552
1553 return 1;
1554 }
1555
1556 /* Check the UDP tunnel and return its header length */
1557 static int ipvs_udp_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
1558 unsigned int offset, __u16 af,
1559 const union nf_inet_addr *daddr, __u8 *proto)
1560 {
1561 struct udphdr _udph, *udph;
1562 struct ip_vs_dest *dest;
1563
1564 udph = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
1565 if (!udph)
1566 goto unk;
1567 offset += sizeof(struct udphdr);
1568 dest = ip_vs_find_tunnel(ipvs, af, daddr, udph->dest);
1569 if (!dest)
1570 goto unk;
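/* Only the GUE tunnel type carries a parsable header after UDP;
 * any other tunnel type is reported as unknown here.
 */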
1571 if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
1572 struct guehdr _gueh, *gueh;
1573
1574 gueh = skb_header_pointer(skb, offset, sizeof(_gueh), &_gueh);
1575 if (!gueh)
1576 goto unk;
1577 if (gueh->control != 0 || gueh->version != 0)
1578 goto unk;
1579 /* Later we can support also IPPROTO_IPV6 */
1580 if (gueh->proto_ctype != IPPROTO_IPIP)
1581 goto unk;
1582 *proto = gueh->proto_ctype;
1583 return sizeof(struct udphdr) + sizeof(struct guehdr) +
1584 (gueh->hlen << 2);
1585 }
1586
1587 unk:
1588 return 0;
1589 }
1590
1591 /* Check the GRE tunnel and return its header length */
1592 static int ipvs_gre_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
1593 unsigned int offset, __u16 af,
1594 const union nf_inet_addr *daddr, __u8 *proto)
1595 {
1596 struct gre_base_hdr _greh, *greh;
1597 struct ip_vs_dest *dest;
1598
1599 greh = skb_header_pointer(skb, offset, sizeof(_greh), &_greh);
1600 if (!greh)
1601 goto unk;
1602 dest = ip_vs_find_tunnel(ipvs, af, daddr, 0);
1603 if (!dest)
1604 goto unk;
1605 if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
1606 __be16 type;
1607
1608 /* Only support version 0 and C (csum) */
1609 if ((greh->flags & ~GRE_CSUM) != 0)
1610 goto unk;
1611 type = greh->protocol;
1612 /* Later we can support also IPPROTO_IPV6 */
1613 if (type != htons(ETH_P_IP))
1614 goto unk;
1615 *proto = IPPROTO_IPIP;
1616 return gre_calc_hlen(gre_flags_to_tnl_flags(greh->flags));
1617 }
1618
1619 unk:
1620 return 0;
1621 }
1622
1623 /*
1624 * Handle ICMP messages in the outside-to-inside direction (incoming).
1625 * Find any that might be relevant, check against existing connections,
1626 * forward to the right destination host if relevant.
1627 * Currently handles error types - unreachable, quench, ttl exceeded.
1628 */
1629 static int
1630 ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
1631 unsigned int hooknum)
1632 {
1633 struct iphdr *iph;
1634 struct icmphdr _icmph, *ic;
1635 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1636 struct ip_vs_iphdr ciph;
1637 struct ip_vs_conn *cp;
1638 struct ip_vs_protocol *pp;
1639 struct ip_vs_proto_data *pd;
1640 unsigned int offset, offset2, ihl, verdict;
1641 bool tunnel, new_cp = false;
1642 union nf_inet_addr *raddr;
1643 char *outer_proto = "IPIP";
1644
1645 *related = 1;
1646
1647 /* reassemble IP fragments */
1648 if (ip_is_fragment(ip_hdr(skb))) {
1649 if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
1650 return NF_STOLEN;
1651 }
1652
1653 iph = ip_hdr(skb);
1654 offset = ihl = iph->ihl * 4;
1655 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1656 if (ic == NULL)
1657 return NF_DROP;
1658
1659 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1660 ic->type, ntohs(icmp_id(ic)),
1661 &iph->saddr, &iph->daddr);
1662
1663 /*
1664 * Work through seeing if this is for us.
1665 * These checks are supposed to be in an order that means easy
1666 * things are checked first to speed up processing.... however
1667 * this means that some packets will manage to get a long way
1668 * down this stack and then be rejected, but that's life.
1669 */
1670 if ((ic->type != ICMP_DEST_UNREACH) &&
1671 (ic->type != ICMP_SOURCE_QUENCH) &&
1672 (ic->type != ICMP_TIME_EXCEEDED)) {
1673 *related = 0;
1674 return NF_ACCEPT;
1675 }
1676
1677 /* Now find the contained IP header */
1678 offset += sizeof(_icmph);
1679 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1680 if (cih == NULL)
1681 return NF_ACCEPT; /* The packet looks wrong, ignore */
1682 raddr = (union nf_inet_addr *)&cih->daddr;
1683
1684 /* Special case for errors for IPIP/UDP/GRE tunnel packets */
1685 tunnel = false;
1686 if (cih->protocol == IPPROTO_IPIP) {
1687 struct ip_vs_dest *dest;
1688
1689 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1690 return NF_ACCEPT;
1691 /* Error for our IPIP must arrive at LOCAL_IN */
1692 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1693 return NF_ACCEPT;
1694 dest = ip_vs_find_tunnel(ipvs, AF_INET, raddr, 0);
1695 /* Only for known tunnel */
1696 if (!dest || dest->tun_type != IP_VS_CONN_F_TUNNEL_TYPE_IPIP)
1697 return NF_ACCEPT;
1698 offset += cih->ihl * 4;
1699 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1700 if (cih == NULL)
1701 return NF_ACCEPT; /* The packet looks wrong, ignore */
1702 tunnel = true;
1703 } else if ((cih->protocol == IPPROTO_UDP || /* Can be UDP encap */
1704 cih->protocol == IPPROTO_GRE) && /* Can be GRE encap */
1705 /* Error for our tunnel must arrive at LOCAL_IN */
1706 (skb_rtable(skb)->rt_flags & RTCF_LOCAL)) {
1707 __u8 iproto;
1708 int ulen;
1709
1710 /* Non-first fragment has no UDP/GRE header */
1711 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1712 return NF_ACCEPT;
1713 offset2 = offset + cih->ihl * 4;
1714 if (cih->protocol == IPPROTO_UDP) {
1715 ulen = ipvs_udp_decap(ipvs, skb, offset2, AF_INET,
1716 raddr, &iproto);
1717 outer_proto = "UDP";
1718 } else {
1719 ulen = ipvs_gre_decap(ipvs, skb, offset2, AF_INET,
1720 raddr, &iproto);
1721 outer_proto = "GRE";
1722 }
1723 if (ulen > 0) {
1724 /* Skip IP and UDP/GRE tunnel headers */
1725 offset = offset2 + ulen;
1726 /* Now we should be at the original IP header */
1727 cih = skb_header_pointer(skb, offset, sizeof(_ciph),
1728 &_ciph);
1729 if (cih && cih->version == 4 && cih->ihl >= 5 &&
1730 iproto == IPPROTO_IPIP)
1731 tunnel = true;
1732 else
1733 return NF_ACCEPT;
1734 }
1735 }
1736
1737 pd = ip_vs_proto_data_get(ipvs, cih->protocol);
1738 if (!pd)
1739 return NF_ACCEPT;
1740 pp = pd->pp;
1741
1742 /* Is the embedded protocol header present? */
1743 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1744 pp->dont_defrag))
1745 return NF_ACCEPT;
1746
1747 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1748 "Checking incoming ICMP for");
1749
1750 offset2 = offset;
1751 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !tunnel, &ciph);
1752 offset = ciph.len;
1753
1754 /* The embedded headers contain source and dest in reverse order.
1755 * For IPIP/UDP/GRE tunnel this is error for request, not for reply.
1756 */
1757 cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
1758 ipvs, AF_INET, skb, &ciph);
1759
1760 if (!cp) {
1761 int v;
1762
1763 if (tunnel || !sysctl_schedule_icmp(ipvs))
1764 return NF_ACCEPT;
1765
1766 if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
1767 return v;
1768 new_cp = true;
1769 }
1770
1771 verdict = NF_DROP;
1772
1773 /* Ensure the checksum is correct */
1774 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1775 /* Failed checksum! */
1776 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1777 &iph->saddr);
1778 goto out;
1779 }
1780
1781 if (tunnel) {
1782 __be32 info = ic->un.gateway;
1783 __u8 type = ic->type;
1784 __u8 code = ic->code;
1785
1786 /* Update the MTU */
1787 if (ic->type == ICMP_DEST_UNREACH &&
1788 ic->code == ICMP_FRAG_NEEDED) {
1789 struct ip_vs_dest *dest = cp->dest;
1790 u32 mtu = ntohs(ic->un.frag.mtu);
1791 __be16 frag_off = cih->frag_off;
1792
1793 /* Strip outer IP and ICMP, go to IPIP/UDP/GRE header */
1794 if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
1795 goto ignore_tunnel;
1796 offset2 -= ihl + sizeof(_icmph);
1797 skb_reset_network_header(skb);
1798 IP_VS_DBG(12, "ICMP for %s %pI4->%pI4: mtu=%u\n",
1799 outer_proto, &ip_hdr(skb)->saddr,
1800 &ip_hdr(skb)->daddr, mtu);
1801 ipv4_update_pmtu(skb, ipvs->net, mtu, 0, 0);
1802 /* Client uses PMTUD? */
1803 if (!(frag_off & htons(IP_DF)))
1804 goto ignore_tunnel;
1805 /* Prefer the resulting PMTU */
1806 if (dest) {
1807 struct ip_vs_dest_dst *dest_dst;
1808
1809 dest_dst = rcu_dereference(dest->dest_dst);
1810 if (dest_dst)
1811 mtu = dst_mtu(dest_dst->dst_cache);
1812 }
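/* Account for the outer IPIP header added on the tunnel path, but
 * skip the adjustment if it would push the MTU below the IPv4
 * minimum of 68.
 */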
1813 if (mtu > 68 + sizeof(struct iphdr))
1814 mtu -= sizeof(struct iphdr);
1815 info = htonl(mtu);
1816 }
1817 /* Strip outer IP, ICMP and IPIP/UDP/GRE, go to IP header of
1818 * original request.
1819 */
1820 if (pskb_pull(skb, offset2) == NULL)
1821 goto ignore_tunnel;
1822 skb_reset_network_header(skb);
1823 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1824 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1825 type, code, ntohl(info));
1826 icmp_send(skb, type, code, info);
1827 /* The ICMP error may be shorter than the original packet, but account it anyway */
1828 ip_vs_out_stats(cp, skb);
1829
1830 ignore_tunnel:
1831 consume_skb(skb);
1832 verdict = NF_STOLEN;
1833 goto out;
1834 }
1835
1836 /* do the statistics and put it back */
1837 ip_vs_in_stats(cp, skb);
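/* Also cover the embedded ports so they are mangled along with
 * the addresses, as in the IPv6 variant below.
 */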
1838 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1839 IPPROTO_SCTP == cih->protocol)
1840 offset += 2 * sizeof(__u16);
1841 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1842
1843 out:
1844 if (likely(!new_cp))
1845 __ip_vs_conn_put(cp);
1846 else
1847 ip_vs_conn_put(cp);
1848
1849 return verdict;
1850 }
1851
1852 #ifdef CONFIG_IP_VS_IPV6
1853 static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
1854 int *related, unsigned int hooknum,
1855 struct ip_vs_iphdr *iph)
1856 {
1857 struct icmp6hdr _icmph, *ic;
1858 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1859 struct ip_vs_conn *cp;
1860 struct ip_vs_protocol *pp;
1861 struct ip_vs_proto_data *pd;
1862 unsigned int offset, verdict;
1863 bool new_cp = false;
1864
1865 *related = 1;
1866
1867 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph);
1868 if (ic == NULL)
1869 return NF_DROP;
1870
1871 /*
1872 * Work out whether this packet is for us.
1873 * These checks are ordered so that the cheap ones run first to
1874 * speed up processing; as a result, some packets will get a
1875 * long way down this path before being rejected, but that's
1876 * life.
1877 */
1878 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1879 *related = 0;
1880 return NF_ACCEPT;
1881 }
1882 /* A fragment header before the ICMP header tells us this is
1883 * not an error message, since error messages cannot be fragmented.
1884 */
1885 if (iph->flags & IP6_FH_F_FRAG)
1886 return NF_DROP;
1887
1888 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1889 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1890 &iph->saddr, &iph->daddr);
1891
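/* Skip past the ICMPv6 header to reach the embedded (original)
 * IPv6 header.
 */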
1892 offset = iph->len + sizeof(_icmph);
1893 if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
1894 return NF_ACCEPT;
1895
1896 pd = ip_vs_proto_data_get(ipvs, ciph.protocol);
1897 if (!pd)
1898 return NF_ACCEPT;
1899 pp = pd->pp;
1900
1901 /* Cannot handle fragmented embedded protocol */
1902 if (ciph.fragoffs)
1903 return NF_ACCEPT;
1904
1905 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
1906 "Checking incoming ICMPv6 for");
1907
1908 /* The embedded headers contain source and dest in reverse order,
1909 * unless the packet comes from localhost.
1910 */
1911 cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
1912 ipvs, AF_INET6, skb, &ciph);
1913
1914 if (!cp) {
1915 int v;
1916
1917 if (!sysctl_schedule_icmp(ipvs))
1918 return NF_ACCEPT;
1919
1920 if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
1921 return v;
1922
1923 new_cp = true;
1924 }
1925
1926 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1927 if ((hooknum == NF_INET_LOCAL_OUT) &&
1928 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1929 verdict = NF_ACCEPT;
1930 goto out;
1931 }
1932
1933 /* do the statistics and put it back */
1934 ip_vs_in_stats(cp, skb);
1935
1936 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1937 offset = ciph.len;
1938 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1939 IPPROTO_SCTP == ciph.protocol)
1940 offset += 2 * sizeof(__u16); /* Also mangle ports */
1941
1942 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
1943
1944 out:
1945 if (likely(!new_cp))
1946 __ip_vs_conn_put(cp);
1947 else
1948 ip_vs_conn_put(cp);
1949
1950 return verdict;
1951 }
1952 #endif
1953
1954
1955 /*
1956 * Check if it's for virtual services, look it up,
1957 * and send it on its way...
1958 */
1959 static unsigned int
1960 ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1961 {
1962 struct ip_vs_iphdr iph;
1963 struct ip_vs_protocol *pp;
1964 struct ip_vs_proto_data *pd;
1965 struct ip_vs_conn *cp;
1966 int ret, pkts;
1967 int conn_reuse_mode;
1968 struct sock *sk;
1969
1970 /* Already marked as IPVS request or reply? */
1971 if (skb->ipvs_property)
1972 return NF_ACCEPT;
1973
1974 /*
1975 * Big tappo ("big stopper"):
1976 * - remote client: only PACKET_HOST
1977 * - route: used for struct net when skb->dev is unset
1978 */
1979 if (unlikely((skb->pkt_type != PACKET_HOST &&
1980 hooknum != NF_INET_LOCAL_OUT) ||
1981 !skb_dst(skb))) {
1982 ip_vs_fill_iph_skb(af, skb, false, &iph);
1983 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1984 " ignored in hook %u\n",
1985 skb->pkt_type, iph.protocol,
1986 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1987 return NF_ACCEPT;
1988 }
1989 /* ipvs enabled in this netns ? */
1990 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1991 return NF_ACCEPT;
1992
1993 ip_vs_fill_iph_skb(af, skb, false, &iph);
1994
1995 /* Bad... Do not break raw sockets */
1996 sk = skb_to_full_sk(skb);
1997 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1998 af == AF_INET)) {
1999
2000 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
2001 return NF_ACCEPT;
2002 }
2003
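/* ICMP/ICMPv6 errors related to IPVS connections are handled here;
 * if the error is related, its verdict ends processing for this skb.
 */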
2004 #ifdef CONFIG_IP_VS_IPV6
2005 if (af == AF_INET6) {
2006 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
2007 int related;
2008 int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
2009 hooknum, &iph);
2010
2011 if (related)
2012 return verdict;
2013 }
2014 } else
2015 #endif
2016 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
2017 int related;
2018 int verdict = ip_vs_in_icmp(ipvs, skb, &related,
2019 hooknum);
2020
2021 if (related)
2022 return verdict;
2023 }
2024
2025 /* Protocol supported? */
2026 pd = ip_vs_proto_data_get(ipvs, iph.protocol);
2027 if (unlikely(!pd)) {
2028 /* The only way we'll see this packet again is if it's
2029 * encapsulated, so mark it with ipvs_property=1 so we
2030 * skip it if we're ignoring tunneled packets
2031 */
2032 if (sysctl_ignore_tunneled(ipvs))
2033 skb->ipvs_property = 1;
2034
2035 return NF_ACCEPT;
2036 }
2037 pp = pd->pp;
2038 /*
2039 * Check if the packet belongs to an existing connection entry
2040 */
2041 cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
2042 ipvs, af, skb, &iph);
2043
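/* When conn_reuse_mode is set, a packet that starts a new
 * connection (e.g. a TCP SYN) but matches an existing entry may
 * have that entry expired so the packet is rescheduled; if an old
 * conntrack entry is still attached, the packet is dropped instead.
 */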
2044 conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
2045 if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
2046 bool old_ct = false, resched = false;
2047
2048 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
2049 unlikely(!atomic_read(&cp->dest->weight))) {
2050 resched = true;
2051 old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
2052 } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
2053 old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
2054 if (!atomic_read(&cp->n_control)) {
2055 resched = true;
2056 } else {
2057 /* Do not reschedule controlling connection
2058 * that uses conntrack while it is still
2059 * referenced by controlled connection(s).
2060 */
2061 resched = !old_ct;
2062 }
2063 }
2064
2065 if (resched) {
2066 if (!old_ct)
2067 cp->flags &= ~IP_VS_CONN_F_NFCT;
2068 if (!atomic_read(&cp->n_control))
2069 ip_vs_conn_expire_now(cp);
2070 __ip_vs_conn_put(cp);
2071 if (old_ct)
2072 return NF_DROP;
2073 cp = NULL;
2074 }
2075 }
2076
2077 /* Check the server status */
2078 if (cp && cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
2079 /* the destination server is not available */
2080 if (sysctl_expire_nodest_conn(ipvs)) {
2081 bool old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
2082
2083 if (!old_ct)
2084 cp->flags &= ~IP_VS_CONN_F_NFCT;
2085
2086 ip_vs_conn_expire_now(cp);
2087 __ip_vs_conn_put(cp);
2088 if (old_ct)
2089 return NF_DROP;
2090 cp = NULL;
2091 } else {
2092 __ip_vs_conn_put(cp);
2093 return NF_DROP;
2094 }
2095 }
2096
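/* No matching connection: try to schedule a new one for this packet */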
2097 if (unlikely(!cp)) {
2098 int v;
2099
2100 if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
2101 return v;
2102 }
2103
2104 IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
2105
2106 ip_vs_in_stats(cp, skb);
2107 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
2108 if (cp->packet_xmit)
2109 ret = cp->packet_xmit(skb, cp, pp, &iph);
2110 /* do not touch skb anymore */
2111 else {
2112 IP_VS_DBG_RL("warning: packet_xmit is null");
2113 ret = NF_ACCEPT;
2114 }
2115
2116 /* Increase its packet counter and check whether it needs
2117 * to be synchronized.
2118 *
2119 * Sync the connection if it is about to close, to
2120 * encourage the standby servers to update the connection timeout.
2121 *
2122 * For ONE_PKT let ip_vs_sync_conn() do the filtering work.
2123 */
2124
2125 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
2126 pkts = sysctl_sync_threshold(ipvs);
2127 else
2128 pkts = atomic_inc_return(&cp->in_pkts);
2129
2130 if (ipvs->sync_state & IP_VS_STATE_MASTER)
2131 ip_vs_sync_conn(ipvs, cp, pkts);
2132 else if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
2133 /* increment is done inside ip_vs_sync_conn too */
2134 atomic_inc(&cp->control->in_pkts);
2135
2136 ip_vs_conn_put(cp);
2137 return ret;
2138 }
2139
2140 /*
2141 * AF_INET handler in NF_INET_LOCAL_IN chain
2142 * Schedule and forward packets from remote clients
2143 */
2144 static unsigned int
2145 ip_vs_remote_request4(void *priv, struct sk_buff *skb,
2146 const struct nf_hook_state *state)
2147 {
2148 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
2149 }
2150
2151 /*
2152 * AF_INET handler in NF_INET_LOCAL_OUT chain
2153 * Schedule and forward packets from local clients
2154 */
2155 static unsigned int
2156 ip_vs_local_request4(void *priv, struct sk_buff *skb,
2157 const struct nf_hook_state *state)
2158 {
2159 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
2160 }
2161
2162 #ifdef CONFIG_IP_VS_IPV6
2163
2164 /*
2165 * AF_INET6 handler in NF_INET_LOCAL_IN chain
2166 * Schedule and forward packets from remote clients
2167 */
2168 static unsigned int
2169 ip_vs_remote_request6(void *priv, struct sk_buff *skb,
2170 const struct nf_hook_state *state)
2171 {
2172 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
2173 }
2174
2175 /*
2176 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
2177 * Schedule and forward packets from local clients
2178 */
2179 static unsigned int
2180 ip_vs_local_request6(void *priv, struct sk_buff *skb,
2181 const struct nf_hook_state *state)
2182 {
2183 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
2184 }
2185
2186 #endif
2187
2188
2189 /*
2190 * It is hooked at the NF_INET_FORWARD chain, in order to catch
2191 * ICMP-related packets destined for 0.0.0.0/0.
2192 * When a fwmark-based virtual service is used, such as a transparent
2193 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
2194 * but ICMP destined for 0.0.0.0/0 cannot easily be marked and
2195 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
2196 * and send them to ip_vs_in_icmp.
2197 */
2198 static unsigned int
2199 ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
2200 const struct nf_hook_state *state)
2201 {
2202 int r;
2203 struct netns_ipvs *ipvs = net_ipvs(state->net);
2204
2205 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
2206 return NF_ACCEPT;
2207
2208 /* ipvs enabled in this netns ? */
2209 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
2210 return NF_ACCEPT;
2211
2212 return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
2213 }
2214
2215 #ifdef CONFIG_IP_VS_IPV6
2216 static unsigned int
2217 ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
2218 const struct nf_hook_state *state)
2219 {
2220 int r;
2221 struct netns_ipvs *ipvs = net_ipvs(state->net);
2222 struct ip_vs_iphdr iphdr;
2223
2224 ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
2225 if (iphdr.protocol != IPPROTO_ICMPV6)
2226 return NF_ACCEPT;
2227
2228 /* ipvs enabled in this netns ? */
2229 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
2230 return NF_ACCEPT;
2231
2232 return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
2233 }
2234 #endif
2235
2236
2237 static const struct nf_hook_ops ip_vs_ops4[] = {
2238 /* After packet filtering, change source only for VS/NAT */
2239 {
2240 .hook = ip_vs_reply4,
2241 .pf = NFPROTO_IPV4,
2242 .hooknum = NF_INET_LOCAL_IN,
2243 .priority = NF_IP_PRI_NAT_SRC - 2,
2244 },
2245 /* After packet filtering, forward packet through VS/DR, VS/TUN,
2246 * or VS/NAT(change destination), so that filtering rules can be
2247 * applied to IPVS. */
2248 {
2249 .hook = ip_vs_remote_request4,
2250 .pf = NFPROTO_IPV4,
2251 .hooknum = NF_INET_LOCAL_IN,
2252 .priority = NF_IP_PRI_NAT_SRC - 1,
2253 },
2254 /* Before ip_vs_in, change source only for VS/NAT */
2255 {
2256 .hook = ip_vs_local_reply4,
2257 .pf = NFPROTO_IPV4,
2258 .hooknum = NF_INET_LOCAL_OUT,
2259 .priority = NF_IP_PRI_NAT_DST + 1,
2260 },
2261 /* After mangle, schedule and forward local requests */
2262 {
2263 .hook = ip_vs_local_request4,
2264 .pf = NFPROTO_IPV4,
2265 .hooknum = NF_INET_LOCAL_OUT,
2266 .priority = NF_IP_PRI_NAT_DST + 2,
2267 },
2268 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
2269 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
2270 {
2271 .hook = ip_vs_forward_icmp,
2272 .pf = NFPROTO_IPV4,
2273 .hooknum = NF_INET_FORWARD,
2274 .priority = 99,
2275 },
2276 /* After packet filtering, change source only for VS/NAT */
2277 {
2278 .hook = ip_vs_reply4,
2279 .pf = NFPROTO_IPV4,
2280 .hooknum = NF_INET_FORWARD,
2281 .priority = 100,
2282 },
2283 };
2284
2285 #ifdef CONFIG_IP_VS_IPV6
2286 static const struct nf_hook_ops ip_vs_ops6[] = {
2287 /* After packet filtering, change source only for VS/NAT */
2288 {
2289 .hook = ip_vs_reply6,
2290 .pf = NFPROTO_IPV6,
2291 .hooknum = NF_INET_LOCAL_IN,
2292 .priority = NF_IP6_PRI_NAT_SRC - 2,
2293 },
2294 /* After packet filtering, forward packet through VS/DR, VS/TUN,
2295 * or VS/NAT(change destination), so that filtering rules can be
2296 * applied to IPVS. */
2297 {
2298 .hook = ip_vs_remote_request6,
2299 .pf = NFPROTO_IPV6,
2300 .hooknum = NF_INET_LOCAL_IN,
2301 .priority = NF_IP6_PRI_NAT_SRC - 1,
2302 },
2303 /* Before ip_vs_in, change source only for VS/NAT */
2304 {
2305 .hook = ip_vs_local_reply6,
2306 .pf = NFPROTO_IPV6,
2307 .hooknum = NF_INET_LOCAL_OUT,
2308 .priority = NF_IP6_PRI_NAT_DST + 1,
2309 },
2310 /* After mangle, schedule and forward local requests */
2311 {
2312 .hook = ip_vs_local_request6,
2313 .pf = NFPROTO_IPV6,
2314 .hooknum = NF_INET_LOCAL_OUT,
2315 .priority = NF_IP6_PRI_NAT_DST + 2,
2316 },
2317 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
2318 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
2319 {
2320 .hook = ip_vs_forward_icmp_v6,
2321 .pf = NFPROTO_IPV6,
2322 .hooknum = NF_INET_FORWARD,
2323 .priority = 99,
2324 },
2325 /* After packet filtering, change source only for VS/NAT */
2326 {
2327 .hook = ip_vs_reply6,
2328 .pf = NFPROTO_IPV6,
2329 .hooknum = NF_INET_FORWARD,
2330 .priority = 100,
2331 },
2332 };
2333 #endif
2334
2335 int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af)
2336 {
2337 const struct nf_hook_ops *ops;
2338 unsigned int count;
2339 unsigned int afmask;
2340 int ret = 0;
2341
2342 if (af == AF_INET6) {
2343 #ifdef CONFIG_IP_VS_IPV6
2344 ops = ip_vs_ops6;
2345 count = ARRAY_SIZE(ip_vs_ops6);
2346 afmask = 2;
2347 #else
2348 return -EINVAL;
2349 #endif
2350 } else {
2351 ops = ip_vs_ops4;
2352 count = ARRAY_SIZE(ip_vs_ops4);
2353 afmask = 1;
2354 }
2355
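/* Register the hooks only once per address family; hooks_afmask
 * records which families are already registered for this netns.
 */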
2356 if (!(ipvs->hooks_afmask & afmask)) {
2357 ret = nf_register_net_hooks(ipvs->net, ops, count);
2358 if (ret >= 0)
2359 ipvs->hooks_afmask |= afmask;
2360 }
2361 return ret;
2362 }
2363
2364 void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af)
2365 {
2366 const struct nf_hook_ops *ops;
2367 unsigned int count;
2368 unsigned int afmask;
2369
2370 if (af == AF_INET6) {
2371 #ifdef CONFIG_IP_VS_IPV6
2372 ops = ip_vs_ops6;
2373 count = ARRAY_SIZE(ip_vs_ops6);
2374 afmask = 2;
2375 #else
2376 return;
2377 #endif
2378 } else {
2379 ops = ip_vs_ops4;
2380 count = ARRAY_SIZE(ip_vs_ops4);
2381 afmask = 1;
2382 }
2383
2384 if (ipvs->hooks_afmask & afmask) {
2385 nf_unregister_net_hooks(ipvs->net, ops, count);
2386 ipvs->hooks_afmask &= ~afmask;
2387 }
2388 }
2389
2390 /*
2391 * Initialize IP Virtual Server netns mem.
2392 */
2393 static int __net_init __ip_vs_init(struct net *net)
2394 {
2395 struct netns_ipvs *ipvs;
2396
2397 ipvs = net_generic(net, ip_vs_net_id);
2398 if (ipvs == NULL)
2399 return -ENOMEM;
2400
2401 /* Hold the beast until a service is registered */
2402 ipvs->enable = 0;
2403 ipvs->net = net;
2404 /* Counters used for creating unique names */
2405 ipvs->gen = atomic_read(&ipvs_netns_cnt);
2406 atomic_inc(&ipvs_netns_cnt);
2407 net->ipvs = ipvs;
2408
2409 if (ip_vs_estimator_net_init(ipvs) < 0)
2410 goto estimator_fail;
2411
2412 if (ip_vs_control_net_init(ipvs) < 0)
2413 goto control_fail;
2414
2415 if (ip_vs_protocol_net_init(ipvs) < 0)
2416 goto protocol_fail;
2417
2418 if (ip_vs_app_net_init(ipvs) < 0)
2419 goto app_fail;
2420
2421 if (ip_vs_conn_net_init(ipvs) < 0)
2422 goto conn_fail;
2423
2424 if (ip_vs_sync_net_init(ipvs) < 0)
2425 goto sync_fail;
2426
2427 return 0;
2428 /*
2429 * Error handling
2430 */
2431
2432 sync_fail:
2433 ip_vs_conn_net_cleanup(ipvs);
2434 conn_fail:
2435 ip_vs_app_net_cleanup(ipvs);
2436 app_fail:
2437 ip_vs_protocol_net_cleanup(ipvs);
2438 protocol_fail:
2439 ip_vs_control_net_cleanup(ipvs);
2440 control_fail:
2441 ip_vs_estimator_net_cleanup(ipvs);
2442 estimator_fail:
2443 net->ipvs = NULL;
2444 return -ENOMEM;
2445 }
2446
2447 static void __net_exit __ip_vs_cleanup_batch(struct list_head *net_list)
2448 {
2449 struct netns_ipvs *ipvs;
2450 struct net *net;
2451
2452 ip_vs_service_nets_cleanup(net_list); /* ip_vs_flush() with locks */
2453 list_for_each_entry(net, net_list, exit_list) {
2454 ipvs = net_ipvs(net);
2455 ip_vs_conn_net_cleanup(ipvs);
2456 ip_vs_app_net_cleanup(ipvs);
2457 ip_vs_protocol_net_cleanup(ipvs);
2458 ip_vs_control_net_cleanup(ipvs);
2459 ip_vs_estimator_net_cleanup(ipvs);
2460 IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
2461 net->ipvs = NULL;
2462 }
2463 }
2464
2465 static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
2466 {
2467 struct netns_ipvs *ipvs;
2468 struct net *net;
2469
2470 EnterFunction(2);
2471 list_for_each_entry(net, net_list, exit_list) {
2472 ipvs = net_ipvs(net);
2473 ip_vs_unregister_hooks(ipvs, AF_INET);
2474 ip_vs_unregister_hooks(ipvs, AF_INET6);
2475 ipvs->enable = 0; /* Disable packet reception */
2476 smp_wmb();
2477 ip_vs_sync_net_cleanup(ipvs);
2478 }
2479 LeaveFunction(2);
2480 }
2481
2482 static struct pernet_operations ipvs_core_ops = {
2483 .init = __ip_vs_init,
2484 .exit_batch = __ip_vs_cleanup_batch,
2485 .id = &ip_vs_net_id,
2486 .size = sizeof(struct netns_ipvs),
2487 };
2488
2489 static struct pernet_operations ipvs_core_dev_ops = {
2490 .exit_batch = __ip_vs_dev_cleanup_batch,
2491 };
2492
2493 /*
2494 * Initialize IP Virtual Server
2495 */
2496 static int __init ip_vs_init(void)
2497 {
2498 int ret;
2499
2500 ret = ip_vs_control_init();
2501 if (ret < 0) {
2502 pr_err("can't setup control.\n");
2503 goto exit;
2504 }
2505
2506 ip_vs_protocol_init();
2507
2508 ret = ip_vs_conn_init();
2509 if (ret < 0) {
2510 pr_err("can't setup connection table.\n");
2511 goto cleanup_protocol;
2512 }
2513
2514 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2515 if (ret < 0)
2516 goto cleanup_conn;
2517
2518 ret = register_pernet_device(&ipvs_core_dev_ops);
2519 if (ret < 0)
2520 goto cleanup_sub;
2521
2522 ret = ip_vs_register_nl_ioctl();
2523 if (ret < 0) {
2524 pr_err("can't register netlink/ioctl.\n");
2525 goto cleanup_dev;
2526 }
2527
2528 pr_info("ipvs loaded.\n");
2529
2530 return ret;
2531
2532 cleanup_dev:
2533 unregister_pernet_device(&ipvs_core_dev_ops);
2534 cleanup_sub:
2535 unregister_pernet_subsys(&ipvs_core_ops);
2536 cleanup_conn:
2537 ip_vs_conn_cleanup();
2538 cleanup_protocol:
2539 ip_vs_protocol_cleanup();
2540 ip_vs_control_cleanup();
2541 exit:
2542 return ret;
2543 }
2544
2545 static void __exit ip_vs_cleanup(void)
2546 {
2547 ip_vs_unregister_nl_ioctl();
2548 unregister_pernet_device(&ipvs_core_dev_ops);
2549 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2550 ip_vs_conn_cleanup();
2551 ip_vs_protocol_cleanup();
2552 ip_vs_control_cleanup();
2553 pr_info("ipvs unloaded.\n");
2554 }
2555
2556 module_init(ip_vs_init);
2557 module_exit(ip_vs_cleanup);
2558 MODULE_LICENSE("GPL");
2559