mirror_ubuntu-hirsute-kernel.git: net/netfilter/ipvs/ip_vs_core.c
1 /*
2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
6 * cluster of servers.
7 *
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19 * and others.
20 *
21 * Changes:
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
24 *
25 */
26
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
45
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54
55 #include <net/ip_vs.h>
56
57
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71
72 static int ip_vs_net_id __read_mostly;
73 /* netns cnt used for uniqueness */
74 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
75
76 /* ID used in ICMP lookups */
77 #define icmp_id(icmph) (((icmph)->un).echo.id)
78 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
79
80 const char *ip_vs_proto_name(unsigned int proto)
81 {
82 static char buf[20];
83
84 switch (proto) {
85 case IPPROTO_IP:
86 return "IP";
87 case IPPROTO_UDP:
88 return "UDP";
89 case IPPROTO_TCP:
90 return "TCP";
91 case IPPROTO_SCTP:
92 return "SCTP";
93 case IPPROTO_ICMP:
94 return "ICMP";
95 #ifdef CONFIG_IP_VS_IPV6
96 case IPPROTO_ICMPV6:
97 return "ICMPv6";
98 #endif
99 default:
100 sprintf(buf, "IP_%u", proto);
101 return buf;
102 }
103 }
104
105 void ip_vs_init_hash_table(struct list_head *table, int rows)
106 {
107 while (--rows >= 0)
108 INIT_LIST_HEAD(&table[rows]);
109 }
110
111 static inline void
112 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
113 {
114 struct ip_vs_dest *dest = cp->dest;
115 struct netns_ipvs *ipvs = cp->ipvs;
116
117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 struct ip_vs_cpu_stats *s;
119 struct ip_vs_service *svc;
120
121 s = this_cpu_ptr(dest->stats.cpustats);
122 u64_stats_update_begin(&s->syncp);
123 s->cnt.inpkts++;
124 s->cnt.inbytes += skb->len;
125 u64_stats_update_end(&s->syncp);
126
127 rcu_read_lock();
128 svc = rcu_dereference(dest->svc);
129 s = this_cpu_ptr(svc->stats.cpustats);
130 u64_stats_update_begin(&s->syncp);
131 s->cnt.inpkts++;
132 s->cnt.inbytes += skb->len;
133 u64_stats_update_end(&s->syncp);
134 rcu_read_unlock();
135
136 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
137 u64_stats_update_begin(&s->syncp);
138 s->cnt.inpkts++;
139 s->cnt.inbytes += skb->len;
140 u64_stats_update_end(&s->syncp);
141 }
142 }
143
144
145 static inline void
146 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
147 {
148 struct ip_vs_dest *dest = cp->dest;
149 struct netns_ipvs *ipvs = cp->ipvs;
150
151 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
152 struct ip_vs_cpu_stats *s;
153 struct ip_vs_service *svc;
154
155 s = this_cpu_ptr(dest->stats.cpustats);
156 u64_stats_update_begin(&s->syncp);
157 s->cnt.outpkts++;
158 s->cnt.outbytes += skb->len;
159 u64_stats_update_end(&s->syncp);
160
161 rcu_read_lock();
162 svc = rcu_dereference(dest->svc);
163 s = this_cpu_ptr(svc->stats.cpustats);
164 u64_stats_update_begin(&s->syncp);
165 s->cnt.outpkts++;
166 s->cnt.outbytes += skb->len;
167 u64_stats_update_end(&s->syncp);
168 rcu_read_unlock();
169
170 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
171 u64_stats_update_begin(&s->syncp);
172 s->cnt.outpkts++;
173 s->cnt.outbytes += skb->len;
174 u64_stats_update_end(&s->syncp);
175 }
176 }
177
178
179 static inline void
180 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
181 {
182 struct netns_ipvs *ipvs = svc->ipvs;
183 struct ip_vs_cpu_stats *s;
184
185 s = this_cpu_ptr(cp->dest->stats.cpustats);
186 u64_stats_update_begin(&s->syncp);
187 s->cnt.conns++;
188 u64_stats_update_end(&s->syncp);
189
190 s = this_cpu_ptr(svc->stats.cpustats);
191 u64_stats_update_begin(&s->syncp);
192 s->cnt.conns++;
193 u64_stats_update_end(&s->syncp);
194
195 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
196 u64_stats_update_begin(&s->syncp);
197 s->cnt.conns++;
198 u64_stats_update_end(&s->syncp);
199 }
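/*
 * The three helpers above only touch the per-CPU writer side of the
 * counters.  Below is a minimal sketch of how such u64_stats/per-CPU
 * counters are typically summed by a reader (illustrative only; the real
 * readers live in the IPVS estimator and procfs/netlink code, and the
 * function name here is made up):
 */
#if 0
static u64 example_sum_inbytes(struct ip_vs_stats *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct ip_vs_cpu_stats *s = per_cpu_ptr(stats->cpustats, cpu);
		unsigned int start;
		u64 inbytes;

		/* Retry if a writer updated the counters meanwhile */
		do {
			start = u64_stats_fetch_begin(&s->syncp);
			inbytes = s->cnt.inbytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
		total += inbytes;
	}
	return total;
}
#endif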
200
201
202 static inline void
203 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
204 const struct sk_buff *skb,
205 struct ip_vs_proto_data *pd)
206 {
207 if (likely(pd->pp->state_transition))
208 pd->pp->state_transition(cp, direction, skb, pd);
209 }
210
211 static inline int
212 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
213 struct sk_buff *skb, int protocol,
214 const union nf_inet_addr *caddr, __be16 cport,
215 const union nf_inet_addr *vaddr, __be16 vport,
216 struct ip_vs_conn_param *p)
217 {
218 ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
219 vport, p);
220 p->pe = rcu_dereference(svc->pe);
221 if (p->pe && p->pe->fill_param)
222 return p->pe->fill_param(p, skb);
223
224 return 0;
225 }
226
227 /*
228 * IPVS persistent scheduling function
229  *  It creates a connection entry according to its template, if one exists,
230 * or selects a server and creates a connection entry plus a template.
231 * Locking: we are svc user (svc->refcnt), so we hold all dests too
232 * Protocols supported: TCP, UDP
233 */
234 static struct ip_vs_conn *
235 ip_vs_sched_persist(struct ip_vs_service *svc,
236 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
237 int *ignored, struct ip_vs_iphdr *iph)
238 {
239 struct ip_vs_conn *cp = NULL;
240 struct ip_vs_dest *dest;
241 struct ip_vs_conn *ct;
242 __be16 dport = 0; /* destination port to forward */
243 unsigned int flags;
244 struct ip_vs_conn_param param;
245 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
246 union nf_inet_addr snet; /* source network of the client,
247 after masking */
248 const union nf_inet_addr *src_addr, *dst_addr;
249
250 if (likely(!ip_vs_iph_inverse(iph))) {
251 src_addr = &iph->saddr;
252 dst_addr = &iph->daddr;
253 } else {
254 src_addr = &iph->daddr;
255 dst_addr = &iph->saddr;
256 }
257
258
259 /* Mask saddr with the netmask to adjust template granularity */
260 #ifdef CONFIG_IP_VS_IPV6
261 if (svc->af == AF_INET6)
262 ipv6_addr_prefix(&snet.in6, &src_addr->in6,
263 (__force __u32) svc->netmask);
264 else
265 #endif
266 snet.ip = src_addr->ip & svc->netmask;
267
268 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
269 "mnet %s\n",
270 IP_VS_DBG_ADDR(svc->af, src_addr), ntohs(src_port),
271 IP_VS_DBG_ADDR(svc->af, dst_addr), ntohs(dst_port),
272 IP_VS_DBG_ADDR(svc->af, &snet));
273
274 /*
275 	 *    FTP is a complicated protocol: it uses a control connection plus
276 	 *    separate data connections. For active FTP, the server initiates the
277 	 *    data connection to the client, usually from source port 20. For
278 	 *    passive FTP, the server tells the client which port it passively
279 	 *    listens on, and the client opens the data connection. In tunneling
280 	 *    or direct routing mode the load balancer only sees the
281 	 *    client-to-server half of the connection, so that port number is
282 	 *    unknown to it. Therefore a conn template like
283 	 *    <caddr, 0, vaddr, 0, daddr, 0> is created for a persistent FTP
284 	 *    service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
285 	 *    is created for other persistent services.
286 */
287 {
288 int protocol = iph->protocol;
289 const union nf_inet_addr *vaddr = dst_addr;
290 __be16 vport = 0;
291
292 if (dst_port == svc->port) {
293 /* non-FTP template:
294 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
295 * FTP template:
296 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
297 */
298 if (svc->port != FTPPORT)
299 vport = dst_port;
300 } else {
301 /* Note: persistent fwmark-based services and
302 * persistent port zero service are handled here.
303 * fwmark template:
304 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
305 * port zero template:
306 * <protocol,caddr,0,vaddr,0,daddr,0>
307 */
308 if (svc->fwmark) {
309 protocol = IPPROTO_IP;
310 vaddr = &fwmark;
311 }
312 }
313 /* return *ignored = -1 so NF_DROP can be used */
314 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
315 vaddr, vport, &param) < 0) {
316 *ignored = -1;
317 return NULL;
318 }
319 }
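	/*
	 * Example of the param filled in by the block above for a persistent
	 * virtual service 10.0.0.1:80 and a client in 192.168.1.0/24 (snet is
	 * already masked with svc->netmask) -- addresses and ports here are
	 * purely illustrative:
	 *
	 *   plain persistent service: <TCP, 192.168.1.0, 0, 10.0.0.1, 80>
	 *   persistent FTP service:   <TCP, 192.168.1.0, 0, 10.0.0.1, 0>
	 *   fwmark-based service:     <IPPROTO_IP, 192.168.1.0, 0, fwmark, 0>
	 *   port-zero service:        <TCP, 192.168.1.0, 0, 10.0.0.1, 0>
	 *
	 * The destination part (daddr, dport) is filled in further below,
	 * once a real server has been selected for the template.
	 */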
320
321 /* Check if a template already exists */
322 ct = ip_vs_ct_in_get(&param);
323 if (!ct || !ip_vs_check_template(ct)) {
324 struct ip_vs_scheduler *sched;
325
326 /*
327 * No template found or the dest of the connection
328 * template is not available.
329 * return *ignored=0 i.e. ICMP and NF_DROP
330 */
331 sched = rcu_dereference(svc->scheduler);
332 if (sched) {
333 /* read svc->sched_data after svc->scheduler */
334 smp_rmb();
335 dest = sched->schedule(svc, skb, iph);
336 } else {
337 dest = NULL;
338 }
339 if (!dest) {
340 IP_VS_DBG(1, "p-schedule: no dest found.\n");
341 kfree(param.pe_data);
342 *ignored = 0;
343 return NULL;
344 }
345
346 if (dst_port == svc->port && svc->port != FTPPORT)
347 dport = dest->port;
348
349 /* Create a template
350 * This adds param.pe_data to the template,
351 * and thus param.pe_data will be destroyed
352 * when the template expires */
353 ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
354 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
355 if (ct == NULL) {
356 kfree(param.pe_data);
357 *ignored = -1;
358 return NULL;
359 }
360
361 ct->timeout = svc->timeout;
362 } else {
363 /* set destination with the found template */
364 dest = ct->dest;
365 kfree(param.pe_data);
366 }
367
368 dport = dst_port;
369 if (dport == svc->port && dest->port)
370 dport = dest->port;
371
372 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
373 && iph->protocol == IPPROTO_UDP) ?
374 IP_VS_CONN_F_ONE_PACKET : 0;
375
376 /*
377 * Create a new connection according to the template
378 */
379 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, src_addr,
380 src_port, dst_addr, dst_port, &param);
381
382 cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
383 skb->mark);
384 if (cp == NULL) {
385 ip_vs_conn_put(ct);
386 *ignored = -1;
387 return NULL;
388 }
389
390 /*
391 * Add its control
392 */
393 ip_vs_control_add(cp, ct);
394 ip_vs_conn_put(ct);
395
396 ip_vs_conn_stats(cp, svc);
397 return cp;
398 }
399
400
401 /*
402 * IPVS main scheduling function
403 * It selects a server according to the virtual service, and
404 * creates a connection entry.
405 * Protocols supported: TCP, UDP
406 *
407 * Usage of *ignored
408 *
409 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
410 * svc/scheduler decides that this packet should be accepted with
411 * NF_ACCEPT because it must not be scheduled.
412 *
413  *    0 :   scheduler cannot find a destination, so try bypass or
414 * return ICMP and then NF_DROP (ip_vs_leave).
415 *
416 * -1 : scheduler tried to schedule but fatal error occurred, eg.
417 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
418 * failure such as missing Call-ID, ENOMEM on skb_linearize
419 * or pe_data. In this case we should return NF_DROP without
420 * any attempts to send ICMP with ip_vs_leave.
421 */
422 struct ip_vs_conn *
423 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
424 struct ip_vs_proto_data *pd, int *ignored,
425 struct ip_vs_iphdr *iph)
426 {
427 struct ip_vs_protocol *pp = pd->pp;
428 struct ip_vs_conn *cp = NULL;
429 struct ip_vs_scheduler *sched;
430 struct ip_vs_dest *dest;
431 __be16 _ports[2], *pptr, cport, vport;
432 const void *caddr, *vaddr;
433 unsigned int flags;
434
435 *ignored = 1;
436 /*
437 * IPv6 frags, only the first hit here.
438 */
439 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
440 if (pptr == NULL)
441 return NULL;
442
443 if (likely(!ip_vs_iph_inverse(iph))) {
444 cport = pptr[0];
445 caddr = &iph->saddr;
446 vport = pptr[1];
447 vaddr = &iph->daddr;
448 } else {
449 cport = pptr[1];
450 caddr = &iph->daddr;
451 vport = pptr[0];
452 vaddr = &iph->saddr;
453 }
454
455 /*
456 * FTPDATA needs this check when using local real server.
457 * Never schedule Active FTPDATA connections from real server.
458 	 * For LVS-NAT they must already be created. For other methods
459 * with persistence the connection is created on SYN+ACK.
460 */
461 if (cport == FTPDATA) {
462 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
463 "Not scheduling FTPDATA");
464 return NULL;
465 }
466
467 /*
468 * Do not schedule replies from local real server.
469 */
470 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
471 iph->hdr_flags ^= IP_VS_HDR_INVERSE;
472 cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph);
473 iph->hdr_flags ^= IP_VS_HDR_INVERSE;
474
475 if (cp) {
476 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
477 "Not scheduling reply for existing"
478 " connection");
479 __ip_vs_conn_put(cp);
480 return NULL;
481 }
482 }
483
484 /*
485 * Persistent service
486 */
487 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
488 return ip_vs_sched_persist(svc, skb, cport, vport, ignored,
489 iph);
490
491 *ignored = 0;
492
493 /*
494 * Non-persistent service
495 */
496 if (!svc->fwmark && vport != svc->port) {
497 if (!svc->port)
498 pr_err("Schedule: port zero only supported "
499 "in persistent services, "
500 "check your ipvs configuration\n");
501 return NULL;
502 }
503
504 sched = rcu_dereference(svc->scheduler);
505 if (sched) {
506 /* read svc->sched_data after svc->scheduler */
507 smp_rmb();
508 dest = sched->schedule(svc, skb, iph);
509 } else {
510 dest = NULL;
511 }
512 if (dest == NULL) {
513 IP_VS_DBG(1, "Schedule: no dest found.\n");
514 return NULL;
515 }
516
517 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
518 && iph->protocol == IPPROTO_UDP) ?
519 IP_VS_CONN_F_ONE_PACKET : 0;
520
521 /*
522 * Create a connection entry.
523 */
524 {
525 struct ip_vs_conn_param p;
526
527 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
528 caddr, cport, vaddr, vport, &p);
529 cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
530 dest->port ? dest->port : vport,
531 flags, dest, skb->mark);
532 if (!cp) {
533 *ignored = -1;
534 return NULL;
535 }
536 }
537
538 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
539 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
540 ip_vs_fwd_tag(cp),
541 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
542 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
543 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
544 cp->flags, atomic_read(&cp->refcnt));
545
546 ip_vs_conn_stats(cp, svc);
547 return cp;
548 }
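/*
 * A minimal sketch of how a per-protocol conn_schedule handler is expected
 * to map the return value of ip_vs_schedule() and *ignored (see the comment
 * above ip_vs_schedule) to a netfilter verdict.  This only mirrors the
 * documented contract; the function name is made up and this is not the
 * actual handler code:
 */
#if 0
static int example_conn_schedule(struct ip_vs_service *svc,
				 struct sk_buff *skb,
				 struct ip_vs_proto_data *pd,
				 int *verdict, struct ip_vs_conn **cpp,
				 struct ip_vs_iphdr *iph)
{
	int ignored;

	*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
	if (!*cpp && ignored <= 0) {
		if (!ignored)
			/* 0: no dest found, bypass or send ICMP via ip_vs_leave */
			*verdict = ip_vs_leave(svc, skb, pd, iph);
		else
			/* -1: fatal error, drop without sending ICMP */
			*verdict = NF_DROP;
		return 0;
	}
	/* connection created, or *ignored == 1: continue (NF_ACCEPT) */
	return 1;
}
#endif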
549
550 static inline int ip_vs_addr_is_unicast(struct net *net, int af,
551 union nf_inet_addr *addr)
552 {
553 #ifdef CONFIG_IP_VS_IPV6
554 if (af == AF_INET6)
555 return ipv6_addr_type(&addr->in6) & IPV6_ADDR_UNICAST;
556 #endif
557 return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
558 }
559
560 /*
561 * Pass or drop the packet.
562 * Called by ip_vs_in, when the virtual service is available but
563 * no destination is available for a new connection.
564 */
565 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
566 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
567 {
568 __be16 _ports[2], *pptr, dport;
569 struct netns_ipvs *ipvs = svc->ipvs;
570 struct net *net = ipvs->net;
571
572 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
573 if (!pptr)
574 return NF_DROP;
575 dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
576
577 	/* If it is a fwmark-based service, the cache_bypass sysctl is enabled
578 	   and the destination is a non-local unicast address, then create
579 	   a cache_bypass connection entry */
580 if (sysctl_cache_bypass(ipvs) && svc->fwmark &&
581 !(iph->hdr_flags & (IP_VS_HDR_INVERSE | IP_VS_HDR_ICMP)) &&
582 ip_vs_addr_is_unicast(net, svc->af, &iph->daddr)) {
583 int ret;
584 struct ip_vs_conn *cp;
585 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
586 iph->protocol == IPPROTO_UDP) ?
587 IP_VS_CONN_F_ONE_PACKET : 0;
588 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
589
590 /* create a new connection entry */
591 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
592 {
593 struct ip_vs_conn_param p;
594 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
595 &iph->saddr, pptr[0],
596 &iph->daddr, pptr[1], &p);
597 cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
598 IP_VS_CONN_F_BYPASS | flags,
599 NULL, skb->mark);
600 if (!cp)
601 return NF_DROP;
602 }
603
604 /* statistics */
605 ip_vs_in_stats(cp, skb);
606
607 /* set state */
608 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
609
610 /* transmit the first SYN packet */
611 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
612 /* do not touch skb anymore */
613
614 atomic_inc(&cp->in_pkts);
615 ip_vs_conn_put(cp);
616 return ret;
617 }
618
619 /*
620 	 * When a virtual FTP service is configured, packets destined
621 	 * for other services on the VIP (except services listed in the
622 	 * ipvs table) may get here; pass them along, because it is not
623 	 * IPVS's job to decide to drop them.
624 */
625 if (svc->port == FTPPORT && dport != FTPPORT)
626 return NF_ACCEPT;
627
628 if (unlikely(ip_vs_iph_icmp(iph)))
629 return NF_DROP;
630
631 /*
632 * Notify the client that the destination is unreachable, and
633 * release the socket buffer.
634 	 * Since we are at the IP layer, no TCP socket actually exists and
635 	 * a TCP RST packet cannot be sent; instead, ICMP_PORT_UNREACH is
636 	 * sent here regardless of whether the packet is TCP or UDP. --WZ
637 */
638 #ifdef CONFIG_IP_VS_IPV6
639 if (svc->af == AF_INET6) {
640 if (!skb->dev)
641 skb->dev = net->loopback_dev;
642 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
643 } else
644 #endif
645 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
646
647 return NF_DROP;
648 }
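/*
 * Example for the cache_bypass branch above: it is only taken for fwmark
 * services and only when the sysctl is enabled, e.g. (illustrative shell
 * usage):
 *
 *     sysctl -w net.ipv4.vs.cache_bypass=1
 *
 * With no real server available, the packet is then forwarded directly to
 * its original non-local unicast destination through an IP_VS_CONN_F_BYPASS
 * connection instead of being answered with ICMP port unreachable.
 */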
649
650 #ifdef CONFIG_SYSCTL
651
652 static int sysctl_snat_reroute(struct netns_ipvs *ipvs)
653 {
654 return ipvs->sysctl_snat_reroute;
655 }
656
657 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs)
658 {
659 return ipvs->sysctl_nat_icmp_send;
660 }
661
662 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
663 {
664 return ipvs->sysctl_expire_nodest_conn;
665 }
666
667 #else
668
669 static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; }
670 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; }
671 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
672
673 #endif
674
675 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
676 {
677 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
678 }
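/*
 * csum_fold() above reduces the 32-bit one's-complement sum returned by
 * skb_checksum() to the final 16-bit Internet checksum.  A minimal sketch
 * of that folding step (illustrative only; the kernel's csum_fold() is
 * arch-optimized and the function name here is made up):
 */
#if 0
static u16 fold_csum_example(u32 sum)
{
	/* Fold carries from the high half into the low half twice,
	 * then take the one's complement (RFC 1071). */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;
}
/* e.g. fold_csum_example(0x00013ffe) == 0xc000 */
#endif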
679
680 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
681 {
682 if (NF_INET_LOCAL_IN == hooknum)
683 return IP_DEFRAG_VS_IN;
684 if (NF_INET_FORWARD == hooknum)
685 return IP_DEFRAG_VS_FWD;
686 return IP_DEFRAG_VS_OUT;
687 }
688
689 static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs,
690 struct sk_buff *skb, u_int32_t user)
691 {
692 int err;
693
694 local_bh_disable();
695 err = ip_defrag(ipvs->net, skb, user);
696 local_bh_enable();
697 if (!err)
698 ip_send_check(ip_hdr(skb));
699
700 return err;
701 }
702
703 static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
704 struct sk_buff *skb, unsigned int hooknum)
705 {
706 if (!sysctl_snat_reroute(ipvs))
707 return 0;
708 /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
709 if (NF_INET_LOCAL_IN == hooknum)
710 return 0;
711 #ifdef CONFIG_IP_VS_IPV6
712 if (af == AF_INET6) {
713 struct dst_entry *dst = skb_dst(skb);
714
715 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
716 ip6_route_me_harder(ipvs->net, skb) != 0)
717 return 1;
718 } else
719 #endif
720 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
721 ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
722 return 1;
723
724 return 0;
725 }
726
727 /*
728 * Packet has been made sufficiently writable in caller
729 * - inout: 1=in->out, 0=out->in
730 */
731 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
732 struct ip_vs_conn *cp, int inout)
733 {
734 struct iphdr *iph = ip_hdr(skb);
735 unsigned int icmp_offset = iph->ihl*4;
736 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
737 icmp_offset);
738 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
739
740 if (inout) {
741 iph->saddr = cp->vaddr.ip;
742 ip_send_check(iph);
743 ciph->daddr = cp->vaddr.ip;
744 ip_send_check(ciph);
745 } else {
746 iph->daddr = cp->daddr.ip;
747 ip_send_check(iph);
748 ciph->saddr = cp->daddr.ip;
749 ip_send_check(ciph);
750 }
751
752 /* the TCP/UDP/SCTP port */
753 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
754 IPPROTO_SCTP == ciph->protocol) {
755 __be16 *ports = (void *)ciph + ciph->ihl*4;
756
757 if (inout)
758 ports[1] = cp->vport;
759 else
760 ports[0] = cp->dport;
761 }
762
763 /* And finally the ICMP checksum */
764 icmph->checksum = 0;
765 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
766 skb->ip_summed = CHECKSUM_UNNECESSARY;
767
768 if (inout)
769 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
770 "Forwarding altered outgoing ICMP");
771 else
772 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
773 "Forwarding altered incoming ICMP");
774 }
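/*
 * Example of the rewrite above for an outgoing (inout=1) ICMP error in NAT
 * mode, with VIP 10.0.0.1:80 and real server 192.168.10.2:8080 (addresses
 * and ports are illustrative):
 *
 *   outer IP saddr:       192.168.10.2 -> 10.0.0.1  (cp->vaddr)
 *   embedded IP daddr:    192.168.10.2 -> 10.0.0.1  (cp->vaddr)
 *   embedded dest port:   8080         -> 80        (cp->vport)
 *
 * In the incoming (inout=0) direction the outer daddr, the embedded saddr
 * and the embedded source port are rewritten to cp->daddr/cp->dport instead.
 */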
775
776 #ifdef CONFIG_IP_VS_IPV6
777 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
778 struct ip_vs_conn *cp, int inout)
779 {
780 struct ipv6hdr *iph = ipv6_hdr(skb);
781 unsigned int icmp_offset = 0;
782 unsigned int offs = 0; /* header offset*/
783 int protocol;
784 struct icmp6hdr *icmph;
785 struct ipv6hdr *ciph;
786 unsigned short fragoffs;
787
788 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
789 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
790 offs = icmp_offset + sizeof(struct icmp6hdr);
791 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
792
793 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
794
795 if (inout) {
796 iph->saddr = cp->vaddr.in6;
797 ciph->daddr = cp->vaddr.in6;
798 } else {
799 iph->daddr = cp->daddr.in6;
800 ciph->saddr = cp->daddr.in6;
801 }
802
803 /* the TCP/UDP/SCTP port */
804 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
805 IPPROTO_SCTP == protocol)) {
806 __be16 *ports = (void *)(skb_network_header(skb) + offs);
807
808 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
809 ntohs(inout ? ports[1] : ports[0]),
810 ntohs(inout ? cp->vport : cp->dport));
811 if (inout)
812 ports[1] = cp->vport;
813 else
814 ports[0] = cp->dport;
815 }
816
817 /* And finally the ICMP checksum */
818 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
819 skb->len - icmp_offset,
820 IPPROTO_ICMPV6, 0);
821 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
822 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
823 skb->ip_summed = CHECKSUM_PARTIAL;
824
825 if (inout)
826 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
827 (void *)ciph - (void *)iph,
828 "Forwarding altered outgoing ICMPv6");
829 else
830 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
831 (void *)ciph - (void *)iph,
832 "Forwarding altered incoming ICMPv6");
833 }
834 #endif
835
836 /* Handle relevant response ICMP messages - forward to the right
837 * destination host.
838 */
839 static int handle_response_icmp(int af, struct sk_buff *skb,
840 union nf_inet_addr *snet,
841 __u8 protocol, struct ip_vs_conn *cp,
842 struct ip_vs_protocol *pp,
843 unsigned int offset, unsigned int ihl,
844 unsigned int hooknum)
845 {
846 unsigned int verdict = NF_DROP;
847
848 if (IP_VS_FWD_METHOD(cp) != 0) {
849 pr_err("shouldn't reach here, because the box is on the "
850 "half connection in the tun/dr module.\n");
851 }
852
853 /* Ensure the checksum is correct */
854 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
855 /* Failed checksum! */
856 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
857 IP_VS_DBG_ADDR(af, snet));
858 goto out;
859 }
860
861 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
862 IPPROTO_SCTP == protocol)
863 offset += 2 * sizeof(__u16);
864 if (!skb_make_writable(skb, offset))
865 goto out;
866
867 #ifdef CONFIG_IP_VS_IPV6
868 if (af == AF_INET6)
869 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
870 else
871 #endif
872 ip_vs_nat_icmp(skb, pp, cp, 1);
873
874 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
875 goto out;
876
877 /* do the statistics and put it back */
878 ip_vs_out_stats(cp, skb);
879
880 skb->ipvs_property = 1;
881 if (!(cp->flags & IP_VS_CONN_F_NFCT))
882 ip_vs_notrack(skb);
883 else
884 ip_vs_update_conntrack(skb, cp, 0);
885 verdict = NF_ACCEPT;
886
887 out:
888 __ip_vs_conn_put(cp);
889
890 return verdict;
891 }
892
893 /*
894 * Handle ICMP messages in the inside-to-outside direction (outgoing).
895 * Find any that might be relevant, check against existing connections.
896 * Currently handles error types - unreachable, quench, ttl exceeded.
897 */
898 static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
899 int *related, unsigned int hooknum)
900 {
901 struct iphdr *iph;
902 struct icmphdr _icmph, *ic;
903 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
904 struct ip_vs_iphdr ciph;
905 struct ip_vs_conn *cp;
906 struct ip_vs_protocol *pp;
907 unsigned int offset, ihl;
908 union nf_inet_addr snet;
909
910 *related = 1;
911
912 /* reassemble IP fragments */
913 if (ip_is_fragment(ip_hdr(skb))) {
914 if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
915 return NF_STOLEN;
916 }
917
918 iph = ip_hdr(skb);
919 offset = ihl = iph->ihl * 4;
920 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
921 if (ic == NULL)
922 return NF_DROP;
923
924 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
925 ic->type, ntohs(icmp_id(ic)),
926 &iph->saddr, &iph->daddr);
927
928 /*
929 * Work through seeing if this is for us.
930 * These checks are supposed to be in an order that means easy
931 * things are checked first to speed up processing.... however
932 * this means that some packets will manage to get a long way
933 * down this stack and then be rejected, but that's life.
934 */
935 if ((ic->type != ICMP_DEST_UNREACH) &&
936 (ic->type != ICMP_SOURCE_QUENCH) &&
937 (ic->type != ICMP_TIME_EXCEEDED)) {
938 *related = 0;
939 return NF_ACCEPT;
940 }
941
942 /* Now find the contained IP header */
943 offset += sizeof(_icmph);
944 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
945 if (cih == NULL)
946 return NF_ACCEPT; /* The packet looks wrong, ignore */
947
948 pp = ip_vs_proto_get(cih->protocol);
949 if (!pp)
950 return NF_ACCEPT;
951
952 /* Is the embedded protocol header present? */
953 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
954 pp->dont_defrag))
955 return NF_ACCEPT;
956
957 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
958 "Checking outgoing ICMP for");
959
960 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
961
962 /* The embedded headers contain source and dest in reverse order */
963 cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph);
964 if (!cp)
965 return NF_ACCEPT;
966
967 snet.ip = iph->saddr;
968 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
969 pp, ciph.len, ihl, hooknum);
970 }
971
972 #ifdef CONFIG_IP_VS_IPV6
973 static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
974 int *related, unsigned int hooknum,
975 struct ip_vs_iphdr *ipvsh)
976 {
977 struct icmp6hdr _icmph, *ic;
978 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
979 struct ip_vs_conn *cp;
980 struct ip_vs_protocol *pp;
981 union nf_inet_addr snet;
982 unsigned int offset;
983
984 *related = 1;
985 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
986 if (ic == NULL)
987 return NF_DROP;
988
989 /*
990 * Work through seeing if this is for us.
991 * These checks are supposed to be in an order that means easy
992 * things are checked first to speed up processing.... however
993 * this means that some packets will manage to get a long way
994 * down this stack and then be rejected, but that's life.
995 */
996 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
997 *related = 0;
998 return NF_ACCEPT;
999 }
1000 	/* A fragment header before the ICMP header tells us that this is
1001 	 * not an error message, since error messages cannot be fragmented.
1002 */
1003 if (ipvsh->flags & IP6_FH_F_FRAG)
1004 return NF_DROP;
1005
1006 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1007 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1008 &ipvsh->saddr, &ipvsh->daddr);
1009
1010 if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, ipvsh->len + sizeof(_icmph),
1011 true, &ciph))
1012 return NF_ACCEPT; /* The packet looks wrong, ignore */
1013
1014 pp = ip_vs_proto_get(ciph.protocol);
1015 if (!pp)
1016 return NF_ACCEPT;
1017
1018 /* The embedded headers contain source and dest in reverse order */
1019 cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph);
1020 if (!cp)
1021 return NF_ACCEPT;
1022
1023 snet.in6 = ciph.saddr.in6;
1024 offset = ciph.len;
1025 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
1026 pp, offset, sizeof(struct ipv6hdr),
1027 hooknum);
1028 }
1029 #endif
1030
1031 /*
1032  *	Check if the SCTP chunk is an ABORT chunk
1033 */
1034 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
1035 {
1036 sctp_chunkhdr_t *sch, schunk;
1037 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
1038 sizeof(schunk), &schunk);
1039 if (sch == NULL)
1040 return 0;
1041 if (sch->type == SCTP_CID_ABORT)
1042 return 1;
1043 return 0;
1044 }
1045
1046 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1047 {
1048 struct tcphdr _tcph, *th;
1049
1050 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1051 if (th == NULL)
1052 return 0;
1053 return th->rst;
1054 }
1055
1056 static inline bool is_new_conn(const struct sk_buff *skb,
1057 struct ip_vs_iphdr *iph)
1058 {
1059 switch (iph->protocol) {
1060 case IPPROTO_TCP: {
1061 struct tcphdr _tcph, *th;
1062
1063 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1064 if (th == NULL)
1065 return false;
1066 return th->syn;
1067 }
1068 case IPPROTO_SCTP: {
1069 sctp_chunkhdr_t *sch, schunk;
1070
1071 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
1072 sizeof(schunk), &schunk);
1073 if (sch == NULL)
1074 return false;
1075 return sch->type == SCTP_CID_INIT;
1076 }
1077 default:
1078 return false;
1079 }
1080 }
1081
1082 static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
1083 int conn_reuse_mode)
1084 {
1085 /* Controlled (FTP DATA or persistence)? */
1086 if (cp->control)
1087 return false;
1088
1089 switch (cp->protocol) {
1090 case IPPROTO_TCP:
1091 return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
1092 (cp->state == IP_VS_TCP_S_CLOSE) ||
1093 ((conn_reuse_mode & 2) &&
1094 (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
1095 (cp->flags & IP_VS_CONN_F_NOOUTPUT));
1096 case IPPROTO_SCTP:
1097 return cp->state == IP_VS_SCTP_S_CLOSED;
1098 default:
1099 return false;
1100 }
1101 }
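/*
 * conn_reuse_mode above comes from the net.ipv4.vs.conn_reuse_mode sysctl.
 * Any non-zero value allows rescheduling when the old entry is in
 * TIME_WAIT/CLOSE (or SCTP CLOSED); the "2" bit additionally covers TCP
 * FIN_WAIT entries flagged IP_VS_CONN_F_NOOUTPUT, as tested above.
 */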
1102
1103 /* Handle response packets: rewrite addresses and send away...
1104 */
1105 static unsigned int
1106 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1107 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1108 unsigned int hooknum)
1109 {
1110 struct ip_vs_protocol *pp = pd->pp;
1111
1112 IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
1113
1114 if (!skb_make_writable(skb, iph->len))
1115 goto drop;
1116
1117 /* mangle the packet */
1118 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1119 goto drop;
1120
1121 #ifdef CONFIG_IP_VS_IPV6
1122 if (af == AF_INET6)
1123 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1124 else
1125 #endif
1126 {
1127 ip_hdr(skb)->saddr = cp->vaddr.ip;
1128 ip_send_check(ip_hdr(skb));
1129 }
1130
1131 /*
1132 * nf_iterate does not expect change in the skb->dst->dev.
1133 * It looks like it is not fatal to enable this code for hooks
1134 * where our handlers are at the end of the chain list and
1135 * when all next handlers use skb->dst->dev and not outdev.
1136 	 * It will definitely route the in->out NAT traffic properly
1137 	 * when multiple paths are used.
1138 */
1139
1140 /* For policy routing, packets originating from this
1141 * machine itself may be routed differently to packets
1142 * passing through. We want this packet to be routed as
1143 * if it came from this machine itself. So re-compute
1144 * the routing information.
1145 */
1146 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
1147 goto drop;
1148
1149 IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
1150
1151 ip_vs_out_stats(cp, skb);
1152 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1153 skb->ipvs_property = 1;
1154 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1155 ip_vs_notrack(skb);
1156 else
1157 ip_vs_update_conntrack(skb, cp, 0);
1158 ip_vs_conn_put(cp);
1159
1160 LeaveFunction(11);
1161 return NF_ACCEPT;
1162
1163 drop:
1164 ip_vs_conn_put(cp);
1165 kfree_skb(skb);
1166 LeaveFunction(11);
1167 return NF_STOLEN;
1168 }
1169
1170 /*
1171 * Check if outgoing packet belongs to the established ip_vs_conn.
1172 */
1173 static unsigned int
1174 ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1175 {
1176 struct ip_vs_iphdr iph;
1177 struct ip_vs_protocol *pp;
1178 struct ip_vs_proto_data *pd;
1179 struct ip_vs_conn *cp;
1180 struct sock *sk;
1181
1182 EnterFunction(11);
1183
1184 /* Already marked as IPVS request or reply? */
1185 if (skb->ipvs_property)
1186 return NF_ACCEPT;
1187
1188 sk = skb_to_full_sk(skb);
1189 /* Bad... Do not break raw sockets */
1190 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1191 af == AF_INET)) {
1192
1193 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1194 return NF_ACCEPT;
1195 }
1196
1197 if (unlikely(!skb_dst(skb)))
1198 return NF_ACCEPT;
1199
1200 if (!ipvs->enable)
1201 return NF_ACCEPT;
1202
1203 ip_vs_fill_iph_skb(af, skb, false, &iph);
1204 #ifdef CONFIG_IP_VS_IPV6
1205 if (af == AF_INET6) {
1206 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1207 int related;
1208 int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
1209 hooknum, &iph);
1210
1211 if (related)
1212 return verdict;
1213 }
1214 } else
1215 #endif
1216 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1217 int related;
1218 int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
1219
1220 if (related)
1221 return verdict;
1222 }
1223
1224 pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1225 if (unlikely(!pd))
1226 return NF_ACCEPT;
1227 pp = pd->pp;
1228
1229 /* reassemble IP fragments */
1230 #ifdef CONFIG_IP_VS_IPV6
1231 if (af == AF_INET)
1232 #endif
1233 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1234 if (ip_vs_gather_frags(ipvs, skb,
1235 ip_vs_defrag_user(hooknum)))
1236 return NF_STOLEN;
1237
1238 ip_vs_fill_iph_skb(AF_INET, skb, false, &iph);
1239 }
1240
1241 /*
1242 * Check if the packet belongs to an existing entry
1243 */
1244 cp = pp->conn_out_get(ipvs, af, skb, &iph);
1245
1246 if (likely(cp))
1247 return handle_response(af, skb, pd, cp, &iph, hooknum);
1248 if (sysctl_nat_icmp_send(ipvs) &&
1249 (pp->protocol == IPPROTO_TCP ||
1250 pp->protocol == IPPROTO_UDP ||
1251 pp->protocol == IPPROTO_SCTP)) {
1252 __be16 _ports[2], *pptr;
1253
1254 pptr = frag_safe_skb_hp(skb, iph.len,
1255 sizeof(_ports), _ports, &iph);
1256 if (pptr == NULL)
1257 return NF_ACCEPT; /* Not for me */
1258 if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
1259 pptr[0])) {
1260 /*
1261 			 * Notify the real server that there is no
1262 			 * existing entry, unless the packet is a
1263 			 * TCP RST or an SCTP ABORT.
1264 */
1265 if ((iph.protocol != IPPROTO_TCP &&
1266 iph.protocol != IPPROTO_SCTP)
1267 || ((iph.protocol == IPPROTO_TCP
1268 && !is_tcp_reset(skb, iph.len))
1269 || (iph.protocol == IPPROTO_SCTP
1270 && !is_sctp_abort(skb,
1271 iph.len)))) {
1272 #ifdef CONFIG_IP_VS_IPV6
1273 if (af == AF_INET6) {
1274 if (!skb->dev)
1275 skb->dev = ipvs->net->loopback_dev;
1276 icmpv6_send(skb,
1277 ICMPV6_DEST_UNREACH,
1278 ICMPV6_PORT_UNREACH,
1279 0);
1280 } else
1281 #endif
1282 icmp_send(skb,
1283 ICMP_DEST_UNREACH,
1284 ICMP_PORT_UNREACH, 0);
1285 return NF_DROP;
1286 }
1287 }
1288 }
1289 IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
1290 "ip_vs_out: packet continues traversal as normal");
1291 return NF_ACCEPT;
1292 }
1293
1294 /*
1295 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1296 * used only for VS/NAT.
1297 * Check if packet is reply for established ip_vs_conn.
1298 */
1299 static unsigned int
1300 ip_vs_reply4(void *priv, struct sk_buff *skb,
1301 const struct nf_hook_state *state)
1302 {
1303 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1304 }
1305
1306 /*
1307 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1308 * Check if packet is reply for established ip_vs_conn.
1309 */
1310 static unsigned int
1311 ip_vs_local_reply4(void *priv, struct sk_buff *skb,
1312 const struct nf_hook_state *state)
1313 {
1314 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1315 }
1316
1317 #ifdef CONFIG_IP_VS_IPV6
1318
1319 /*
1320 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1321 * used only for VS/NAT.
1322 * Check if packet is reply for established ip_vs_conn.
1323 */
1324 static unsigned int
1325 ip_vs_reply6(void *priv, struct sk_buff *skb,
1326 const struct nf_hook_state *state)
1327 {
1328 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1329 }
1330
1331 /*
1332 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1333 * Check if packet is reply for established ip_vs_conn.
1334 */
1335 static unsigned int
1336 ip_vs_local_reply6(void *priv, struct sk_buff *skb,
1337 const struct nf_hook_state *state)
1338 {
1339 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1340 }
1341
1342 #endif
1343
1344 static unsigned int
1345 ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
1346 struct ip_vs_proto_data *pd,
1347 int *verdict, struct ip_vs_conn **cpp,
1348 struct ip_vs_iphdr *iph)
1349 {
1350 struct ip_vs_protocol *pp = pd->pp;
1351
1352 if (!iph->fragoffs) {
1353 		/* No (second) fragments need to enter here: the fragment-zero
1354 		 * packet replayed by nf_defrag_ipv6 will already have created the cp
1355 */
1356
1357 /* Schedule and create new connection entry into cpp */
1358 if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
1359 return 0;
1360 }
1361
1362 if (unlikely(!*cpp)) {
1363 /* sorry, all this trouble for a no-hit :) */
1364 IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
1365 "ip_vs_in: packet continues traversal as normal");
1366 if (iph->fragoffs) {
1367 			/* A fragment that could not be mapped to a conn entry means
1368 			 * the nf_defrag_ipv6 module is missing
1369 */
1370 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1371 IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
1372 "unhandled fragment");
1373 }
1374 *verdict = NF_ACCEPT;
1375 return 0;
1376 }
1377
1378 return 1;
1379 }
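/*
 * As the debug message above suggests, scheduling IPv6 fragments relies on
 * the defragmentation helper being present, e.g. (illustrative shell usage):
 *
 *     modprobe nf_defrag_ipv6
 *
 * Without it, non-first fragments cannot be matched to a connection entry
 * and simply continue traversal.
 */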
1380
1381 /*
1382 * Handle ICMP messages in the outside-to-inside direction (incoming).
1383 * Find any that might be relevant, check against existing connections,
1384 * forward to the right destination host if relevant.
1385 * Currently handles error types - unreachable, quench, ttl exceeded.
1386 */
1387 static int
1388 ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
1389 unsigned int hooknum)
1390 {
1391 struct iphdr *iph;
1392 struct icmphdr _icmph, *ic;
1393 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1394 struct ip_vs_iphdr ciph;
1395 struct ip_vs_conn *cp;
1396 struct ip_vs_protocol *pp;
1397 struct ip_vs_proto_data *pd;
1398 unsigned int offset, offset2, ihl, verdict;
1399 bool ipip, new_cp = false;
1400
1401 *related = 1;
1402
1403 /* reassemble IP fragments */
1404 if (ip_is_fragment(ip_hdr(skb))) {
1405 if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
1406 return NF_STOLEN;
1407 }
1408
1409 iph = ip_hdr(skb);
1410 offset = ihl = iph->ihl * 4;
1411 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1412 if (ic == NULL)
1413 return NF_DROP;
1414
1415 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1416 ic->type, ntohs(icmp_id(ic)),
1417 &iph->saddr, &iph->daddr);
1418
1419 /*
1420 * Work through seeing if this is for us.
1421 * These checks are supposed to be in an order that means easy
1422 * things are checked first to speed up processing.... however
1423 * this means that some packets will manage to get a long way
1424 * down this stack and then be rejected, but that's life.
1425 */
1426 if ((ic->type != ICMP_DEST_UNREACH) &&
1427 (ic->type != ICMP_SOURCE_QUENCH) &&
1428 (ic->type != ICMP_TIME_EXCEEDED)) {
1429 *related = 0;
1430 return NF_ACCEPT;
1431 }
1432
1433 /* Now find the contained IP header */
1434 offset += sizeof(_icmph);
1435 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1436 if (cih == NULL)
1437 return NF_ACCEPT; /* The packet looks wrong, ignore */
1438
1439 /* Special case for errors for IPIP packets */
1440 ipip = false;
1441 if (cih->protocol == IPPROTO_IPIP) {
1442 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1443 return NF_ACCEPT;
1444 /* Error for our IPIP must arrive at LOCAL_IN */
1445 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1446 return NF_ACCEPT;
1447 offset += cih->ihl * 4;
1448 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1449 if (cih == NULL)
1450 return NF_ACCEPT; /* The packet looks wrong, ignore */
1451 ipip = true;
1452 }
1453
1454 pd = ip_vs_proto_data_get(ipvs, cih->protocol);
1455 if (!pd)
1456 return NF_ACCEPT;
1457 pp = pd->pp;
1458
1459 /* Is the embedded protocol header present? */
1460 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1461 pp->dont_defrag))
1462 return NF_ACCEPT;
1463
1464 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1465 "Checking incoming ICMP for");
1466
1467 offset2 = offset;
1468 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !ipip, &ciph);
1469 offset = ciph.len;
1470
1471 /* The embedded headers contain source and dest in reverse order.
1472 * For IPIP this is error for request, not for reply.
1473 */
1474 cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph);
1475
1476 if (!cp) {
1477 int v;
1478
1479 if (!sysctl_schedule_icmp(ipvs))
1480 return NF_ACCEPT;
1481
1482 if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
1483 return v;
1484 new_cp = true;
1485 }
1486
1487 verdict = NF_DROP;
1488
1489 /* Ensure the checksum is correct */
1490 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1491 /* Failed checksum! */
1492 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1493 &iph->saddr);
1494 goto out;
1495 }
1496
1497 if (ipip) {
1498 __be32 info = ic->un.gateway;
1499 __u8 type = ic->type;
1500 __u8 code = ic->code;
1501
1502 /* Update the MTU */
1503 if (ic->type == ICMP_DEST_UNREACH &&
1504 ic->code == ICMP_FRAG_NEEDED) {
1505 struct ip_vs_dest *dest = cp->dest;
1506 u32 mtu = ntohs(ic->un.frag.mtu);
1507 __be16 frag_off = cih->frag_off;
1508
1509 /* Strip outer IP and ICMP, go to IPIP header */
1510 if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
1511 goto ignore_ipip;
1512 offset2 -= ihl + sizeof(_icmph);
1513 skb_reset_network_header(skb);
1514 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1515 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1516 ipv4_update_pmtu(skb, ipvs->net,
1517 mtu, 0, 0, 0, 0);
1518 /* Client uses PMTUD? */
1519 if (!(frag_off & htons(IP_DF)))
1520 goto ignore_ipip;
1521 /* Prefer the resulting PMTU */
1522 if (dest) {
1523 struct ip_vs_dest_dst *dest_dst;
1524
1525 rcu_read_lock();
1526 dest_dst = rcu_dereference(dest->dest_dst);
1527 if (dest_dst)
1528 mtu = dst_mtu(dest_dst->dst_cache);
1529 rcu_read_unlock();
1530 }
1531 if (mtu > 68 + sizeof(struct iphdr))
1532 mtu -= sizeof(struct iphdr);
1533 info = htonl(mtu);
1534 }
1535 /* Strip outer IP, ICMP and IPIP, go to IP header of
1536 * original request.
1537 */
1538 if (pskb_pull(skb, offset2) == NULL)
1539 goto ignore_ipip;
1540 skb_reset_network_header(skb);
1541 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1542 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1543 type, code, ntohl(info));
1544 icmp_send(skb, type, code, info);
1545 		/* ICMP can be shorter, but account it anyway */
1546 ip_vs_out_stats(cp, skb);
1547
1548 ignore_ipip:
1549 consume_skb(skb);
1550 verdict = NF_STOLEN;
1551 goto out;
1552 }
1553
1554 /* do the statistics and put it back */
1555 ip_vs_in_stats(cp, skb);
1556 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1557 IPPROTO_SCTP == cih->protocol)
1558 offset += 2 * sizeof(__u16);
1559 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1560
1561 out:
1562 if (likely(!new_cp))
1563 __ip_vs_conn_put(cp);
1564 else
1565 ip_vs_conn_put(cp);
1566
1567 return verdict;
1568 }
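/*
 * Worked example for the IPIP PMTU handling above: if a router reports
 * ICMP_FRAG_NEEDED with mtu=1500 for the encapsulated packet, the relayed
 * error tells the client mtu - sizeof(struct iphdr) = 1500 - 20 = 1480,
 * because the inner packet no longer carries the outer tunnel header.
 * When a PMTU is already cached on the route to the real server, that
 * value is used as the starting point instead of the reported one.
 */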
1569
1570 #ifdef CONFIG_IP_VS_IPV6
1571 static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
1572 int *related, unsigned int hooknum,
1573 struct ip_vs_iphdr *iph)
1574 {
1575 struct icmp6hdr _icmph, *ic;
1576 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1577 struct ip_vs_conn *cp;
1578 struct ip_vs_protocol *pp;
1579 struct ip_vs_proto_data *pd;
1580 unsigned int offset, verdict;
1581 bool new_cp = false;
1582
1583 *related = 1;
1584
1585 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
1586 if (ic == NULL)
1587 return NF_DROP;
1588
1589 /*
1590 * Work through seeing if this is for us.
1591 * These checks are supposed to be in an order that means easy
1592 * things are checked first to speed up processing.... however
1593 * this means that some packets will manage to get a long way
1594 * down this stack and then be rejected, but that's life.
1595 */
1596 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1597 *related = 0;
1598 return NF_ACCEPT;
1599 }
1600 	/* A fragment header before the ICMP header tells us that this is
1601 	 * not an error message, since error messages cannot be fragmented.
1602 */
1603 if (iph->flags & IP6_FH_F_FRAG)
1604 return NF_DROP;
1605
1606 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1607 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1608 &iph->saddr, &iph->daddr);
1609
1610 offset = iph->len + sizeof(_icmph);
1611 if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
1612 return NF_ACCEPT;
1613
1614 pd = ip_vs_proto_data_get(ipvs, ciph.protocol);
1615 if (!pd)
1616 return NF_ACCEPT;
1617 pp = pd->pp;
1618
1619 /* Cannot handle fragmented embedded protocol */
1620 if (ciph.fragoffs)
1621 return NF_ACCEPT;
1622
1623 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
1624 "Checking incoming ICMPv6 for");
1625
1626 /* The embedded headers contain source and dest in reverse order
1627 * if not from localhost
1628 */
1629 cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph);
1630
1631 if (!cp) {
1632 int v;
1633
1634 if (!sysctl_schedule_icmp(ipvs))
1635 return NF_ACCEPT;
1636
1637 if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
1638 return v;
1639
1640 new_cp = true;
1641 }
1642
1643 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1644 if ((hooknum == NF_INET_LOCAL_OUT) &&
1645 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1646 verdict = NF_ACCEPT;
1647 goto out;
1648 }
1649
1650 /* do the statistics and put it back */
1651 ip_vs_in_stats(cp, skb);
1652
1653 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1654 offset = ciph.len;
1655 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1656 IPPROTO_SCTP == ciph.protocol)
1657 offset += 2 * sizeof(__u16); /* Also mangle ports */
1658
1659 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
1660
1661 out:
1662 if (likely(!new_cp))
1663 __ip_vs_conn_put(cp);
1664 else
1665 ip_vs_conn_put(cp);
1666
1667 return verdict;
1668 }
1669 #endif
1670
1671
1672 /*
1673 * Check if it's for virtual services, look it up,
1674 * and send it on its way...
1675 */
1676 static unsigned int
1677 ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1678 {
1679 struct ip_vs_iphdr iph;
1680 struct ip_vs_protocol *pp;
1681 struct ip_vs_proto_data *pd;
1682 struct ip_vs_conn *cp;
1683 int ret, pkts;
1684 int conn_reuse_mode;
1685 struct sock *sk;
1686
1687 /* Already marked as IPVS request or reply? */
1688 if (skb->ipvs_property)
1689 return NF_ACCEPT;
1690
1691 /*
1692 * Big tappo:
1693 * - remote client: only PACKET_HOST
1694 * - route: used for struct net when skb->dev is unset
1695 */
1696 if (unlikely((skb->pkt_type != PACKET_HOST &&
1697 hooknum != NF_INET_LOCAL_OUT) ||
1698 !skb_dst(skb))) {
1699 ip_vs_fill_iph_skb(af, skb, false, &iph);
1700 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1701 " ignored in hook %u\n",
1702 skb->pkt_type, iph.protocol,
1703 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1704 return NF_ACCEPT;
1705 }
1706 /* ipvs enabled in this netns ? */
1707 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1708 return NF_ACCEPT;
1709
1710 ip_vs_fill_iph_skb(af, skb, false, &iph);
1711
1712 /* Bad... Do not break raw sockets */
1713 sk = skb_to_full_sk(skb);
1714 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1715 af == AF_INET)) {
1716
1717 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1718 return NF_ACCEPT;
1719 }
1720
1721 #ifdef CONFIG_IP_VS_IPV6
1722 if (af == AF_INET6) {
1723 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1724 int related;
1725 int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
1726 hooknum, &iph);
1727
1728 if (related)
1729 return verdict;
1730 }
1731 } else
1732 #endif
1733 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1734 int related;
1735 int verdict = ip_vs_in_icmp(ipvs, skb, &related,
1736 hooknum);
1737
1738 if (related)
1739 return verdict;
1740 }
1741
1742 /* Protocol supported? */
1743 pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1744 if (unlikely(!pd)) {
1745 /* The only way we'll see this packet again is if it's
1746 * encapsulated, so mark it with ipvs_property=1 so we
1747 * skip it if we're ignoring tunneled packets
1748 */
1749 if (sysctl_ignore_tunneled(ipvs))
1750 skb->ipvs_property = 1;
1751
1752 return NF_ACCEPT;
1753 }
1754 pp = pd->pp;
1755 /*
1756 * Check if the packet belongs to an existing connection entry
1757 */
1758 cp = pp->conn_in_get(ipvs, af, skb, &iph);
1759
1760 conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
1761 if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
1762 bool uses_ct = false, resched = false;
1763
1764 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
1765 unlikely(!atomic_read(&cp->dest->weight))) {
1766 resched = true;
1767 uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
1768 } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
1769 uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
1770 if (!atomic_read(&cp->n_control)) {
1771 resched = true;
1772 } else {
1773 /* Do not reschedule controlling connection
1774 * that uses conntrack while it is still
1775 * referenced by controlled connection(s).
1776 */
1777 resched = !uses_ct;
1778 }
1779 }
1780
1781 if (resched) {
1782 if (!atomic_read(&cp->n_control))
1783 ip_vs_conn_expire_now(cp);
1784 __ip_vs_conn_put(cp);
1785 if (uses_ct)
1786 return NF_DROP;
1787 cp = NULL;
1788 }
1789 }
1790
1791 if (unlikely(!cp)) {
1792 int v;
1793
1794 if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
1795 return v;
1796 }
1797
1798 IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
1799
1800 /* Check the server status */
1801 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1802 /* the destination server is not available */
1803
1804 if (sysctl_expire_nodest_conn(ipvs)) {
1805 /* try to expire the connection immediately */
1806 ip_vs_conn_expire_now(cp);
1807 }
1808 /* don't restart its timer, and silently
1809 drop the packet. */
1810 __ip_vs_conn_put(cp);
1811 return NF_DROP;
1812 }
1813
1814 ip_vs_in_stats(cp, skb);
1815 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1816 if (cp->packet_xmit)
1817 ret = cp->packet_xmit(skb, cp, pp, &iph);
1818 /* do not touch skb anymore */
1819 else {
1820 IP_VS_DBG_RL("warning: packet_xmit is null");
1821 ret = NF_ACCEPT;
1822 }
1823
1824 	/* Increase its packet counter and check whether it needs
1825 	 * to be synchronized
1826 	 *
1827 	 * Sync the connection if it is about to close, to
1828 	 * encourage the standby servers to update the connection's timeout
1829 *
1830 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
1831 */
1832
1833 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1834 pkts = sysctl_sync_threshold(ipvs);
1835 else
1836 pkts = atomic_add_return(1, &cp->in_pkts);
1837
1838 if (ipvs->sync_state & IP_VS_STATE_MASTER)
1839 ip_vs_sync_conn(ipvs, cp, pkts);
1840
1841 ip_vs_conn_put(cp);
1842 return ret;
1843 }
1844
1845 /*
1846 * AF_INET handler in NF_INET_LOCAL_IN chain
1847 * Schedule and forward packets from remote clients
1848 */
1849 static unsigned int
1850 ip_vs_remote_request4(void *priv, struct sk_buff *skb,
1851 const struct nf_hook_state *state)
1852 {
1853 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
1854 }
1855
1856 /*
1857 * AF_INET handler in NF_INET_LOCAL_OUT chain
1858 * Schedule and forward packets from local clients
1859 */
1860 static unsigned int
1861 ip_vs_local_request4(void *priv, struct sk_buff *skb,
1862 const struct nf_hook_state *state)
1863 {
1864 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
1865 }
1866
1867 #ifdef CONFIG_IP_VS_IPV6
1868
1869 /*
1870 * AF_INET6 handler in NF_INET_LOCAL_IN chain
1871 * Schedule and forward packets from remote clients
1872 */
1873 static unsigned int
1874 ip_vs_remote_request6(void *priv, struct sk_buff *skb,
1875 const struct nf_hook_state *state)
1876 {
1877 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
1878 }
1879
1880 /*
1881 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
1882 * Schedule and forward packets from local clients
1883 */
1884 static unsigned int
1885 ip_vs_local_request6(void *priv, struct sk_buff *skb,
1886 const struct nf_hook_state *state)
1887 {
1888 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
1889 }
1890
1891 #endif
1892
1893
1894 /*
1895 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1896 * related packets destined for 0.0.0.0/0.
1897  *      When a fwmark-based virtual service is used, such as a transparent
1898  *      cache cluster, TCP packets can be marked and routed to ip_vs_in,
1899  *      but ICMP destined for 0.0.0.0/0 cannot be easily marked and
1900  *      sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1901 * and send them to ip_vs_in_icmp.
1902 */
1903 static unsigned int
1904 ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
1905 const struct nf_hook_state *state)
1906 {
1907 int r;
1908 struct netns_ipvs *ipvs = net_ipvs(state->net);
1909
1910 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1911 return NF_ACCEPT;
1912
1913 /* ipvs enabled in this netns ? */
1914 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1915 return NF_ACCEPT;
1916
1917 return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
1918 }
1919
1920 #ifdef CONFIG_IP_VS_IPV6
1921 static unsigned int
1922 ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
1923 const struct nf_hook_state *state)
1924 {
1925 int r;
1926 struct netns_ipvs *ipvs = net_ipvs(state->net);
1927 struct ip_vs_iphdr iphdr;
1928
1929 ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
1930 if (iphdr.protocol != IPPROTO_ICMPV6)
1931 return NF_ACCEPT;
1932
1933 /* Is IPVS enabled in this netns? */
1934 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1935 return NF_ACCEPT;
1936
1937 return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
1938 }
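/* Unlike the IPv4 case, the next-header field cannot be read at a fixed
 * offset for IPv6, so ip_vs_fill_iph_skb() walks the extension-header
 * chain first and the ICMPv6 check is done on the parsed ip_vs_iphdr.
 */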
1939 #endif
1940
1941
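/* Hook ordering: at LOCAL_IN and LOCAL_OUT the reply handlers are
 * registered with a lower priority value than the request handlers
 * (NAT_SRC - 2 vs NAT_SRC - 1, NAT_DST + 1 vs NAT_DST + 2), and a lower
 * value runs earlier, so packets of existing NATed connections are
 * translated before new connections are scheduled.  At FORWARD the ICMP
 * catcher (priority 99) runs just before the reply handler (100).
 */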
1942 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1943 /* After packet filtering, change source only for VS/NAT */
1944 {
1945 .hook = ip_vs_reply4,
1946 .pf = NFPROTO_IPV4,
1947 .hooknum = NF_INET_LOCAL_IN,
1948 .priority = NF_IP_PRI_NAT_SRC - 2,
1949 },
1950 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1951 * or VS/NAT (change destination), so that filtering rules can be
1952 * applied to IPVS. */
1953 {
1954 .hook = ip_vs_remote_request4,
1955 .pf = NFPROTO_IPV4,
1956 .hooknum = NF_INET_LOCAL_IN,
1957 .priority = NF_IP_PRI_NAT_SRC - 1,
1958 },
1959 /* Before ip_vs_in, change source only for VS/NAT */
1960 {
1961 .hook = ip_vs_local_reply4,
1962 .pf = NFPROTO_IPV4,
1963 .hooknum = NF_INET_LOCAL_OUT,
1964 .priority = NF_IP_PRI_NAT_DST + 1,
1965 },
1966 /* After mangle, schedule and forward local requests */
1967 {
1968 .hook = ip_vs_local_request4,
1969 .pf = NFPROTO_IPV4,
1970 .hooknum = NF_INET_LOCAL_OUT,
1971 .priority = NF_IP_PRI_NAT_DST + 2,
1972 },
1973 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1974 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1975 {
1976 .hook = ip_vs_forward_icmp,
1977 .pf = NFPROTO_IPV4,
1978 .hooknum = NF_INET_FORWARD,
1979 .priority = 99,
1980 },
1981 /* After packet filtering, change source only for VS/NAT */
1982 {
1983 .hook = ip_vs_reply4,
1984 .pf = NFPROTO_IPV4,
1985 .hooknum = NF_INET_FORWARD,
1986 .priority = 100,
1987 },
1988 #ifdef CONFIG_IP_VS_IPV6
1989 /* After packet filtering, change source only for VS/NAT */
1990 {
1991 .hook = ip_vs_reply6,
1992 .pf = NFPROTO_IPV6,
1993 .hooknum = NF_INET_LOCAL_IN,
1994 .priority = NF_IP6_PRI_NAT_SRC - 2,
1995 },
1996 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1997 * or VS/NAT (change destination), so that filtering rules can be
1998 * applied to IPVS. */
1999 {
2000 .hook = ip_vs_remote_request6,
2001 .pf = NFPROTO_IPV6,
2002 .hooknum = NF_INET_LOCAL_IN,
2003 .priority = NF_IP6_PRI_NAT_SRC - 1,
2004 },
2005 /* Before ip_vs_in, change source only for VS/NAT */
2006 {
2007 .hook = ip_vs_local_reply6,
2008 .pf = NFPROTO_IPV6,
2009 .hooknum = NF_INET_LOCAL_OUT,
2010 .priority = NF_IP6_PRI_NAT_DST + 1,
2011 },
2012 /* After mangle, schedule and forward local requests */
2013 {
2014 .hook = ip_vs_local_request6,
2015 .pf = NFPROTO_IPV6,
2016 .hooknum = NF_INET_LOCAL_OUT,
2017 .priority = NF_IP6_PRI_NAT_DST + 2,
2018 },
2019 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
2020 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
2021 {
2022 .hook = ip_vs_forward_icmp_v6,
2023 .pf = NFPROTO_IPV6,
2024 .hooknum = NF_INET_FORWARD,
2025 .priority = 99,
2026 },
2027 /* After packet filtering, change source only for VS/NAT */
2028 {
2029 .hook = ip_vs_reply6,
2030 .pf = NFPROTO_IPV6,
2031 .hooknum = NF_INET_FORWARD,
2032 .priority = 100,
2033 },
2034 #endif
2035 };
2036 /*
2037 * Initialize IP Virtual Server per-netns memory.
2038 */
2039 static int __net_init __ip_vs_init(struct net *net)
2040 {
2041 struct netns_ipvs *ipvs;
2042
2043 ipvs = net_generic(net, ip_vs_net_id);
2044 if (ipvs == NULL)
2045 return -ENOMEM;
2046
2047 /* Hold the beast until a service is registered */
2048 ipvs->enable = 0;
2049 ipvs->net = net;
2050 /* Counters used for creating unique names */
2051 ipvs->gen = atomic_read(&ipvs_netns_cnt);
2052 atomic_inc(&ipvs_netns_cnt);
2053 net->ipvs = ipvs;
2054
2055 if (ip_vs_estimator_net_init(ipvs) < 0)
2056 goto estimator_fail;
2057
2058 if (ip_vs_control_net_init(ipvs) < 0)
2059 goto control_fail;
2060
2061 if (ip_vs_protocol_net_init(ipvs) < 0)
2062 goto protocol_fail;
2063
2064 if (ip_vs_app_net_init(ipvs) < 0)
2065 goto app_fail;
2066
2067 if (ip_vs_conn_net_init(ipvs) < 0)
2068 goto conn_fail;
2069
2070 if (ip_vs_sync_net_init(ipvs) < 0)
2071 goto sync_fail;
2072
2073 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
2074 sizeof(struct netns_ipvs), ipvs->gen);
2075 return 0;
2076 /*
2077 * Error handling
2078 */
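/* The labels below unwind in reverse of the init order above, so only
 * the subsystems that were initialised successfully are cleaned up;
 * note that any failure is reported to the caller as -ENOMEM.
 */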
2079
2080 sync_fail:
2081 ip_vs_conn_net_cleanup(ipvs);
2082 conn_fail:
2083 ip_vs_app_net_cleanup(ipvs);
2084 app_fail:
2085 ip_vs_protocol_net_cleanup(ipvs);
2086 protocol_fail:
2087 ip_vs_control_net_cleanup(ipvs);
2088 control_fail:
2089 ip_vs_estimator_net_cleanup(ipvs);
2090 estimator_fail:
2091 net->ipvs = NULL;
2092 return -ENOMEM;
2093 }
2094
2095 static void __net_exit __ip_vs_cleanup(struct net *net)
2096 {
2097 struct netns_ipvs *ipvs = net_ipvs(net);
2098
2099 ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */
2100 ip_vs_conn_net_cleanup(ipvs);
2101 ip_vs_app_net_cleanup(ipvs);
2102 ip_vs_protocol_net_cleanup(ipvs);
2103 ip_vs_control_net_cleanup(ipvs);
2104 ip_vs_estimator_net_cleanup(ipvs);
2105 IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
2106 net->ipvs = NULL;
2107 }
2108
2109 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
2110 {
2111 struct netns_ipvs *ipvs = net_ipvs(net);
2112 EnterFunction(2);
2113 ipvs->enable = 0; /* Disable packet reception */
2114 smp_wmb();
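/* The write barrier is meant to make the cleared enable flag visible
 * before the sync threads are torn down; the packet hooks check
 * ipvs->enable and bail out early, so no new connections are scheduled
 * while sync state is being freed.
 */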
2115 ip_vs_sync_net_cleanup(ipvs);
2116 LeaveFunction(2);
2117 }
2118
2119 static struct pernet_operations ipvs_core_ops = {
2120 .init = __ip_vs_init,
2121 .exit = __ip_vs_cleanup,
2122 .id = &ip_vs_net_id,
2123 .size = sizeof(struct netns_ipvs),
2124 };
2125
2126 static struct pernet_operations ipvs_core_dev_ops = {
2127 .exit = __ip_vs_dev_cleanup,
2128 };
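/* Two pernet registrations are used on purpose: .id/.size in
 * ipvs_core_ops make the pernet core allocate and zero a struct
 * netns_ipvs for every namespace (fetched via net_generic() in
 * __ip_vs_init()), while ipvs_core_dev_ops only supplies an exit
 * handler.  Pernet device exit handlers run before subsystem exit
 * handlers on namespace teardown, so packet reception and the sync
 * threads are stopped in __ip_vs_dev_cleanup() before __ip_vs_cleanup()
 * flushes services and connections.
 */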
2129
2130 /*
2131 * Initialize IP Virtual Server
2132 */
2133 static int __init ip_vs_init(void)
2134 {
2135 int ret;
2136
2137 ret = ip_vs_control_init();
2138 if (ret < 0) {
2139 pr_err("can't setup control.\n");
2140 goto exit;
2141 }
2142
2143 ip_vs_protocol_init();
2144
2145 ret = ip_vs_conn_init();
2146 if (ret < 0) {
2147 pr_err("can't setup connection table.\n");
2148 goto cleanup_protocol;
2149 }
2150
2151 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2152 if (ret < 0)
2153 goto cleanup_conn;
2154
2155 ret = register_pernet_device(&ipvs_core_dev_ops);
2156 if (ret < 0)
2157 goto cleanup_sub;
2158
2159 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2160 if (ret < 0) {
2161 pr_err("can't register hooks.\n");
2162 goto cleanup_dev;
2163 }
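/* nf_register_hooks() installs the table above globally, for all
 * network namespaces at once.  Later kernels dropped this API in
 * favour of per-namespace registration; there the equivalent call is
 * roughly
 *
 *	ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 *
 * issued from the pernet init path instead of module init.
 */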
2164
2165 ret = ip_vs_register_nl_ioctl();
2166 if (ret < 0) {
2167 pr_err("can't register netlink/ioctl.\n");
2168 goto cleanup_hooks;
2169 }
2170
2171 pr_info("ipvs loaded.\n");
2172
2173 return ret;
2174
2175 cleanup_hooks:
2176 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2177 cleanup_dev:
2178 unregister_pernet_device(&ipvs_core_dev_ops);
2179 cleanup_sub:
2180 unregister_pernet_subsys(&ipvs_core_ops);
2181 cleanup_conn:
2182 ip_vs_conn_cleanup();
2183 cleanup_protocol:
2184 ip_vs_protocol_cleanup();
2185 ip_vs_control_cleanup();
2186 exit:
2187 return ret;
2188 }
2189
2190 static void __exit ip_vs_cleanup(void)
2191 {
2192 ip_vs_unregister_nl_ioctl();
2193 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2194 unregister_pernet_device(&ipvs_core_dev_ops);
2195 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2196 ip_vs_conn_cleanup();
2197 ip_vs_protocol_cleanup();
2198 ip_vs_control_cleanup();
2199 pr_info("ipvs unloaded.\n");
2200 }
2201
2202 module_init(ip_vs_init);
2203 module_exit(ip_vs_cleanup);
2204 MODULE_LICENSE("GPL");