net/netfilter/ipvs/ip_vs_core.c (mirror_ubuntu-zesty-kernel.git; latest commit: "ipvs: fix ipv6 hook registration for local replies")
1 /*
2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
6 * cluster of servers.
7 *
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19 * and others.
20 *
21 * Changes:
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
24 *
25 */
26
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
45
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54
55 #include <net/ip_vs.h>
56
57
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71
72 static int ip_vs_net_id __read_mostly;
73 /* netns cnt used for uniqueness */
74 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
75
76 /* ID used in ICMP lookups */
77 #define icmp_id(icmph) (((icmph)->un).echo.id)
78 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
79
80 const char *ip_vs_proto_name(unsigned int proto)
81 {
82 static char buf[20];
83
84 switch (proto) {
85 case IPPROTO_IP:
86 return "IP";
87 case IPPROTO_UDP:
88 return "UDP";
89 case IPPROTO_TCP:
90 return "TCP";
91 case IPPROTO_SCTP:
92 return "SCTP";
93 case IPPROTO_ICMP:
94 return "ICMP";
95 #ifdef CONFIG_IP_VS_IPV6
96 case IPPROTO_ICMPV6:
97 return "ICMPv6";
98 #endif
99 default:
100 sprintf(buf, "IP_%u", proto);
101 return buf;
102 }
103 }
104
105 void ip_vs_init_hash_table(struct list_head *table, int rows)
106 {
107 while (--rows >= 0)
108 INIT_LIST_HEAD(&table[rows]);
109 }
110
111 static inline void
112 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
113 {
114 struct ip_vs_dest *dest = cp->dest;
115 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
116
117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 struct ip_vs_cpu_stats *s;
119 struct ip_vs_service *svc;
120
121 s = this_cpu_ptr(dest->stats.cpustats);
122 s->ustats.inpkts++;
123 u64_stats_update_begin(&s->syncp);
124 s->ustats.inbytes += skb->len;
125 u64_stats_update_end(&s->syncp);
126
127 rcu_read_lock();
128 svc = rcu_dereference(dest->svc);
129 s = this_cpu_ptr(svc->stats.cpustats);
130 s->ustats.inpkts++;
131 u64_stats_update_begin(&s->syncp);
132 s->ustats.inbytes += skb->len;
133 u64_stats_update_end(&s->syncp);
134 rcu_read_unlock();
135
136 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
137 s->ustats.inpkts++;
138 u64_stats_update_begin(&s->syncp);
139 s->ustats.inbytes += skb->len;
140 u64_stats_update_end(&s->syncp);
141 }
142 }
143
144
145 static inline void
146 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
147 {
148 struct ip_vs_dest *dest = cp->dest;
149 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
150
151 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
152 struct ip_vs_cpu_stats *s;
153 struct ip_vs_service *svc;
154
155 s = this_cpu_ptr(dest->stats.cpustats);
156 s->ustats.outpkts++;
157 u64_stats_update_begin(&s->syncp);
158 s->ustats.outbytes += skb->len;
159 u64_stats_update_end(&s->syncp);
160
161 rcu_read_lock();
162 svc = rcu_dereference(dest->svc);
163 s = this_cpu_ptr(svc->stats.cpustats);
164 s->ustats.outpkts++;
165 u64_stats_update_begin(&s->syncp);
166 s->ustats.outbytes += skb->len;
167 u64_stats_update_end(&s->syncp);
168 rcu_read_unlock();
169
170 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
171 s->ustats.outpkts++;
172 u64_stats_update_begin(&s->syncp);
173 s->ustats.outbytes += skb->len;
174 u64_stats_update_end(&s->syncp);
175 }
176 }
177
178
179 static inline void
180 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
181 {
182 struct netns_ipvs *ipvs = net_ipvs(svc->net);
183 struct ip_vs_cpu_stats *s;
184
185 s = this_cpu_ptr(cp->dest->stats.cpustats);
186 s->ustats.conns++;
187
188 s = this_cpu_ptr(svc->stats.cpustats);
189 s->ustats.conns++;
190
191 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
192 s->ustats.conns++;
193 }
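/*
 * Note on the counters above: every packet and connection is accounted at
 * three levels (real server, virtual service, per-netns totals), each in
 * per-cpu counters guarded by a u64_stats seqcount.  A minimal sketch of
 * the matching reader side (the real readers live in ip_vs_est.c and
 * ip_vs_ctl.c and may differ in detail):
 *
 *	unsigned int start;
 *	u64 inbytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&s->syncp);
 *		inbytes = s->ustats.inbytes;
 *	} while (u64_stats_fetch_retry(&s->syncp, start));
 */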
194
195
196 static inline void
197 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
198 const struct sk_buff *skb,
199 struct ip_vs_proto_data *pd)
200 {
201 if (likely(pd->pp->state_transition))
202 pd->pp->state_transition(cp, direction, skb, pd);
203 }
204
205 static inline int
206 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
207 struct sk_buff *skb, int protocol,
208 const union nf_inet_addr *caddr, __be16 cport,
209 const union nf_inet_addr *vaddr, __be16 vport,
210 struct ip_vs_conn_param *p)
211 {
212 ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
213 vport, p);
214 p->pe = rcu_dereference(svc->pe);
215 if (p->pe && p->pe->fill_param)
216 return p->pe->fill_param(p, skb);
217
218 return 0;
219 }
220
221 /*
222 * IPVS persistent scheduling function
223 * It creates a connection entry according to its template if one exists,
224 * or selects a server and creates a connection entry plus a template.
225 * Locking: we are svc user (svc->refcnt), so we hold all dests too
226 * Protocols supported: TCP, UDP
227 */
228 static struct ip_vs_conn *
229 ip_vs_sched_persist(struct ip_vs_service *svc,
230 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
231 int *ignored, struct ip_vs_iphdr *iph)
232 {
233 struct ip_vs_conn *cp = NULL;
234 struct ip_vs_dest *dest;
235 struct ip_vs_conn *ct;
236 __be16 dport = 0; /* destination port to forward */
237 unsigned int flags;
238 struct ip_vs_conn_param param;
239 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
240 union nf_inet_addr snet; /* source network of the client,
241 after masking */
242
243 /* Mask saddr with the netmask to adjust template granularity */
244 #ifdef CONFIG_IP_VS_IPV6
245 if (svc->af == AF_INET6)
246 ipv6_addr_prefix(&snet.in6, &iph->saddr.in6,
247 (__force __u32) svc->netmask);
248 else
249 #endif
250 snet.ip = iph->saddr.ip & svc->netmask;
251
252 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
253 "mnet %s\n",
254 IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port),
255 IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port),
256 IP_VS_DBG_ADDR(svc->af, &snet));
257
258 /*
259 * As far as we know, FTP is a very complicated network protocol, and
260 * it uses a control connection and separate data connections. For active
261 * FTP, the FTP server initiates the data connection to the client, and its
262 * source port is often 20. For passive FTP, the FTP server tells the client
263 * the port that it passively listens on, and the client issues the data
264 * connection. In the tunneling or direct routing mode, the load
265 * balancer only sees the client-to-server half of the connection, so the port
266 * number is unknown to the load balancer. So, a conn template like
267 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
268 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
269 * is created for other persistent services.
270 */
271 {
272 int protocol = iph->protocol;
273 const union nf_inet_addr *vaddr = &iph->daddr;
274 __be16 vport = 0;
275
276 if (dst_port == svc->port) {
277 /* non-FTP template:
278 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
279 * FTP template:
280 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
281 */
282 if (svc->port != FTPPORT)
283 vport = dst_port;
284 } else {
285 /* Note: persistent fwmark-based services and
286 * persistent port zero service are handled here.
287 * fwmark template:
288 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
289 * port zero template:
290 * <protocol,caddr,0,vaddr,0,daddr,0>
291 */
292 if (svc->fwmark) {
293 protocol = IPPROTO_IP;
294 vaddr = &fwmark;
295 }
296 }
297 /* return *ignored = -1 so NF_DROP can be used */
298 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
299 vaddr, vport, &param) < 0) {
300 *ignored = -1;
301 return NULL;
302 }
303 }
304
305 /* Check if a template already exists */
306 ct = ip_vs_ct_in_get(&param);
307 if (!ct || !ip_vs_check_template(ct)) {
308 struct ip_vs_scheduler *sched;
309
310 /*
311 * No template found or the dest of the connection
312 * template is not available.
313 * return *ignored=0 i.e. ICMP and NF_DROP
314 */
315 sched = rcu_dereference(svc->scheduler);
316 dest = sched->schedule(svc, skb, iph);
317 if (!dest) {
318 IP_VS_DBG(1, "p-schedule: no dest found.\n");
319 kfree(param.pe_data);
320 *ignored = 0;
321 return NULL;
322 }
323
324 if (dst_port == svc->port && svc->port != FTPPORT)
325 dport = dest->port;
326
327 /* Create a template
328 * This adds param.pe_data to the template,
329 * and thus param.pe_data will be destroyed
330 * when the template expires */
331 ct = ip_vs_conn_new(&param, &dest->addr, dport,
332 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
333 if (ct == NULL) {
334 kfree(param.pe_data);
335 *ignored = -1;
336 return NULL;
337 }
338
339 ct->timeout = svc->timeout;
340 } else {
341 /* set destination with the found template */
342 dest = ct->dest;
343 kfree(param.pe_data);
344 }
345
346 dport = dst_port;
347 if (dport == svc->port && dest->port)
348 dport = dest->port;
349
350 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
351 && iph->protocol == IPPROTO_UDP) ?
352 IP_VS_CONN_F_ONE_PACKET : 0;
353
354 /*
355 * Create a new connection according to the template
356 */
357 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
358 src_port, &iph->daddr, dst_port, &param);
359
360 cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
361 if (cp == NULL) {
362 ip_vs_conn_put(ct);
363 *ignored = -1;
364 return NULL;
365 }
366
367 /*
368 * Add its control
369 */
370 ip_vs_control_add(cp, ct);
371 ip_vs_conn_put(ct);
372
373 ip_vs_conn_stats(cp, svc);
374 return cp;
375 }
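/*
 * Worked example for the template forms above (addresses are illustrative
 * only): for a persistent service 10.0.0.1:80/TCP with netmask
 * 255.255.255.0, a client 192.168.5.7:40000 that is sent to real server
 * 10.1.1.2 first gets the controlling template
 *	<TCP, 192.168.5.0, 0, 10.0.0.1, 80, 10.1.1.2, 80>
 * and then the actual connection
 *	<TCP, 192.168.5.7, 40000, 10.0.0.1, 80, 10.1.1.2, 80>
 * linked to it via ip_vs_control_add().  Later connections from
 * 192.168.5.0/24 reuse the template and therefore the same real server.
 */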
376
377
378 /*
379 * IPVS main scheduling function
380 * It selects a server according to the virtual service, and
381 * creates a connection entry.
382 * Protocols supported: TCP, UDP
383 *
384 * Usage of *ignored
385 *
386 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
387 * svc/scheduler decides that this packet should be accepted with
388 * NF_ACCEPT because it must not be scheduled.
389 *
390 * 0 : scheduler can not find destination, so try bypass or
391 * return ICMP and then NF_DROP (ip_vs_leave).
392 *
393 * -1 : scheduler tried to schedule but fatal error occurred, eg.
394 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
395 * failure such as missing Call-ID, ENOMEM on skb_linearize
396 * or pe_data. In this case we should return NF_DROP without
397 * any attempts to send ICMP with ip_vs_leave.
398 */
399 struct ip_vs_conn *
400 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
401 struct ip_vs_proto_data *pd, int *ignored,
402 struct ip_vs_iphdr *iph)
403 {
404 struct ip_vs_protocol *pp = pd->pp;
405 struct ip_vs_conn *cp = NULL;
406 struct ip_vs_scheduler *sched;
407 struct ip_vs_dest *dest;
408 __be16 _ports[2], *pptr;
409 unsigned int flags;
410
411 *ignored = 1;
412 /*
413 * For IPv6 fragments, only the first one hits here.
414 */
415 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
416 if (pptr == NULL)
417 return NULL;
418
419 /*
420 * FTPDATA needs this check when using local real server.
421 * Never schedule Active FTPDATA connections from real server.
422 * For LVS-NAT they must be already created. For other methods
423 * with persistence the connection is created on SYN+ACK.
424 */
425 if (pptr[0] == FTPDATA) {
426 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
427 "Not scheduling FTPDATA");
428 return NULL;
429 }
430
431 /*
432 * Do not schedule replies from local real server.
433 */
434 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
435 (cp = pp->conn_in_get(svc->af, skb, iph, 1))) {
436 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
437 "Not scheduling reply for existing connection");
438 __ip_vs_conn_put(cp);
439 return NULL;
440 }
441
442 /*
443 * Persistent service
444 */
445 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
446 return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored,
447 iph);
448
449 *ignored = 0;
450
451 /*
452 * Non-persistent service
453 */
454 if (!svc->fwmark && pptr[1] != svc->port) {
455 if (!svc->port)
456 pr_err("Schedule: port zero only supported "
457 "in persistent services, "
458 "check your ipvs configuration\n");
459 return NULL;
460 }
461
462 sched = rcu_dereference(svc->scheduler);
463 dest = sched->schedule(svc, skb, iph);
464 if (dest == NULL) {
465 IP_VS_DBG(1, "Schedule: no dest found.\n");
466 return NULL;
467 }
468
469 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
470 && iph->protocol == IPPROTO_UDP) ?
471 IP_VS_CONN_F_ONE_PACKET : 0;
472
473 /*
474 * Create a connection entry.
475 */
476 {
477 struct ip_vs_conn_param p;
478
479 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
480 &iph->saddr, pptr[0], &iph->daddr,
481 pptr[1], &p);
482 cp = ip_vs_conn_new(&p, &dest->addr,
483 dest->port ? dest->port : pptr[1],
484 flags, dest, skb->mark);
485 if (!cp) {
486 *ignored = -1;
487 return NULL;
488 }
489 }
490
491 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
492 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
493 ip_vs_fwd_tag(cp),
494 IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
495 IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
496 IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
497 cp->flags, atomic_read(&cp->refcnt));
498
499 ip_vs_conn_stats(cp, svc);
500 return cp;
501 }
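/*
 * Minimal sketch of how a protocol's conn_schedule handler is expected to
 * consume *ignored, following the semantics documented above (simplified;
 * the real callers live in ip_vs_proto_tcp.c and ip_vs_proto_udp.c and may
 * differ in detail):
 *
 *	int ignored;
 *
 *	cp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
 *	if (!cp && ignored <= 0) {
 *		*verdict = ignored ? NF_DROP
 *				   : ip_vs_leave(svc, skb, pd, iph);
 *		return 0;
 *	}
 *	return 1;
 */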
502
503
504 /*
505 * Pass or drop the packet.
506 * Called by ip_vs_in, when the virtual service is available but
507 * no destination is available for a new connection.
508 */
509 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
510 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
511 {
512 __be16 _ports[2], *pptr;
513 #ifdef CONFIG_SYSCTL
514 struct net *net;
515 struct netns_ipvs *ipvs;
516 int unicast;
517 #endif
518
519 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
520 if (pptr == NULL) {
521 return NF_DROP;
522 }
523
524 #ifdef CONFIG_SYSCTL
525 net = skb_net(skb);
526
527 #ifdef CONFIG_IP_VS_IPV6
528 if (svc->af == AF_INET6)
529 unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST;
530 else
531 #endif
532 unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST);
533
534 /* if it is a fwmark-based service, the cache_bypass sysctl is enabled
535 and the destination is a non-local unicast address, then create
536 a cache_bypass connection entry */
537 ipvs = net_ipvs(net);
538 if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
539 int ret;
540 struct ip_vs_conn *cp;
541 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
542 iph->protocol == IPPROTO_UDP) ?
543 IP_VS_CONN_F_ONE_PACKET : 0;
544 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
545
546 /* create a new connection entry */
547 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
548 {
549 struct ip_vs_conn_param p;
550 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
551 &iph->saddr, pptr[0],
552 &iph->daddr, pptr[1], &p);
553 cp = ip_vs_conn_new(&p, &daddr, 0,
554 IP_VS_CONN_F_BYPASS | flags,
555 NULL, skb->mark);
556 if (!cp)
557 return NF_DROP;
558 }
559
560 /* statistics */
561 ip_vs_in_stats(cp, skb);
562
563 /* set state */
564 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
565
566 /* transmit the first SYN packet */
567 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
568 /* do not touch skb anymore */
569
570 atomic_inc(&cp->in_pkts);
571 ip_vs_conn_put(cp);
572 return ret;
573 }
574 #endif
575
576 /*
577 * When a virtual FTP service is present, packets destined
578 * for other services on the VIP may get here (except services
579 * listed in the ipvs table); pass the packets, because it is
580 * not IPVS's job to decide to drop them.
581 */
582 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT))
583 return NF_ACCEPT;
584
585 /*
586 * Notify the client that the destination is unreachable, and
587 * release the socket buffer.
588 * Since this is at the IP layer, the TCP socket has not actually
589 * been created and a TCP RST packet cannot be sent; instead,
590 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
591 */
592 #ifdef CONFIG_IP_VS_IPV6
593 if (svc->af == AF_INET6) {
594 if (!skb->dev) {
595 struct net *net_ = dev_net(skb_dst(skb)->dev);
596
597 skb->dev = net_->loopback_dev;
598 }
599 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
600 } else
601 #endif
602 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
603
604 return NF_DROP;
605 }
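/*
 * The cache_bypass branch above is controlled per netns; assuming the
 * standard IPVS sysctl layout it is toggled via
 * /proc/sys/net/ipv4/vs/cache_bypass and only takes effect for
 * fwmark-based services whose destination is a non-local unicast address.
 */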
606
607 #ifdef CONFIG_SYSCTL
608
609 static int sysctl_snat_reroute(struct sk_buff *skb)
610 {
611 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
612 return ipvs->sysctl_snat_reroute;
613 }
614
615 static int sysctl_nat_icmp_send(struct net *net)
616 {
617 struct netns_ipvs *ipvs = net_ipvs(net);
618 return ipvs->sysctl_nat_icmp_send;
619 }
620
621 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
622 {
623 return ipvs->sysctl_expire_nodest_conn;
624 }
625
626 #else
627
628 static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
629 static int sysctl_nat_icmp_send(struct net *net) { return 0; }
630 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
631
632 #endif
633
634 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
635 {
636 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
637 }
638
639 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
640 {
641 if (NF_INET_LOCAL_IN == hooknum)
642 return IP_DEFRAG_VS_IN;
643 if (NF_INET_FORWARD == hooknum)
644 return IP_DEFRAG_VS_FWD;
645 return IP_DEFRAG_VS_OUT;
646 }
647
648 static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
649 {
650 int err;
651
652 local_bh_disable();
653 err = ip_defrag(skb, user);
654 local_bh_enable();
655 if (!err)
656 ip_send_check(ip_hdr(skb));
657
658 return err;
659 }
660
661 static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
662 {
663 #ifdef CONFIG_IP_VS_IPV6
664 if (af == AF_INET6) {
665 if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
666 return 1;
667 } else
668 #endif
669 if ((sysctl_snat_reroute(skb) ||
670 skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
671 ip_route_me_harder(skb, RTN_LOCAL) != 0)
672 return 1;
673
674 return 0;
675 }
676
677 /*
678 * Packet has been made sufficiently writable in caller
679 * - inout: 1=in->out, 0=out->in
680 */
681 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
682 struct ip_vs_conn *cp, int inout)
683 {
684 struct iphdr *iph = ip_hdr(skb);
685 unsigned int icmp_offset = iph->ihl*4;
686 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
687 icmp_offset);
688 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
689
690 if (inout) {
691 iph->saddr = cp->vaddr.ip;
692 ip_send_check(iph);
693 ciph->daddr = cp->vaddr.ip;
694 ip_send_check(ciph);
695 } else {
696 iph->daddr = cp->daddr.ip;
697 ip_send_check(iph);
698 ciph->saddr = cp->daddr.ip;
699 ip_send_check(ciph);
700 }
701
702 /* the TCP/UDP/SCTP port */
703 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
704 IPPROTO_SCTP == ciph->protocol) {
705 __be16 *ports = (void *)ciph + ciph->ihl*4;
706
707 if (inout)
708 ports[1] = cp->vport;
709 else
710 ports[0] = cp->dport;
711 }
712
713 /* And finally the ICMP checksum */
714 icmph->checksum = 0;
715 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
716 skb->ip_summed = CHECKSUM_UNNECESSARY;
717
718 if (inout)
719 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
720 "Forwarding altered outgoing ICMP");
721 else
722 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
723 "Forwarding altered incoming ICMP");
724 }
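/*
 * Illustration of the rewrite above for inout == 1 (an ICMP error
 * travelling from real server RIP towards client CIP, for a service
 * VIP:vport load-balanced to RIP:dport):
 *
 *	outer IP     RIP -> CIP            becomes   VIP -> CIP
 *	inner IP     CIP -> RIP            becomes   CIP -> VIP
 *	inner ports  cport -> dport        becomes   cport -> vport
 *
 * i.e. the embedded packet is restored to what the client originally sent,
 * and the ICMP checksum is recomputed over the result.  The inout == 0
 * case is the mirror image.
 */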
725
726 #ifdef CONFIG_IP_VS_IPV6
727 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
728 struct ip_vs_conn *cp, int inout)
729 {
730 struct ipv6hdr *iph = ipv6_hdr(skb);
731 unsigned int icmp_offset = 0;
732 unsigned int offs = 0; /* header offset*/
733 int protocol;
734 struct icmp6hdr *icmph;
735 struct ipv6hdr *ciph;
736 unsigned short fragoffs;
737
738 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
739 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
740 offs = icmp_offset + sizeof(struct icmp6hdr);
741 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
742
743 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
744
745 if (inout) {
746 iph->saddr = cp->vaddr.in6;
747 ciph->daddr = cp->vaddr.in6;
748 } else {
749 iph->daddr = cp->daddr.in6;
750 ciph->saddr = cp->daddr.in6;
751 }
752
753 /* the TCP/UDP/SCTP port */
754 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
755 IPPROTO_SCTP == protocol)) {
756 __be16 *ports = (void *)(skb_network_header(skb) + offs);
757
758 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
759 ntohs(inout ? ports[1] : ports[0]),
760 ntohs(inout ? cp->vport : cp->dport));
761 if (inout)
762 ports[1] = cp->vport;
763 else
764 ports[0] = cp->dport;
765 }
766
767 /* And finally the ICMP checksum */
768 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
769 skb->len - icmp_offset,
770 IPPROTO_ICMPV6, 0);
771 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
772 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
773 skb->ip_summed = CHECKSUM_PARTIAL;
774
775 if (inout)
776 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
777 (void *)ciph - (void *)iph,
778 "Forwarding altered outgoing ICMPv6");
779 else
780 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
781 (void *)ciph - (void *)iph,
782 "Forwarding altered incoming ICMPv6");
783 }
784 #endif
785
786 /* Handle relevant response ICMP messages - forward to the right
787 * destination host.
788 */
789 static int handle_response_icmp(int af, struct sk_buff *skb,
790 union nf_inet_addr *snet,
791 __u8 protocol, struct ip_vs_conn *cp,
792 struct ip_vs_protocol *pp,
793 unsigned int offset, unsigned int ihl)
794 {
795 unsigned int verdict = NF_DROP;
796
797 if (IP_VS_FWD_METHOD(cp) != 0) {
798 pr_err("shouldn't reach here, because the box is on the "
799 "half connection in the tun/dr module.\n");
800 }
801
802 /* Ensure the checksum is correct */
803 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
804 /* Failed checksum! */
805 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
806 IP_VS_DBG_ADDR(af, snet));
807 goto out;
808 }
809
810 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
811 IPPROTO_SCTP == protocol)
812 offset += 2 * sizeof(__u16);
813 if (!skb_make_writable(skb, offset))
814 goto out;
815
816 #ifdef CONFIG_IP_VS_IPV6
817 if (af == AF_INET6)
818 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
819 else
820 #endif
821 ip_vs_nat_icmp(skb, pp, cp, 1);
822
823 if (ip_vs_route_me_harder(af, skb))
824 goto out;
825
826 /* do the statistics and put it back */
827 ip_vs_out_stats(cp, skb);
828
829 skb->ipvs_property = 1;
830 if (!(cp->flags & IP_VS_CONN_F_NFCT))
831 ip_vs_notrack(skb);
832 else
833 ip_vs_update_conntrack(skb, cp, 0);
834 verdict = NF_ACCEPT;
835
836 out:
837 __ip_vs_conn_put(cp);
838
839 return verdict;
840 }
841
842 /*
843 * Handle ICMP messages in the inside-to-outside direction (outgoing).
844 * Find any that might be relevant, check against existing connections.
845 * Currently handles error types - unreachable, quench, ttl exceeded.
846 */
847 static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
848 unsigned int hooknum)
849 {
850 struct iphdr *iph;
851 struct icmphdr _icmph, *ic;
852 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
853 struct ip_vs_iphdr ciph;
854 struct ip_vs_conn *cp;
855 struct ip_vs_protocol *pp;
856 unsigned int offset, ihl;
857 union nf_inet_addr snet;
858
859 *related = 1;
860
861 /* reassemble IP fragments */
862 if (ip_is_fragment(ip_hdr(skb))) {
863 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
864 return NF_STOLEN;
865 }
866
867 iph = ip_hdr(skb);
868 offset = ihl = iph->ihl * 4;
869 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
870 if (ic == NULL)
871 return NF_DROP;
872
873 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
874 ic->type, ntohs(icmp_id(ic)),
875 &iph->saddr, &iph->daddr);
876
877 /*
878 * Work through seeing if this is for us.
879 * These checks are supposed to be in an order that means easy
880 * things are checked first to speed up processing.... however
881 * this means that some packets will manage to get a long way
882 * down this stack and then be rejected, but that's life.
883 */
884 if ((ic->type != ICMP_DEST_UNREACH) &&
885 (ic->type != ICMP_SOURCE_QUENCH) &&
886 (ic->type != ICMP_TIME_EXCEEDED)) {
887 *related = 0;
888 return NF_ACCEPT;
889 }
890
891 /* Now find the contained IP header */
892 offset += sizeof(_icmph);
893 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
894 if (cih == NULL)
895 return NF_ACCEPT; /* The packet looks wrong, ignore */
896
897 pp = ip_vs_proto_get(cih->protocol);
898 if (!pp)
899 return NF_ACCEPT;
900
901 /* Is the embedded protocol header present? */
902 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
903 pp->dont_defrag))
904 return NF_ACCEPT;
905
906 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
907 "Checking outgoing ICMP for");
908
909 ip_vs_fill_ip4hdr(cih, &ciph);
910 ciph.len += offset;
911 /* The embedded headers contain source and dest in reverse order */
912 cp = pp->conn_out_get(AF_INET, skb, &ciph, 1);
913 if (!cp)
914 return NF_ACCEPT;
915
916 snet.ip = iph->saddr;
917 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
918 pp, ciph.len, ihl);
919 }
920
921 #ifdef CONFIG_IP_VS_IPV6
922 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
923 unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
924 {
925 struct icmp6hdr _icmph, *ic;
926 struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */
927 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
928 struct ip_vs_conn *cp;
929 struct ip_vs_protocol *pp;
930 union nf_inet_addr snet;
931 unsigned int writable;
932
933 *related = 1;
934 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
935 if (ic == NULL)
936 return NF_DROP;
937
938 /*
939 * Work through seeing if this is for us.
940 * These checks are supposed to be in an order that means easy
941 * things are checked first to speed up processing.... however
942 * this means that some packets will manage to get a long way
943 * down this stack and then be rejected, but that's life.
944 */
945 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
946 *related = 0;
947 return NF_ACCEPT;
948 }
949 /* A fragment header before the ICMP header tells us that
950 * this is not an error message, since those can't be fragmented.
951 */
952 if (ipvsh->flags & IP6_FH_F_FRAG)
953 return NF_DROP;
954
955 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
956 ic->icmp6_type, ntohs(icmpv6_id(ic)),
957 &ipvsh->saddr, &ipvsh->daddr);
958
959 /* Now find the contained IP header */
960 ciph.len = ipvsh->len + sizeof(_icmph);
961 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
962 if (ip6h == NULL)
963 return NF_ACCEPT; /* The packet looks wrong, ignore */
964 ciph.saddr.in6 = ip6h->saddr; /* conn_out_get() handles reverse order */
965 ciph.daddr.in6 = ip6h->daddr;
966 /* skip possible IPv6 exthdrs of contained IPv6 packet */
967 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
968 if (ciph.protocol < 0)
969 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
970
971 pp = ip_vs_proto_get(ciph.protocol);
972 if (!pp)
973 return NF_ACCEPT;
974
975 /* The embedded headers contain source and dest in reverse order */
976 cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1);
977 if (!cp)
978 return NF_ACCEPT;
979
980 snet.in6 = ciph.saddr.in6;
981 writable = ciph.len;
982 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
983 pp, writable, sizeof(struct ipv6hdr));
984 }
985 #endif
986
987 /*
988 * Check if the SCTP chunk is an ABORT chunk
989 */
990 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
991 {
992 sctp_chunkhdr_t *sch, schunk;
993 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
994 sizeof(schunk), &schunk);
995 if (sch == NULL)
996 return 0;
997 if (sch->type == SCTP_CID_ABORT)
998 return 1;
999 return 0;
1000 }
1001
1002 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1003 {
1004 struct tcphdr _tcph, *th;
1005
1006 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1007 if (th == NULL)
1008 return 0;
1009 return th->rst;
1010 }
1011
1012 static inline bool is_new_conn(const struct sk_buff *skb,
1013 struct ip_vs_iphdr *iph)
1014 {
1015 switch (iph->protocol) {
1016 case IPPROTO_TCP: {
1017 struct tcphdr _tcph, *th;
1018
1019 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1020 if (th == NULL)
1021 return false;
1022 return th->syn;
1023 }
1024 case IPPROTO_SCTP: {
1025 sctp_chunkhdr_t *sch, schunk;
1026
1027 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
1028 sizeof(schunk), &schunk);
1029 if (sch == NULL)
1030 return false;
1031 return sch->type == SCTP_CID_INIT;
1032 }
1033 default:
1034 return false;
1035 }
1036 }
1037
1038 /* Handle response packets: rewrite addresses and send away...
1039 */
1040 static unsigned int
1041 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1042 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
1043 {
1044 struct ip_vs_protocol *pp = pd->pp;
1045
1046 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
1047
1048 if (!skb_make_writable(skb, iph->len))
1049 goto drop;
1050
1051 /* mangle the packet */
1052 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1053 goto drop;
1054
1055 #ifdef CONFIG_IP_VS_IPV6
1056 if (af == AF_INET6)
1057 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1058 else
1059 #endif
1060 {
1061 ip_hdr(skb)->saddr = cp->vaddr.ip;
1062 ip_send_check(ip_hdr(skb));
1063 }
1064
1065 /*
1066 * nf_iterate does not expect change in the skb->dst->dev.
1067 * It looks like it is not fatal to enable this code for hooks
1068 * where our handlers are at the end of the chain list and
1069 * when all next handlers use skb->dst->dev and not outdev.
1070 * It will definitely route properly the inout NAT traffic
1071 * when multiple paths are used.
1072 */
1073
1074 /* For policy routing, packets originating from this
1075 * machine itself may be routed differently to packets
1076 * passing through. We want this packet to be routed as
1077 * if it came from this machine itself. So re-compute
1078 * the routing information.
1079 */
1080 if (ip_vs_route_me_harder(af, skb))
1081 goto drop;
1082
1083 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
1084
1085 ip_vs_out_stats(cp, skb);
1086 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1087 skb->ipvs_property = 1;
1088 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1089 ip_vs_notrack(skb);
1090 else
1091 ip_vs_update_conntrack(skb, cp, 0);
1092 ip_vs_conn_put(cp);
1093
1094 LeaveFunction(11);
1095 return NF_ACCEPT;
1096
1097 drop:
1098 ip_vs_conn_put(cp);
1099 kfree_skb(skb);
1100 LeaveFunction(11);
1101 return NF_STOLEN;
1102 }
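/*
 * Worked example (addresses illustrative only): for a NAT service where
 * client 192.168.5.7 connects to VIP 10.0.0.1:80 and the request was
 * forwarded to real server 10.1.1.2:8080, the reply handled above arrives
 * as 10.1.1.2:8080 -> 192.168.5.7 and leaves as 10.0.0.1:80 -> 192.168.5.7;
 * the source port rewrite is done by pp->snat_handler() and the source
 * address rewrite by the code that follows it.
 */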
1103
1104 /*
1105 * Check if outgoing packet belongs to the established ip_vs_conn.
1106 */
1107 static unsigned int
1108 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1109 {
1110 struct net *net = NULL;
1111 struct ip_vs_iphdr iph;
1112 struct ip_vs_protocol *pp;
1113 struct ip_vs_proto_data *pd;
1114 struct ip_vs_conn *cp;
1115
1116 EnterFunction(11);
1117
1118 /* Already marked as IPVS request or reply? */
1119 if (skb->ipvs_property)
1120 return NF_ACCEPT;
1121
1122 /* Bad... Do not break raw sockets */
1123 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1124 af == AF_INET)) {
1125 struct sock *sk = skb->sk;
1126 struct inet_sock *inet = inet_sk(skb->sk);
1127
1128 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1129 return NF_ACCEPT;
1130 }
1131
1132 if (unlikely(!skb_dst(skb)))
1133 return NF_ACCEPT;
1134
1135 net = skb_net(skb);
1136 if (!net_ipvs(net)->enable)
1137 return NF_ACCEPT;
1138
1139 ip_vs_fill_iph_skb(af, skb, &iph);
1140 #ifdef CONFIG_IP_VS_IPV6
1141 if (af == AF_INET6) {
1142 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1143 int related;
1144 int verdict = ip_vs_out_icmp_v6(skb, &related,
1145 hooknum, &iph);
1146
1147 if (related)
1148 return verdict;
1149 }
1150 } else
1151 #endif
1152 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1153 int related;
1154 int verdict = ip_vs_out_icmp(skb, &related, hooknum);
1155
1156 if (related)
1157 return verdict;
1158 }
1159
1160 pd = ip_vs_proto_data_get(net, iph.protocol);
1161 if (unlikely(!pd))
1162 return NF_ACCEPT;
1163 pp = pd->pp;
1164
1165 /* reassemble IP fragments */
1166 #ifdef CONFIG_IP_VS_IPV6
1167 if (af == AF_INET)
1168 #endif
1169 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1170 if (ip_vs_gather_frags(skb,
1171 ip_vs_defrag_user(hooknum)))
1172 return NF_STOLEN;
1173
1174 ip_vs_fill_ip4hdr(skb_network_header(skb), &iph);
1175 }
1176
1177 /*
1178 * Check if the packet belongs to an existing entry
1179 */
1180 cp = pp->conn_out_get(af, skb, &iph, 0);
1181
1182 if (likely(cp))
1183 return handle_response(af, skb, pd, cp, &iph);
1184 if (sysctl_nat_icmp_send(net) &&
1185 (pp->protocol == IPPROTO_TCP ||
1186 pp->protocol == IPPROTO_UDP ||
1187 pp->protocol == IPPROTO_SCTP)) {
1188 __be16 _ports[2], *pptr;
1189
1190 pptr = frag_safe_skb_hp(skb, iph.len,
1191 sizeof(_ports), _ports, &iph);
1192 if (pptr == NULL)
1193 return NF_ACCEPT; /* Not for me */
1194 if (ip_vs_has_real_service(net, af, iph.protocol, &iph.saddr,
1195 pptr[0])) {
1196 /*
1197 * Notify the real server that there is no
1198 * existing entry, unless the packet is a
1199 * TCP RST or an SCTP ABORT.
1200 */
1201 if ((iph.protocol != IPPROTO_TCP &&
1202 iph.protocol != IPPROTO_SCTP)
1203 || ((iph.protocol == IPPROTO_TCP
1204 && !is_tcp_reset(skb, iph.len))
1205 || (iph.protocol == IPPROTO_SCTP
1206 && !is_sctp_abort(skb,
1207 iph.len)))) {
1208 #ifdef CONFIG_IP_VS_IPV6
1209 if (af == AF_INET6) {
1210 if (!skb->dev)
1211 skb->dev = net->loopback_dev;
1212 icmpv6_send(skb,
1213 ICMPV6_DEST_UNREACH,
1214 ICMPV6_PORT_UNREACH,
1215 0);
1216 } else
1217 #endif
1218 icmp_send(skb,
1219 ICMP_DEST_UNREACH,
1220 ICMP_PORT_UNREACH, 0);
1221 return NF_DROP;
1222 }
1223 }
1224 }
1225 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1226 "ip_vs_out: packet continues traversal as normal");
1227 return NF_ACCEPT;
1228 }
1229
1230 /*
1231 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1232 * used only for VS/NAT.
1233 * Check if packet is reply for established ip_vs_conn.
1234 */
1235 static unsigned int
1236 ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1237 const struct net_device *in, const struct net_device *out,
1238 int (*okfn)(struct sk_buff *))
1239 {
1240 return ip_vs_out(ops->hooknum, skb, AF_INET);
1241 }
1242
1243 /*
1244 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1245 * Check if packet is reply for established ip_vs_conn.
1246 */
1247 static unsigned int
1248 ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1249 const struct net_device *in, const struct net_device *out,
1250 int (*okfn)(struct sk_buff *))
1251 {
1252 return ip_vs_out(ops->hooknum, skb, AF_INET);
1253 }
1254
1255 #ifdef CONFIG_IP_VS_IPV6
1256
1257 /*
1258 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1259 * used only for VS/NAT.
1260 * Check if packet is reply for established ip_vs_conn.
1261 */
1262 static unsigned int
1263 ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1264 const struct net_device *in, const struct net_device *out,
1265 int (*okfn)(struct sk_buff *))
1266 {
1267 return ip_vs_out(ops->hooknum, skb, AF_INET6);
1268 }
1269
1270 /*
1271 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1272 * Check if packet is reply for established ip_vs_conn.
1273 */
1274 static unsigned int
1275 ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1276 const struct net_device *in, const struct net_device *out,
1277 int (*okfn)(struct sk_buff *))
1278 {
1279 return ip_vs_out(ops->hooknum, skb, AF_INET6);
1280 }
1281
1282 #endif
1283
1284 /*
1285 * Handle ICMP messages in the outside-to-inside direction (incoming).
1286 * Find any that might be relevant, check against existing connections,
1287 * forward to the right destination host if relevant.
1288 * Currently handles error types - unreachable, quench, ttl exceeded.
1289 */
1290 static int
1291 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1292 {
1293 struct net *net = NULL;
1294 struct iphdr *iph;
1295 struct icmphdr _icmph, *ic;
1296 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1297 struct ip_vs_iphdr ciph;
1298 struct ip_vs_conn *cp;
1299 struct ip_vs_protocol *pp;
1300 struct ip_vs_proto_data *pd;
1301 unsigned int offset, offset2, ihl, verdict;
1302 bool ipip;
1303
1304 *related = 1;
1305
1306 /* reassemble IP fragments */
1307 if (ip_is_fragment(ip_hdr(skb))) {
1308 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
1309 return NF_STOLEN;
1310 }
1311
1312 iph = ip_hdr(skb);
1313 offset = ihl = iph->ihl * 4;
1314 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1315 if (ic == NULL)
1316 return NF_DROP;
1317
1318 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1319 ic->type, ntohs(icmp_id(ic)),
1320 &iph->saddr, &iph->daddr);
1321
1322 /*
1323 * Work through seeing if this is for us.
1324 * These checks are supposed to be in an order that means easy
1325 * things are checked first to speed up processing.... however
1326 * this means that some packets will manage to get a long way
1327 * down this stack and then be rejected, but that's life.
1328 */
1329 if ((ic->type != ICMP_DEST_UNREACH) &&
1330 (ic->type != ICMP_SOURCE_QUENCH) &&
1331 (ic->type != ICMP_TIME_EXCEEDED)) {
1332 *related = 0;
1333 return NF_ACCEPT;
1334 }
1335
1336 /* Now find the contained IP header */
1337 offset += sizeof(_icmph);
1338 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1339 if (cih == NULL)
1340 return NF_ACCEPT; /* The packet looks wrong, ignore */
1341
1342 net = skb_net(skb);
1343
1344 /* Special case for errors for IPIP packets */
1345 ipip = false;
1346 if (cih->protocol == IPPROTO_IPIP) {
1347 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1348 return NF_ACCEPT;
1349 /* Error for our IPIP must arrive at LOCAL_IN */
1350 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1351 return NF_ACCEPT;
1352 offset += cih->ihl * 4;
1353 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1354 if (cih == NULL)
1355 return NF_ACCEPT; /* The packet looks wrong, ignore */
1356 ipip = true;
1357 }
1358
1359 pd = ip_vs_proto_data_get(net, cih->protocol);
1360 if (!pd)
1361 return NF_ACCEPT;
1362 pp = pd->pp;
1363
1364 /* Is the embedded protocol header present? */
1365 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1366 pp->dont_defrag))
1367 return NF_ACCEPT;
1368
1369 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1370 "Checking incoming ICMP for");
1371
1372 offset2 = offset;
1373 ip_vs_fill_ip4hdr(cih, &ciph);
1374 ciph.len += offset;
1375 offset = ciph.len;
1376 /* The embedded headers contain source and dest in reverse order.
1377 * For IPIP this is error for request, not for reply.
1378 */
1379 cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1);
1380 if (!cp)
1381 return NF_ACCEPT;
1382
1383 verdict = NF_DROP;
1384
1385 /* Ensure the checksum is correct */
1386 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1387 /* Failed checksum! */
1388 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1389 &iph->saddr);
1390 goto out;
1391 }
1392
1393 if (ipip) {
1394 __be32 info = ic->un.gateway;
1395 __u8 type = ic->type;
1396 __u8 code = ic->code;
1397
1398 /* Update the MTU */
1399 if (ic->type == ICMP_DEST_UNREACH &&
1400 ic->code == ICMP_FRAG_NEEDED) {
1401 struct ip_vs_dest *dest = cp->dest;
1402 u32 mtu = ntohs(ic->un.frag.mtu);
1403 __be16 frag_off = cih->frag_off;
1404
1405 /* Strip outer IP and ICMP, go to IPIP header */
1406 if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
1407 goto ignore_ipip;
1408 offset2 -= ihl + sizeof(_icmph);
1409 skb_reset_network_header(skb);
1410 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1411 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1412 ipv4_update_pmtu(skb, dev_net(skb->dev),
1413 mtu, 0, 0, 0, 0);
1414 /* Client uses PMTUD? */
1415 if (!(frag_off & htons(IP_DF)))
1416 goto ignore_ipip;
1417 /* Prefer the resulting PMTU */
1418 if (dest) {
1419 struct ip_vs_dest_dst *dest_dst;
1420
1421 rcu_read_lock();
1422 dest_dst = rcu_dereference(dest->dest_dst);
1423 if (dest_dst)
1424 mtu = dst_mtu(dest_dst->dst_cache);
1425 rcu_read_unlock();
1426 }
1427 if (mtu > 68 + sizeof(struct iphdr))
1428 mtu -= sizeof(struct iphdr);
1429 info = htonl(mtu);
1430 }
1431 /* Strip outer IP, ICMP and IPIP, go to IP header of
1432 * original request.
1433 */
1434 if (pskb_pull(skb, offset2) == NULL)
1435 goto ignore_ipip;
1436 skb_reset_network_header(skb);
1437 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1438 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1439 type, code, ntohl(info));
1440 icmp_send(skb, type, code, info);
1441 /* ICMP can be shorter, but account it anyway */
1442 ip_vs_out_stats(cp, skb);
1443
1444 ignore_ipip:
1445 consume_skb(skb);
1446 verdict = NF_STOLEN;
1447 goto out;
1448 }
1449
1450 /* do the statistics and put it back */
1451 ip_vs_in_stats(cp, skb);
1452 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1453 IPPROTO_SCTP == cih->protocol)
1454 offset += 2 * sizeof(__u16);
1455 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1456
1457 out:
1458 __ip_vs_conn_put(cp);
1459
1460 return verdict;
1461 }
1462
1463 #ifdef CONFIG_IP_VS_IPV6
1464 static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related,
1465 unsigned int hooknum, struct ip_vs_iphdr *iph)
1466 {
1467 struct net *net = NULL;
1468 struct ipv6hdr _ip6h, *ip6h;
1469 struct icmp6hdr _icmph, *ic;
1470 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1471 struct ip_vs_conn *cp;
1472 struct ip_vs_protocol *pp;
1473 struct ip_vs_proto_data *pd;
1474 unsigned int offs_ciph, writable, verdict;
1475
1476 *related = 1;
1477
1478 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
1479 if (ic == NULL)
1480 return NF_DROP;
1481
1482 /*
1483 * Work through seeing if this is for us.
1484 * These checks are supposed to be in an order that means easy
1485 * things are checked first to speed up processing.... however
1486 * this means that some packets will manage to get a long way
1487 * down this stack and then be rejected, but that's life.
1488 */
1489 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1490 *related = 0;
1491 return NF_ACCEPT;
1492 }
1493 /* A fragment header before the ICMP header tells us that
1494 * this is not an error message, since those can't be fragmented.
1495 */
1496 if (iph->flags & IP6_FH_F_FRAG)
1497 return NF_DROP;
1498
1499 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1500 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1501 &iph->saddr, &iph->daddr);
1502
1503 /* Now find the contained IP header */
1504 ciph.len = iph->len + sizeof(_icmph);
1505 offs_ciph = ciph.len; /* Save ip header offset */
1506 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
1507 if (ip6h == NULL)
1508 return NF_ACCEPT; /* The packet looks wrong, ignore */
1509 ciph.saddr.in6 = ip6h->saddr; /* conn_in_get() handles reverse order */
1510 ciph.daddr.in6 = ip6h->daddr;
1511 /* skip possible IPv6 exthdrs of contained IPv6 packet */
1512 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
1513 if (ciph.protocol < 0)
1514 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
1515
1516 net = skb_net(skb);
1517 pd = ip_vs_proto_data_get(net, ciph.protocol);
1518 if (!pd)
1519 return NF_ACCEPT;
1520 pp = pd->pp;
1521
1522 /* Cannot handle fragmented embedded protocol */
1523 if (ciph.fragoffs)
1524 return NF_ACCEPT;
1525
1526 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offs_ciph,
1527 "Checking incoming ICMPv6 for");
1528
1529 /* The embedded headers contain source and dest in reverse order
1530 * if not from localhost
1531 */
1532 cp = pp->conn_in_get(AF_INET6, skb, &ciph,
1533 (hooknum == NF_INET_LOCAL_OUT) ? 0 : 1);
1534
1535 if (!cp)
1536 return NF_ACCEPT;
1537 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1538 if ((hooknum == NF_INET_LOCAL_OUT) &&
1539 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1540 __ip_vs_conn_put(cp);
1541 return NF_ACCEPT;
1542 }
1543
1544 /* do the statistics and put it back */
1545 ip_vs_in_stats(cp, skb);
1546
1547 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1548 writable = ciph.len;
1549 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1550 IPPROTO_SCTP == ciph.protocol)
1551 writable += 2 * sizeof(__u16); /* Also mangle ports */
1552
1553 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph);
1554
1555 __ip_vs_conn_put(cp);
1556
1557 return verdict;
1558 }
1559 #endif
1560
1561
1562 /*
1563 * Check if it's for virtual services, look it up,
1564 * and send it on its way...
1565 */
1566 static unsigned int
1567 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1568 {
1569 struct net *net;
1570 struct ip_vs_iphdr iph;
1571 struct ip_vs_protocol *pp;
1572 struct ip_vs_proto_data *pd;
1573 struct ip_vs_conn *cp;
1574 int ret, pkts;
1575 struct netns_ipvs *ipvs;
1576
1577 /* Already marked as IPVS request or reply? */
1578 if (skb->ipvs_property)
1579 return NF_ACCEPT;
1580
1581 /*
1582 * Big tappo:
1583 * - remote client: only PACKET_HOST
1584 * - route: used for struct net when skb->dev is unset
1585 */
1586 if (unlikely((skb->pkt_type != PACKET_HOST &&
1587 hooknum != NF_INET_LOCAL_OUT) ||
1588 !skb_dst(skb))) {
1589 ip_vs_fill_iph_skb(af, skb, &iph);
1590 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1591 " ignored in hook %u\n",
1592 skb->pkt_type, iph.protocol,
1593 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1594 return NF_ACCEPT;
1595 }
1596 /* ipvs enabled in this netns ? */
1597 net = skb_net(skb);
1598 ipvs = net_ipvs(net);
1599 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1600 return NF_ACCEPT;
1601
1602 ip_vs_fill_iph_skb(af, skb, &iph);
1603
1604 /* Bad... Do not break raw sockets */
1605 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1606 af == AF_INET)) {
1607 struct sock *sk = skb->sk;
1608 struct inet_sock *inet = inet_sk(skb->sk);
1609
1610 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1611 return NF_ACCEPT;
1612 }
1613
1614 #ifdef CONFIG_IP_VS_IPV6
1615 if (af == AF_INET6) {
1616 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1617 int related;
1618 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
1619 &iph);
1620
1621 if (related)
1622 return verdict;
1623 }
1624 } else
1625 #endif
1626 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1627 int related;
1628 int verdict = ip_vs_in_icmp(skb, &related, hooknum);
1629
1630 if (related)
1631 return verdict;
1632 }
1633
1634 /* Protocol supported? */
1635 pd = ip_vs_proto_data_get(net, iph.protocol);
1636 if (unlikely(!pd))
1637 return NF_ACCEPT;
1638 pp = pd->pp;
1639 /*
1640 * Check if the packet belongs to an existing connection entry
1641 */
1642 cp = pp->conn_in_get(af, skb, &iph, 0);
1643
1644 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
1645 unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
1646 is_new_conn(skb, &iph)) {
1647 ip_vs_conn_expire_now(cp);
1648 __ip_vs_conn_put(cp);
1649 cp = NULL;
1650 }
1651
1652 if (unlikely(!cp) && !iph.fragoffs) {
1653 /* No (second) fragments need to enter here, as fragment zero
1654 * replayed by nf_defrag_ipv6 will already have created the cp
1655 */
1656 int v;
1657
1658 /* Schedule and create new connection entry into &cp */
1659 if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph))
1660 return v;
1661 }
1662
1663 if (unlikely(!cp)) {
1664 /* sorry, all this trouble for a no-hit :) */
1665 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1666 "ip_vs_in: packet continues traversal as normal");
1667 if (iph.fragoffs) {
1668 /* A fragment that couldn't be mapped to a conn entry
1669 * means the nf_defrag_ipv6 module is missing
1670 */
1671 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1672 IP_VS_DBG_PKT(7, af, pp, skb, 0, "unhandled fragment");
1673 }
1674 return NF_ACCEPT;
1675 }
1676
1677 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
1678 /* Check the server status */
1679 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1680 /* the destination server is not available */
1681
1682 if (sysctl_expire_nodest_conn(ipvs)) {
1683 /* try to expire the connection immediately */
1684 ip_vs_conn_expire_now(cp);
1685 }
1686 /* don't restart its timer, and silently
1687 drop the packet. */
1688 __ip_vs_conn_put(cp);
1689 return NF_DROP;
1690 }
1691
1692 ip_vs_in_stats(cp, skb);
1693 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1694 if (cp->packet_xmit)
1695 ret = cp->packet_xmit(skb, cp, pp, &iph);
1696 /* do not touch skb anymore */
1697 else {
1698 IP_VS_DBG_RL("warning: packet_xmit is null");
1699 ret = NF_ACCEPT;
1700 }
1701
1702 /* Increase its packet counter and check whether it needs
1703 * to be synchronized
1704 *
1705 * Sync the connection if it is about to close, to
1706 * encourage the standby servers to update the connection timeouts
1707 *
1708 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
1709 */
1710
1711 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1712 pkts = sysctl_sync_threshold(ipvs);
1713 else
1714 pkts = atomic_add_return(1, &cp->in_pkts);
1715
1716 if (ipvs->sync_state & IP_VS_STATE_MASTER)
1717 ip_vs_sync_conn(net, cp, pkts);
1718
1719 ip_vs_conn_put(cp);
1720 return ret;
1721 }
1722
1723 /*
1724 * AF_INET handler in NF_INET_LOCAL_IN chain
1725 * Schedule and forward packets from remote clients
1726 */
1727 static unsigned int
1728 ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1729 const struct net_device *in,
1730 const struct net_device *out,
1731 int (*okfn)(struct sk_buff *))
1732 {
1733 return ip_vs_in(ops->hooknum, skb, AF_INET);
1734 }
1735
1736 /*
1737 * AF_INET handler in NF_INET_LOCAL_OUT chain
1738 * Schedule and forward packets from local clients
1739 */
1740 static unsigned int
1741 ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1742 const struct net_device *in, const struct net_device *out,
1743 int (*okfn)(struct sk_buff *))
1744 {
1745 return ip_vs_in(ops->hooknum, skb, AF_INET);
1746 }
1747
1748 #ifdef CONFIG_IP_VS_IPV6
1749
1750 /*
1751 * AF_INET6 handler in NF_INET_LOCAL_IN chain
1752 * Schedule and forward packets from remote clients
1753 */
1754 static unsigned int
1755 ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1756 const struct net_device *in,
1757 const struct net_device *out,
1758 int (*okfn)(struct sk_buff *))
1759 {
1760 return ip_vs_in(ops->hooknum, skb, AF_INET6);
1761 }
1762
1763 /*
1764 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
1765 * Schedule and forward packets from local clients
1766 */
1767 static unsigned int
1768 ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1769 const struct net_device *in, const struct net_device *out,
1770 int (*okfn)(struct sk_buff *))
1771 {
1772 return ip_vs_in(ops->hooknum, skb, AF_INET6);
1773 }
1774
1775 #endif
1776
1777
1778 /*
1779 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1780 * related packets destined for 0.0.0.0/0.
1781 * When fwmark-based virtual service is used, such as transparent
1782 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
1783 * but ICMP destined for 0.0.0.0/0 cannot be easily marked and
1784 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1785 * and send them to ip_vs_in_icmp.
1786 */
1787 static unsigned int
1788 ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
1789 const struct net_device *in, const struct net_device *out,
1790 int (*okfn)(struct sk_buff *))
1791 {
1792 int r;
1793 struct net *net;
1794 struct netns_ipvs *ipvs;
1795
1796 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1797 return NF_ACCEPT;
1798
1799 /* ipvs enabled in this netns ? */
1800 net = skb_net(skb);
1801 ipvs = net_ipvs(net);
1802 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1803 return NF_ACCEPT;
1804
1805 return ip_vs_in_icmp(skb, &r, ops->hooknum);
1806 }
1807
1808 #ifdef CONFIG_IP_VS_IPV6
1809 static unsigned int
1810 ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1811 const struct net_device *in, const struct net_device *out,
1812 int (*okfn)(struct sk_buff *))
1813 {
1814 int r;
1815 struct net *net;
1816 struct netns_ipvs *ipvs;
1817 struct ip_vs_iphdr iphdr;
1818
1819 ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
1820 if (iphdr.protocol != IPPROTO_ICMPV6)
1821 return NF_ACCEPT;
1822
1823 /* ipvs enabled in this netns ? */
1824 net = skb_net(skb);
1825 ipvs = net_ipvs(net);
1826 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1827 return NF_ACCEPT;
1828
1829 return ip_vs_in_icmp_v6(skb, &r, ops->hooknum, &iphdr);
1830 }
1831 #endif
1832
1833
1834 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1835 /* After packet filtering, change source only for VS/NAT */
1836 {
1837 .hook = ip_vs_reply4,
1838 .owner = THIS_MODULE,
1839 .pf = NFPROTO_IPV4,
1840 .hooknum = NF_INET_LOCAL_IN,
1841 .priority = NF_IP_PRI_NAT_SRC - 2,
1842 },
1843 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1844 * or VS/NAT(change destination), so that filtering rules can be
1845 * applied to IPVS. */
1846 {
1847 .hook = ip_vs_remote_request4,
1848 .owner = THIS_MODULE,
1849 .pf = NFPROTO_IPV4,
1850 .hooknum = NF_INET_LOCAL_IN,
1851 .priority = NF_IP_PRI_NAT_SRC - 1,
1852 },
1853 /* Before ip_vs_in, change source only for VS/NAT */
1854 {
1855 .hook = ip_vs_local_reply4,
1856 .owner = THIS_MODULE,
1857 .pf = NFPROTO_IPV4,
1858 .hooknum = NF_INET_LOCAL_OUT,
1859 .priority = NF_IP_PRI_NAT_DST + 1,
1860 },
1861 /* After mangle, schedule and forward local requests */
1862 {
1863 .hook = ip_vs_local_request4,
1864 .owner = THIS_MODULE,
1865 .pf = NFPROTO_IPV4,
1866 .hooknum = NF_INET_LOCAL_OUT,
1867 .priority = NF_IP_PRI_NAT_DST + 2,
1868 },
1869 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1870 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1871 {
1872 .hook = ip_vs_forward_icmp,
1873 .owner = THIS_MODULE,
1874 .pf = NFPROTO_IPV4,
1875 .hooknum = NF_INET_FORWARD,
1876 .priority = 99,
1877 },
1878 /* After packet filtering, change source only for VS/NAT */
1879 {
1880 .hook = ip_vs_reply4,
1881 .owner = THIS_MODULE,
1882 .pf = NFPROTO_IPV4,
1883 .hooknum = NF_INET_FORWARD,
1884 .priority = 100,
1885 },
1886 #ifdef CONFIG_IP_VS_IPV6
1887 /* After packet filtering, change source only for VS/NAT */
1888 {
1889 .hook = ip_vs_reply6,
1890 .owner = THIS_MODULE,
1891 .pf = NFPROTO_IPV6,
1892 .hooknum = NF_INET_LOCAL_IN,
1893 .priority = NF_IP6_PRI_NAT_SRC - 2,
1894 },
1895 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1896 * or VS/NAT(change destination), so that filtering rules can be
1897 * applied to IPVS. */
1898 {
1899 .hook = ip_vs_remote_request6,
1900 .owner = THIS_MODULE,
1901 .pf = NFPROTO_IPV6,
1902 .hooknum = NF_INET_LOCAL_IN,
1903 .priority = NF_IP6_PRI_NAT_SRC - 1,
1904 },
1905 /* Before ip_vs_in, change source only for VS/NAT */
1906 {
1907 .hook = ip_vs_local_reply6,
1908 .owner = THIS_MODULE,
1909 .pf = NFPROTO_IPV6,
1910 .hooknum = NF_INET_LOCAL_OUT,
1911 .priority = NF_IP6_PRI_NAT_DST + 1,
1912 },
1913 /* After mangle, schedule and forward local requests */
1914 {
1915 .hook = ip_vs_local_request6,
1916 .owner = THIS_MODULE,
1917 .pf = NFPROTO_IPV6,
1918 .hooknum = NF_INET_LOCAL_OUT,
1919 .priority = NF_IP6_PRI_NAT_DST + 2,
1920 },
1921 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1922 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1923 {
1924 .hook = ip_vs_forward_icmp_v6,
1925 .owner = THIS_MODULE,
1926 .pf = NFPROTO_IPV6,
1927 .hooknum = NF_INET_FORWARD,
1928 .priority = 99,
1929 },
1930 /* After packet filtering, change source only for VS/NAT */
1931 {
1932 .hook = ip_vs_reply6,
1933 .owner = THIS_MODULE,
1934 .pf = NFPROTO_IPV6,
1935 .hooknum = NF_INET_FORWARD,
1936 .priority = 100,
1937 },
1938 #endif
1939 };
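/*
 * Resulting ordering within each chain (IPv4 shown; the IPv6 entries
 * mirror it with the NF_IP6_PRI_* constants).  Lower priority values run
 * first:
 *
 *	LOCAL_IN:  ip_vs_reply4 (NAT_SRC - 2), ip_vs_remote_request4 (NAT_SRC - 1)
 *	LOCAL_OUT: ip_vs_local_reply4 (NAT_DST + 1), ip_vs_local_request4 (NAT_DST + 2)
 *	FORWARD:   ip_vs_forward_icmp (99), ip_vs_reply4 (100)
 *
 * i.e. in the chains that handle both, the reply hook always runs before
 * the request hook.
 */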
1940 /*
1941 * Initialize IP Virtual Server netns mem.
1942 */
1943 static int __net_init __ip_vs_init(struct net *net)
1944 {
1945 struct netns_ipvs *ipvs;
1946
1947 ipvs = net_generic(net, ip_vs_net_id);
1948 if (ipvs == NULL)
1949 return -ENOMEM;
1950
1951 /* Hold the beast until a service is registered */
1952 ipvs->enable = 0;
1953 ipvs->net = net;
1954 /* Counters used for creating unique names */
1955 ipvs->gen = atomic_read(&ipvs_netns_cnt);
1956 atomic_inc(&ipvs_netns_cnt);
1957 net->ipvs = ipvs;
1958
1959 if (ip_vs_estimator_net_init(net) < 0)
1960 goto estimator_fail;
1961
1962 if (ip_vs_control_net_init(net) < 0)
1963 goto control_fail;
1964
1965 if (ip_vs_protocol_net_init(net) < 0)
1966 goto protocol_fail;
1967
1968 if (ip_vs_app_net_init(net) < 0)
1969 goto app_fail;
1970
1971 if (ip_vs_conn_net_init(net) < 0)
1972 goto conn_fail;
1973
1974 if (ip_vs_sync_net_init(net) < 0)
1975 goto sync_fail;
1976
1977 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
1978 sizeof(struct netns_ipvs), ipvs->gen);
1979 return 0;
1980 /*
1981 * Error handling
1982 */
1983
1984 sync_fail:
1985 ip_vs_conn_net_cleanup(net);
1986 conn_fail:
1987 ip_vs_app_net_cleanup(net);
1988 app_fail:
1989 ip_vs_protocol_net_cleanup(net);
1990 protocol_fail:
1991 ip_vs_control_net_cleanup(net);
1992 control_fail:
1993 ip_vs_estimator_net_cleanup(net);
1994 estimator_fail:
1995 net->ipvs = NULL;
1996 return -ENOMEM;
1997 }
1998
1999 static void __net_exit __ip_vs_cleanup(struct net *net)
2000 {
2001 ip_vs_service_net_cleanup(net); /* ip_vs_flush() with locks */
2002 ip_vs_conn_net_cleanup(net);
2003 ip_vs_app_net_cleanup(net);
2004 ip_vs_protocol_net_cleanup(net);
2005 ip_vs_control_net_cleanup(net);
2006 ip_vs_estimator_net_cleanup(net);
2007 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
2008 net->ipvs = NULL;
2009 }
2010
2011 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
2012 {
2013 EnterFunction(2);
2014 net_ipvs(net)->enable = 0; /* Disable packet reception */
2015 smp_wmb();
2016 ip_vs_sync_net_cleanup(net);
2017 LeaveFunction(2);
2018 }
2019
2020 static struct pernet_operations ipvs_core_ops = {
2021 .init = __ip_vs_init,
2022 .exit = __ip_vs_cleanup,
2023 .id = &ip_vs_net_id,
2024 .size = sizeof(struct netns_ipvs),
2025 };
2026
2027 static struct pernet_operations ipvs_core_dev_ops = {
2028 .exit = __ip_vs_dev_cleanup,
2029 };
2030
2031 /*
2032 * Initialize IP Virtual Server
2033 */
2034 static int __init ip_vs_init(void)
2035 {
2036 int ret;
2037
2038 ret = ip_vs_control_init();
2039 if (ret < 0) {
2040 pr_err("can't setup control.\n");
2041 goto exit;
2042 }
2043
2044 ip_vs_protocol_init();
2045
2046 ret = ip_vs_conn_init();
2047 if (ret < 0) {
2048 pr_err("can't setup connection table.\n");
2049 goto cleanup_protocol;
2050 }
2051
2052 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2053 if (ret < 0)
2054 goto cleanup_conn;
2055
2056 ret = register_pernet_device(&ipvs_core_dev_ops);
2057 if (ret < 0)
2058 goto cleanup_sub;
2059
2060 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2061 if (ret < 0) {
2062 pr_err("can't register hooks.\n");
2063 goto cleanup_dev;
2064 }
2065
2066 ret = ip_vs_register_nl_ioctl();
2067 if (ret < 0) {
2068 pr_err("can't register netlink/ioctl.\n");
2069 goto cleanup_hooks;
2070 }
2071
2072 pr_info("ipvs loaded.\n");
2073
2074 return ret;
2075
2076 cleanup_hooks:
2077 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2078 cleanup_dev:
2079 unregister_pernet_device(&ipvs_core_dev_ops);
2080 cleanup_sub:
2081 unregister_pernet_subsys(&ipvs_core_ops);
2082 cleanup_conn:
2083 ip_vs_conn_cleanup();
2084 cleanup_protocol:
2085 ip_vs_protocol_cleanup();
2086 ip_vs_control_cleanup();
2087 exit:
2088 return ret;
2089 }
2090
2091 static void __exit ip_vs_cleanup(void)
2092 {
2093 ip_vs_unregister_nl_ioctl();
2094 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2095 unregister_pernet_device(&ipvs_core_dev_ops);
2096 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2097 ip_vs_conn_cleanup();
2098 ip_vs_protocol_cleanup();
2099 ip_vs_control_cleanup();
2100 pr_info("ipvs unloaded.\n");
2101 }
2102
2103 module_init(ip_vs_init);
2104 module_exit(ip_vs_cleanup);
2105 MODULE_LICENSE("GPL");
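/*
 * User-space usage sketch (hedged; exact flags depend on the ipvsadm
 * version): once this module is loaded, a virtual service is typically
 * configured with ipvsadm, e.g.
 *
 *	ipvsadm -A -t 10.0.0.1:80 -s rr -p 300
 *	ipvsadm -a -t 10.0.0.1:80 -r 10.1.1.2:80 -m
 *
 * which creates a persistent round-robin TCP service on the VIP and adds
 * one masqueraded (NAT) real server; the addresses are illustrative only.
 */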