]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/netfilter/ipvs/ip_vs_proto_tcp.c
ipvs: Pass ipvs not net into register_app and unregister_app
[mirror_ubuntu-bionic-kernel.git] / net / netfilter / ipvs / ip_vs_proto_tcp.c
1 /*
2 * ip_vs_proto_tcp.c: TCP load balancing support for IPVS
3 *
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
13 *
14 * Network name space (netns) aware.
15 * Global data moved to netns i.e struct netns_ipvs
16 * tcp_timeouts table has copy per netns in a hash table per
17 * protocol ip_vs_proto_data and is handled by netns
18 */
19
20 #define KMSG_COMPONENT "IPVS"
21 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22
23 #include <linux/kernel.h>
24 #include <linux/ip.h>
25 #include <linux/tcp.h> /* for tcphdr */
26 #include <net/ip.h>
27 #include <net/tcp.h> /* for csum_tcpudp_magic */
28 #include <net/ip6_checksum.h>
29 #include <linux/netfilter.h>
30 #include <linux/netfilter_ipv4.h>
31
32 #include <net/ip_vs.h>
33
/* Decide whether to schedule a new IPVS connection for this TCP packet.
 *
 * Looks up a matching virtual service and, if found, lets it select a
 * real server and create a connection entry in *cpp.
 *
 * Returns 1 to continue normal netfilter processing (NF_ACCEPT), or 0
 * with *verdict set (NF_DROP, or whatever ip_vs_leave() decides).
 */
static int
tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
		  int *verdict, struct ip_vs_conn **cpp,
		  struct ip_vs_iphdr *iph)
{
	struct net *net;
	struct ip_vs_service *svc;
	struct tcphdr _tcph, *th;
	struct netns_ipvs *ipvs;
	__be16 _ports[2], *ports = NULL;

	net = skb_net(skb);
	ipvs = net_ipvs(net);

	/* In the event of icmp, we're only guaranteed to have the first 8
	 * bytes of the transport header, so we only check the rest of the
	 * TCP packet for non-ICMP packets
	 */
	if (likely(!ip_vs_iph_icmp(iph))) {
		th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
		if (th) {
			/* Never schedule on RST; without sloppy_tcp only
			 * SYN packets may create new connections.
			 */
			if (th->rst || !(sysctl_sloppy_tcp(ipvs) || th->syn))
				return 1;
			ports = &th->source;
		}
	} else {
		/* ICMP-embedded header: only the port pair is guaranteed */
		ports = skb_header_pointer(
			skb, iph->len, sizeof(_ports), &_ports);
	}

	if (!ports) {
		*verdict = NF_DROP;
		return 0;
	}

	/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
	rcu_read_lock();

	if (likely(!ip_vs_iph_inverse(iph)))
		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
					 &iph->daddr, ports[1]);
	else
		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
					 &iph->saddr, ports[0]);

	if (svc) {
		int ignored;

		if (ip_vs_todrop(ipvs)) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			rcu_read_unlock();
			*verdict = NF_DROP;
			return 0;
		}

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
		if (!*cpp && ignored <= 0) {
			if (!ignored)
				*verdict = ip_vs_leave(svc, skb, pd, iph);
			else
				*verdict = NF_DROP;
			rcu_read_unlock();
			return 0;
		}
	}
	rcu_read_unlock();
	/* NF_ACCEPT */
	return 1;
}
110
111
112 static inline void
113 tcp_fast_csum_update(int af, struct tcphdr *tcph,
114 const union nf_inet_addr *oldip,
115 const union nf_inet_addr *newip,
116 __be16 oldport, __be16 newport)
117 {
118 #ifdef CONFIG_IP_VS_IPV6
119 if (af == AF_INET6)
120 tcph->check =
121 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
122 ip_vs_check_diff2(oldport, newport,
123 ~csum_unfold(tcph->check))));
124 else
125 #endif
126 tcph->check =
127 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
128 ip_vs_check_diff2(oldport, newport,
129 ~csum_unfold(tcph->check))));
130 }
131
132
133 static inline void
134 tcp_partial_csum_update(int af, struct tcphdr *tcph,
135 const union nf_inet_addr *oldip,
136 const union nf_inet_addr *newip,
137 __be16 oldlen, __be16 newlen)
138 {
139 #ifdef CONFIG_IP_VS_IPV6
140 if (af == AF_INET6)
141 tcph->check =
142 ~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
143 ip_vs_check_diff2(oldlen, newlen,
144 csum_unfold(tcph->check))));
145 else
146 #endif
147 tcph->check =
148 ~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
149 ip_vs_check_diff2(oldlen, newlen,
150 csum_unfold(tcph->check))));
151 }
152
153
/* Source NAT for replies (real server -> client): rewrite the source
 * port to the virtual service port and fix up the TCP checksum.
 * Returns 1 on success, 0 if the packet must be dropped.
 */
static int
tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct tcphdr *tcph;
	unsigned int tcphoff = iph->len;	/* offset of the TCP header */
	int oldlen;
	int payload_csum = 0;	/* set when an app helper mangled payload */

#ifdef CONFIG_IP_VS_IPV6
	/* Non-first IPv6 fragments carry no TCP header to mangle */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif
	oldlen = skb->len - tcphoff;

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/* Call application helper if needed */
		if (!(ret = ip_vs_app_pkt_out(cp, skb)))
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - tcphoff;
		else
			payload_csum = 1;
	}

	tcph = (void *)skb_network_header(skb) + tcphoff;
	tcph->source = cp->vport;

	/* Adjust TCP checksums */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Device computes the final csum; patch pseudo-header only */
		tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
					htons(oldlen),
					htons(skb->len - tcphoff));
	} else if (!payload_csum) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
				     cp->dport, cp->vport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = (cp->app && pp->csum_check) ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
						      &cp->caddr.in6,
						      skb->len - tcphoff,
						      cp->protocol, skb->csum);
		else
#endif
			tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
							cp->caddr.ip,
							skb->len - tcphoff,
							cp->protocol,
							skb->csum);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
			  pp->name, tcph->check,
			  (char*)&(tcph->check) - (char*)tcph);
	}
	return 1;
}
230
231
/* Destination NAT for requests (client -> real server): rewrite the
 * destination port to the real server port and fix up the TCP checksum.
 * Returns 1 on success, 0 if the packet must be dropped.
 */
static int
tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct tcphdr *tcph;
	unsigned int tcphoff = iph->len;	/* offset of the TCP header */
	int oldlen;
	int payload_csum = 0;	/* set when an app helper mangled payload */

#ifdef CONFIG_IP_VS_IPV6
	/* Non-first IPv6 fragments carry no TCP header to mangle */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif
	oldlen = skb->len - tcphoff;

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/*
		 *	Attempt ip_vs_app call.
		 *	It will fix ip_vs_conn and iph ack_seq stuff
		 */
		if (!(ret = ip_vs_app_pkt_in(cp, skb)))
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - tcphoff;
		else
			payload_csum = 1;
	}

	tcph = (void *)skb_network_header(skb) + tcphoff;
	tcph->dest = cp->dport;

	/*
	 *	Adjust TCP checksums
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Device computes the final csum; patch pseudo-header only */
		tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
					htons(oldlen),
					htons(skb->len - tcphoff));
	} else if (!payload_csum) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
				     cp->vport, cp->dport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = (cp->app && pp->csum_check) ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			tcph->check = csum_ipv6_magic(&cp->caddr.in6,
						      &cp->daddr.in6,
						      skb->len - tcphoff,
						      cp->protocol, skb->csum);
		else
#endif
			tcph->check = csum_tcpudp_magic(cp->caddr.ip,
							cp->daddr.ip,
							skb->len - tcphoff,
							cp->protocol,
							skb->csum);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return 1;
}
309
310
/* Verify the TCP checksum of a packet.
 * Returns 1 if the checksum is valid (or already verified by hardware),
 * 0 on verification failure.
 */
static int
tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
	unsigned int tcphoff;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		tcphoff = sizeof(struct ipv6hdr);
	else
#endif
		tcphoff = ip_hdrlen(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		/* No csum from the device: compute it, then verify below
		 * exactly as in the CHECKSUM_COMPLETE case.
		 */
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
		/* fall through */
	case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
		if (af == AF_INET6) {
			if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					    &ipv6_hdr(skb)->daddr,
					    skb->len - tcphoff,
					    ipv6_hdr(skb)->nexthdr,
					    skb->csum)) {
				IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
						 "Failed checksum for");
				return 0;
			}
		} else
#endif
			if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      skb->len - tcphoff,
					      ip_hdr(skb)->protocol,
					      skb->csum)) {
				IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
						 "Failed checksum for");
				return 0;
			}
		break;
	default:
		/* No need to checksum. */
		break;
	}

	return 1;
}
357
358
/* Offsets into the state transition tables below, selecting the
 * per-direction group of four rows (syn/fin/ack/rst).
 */
#define TCP_DIR_INPUT		0
#define TCP_DIR_OUTPUT		4
#define TCP_DIR_INPUT_ONLY	8

/* Map a packet direction to the matching group offset */
static const int tcp_state_off[IP_VS_DIR_LAST] = {
	[IP_VS_DIR_INPUT]		=	TCP_DIR_INPUT,
	[IP_VS_DIR_OUTPUT]		=	TCP_DIR_OUTPUT,
	[IP_VS_DIR_INPUT_ONLY]		=	TCP_DIR_INPUT_ONLY,
};
368
/*
 *	Timeout table[state]
 *	Default per-state timeouts in jiffies; copied into the per-netns
 *	protocol data by __ip_vs_tcp_init().
 */
static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
	[IP_VS_TCP_S_NONE]		=	2*HZ,
	[IP_VS_TCP_S_ESTABLISHED]	=	15*60*HZ,
	[IP_VS_TCP_S_SYN_SENT]		=	2*60*HZ,
	[IP_VS_TCP_S_SYN_RECV]		=	1*60*HZ,
	[IP_VS_TCP_S_FIN_WAIT]		=	2*60*HZ,
	[IP_VS_TCP_S_TIME_WAIT]		=	2*60*HZ,
	[IP_VS_TCP_S_CLOSE]		=	10*HZ,
	[IP_VS_TCP_S_CLOSE_WAIT]	=	60*HZ,
	[IP_VS_TCP_S_LAST_ACK]		=	30*HZ,
	[IP_VS_TCP_S_LISTEN]		=	2*60*HZ,
	[IP_VS_TCP_S_SYNACK]		=	120*HZ,
	[IP_VS_TCP_S_LAST]		=	2*HZ,
};
386
/* Printable names for each TCP state, indexed by IP_VS_TCP_S_* */
static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
	[IP_VS_TCP_S_NONE]		=	"NONE",
	[IP_VS_TCP_S_ESTABLISHED]	=	"ESTABLISHED",
	[IP_VS_TCP_S_SYN_SENT]		=	"SYN_SENT",
	[IP_VS_TCP_S_SYN_RECV]		=	"SYN_RECV",
	[IP_VS_TCP_S_FIN_WAIT]		=	"FIN_WAIT",
	[IP_VS_TCP_S_TIME_WAIT]		=	"TIME_WAIT",
	[IP_VS_TCP_S_CLOSE]		=	"CLOSE",
	[IP_VS_TCP_S_CLOSE_WAIT]	=	"CLOSE_WAIT",
	[IP_VS_TCP_S_LAST_ACK]		=	"LAST_ACK",
	[IP_VS_TCP_S_LISTEN]		=	"LISTEN",
	[IP_VS_TCP_S_SYNACK]		=	"SYNACK",
	[IP_VS_TCP_S_LAST]		=	"BUG!",
};
401
/* Short aliases for the TCP states, used as cells in the transition
 * tables below to keep the rows readable.
 */
#define sNO IP_VS_TCP_S_NONE
#define sES IP_VS_TCP_S_ESTABLISHED
#define sSS IP_VS_TCP_S_SYN_SENT
#define sSR IP_VS_TCP_S_SYN_RECV
#define sFW IP_VS_TCP_S_FIN_WAIT
#define sTW IP_VS_TCP_S_TIME_WAIT
#define sCL IP_VS_TCP_S_CLOSE
#define sCW IP_VS_TCP_S_CLOSE_WAIT
#define sLA IP_VS_TCP_S_LAST_ACK
#define sLI IP_VS_TCP_S_LISTEN
#define sSA IP_VS_TCP_S_SYNACK

/* One row of a transition table: the next state, indexed by the
 * connection's current state.
 */
struct tcp_states_t {
	int next_state[IP_VS_TCP_S_LAST];
};
417
418 static const char * tcp_state_name(int state)
419 {
420 if (state >= IP_VS_TCP_S_LAST)
421 return "ERR!";
422 return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
423 }
424
/* Normal-mode state transition table.  Three groups of four rows
 * (syn/fin/ack/rst) for the INPUT, OUTPUT and INPUT-ONLY directions;
 * columns are indexed by the connection's current state.
 */
static struct tcp_states_t tcp_states [] = {
/*	INPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }},

/*	OUTPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},

/*	INPUT-ONLY */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
447
/* Hardened state transition table, selected by tcp_timeout_change()
 * when the secure_tcp flag is on.  Same layout as tcp_states.
 */
static struct tcp_states_t tcp_states_dos [] = {
/*	INPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }},
/*ack*/ {{sES, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},

/*	OUTPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},

/*	INPUT-ONLY */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
470
471 static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
472 {
473 int on = (flags & 1); /* secure_tcp */
474
475 /*
476 ** FIXME: change secure_tcp to independent sysctl var
477 ** or make it per-service or per-app because it is valid
478 ** for most if not for all of the applications. Something
479 ** like "capabilities" (flags) for each object.
480 */
481 pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
482 }
483
484 static inline int tcp_state_idx(struct tcphdr *th)
485 {
486 if (th->rst)
487 return 3;
488 if (th->syn)
489 return 0;
490 if (th->fin)
491 return 1;
492 if (th->ack)
493 return 2;
494 return -1;
495 }
496
/* Compute and apply the next TCP state for a connection, based on the
 * packet direction and TCP flags, then refresh the connection timeout
 * and keep the destination's active/inactive counters in sync.
 * Runs under cp->lock (taken in tcp_state_transition()).
 */
static inline void
set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
	      int direction, struct tcphdr *th)
{
	int state_idx;
	int new_state = IP_VS_TCP_S_CLOSE;	/* default when flags unknown */
	int state_off = tcp_state_off[direction];

	/*
	 *    Update state offset to INPUT_ONLY if necessary
	 *    or delete NO_OUTPUT flag if output packet detected
	 */
	if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
		if (state_off == TCP_DIR_OUTPUT)
			cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
		else
			state_off = TCP_DIR_INPUT_ONLY;
	}

	/* No recognizable flag set: fall back to the CLOSE state */
	if ((state_idx = tcp_state_idx(th)) < 0) {
		IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
		goto tcp_state_out;
	}

	new_state =
		pd->tcp_state_table[state_off+state_idx].next_state[cp->state];

  tcp_state_out:
	if (new_state != cp->state) {
		struct ip_vs_dest *dest = cp->dest;

		IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
			      "%s:%d state: %s->%s conn->refcnt:%d\n",
			      pd->pp->name,
			      ((state_off == TCP_DIR_OUTPUT) ?
			       "output " : "input "),
			      th->syn ? 'S' : '.',
			      th->fin ? 'F' : '.',
			      th->ack ? 'A' : '.',
			      th->rst ? 'R' : '.',
			      IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
			      ntohs(cp->dport),
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      tcp_state_name(cp->state),
			      tcp_state_name(new_state),
			      atomic_read(&cp->refcnt));

		if (dest) {
			/* Move the connection between the destination's
			 * active and inactive counters as it enters or
			 * leaves ESTABLISHED.
			 */
			if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
			    (new_state != IP_VS_TCP_S_ESTABLISHED)) {
				atomic_dec(&dest->activeconns);
				atomic_inc(&dest->inactconns);
				cp->flags |= IP_VS_CONN_F_INACTIVE;
			} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
				   (new_state == IP_VS_TCP_S_ESTABLISHED)) {
				atomic_inc(&dest->activeconns);
				atomic_dec(&dest->inactconns);
				cp->flags &= ~IP_VS_CONN_F_INACTIVE;
			}
		}
	}

	/* Commit the new state and pick its timeout */
	if (likely(pd))
		cp->timeout = pd->timeout_table[cp->state = new_state];
	else	/* What to do ? */
		cp->timeout = tcp_timeouts[cp->state = new_state];
}
565
566 /*
567 * Handle state transitions
568 */
569 static void
570 tcp_state_transition(struct ip_vs_conn *cp, int direction,
571 const struct sk_buff *skb,
572 struct ip_vs_proto_data *pd)
573 {
574 struct tcphdr _tcph, *th;
575
576 #ifdef CONFIG_IP_VS_IPV6
577 int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
578 #else
579 int ihl = ip_hdrlen(skb);
580 #endif
581
582 th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
583 if (th == NULL)
584 return;
585
586 spin_lock_bh(&cp->lock);
587 set_tcp_state(pd, cp, direction, th);
588 spin_unlock_bh(&cp->lock);
589 }
590
591 static inline __u16 tcp_app_hashkey(__be16 port)
592 {
593 return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
594 & TCP_APP_TAB_MASK;
595 }
596
597
598 static int tcp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
599 {
600 struct ip_vs_app *i;
601 __u16 hash;
602 __be16 port = inc->port;
603 int ret = 0;
604 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
605
606 hash = tcp_app_hashkey(port);
607
608 list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
609 if (i->port == port) {
610 ret = -EEXIST;
611 goto out;
612 }
613 }
614 list_add_rcu(&inc->p_list, &ipvs->tcp_apps[hash]);
615 atomic_inc(&pd->appcnt);
616
617 out:
618 return ret;
619 }
620
621
/* Undo tcp_register_app(): drop the protocol's app counter and unlink
 * the incarnation from the per-netns hash list (RCU-safe removal).
 */
static void
tcp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);

	atomic_dec(&pd->appcnt);
	list_del_rcu(&inc->p_list);
}
630
631
/* Bind a connection to the application helper registered on its
 * virtual port, if any.  Only NAT (masquerading) connections are
 * considered.
 * Returns 0 (no match or bound without init), or the helper's
 * init_conn() result.
 */
static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
	struct netns_ipvs *ipvs = cp->ipvs;
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;

	/* Lookup application incarnations and bind the right one */
	hash = tcp_app_hashkey(cp->vport);

	rcu_read_lock();
	list_for_each_entry_rcu(inc, &ipvs->tcp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			/* Grab a reference before leaving the RCU section;
			 * bail out if the incarnation is going away.
			 */
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;
			rcu_read_unlock();

			IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
				      "%s:%u to app %s on port %u\n",
				      __func__,
				      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
				      ntohs(cp->cport),
				      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
				      ntohs(cp->vport),
				      inc->name, ntohs(inc->port));

			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			goto out;
		}
	}
	rcu_read_unlock();

  out:
	return result;
}
674
675
676 /*
677 * Set LISTEN timeout. (ip_vs_conn_put will setup timer)
678 */
679 void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
680 {
681 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net_ipvs(net), IPPROTO_TCP);
682
683 spin_lock_bh(&cp->lock);
684 cp->state = IP_VS_TCP_S_LISTEN;
685 cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
686 : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
687 spin_unlock_bh(&cp->lock);
688 }
689
690 /* ---------------------------------------------
691 * timeouts is netns related now.
692 * ---------------------------------------------
693 */
694 static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
695 {
696 struct netns_ipvs *ipvs = net_ipvs(net);
697
698 ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
699 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
700 sizeof(tcp_timeouts));
701 if (!pd->timeout_table)
702 return -ENOMEM;
703 pd->tcp_state_table = tcp_states;
704 return 0;
705 }
706
/* Per-netns cleanup: free the timeout table allocated in
 * __ip_vs_tcp_init().
 */
static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
	kfree(pd->timeout_table);
}
711
712
/* IPVS protocol handler for TCP: connection scheduling, NAT mangling,
 * checksum verification, state tracking and app helper binding.
 */
struct ip_vs_protocol ip_vs_protocol_tcp = {
	.name =			"TCP",
	.protocol =		IPPROTO_TCP,
	.num_states =		IP_VS_TCP_S_LAST,
	.dont_defrag =		0,
	.init =			NULL,		/* per-netns init only */
	.exit =			NULL,
	.init_netns =		__ip_vs_tcp_init,
	.exit_netns =		__ip_vs_tcp_exit,
	.register_app =		tcp_register_app,
	.unregister_app =	tcp_unregister_app,
	.conn_schedule =	tcp_conn_schedule,
	.conn_in_get =		ip_vs_conn_in_get_proto,
	.conn_out_get =		ip_vs_conn_out_get_proto,
	.snat_handler =		tcp_snat_handler,
	.dnat_handler =		tcp_dnat_handler,
	.csum_check =		tcp_csum_check,
	.state_name =		tcp_state_name,
	.state_transition =	tcp_state_transition,
	.app_conn_bind =	tcp_app_conn_bind,
	.debug_packet =		ip_vs_tcpudp_debug_packet,
	.timeout_change =	tcp_timeout_change,
};