/*
 * net/netfilter/nf_conntrack_proto_tcp.c
 * (extracted from the mirror_ubuntu-bionic-kernel.git gitweb blob view)
 */
1 /* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
3 * (C) 2002-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <linux/types.h>
12 #include <linux/timer.h>
13 #include <linux/module.h>
14 #include <linux/in.h>
15 #include <linux/tcp.h>
16 #include <linux/spinlock.h>
17 #include <linux/skbuff.h>
18 #include <linux/ipv6.h>
19 #include <net/ip6_checksum.h>
20 #include <asm/unaligned.h>
21
22 #include <net/tcp.h>
23
24 #include <linux/netfilter.h>
25 #include <linux/netfilter_ipv4.h>
26 #include <linux/netfilter_ipv6.h>
27 #include <net/netfilter/nf_conntrack.h>
28 #include <net/netfilter/nf_conntrack_l4proto.h>
29 #include <net/netfilter/nf_conntrack_ecache.h>
30 #include <net/netfilter/nf_conntrack_seqadj.h>
31 #include <net/netfilter/nf_conntrack_synproxy.h>
32 #include <net/netfilter/nf_log.h>
33 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
34 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
35
/* "Be conservative in what you do,
   be liberal in what you accept from others."
   If it's non-zero, we mark only out of window RST segments as INVALID. */
static int nf_ct_tcp_be_liberal __read_mostly = 0;

/* If it is set to zero, we disable picking up already established
   connections. */
static int nf_ct_tcp_loose __read_mostly = 1;

/* Max number of the retransmitted packets without receiving an (acceptable)
   ACK from the destination. If this number is reached, a shorter timer
   will be started. */
static int nf_ct_tcp_max_retrans __read_mostly = 3;
49
50 /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
51 closely. They're more complex. --RR */
52
/* Human-readable state names, indexed by TCP conntrack state; used by
 * tcp_print_conntrack() and in invalid-packet log messages. */
static const char *const tcp_conntrack_names[] = {
	"NONE",
	"SYN_SENT",
	"SYN_RECV",
	"ESTABLISHED",
	"FIN_WAIT",
	"CLOSE_WAIT",
	"LAST_ACK",
	"TIME_WAIT",
	"CLOSE",
	"SYN_SENT2",
};
65
/* Time-unit helpers: written postfix, so "2 MINS" expands to 2 * 60 * HZ
 * jiffies. */
#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS

/* Default timeout (in jiffies) for each TCP conntrack state. */
static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
	[TCP_CONNTRACK_FIN_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE_WAIT]	= 60 SECS,
	[TCP_CONNTRACK_LAST_ACK]	= 30 SECS,
	[TCP_CONNTRACK_TIME_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE]		= 10 SECS,
	[TCP_CONNTRACK_SYN_SENT2]	= 2 MINS,
/* RFC1122 says the R2 limit should be at least 100 seconds.
   Linux uses 15 packets as limit, which corresponds
   to ~13-30min depending on RTO. */
	[TCP_CONNTRACK_RETRANS]		= 5 MINS,
	[TCP_CONNTRACK_UNACK]		= 5 MINS,
};
87
/* Short state aliases used to keep the rows of tcp_conntracks[] readable. */
#define sNO TCP_CONNTRACK_NONE
#define sSS TCP_CONNTRACK_SYN_SENT
#define sSR TCP_CONNTRACK_SYN_RECV
#define sES TCP_CONNTRACK_ESTABLISHED
#define sFW TCP_CONNTRACK_FIN_WAIT
#define sCW TCP_CONNTRACK_CLOSE_WAIT
#define sLA TCP_CONNTRACK_LAST_ACK
#define sTW TCP_CONNTRACK_TIME_WAIT
#define sCL TCP_CONNTRACK_CLOSE
#define sS2 TCP_CONNTRACK_SYN_SENT2
#define sIV TCP_CONNTRACK_MAX
#define sIG TCP_CONNTRACK_IGNORE

/* What TCP flags are set from RST/SYN/FIN/ACK.
 * Index into the middle dimension of tcp_conntracks[] below;
 * computed from a header by get_conntrack_index(). */
enum tcp_bit_set {
	TCP_SYN_SET,
	TCP_SYNACK_SET,
	TCP_FIN_SET,
	TCP_ACK_SET,
	TCP_RST_SET,
	TCP_NONE_SET,
};
110
/*
 * The TCP state transition table needs a few words...
 *
 * We are the man in the middle. All the packets go through us
 * but might get lost in transit to the destination.
 * It is assumed that the destinations can't receive segments
 * we haven't seen.
 *
 * The checked segment is in window, but our windows are *not*
 * equivalent with the ones of the sender/receiver. We always
 * try to guess the state of the current sender.
 *
 * The meaning of the states are:
 *
 * NONE:	initial state
 * SYN_SENT:	SYN-only packet seen
 * SYN_SENT2:	SYN-only packet seen from reply dir, simultaneous open
 * SYN_RECV:	SYN-ACK packet seen
 * ESTABLISHED:	ACK packet seen
 * FIN_WAIT:	FIN packet seen
 * CLOSE_WAIT:	ACK seen (after FIN)
 * LAST_ACK:	FIN seen (after FIN)
 * TIME_WAIT:	last ACK seen
 * CLOSE:	closed connection (RST)
 *
 * Packets marked as IGNORED (sIG):
 *	if they may be either invalid or valid
 *	and the receiver may send back a connection
 *	closing RST or a SYN/ACK.
 *
 * Packets marked as INVALID (sIV):
 *	if we regard them as truly invalid packets
 */
/* Indexed as [direction][tcp_bit_set][current state] -> next state. */
static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
	{
/* ORIGINAL */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
/*
 *	sNO -> sSS	Initialize a new connection
 *	sSS -> sSS	Retransmitted SYN
 *	sS2 -> sS2	Late retransmitted SYN
 *	sSR -> sIG
 *	sES -> sIG	Error: SYNs in window outside the SYN_SENT state
 *			are errors. Receiver will reply with RST
 *			and close the connection.
 *			Or we are not in sync and hold a dead connection.
 *	sFW -> sIG
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sSS	Reopened connection (RFC 1122).
 *	sCL -> sSS
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
/*
 *	sNO -> sIV	Too late and no reason to do anything
 *	sSS -> sIV	Client can't send SYN and then SYN/ACK
 *	sS2 -> sSR	SYN/ACK sent to SYN2 in simultaneous open
 *	sSR -> sSR	Late retransmitted SYN/ACK in simultaneous open
 *	sES -> sIV	Invalid SYN/ACK packets sent by the client
 *	sFW -> sIV
 *	sCW -> sIV
 *	sLA -> sIV
 *	sTW -> sIV
 *	sCL -> sIV
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
 *	sNO -> sIV	Too late and no reason to do anything...
 *	sSS -> sIV	Client might not send FIN in this state:
 *			we enforce waiting for a SYN/ACK reply first.
 *	sS2 -> sIV
 *	sSR -> sFW	Close started.
 *	sES -> sFW
 *	sFW -> sLA	FIN seen in both directions, waiting for
 *			the last ACK.
 *			Might be a retransmitted FIN as well...
 *	sCW -> sLA
 *	sLA -> sLA	Retransmitted FIN. Remain in the same state.
 *	sTW -> sTW
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*ack*/	   { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
/*
 *	sNO -> sES	Assumed.
 *	sSS -> sIV	ACK is invalid: we haven't seen a SYN/ACK yet.
 *	sS2 -> sIV
 *	sSR -> sES	Established state is reached.
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
 *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
 *	sTW -> sTW	Retransmitted last ACK. Remain in the same state.
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	},
	{
/* REPLY */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
/*
 *	sNO -> sIV	Never reached.
 *	sSS -> sS2	Simultaneous open
 *	sS2 -> sS2	Retransmitted simultaneous SYN
 *	sSR -> sIV	Invalid SYN packets sent by the server
 *	sES -> sIV
 *	sFW -> sIV
 *	sCW -> sIV
 *	sLA -> sIV
 *	sTW -> sSS	Reopened connection, but server may have switched role
 *	sCL -> sIV
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*
 *	sSS -> sSR	Standard open.
 *	sS2 -> sSR	Simultaneous open
 *	sSR -> sIG	Retransmitted SYN/ACK, ignore it.
 *	sES -> sIG	Late retransmitted SYN/ACK?
 *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sIG
 *	sCL -> sIG
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
 *	sSS -> sIV	Server might not send FIN in this state.
 *	sS2 -> sIV
 *	sSR -> sFW	Close started.
 *	sES -> sFW
 *	sFW -> sLA	FIN seen in both directions.
 *	sCW -> sLA
 *	sLA -> sLA	Retransmitted FIN.
 *	sTW -> sTW
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*ack*/	   { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
/*
 *	sSS -> sIG	Might be a half-open connection.
 *	sS2 -> sIG
 *	sSR -> sSR	Might answer late resent SYN.
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
 *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
 *	sTW -> sTW	Retransmitted last ACK.
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	}
};
273
/* Return this network namespace's TCP conntrack data. */
static inline struct nf_tcp_net *tcp_pernet(struct net *net)
{
	return &net->ct.nf_ct_proto.tcp;
}
278
279 static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
280 struct net *net, struct nf_conntrack_tuple *tuple)
281 {
282 const struct tcphdr *hp;
283 struct tcphdr _hdr;
284
285 /* Actually only need first 4 bytes to get ports. */
286 hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
287 if (hp == NULL)
288 return false;
289
290 tuple->src.u.tcp.port = hp->source;
291 tuple->dst.u.tcp.port = hp->dest;
292
293 return true;
294 }
295
296 static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
297 const struct nf_conntrack_tuple *orig)
298 {
299 tuple->src.u.tcp.port = orig->dst.u.tcp.port;
300 tuple->dst.u.tcp.port = orig->src.u.tcp.port;
301 return true;
302 }
303
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack: the current TCP state name
 * (e.g. "ESTABLISHED"). */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
}
#endif
311
312 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
313 {
314 if (tcph->rst) return TCP_RST_SET;
315 else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
316 else if (tcph->fin) return TCP_FIN_SET;
317 else if (tcph->ack) return TCP_ACK_SET;
318 else return TCP_NONE_SET;
319 }
320
321 /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
322 in IP Filter' by Guido van Rooij.
323
324 http://www.sane.nl/events/sane2000/papers.html
325 http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
326
327 The boundaries and the conditions are changed according to RFC793:
328 the packet must intersect the window (i.e. segments may be
329 after the right or before the left edge) and thus receivers may ACK
330 segments after the right edge of the window.
331
332 td_maxend = max(sack + max(win,1)) seen in reply packets
333 td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
334 td_maxwin += seq + len - sender.td_maxend
335 if seq + len > sender.td_maxend
336 td_end = max(seq + len) seen in sent packets
337
338 I. Upper bound for valid data: seq <= sender.td_maxend
339 II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
340 III. Upper bound for valid (s)ack: sack <= receiver.td_end
341 IV. Lower bound for valid (s)ack: sack >= receiver.td_end - MAXACKWINDOW
342
343 where sack is the highest right edge of sack block found in the packet
344 or ack in the case of packet without SACK option.
345
   The upper bound limit for a valid (s)ack is not ignored -
   we don't have to deal with fragments.
348 */
349
350 static inline __u32 segment_seq_plus_len(__u32 seq,
351 size_t len,
352 unsigned int dataoff,
353 const struct tcphdr *tcph)
354 {
355 /* XXX Should I use payload length field in IP/IPv6 header ?
356 * - YK */
357 return (seq + len - dataoff - tcph->doff*4
358 + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
359 }
360
/* Fixme: what about big packets? */
#define MAXACKWINCONST			66000
/* Widest ACK window we tolerate for a sender: its max window, but never
 * less than MAXACKWINCONST. */
#define MAXACKWINDOW(sender)						\
	((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin	\
					      : MAXACKWINCONST)
366
/*
 * Simplified tcp_parse_options routine from tcp_input.c
 *
 * Walks the TCP options of the segment and records window-scale and
 * SACK-permitted into @state (state->td_scale and state->flags).
 */
static void tcp_options(const struct sk_buff *skb,
			unsigned int dataoff,
			const struct tcphdr *tcph,
			struct ip_ct_tcp_state *state)
{
	/* Max option area: 15 32-bit words of header minus the fixed part. */
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	const unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	BUG_ON(ptr == NULL);

	state->td_scale =
	state->flags = 0;

	while (length > 0) {
		int opcode=*ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			if (length < 2)
				return;
			opsize=*ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK_PERM
			    && opsize == TCPOLEN_SACK_PERM)
				state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
			else if (opcode == TCPOPT_WINDOW
				 && opsize == TCPOLEN_WINDOW) {
				state->td_scale = *(u_int8_t *)ptr;

				/* Clamp to the largest legal shift (RFC 7323). */
				if (state->td_scale > TCP_MAX_WSCALE)
					state->td_scale = TCP_MAX_WSCALE;

				state->flags |=
					IP_CT_TCP_FLAG_WINDOW_SCALE;
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
426
/* Scan the segment's TCP options for a SACK block and raise *sack to the
 * highest right edge found; *sack is left untouched if none is present. */
static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
		     const struct tcphdr *tcph, __u32 *sack)
{
	/* Max option area: 15 32-bit words of header minus the fixed part. */
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	const unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);
	__u32 tmp;

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	BUG_ON(ptr == NULL);

	/* Fast path for timestamp-only option */
	if (length == TCPOLEN_TSTAMP_ALIGNED
	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
				       | (TCPOPT_NOP << 16)
				       | (TCPOPT_TIMESTAMP << 8)
				       | TCPOLEN_TIMESTAMP))
		return;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize, i;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			if (length < 2)
				return;
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK
			    && opsize >= (TCPOLEN_SACK_BASE
					  + TCPOLEN_SACK_PERBLOCK)
			    && !((opsize - TCPOLEN_SACK_BASE)
				 % TCPOLEN_SACK_PERBLOCK)) {
				/* Each block is a (left, right) edge pair;
				 * read the right edge of each block. */
				for (i = 0;
				     i < (opsize - TCPOLEN_SACK_BASE);
				     i += TCPOLEN_SACK_PERBLOCK) {
					tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);

					if (after(tmp, *sack))
						*sack = tmp;
				}
				return;
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
489
/* Core window tracking: decide whether this segment's SEQ/ACK/SACK values
 * satisfy bounds I-IV described in the comment block above, updating the
 * per-direction tracking state as a side effect. Returns true if the
 * segment is acceptable (always true when liberal mode is enabled). */
static bool tcp_in_window(const struct nf_conn *ct,
			  struct ip_ct_tcp *state,
			  enum ip_conntrack_dir dir,
			  unsigned int index,
			  const struct sk_buff *skb,
			  unsigned int dataoff,
			  const struct tcphdr *tcph)
{
	struct net *net = nf_ct_net(ct);
	struct nf_tcp_net *tn = tcp_pernet(net);
	struct ip_ct_tcp_state *sender = &state->seen[dir];
	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	__u32 seq, ack, sack, end, win, swin;
	u16 win_raw;
	s32 receiver_offset;
	bool res, in_recv_win;

	/*
	 * Get the required data from the packet.
	 */
	seq = ntohl(tcph->seq);
	ack = sack = ntohl(tcph->ack_seq);
	win_raw = ntohs(tcph->window);
	win = win_raw;
	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);

	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
		tcp_sack(skb, dataoff, tcph, &sack);

	/* Take into account NAT sequence number mangling */
	receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
	ack -= receiver_offset;
	sack -= receiver_offset;

	pr_debug("tcp_in_window: START\n");
	pr_debug("tcp_in_window: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		 sender->td_end, sender->td_maxend, sender->td_maxwin,
		 sender->td_scale,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);

	if (sender->td_maxwin == 0) {
		/*
		 * Initialize sender data.
		 */
		if (tcph->syn) {
			/*
			 * SYN-ACK in reply to a SYN
			 * or SYN from reply direction in simultaneous open.
			 */
			sender->td_end =
			sender->td_maxend = end;
			sender->td_maxwin = (win == 0 ? 1 : win);

			tcp_options(skb, dataoff, tcph, sender);
			/*
			 * RFC 1323:
			 * Both sides must send the Window Scale option
			 * to enable window scaling in either direction.
			 */
			if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
			      && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
				sender->td_scale =
				receiver->td_scale = 0;
			if (!tcph->ack)
				/* Simultaneous open */
				return true;
		} else {
			/*
			 * We are in the middle of a connection,
			 * its history is lost for us.
			 * Let's try to use the data from the packet.
			 */
			sender->td_end = end;
			swin = win << sender->td_scale;
			sender->td_maxwin = (swin == 0 ? 1 : swin);
			sender->td_maxend = end + sender->td_maxwin;
			/*
			 * We haven't seen traffic in the other direction yet
			 * but we have to tweak window tracking to pass III
			 * and IV until that happens.
			 */
			if (receiver->td_maxwin == 0)
				receiver->td_end = receiver->td_maxend = sack;
		}
	} else if (((state->state == TCP_CONNTRACK_SYN_SENT
		     && dir == IP_CT_DIR_ORIGINAL)
		   || (state->state == TCP_CONNTRACK_SYN_RECV
		     && dir == IP_CT_DIR_REPLY))
		   && after(end, sender->td_end)) {
		/*
		 * RFC 793: "if a TCP is reinitialized ... then it need
		 * not wait at all; it must only be sure to use sequence
		 * numbers larger than those recently used."
		 */
		sender->td_end =
		sender->td_maxend = end;
		sender->td_maxwin = (win == 0 ? 1 : win);

		tcp_options(skb, dataoff, tcph, sender);
	}

	if (!(tcph->ack)) {
		/*
		 * If there is no ACK, just pretend it was set and OK.
		 */
		ack = sack = receiver->td_end;
	} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
		    (TCP_FLAG_ACK|TCP_FLAG_RST))
		   && (ack == 0)) {
		/*
		 * Broken TCP stacks, that set ACK in RST packets as well
		 * with zero ack value.
		 */
		ack = sack = receiver->td_end;
	}

	if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
		/*
		 * RST sent answering SYN.
		 */
		seq = end = sender->td_end;

	pr_debug("tcp_in_window: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		 sender->td_end, sender->td_maxend, sender->td_maxwin,
		 sender->td_scale,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);

	/* Is the ending sequence in the receive window (if available)? */
	in_recv_win = !receiver->td_maxwin ||
		      after(end, sender->td_end - receiver->td_maxwin - 1);

	pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
		 before(seq, sender->td_maxend + 1),
		 (in_recv_win ? 1 : 0),
		 before(sack, receiver->td_end + 1),
		 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));

	if (before(seq, sender->td_maxend + 1) &&
	    in_recv_win &&
	    before(sack, receiver->td_end + 1) &&
	    after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
		/*
		 * Take into account window scaling (RFC 1323).
		 */
		if (!tcph->syn)
			win <<= sender->td_scale;

		/*
		 * Update sender data.
		 */
		swin = win + (sack - ack);
		if (sender->td_maxwin < swin)
			sender->td_maxwin = swin;
		if (after(end, sender->td_end)) {
			sender->td_end = end;
			sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
		}
		if (tcph->ack) {
			if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
				sender->td_maxack = ack;
				sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
			} else if (after(ack, sender->td_maxack))
				sender->td_maxack = ack;
		}

		/*
		 * Update receiver data.
		 */
		if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
			receiver->td_maxwin += end - sender->td_maxend;
		if (after(sack + win, receiver->td_maxend - 1)) {
			receiver->td_maxend = sack + win;
			if (win == 0)
				receiver->td_maxend++;
		}
		if (ack == receiver->td_end)
			receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;

		/*
		 * Check retransmissions.
		 */
		if (index == TCP_ACK_SET) {
			if (state->last_dir == dir
			    && state->last_seq == seq
			    && state->last_ack == ack
			    && state->last_end == end
			    && state->last_win == win_raw)
				state->retrans++;
			else {
				state->last_dir = dir;
				state->last_seq = seq;
				state->last_ack = ack;
				state->last_end = end;
				state->last_win = win_raw;
				state->retrans = 0;
			}
		}
		res = true;
	} else {
		res = false;
		if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
		    tn->tcp_be_liberal)
			res = true;
		if (!res) {
			/* Report which of bounds I-IV was violated. */
			nf_ct_l4proto_log_invalid(skb, ct,
			"%s",
			before(seq, sender->td_maxend + 1) ?
			in_recv_win ?
			before(sack, receiver->td_end + 1) ?
			after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
			: "ACK is under the lower bound (possible overly delayed ACK)"
			: "ACK is over the upper bound (ACKed data not seen yet)"
			: "SEQ is under the lower bound (already ACKed data retransmitted)"
			: "SEQ is over the upper bound (over the window of the receiver)");
		}
	}

	pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
		 "receiver end=%u maxend=%u maxwin=%u\n",
		 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);

	return res;
}
727
/* table of valid flag combinations - PUSH, ECE and CWR are always valid
 * (they are masked off before the lookup in tcp_error()). Indexed by the
 * remaining FIN/SYN/RST/ACK/URG flag byte; 1 = acceptable combination. */
static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
				 TCPHDR_URG) + 1] =
{
	[TCPHDR_SYN]				= 1,
	[TCPHDR_SYN|TCPHDR_URG]			= 1,
	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
	[TCPHDR_RST]				= 1,
	[TCPHDR_RST|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
	[TCPHDR_ACK]				= 1,
	[TCPHDR_ACK|TCPHDR_URG]			= 1,
};
742
/* Log @msg as the reason a TCP packet was rejected as invalid. */
static void tcp_error_log(const struct sk_buff *skb, struct net *net,
			  u8 pf, const char *msg)
{
	nf_l4proto_log_invalid(skb, net, pf, IPPROTO_TCP, "%s", msg);
}
748
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c.
 * Sanity-checks header length, checksum and flag combination; returns
 * NF_ACCEPT for well-formed packets and -NF_ACCEPT (invalid) otherwise. */
static int tcp_error(struct net *net, struct nf_conn *tmpl,
		     struct sk_buff *skb,
		     unsigned int dataoff,
		     u_int8_t pf,
		     unsigned int hooknum)
{
	const struct tcphdr *th;
	struct tcphdr _tcph;
	unsigned int tcplen = skb->len - dataoff;
	u_int8_t tcpflags;

	/* Smaller than minimal TCP header? */
	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	if (th == NULL) {
		tcp_error_log(skb, net, pf, "short packet");
		return -NF_ACCEPT;
	}

	/* Not whole TCP header or malformed packet */
	if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
		tcp_error_log(skb, net, pf, "truncated packet");
		return -NF_ACCEPT;
	}

	/* Checksum invalid? Ignore.
	 * We skip checking packets on the outgoing path
	 * because the checksum is assumed to be correct.
	 */
	/* FIXME: Source route IP option packets --RR */
	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
	    nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
		tcp_error_log(skb, net, pf, "bad checksum");
		return -NF_ACCEPT;
	}

	/* Check TCP flags. */
	tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
	if (!tcp_valid_flags[tcpflags]) {
		tcp_error_log(skb, net, pf, "invalid tcp flag combination");
		return -NF_ACCEPT;
	}

	return NF_ACCEPT;
}
794
/* Return this netns's array of per-state TCP conntrack timeouts. */
static unsigned int *tcp_get_timeouts(struct net *net)
{
	return tcp_pernet(net)->timeouts;
}
799
/* True once the connection reached ESTABLISHED and was marked ASSURED. */
static bool nf_conntrack_tcp_established(const struct nf_conn *ct)
{
	return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
	       test_bit(IPS_ASSURED_BIT, &ct->status);
}
805
806 /* Returns verdict for packet, or -1 for invalid. */
807 static int tcp_packet(struct nf_conn *ct,
808 const struct sk_buff *skb,
809 unsigned int dataoff,
810 enum ip_conntrack_info ctinfo,
811 unsigned int *timeouts)
812 {
813 struct net *net = nf_ct_net(ct);
814 struct nf_tcp_net *tn = tcp_pernet(net);
815 struct nf_conntrack_tuple *tuple;
816 enum tcp_conntrack new_state, old_state;
817 enum ip_conntrack_dir dir;
818 const struct tcphdr *th;
819 struct tcphdr _tcph;
820 unsigned long timeout;
821 unsigned int index;
822
823 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
824 BUG_ON(th == NULL);
825
826 spin_lock_bh(&ct->lock);
827 old_state = ct->proto.tcp.state;
828 dir = CTINFO2DIR(ctinfo);
829 index = get_conntrack_index(th);
830 new_state = tcp_conntracks[dir][index][old_state];
831 tuple = &ct->tuplehash[dir].tuple;
832
833 switch (new_state) {
834 case TCP_CONNTRACK_SYN_SENT:
835 if (old_state < TCP_CONNTRACK_TIME_WAIT)
836 break;
837 /* RFC 1122: "When a connection is closed actively,
838 * it MUST linger in TIME-WAIT state for a time 2xMSL
839 * (Maximum Segment Lifetime). However, it MAY accept
840 * a new SYN from the remote TCP to reopen the connection
841 * directly from TIME-WAIT state, if..."
842 * We ignore the conditions because we are in the
843 * TIME-WAIT state anyway.
844 *
845 * Handle aborted connections: we and the server
846 * think there is an existing connection but the client
847 * aborts it and starts a new one.
848 */
849 if (((ct->proto.tcp.seen[dir].flags
850 | ct->proto.tcp.seen[!dir].flags)
851 & IP_CT_TCP_FLAG_CLOSE_INIT)
852 || (ct->proto.tcp.last_dir == dir
853 && ct->proto.tcp.last_index == TCP_RST_SET)) {
854 /* Attempt to reopen a closed/aborted connection.
855 * Delete this connection and look up again. */
856 spin_unlock_bh(&ct->lock);
857
858 /* Only repeat if we can actually remove the timer.
859 * Destruction may already be in progress in process
860 * context and we must give it a chance to terminate.
861 */
862 if (nf_ct_kill(ct))
863 return -NF_REPEAT;
864 return NF_DROP;
865 }
866 /* Fall through */
867 case TCP_CONNTRACK_IGNORE:
868 /* Ignored packets:
869 *
870 * Our connection entry may be out of sync, so ignore
871 * packets which may signal the real connection between
872 * the client and the server.
873 *
874 * a) SYN in ORIGINAL
875 * b) SYN/ACK in REPLY
876 * c) ACK in reply direction after initial SYN in original.
877 *
878 * If the ignored packet is invalid, the receiver will send
879 * a RST we'll catch below.
880 */
881 if (index == TCP_SYNACK_SET
882 && ct->proto.tcp.last_index == TCP_SYN_SET
883 && ct->proto.tcp.last_dir != dir
884 && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
885 /* b) This SYN/ACK acknowledges a SYN that we earlier
886 * ignored as invalid. This means that the client and
887 * the server are both in sync, while the firewall is
888 * not. We get in sync from the previously annotated
889 * values.
890 */
891 old_state = TCP_CONNTRACK_SYN_SENT;
892 new_state = TCP_CONNTRACK_SYN_RECV;
893 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
894 ct->proto.tcp.last_end;
895 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
896 ct->proto.tcp.last_end;
897 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
898 ct->proto.tcp.last_win == 0 ?
899 1 : ct->proto.tcp.last_win;
900 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
901 ct->proto.tcp.last_wscale;
902 ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
903 ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
904 ct->proto.tcp.last_flags;
905 memset(&ct->proto.tcp.seen[dir], 0,
906 sizeof(struct ip_ct_tcp_state));
907 break;
908 }
909 ct->proto.tcp.last_index = index;
910 ct->proto.tcp.last_dir = dir;
911 ct->proto.tcp.last_seq = ntohl(th->seq);
912 ct->proto.tcp.last_end =
913 segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
914 ct->proto.tcp.last_win = ntohs(th->window);
915
916 /* a) This is a SYN in ORIGINAL. The client and the server
917 * may be in sync but we are not. In that case, we annotate
918 * the TCP options and let the packet go through. If it is a
919 * valid SYN packet, the server will reply with a SYN/ACK, and
920 * then we'll get in sync. Otherwise, the server potentially
921 * responds with a challenge ACK if implementing RFC5961.
922 */
923 if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
924 struct ip_ct_tcp_state seen = {};
925
926 ct->proto.tcp.last_flags =
927 ct->proto.tcp.last_wscale = 0;
928 tcp_options(skb, dataoff, th, &seen);
929 if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
930 ct->proto.tcp.last_flags |=
931 IP_CT_TCP_FLAG_WINDOW_SCALE;
932 ct->proto.tcp.last_wscale = seen.td_scale;
933 }
934 if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
935 ct->proto.tcp.last_flags |=
936 IP_CT_TCP_FLAG_SACK_PERM;
937 }
938 /* Mark the potential for RFC5961 challenge ACK,
939 * this pose a special problem for LAST_ACK state
940 * as ACK is intrepretated as ACKing last FIN.
941 */
942 if (old_state == TCP_CONNTRACK_LAST_ACK)
943 ct->proto.tcp.last_flags |=
944 IP_CT_EXP_CHALLENGE_ACK;
945 }
946 spin_unlock_bh(&ct->lock);
947 nf_ct_l4proto_log_invalid(skb, ct, "invalid packet ignored in "
948 "state %s ", tcp_conntrack_names[old_state]);
949 return NF_ACCEPT;
950 case TCP_CONNTRACK_MAX:
951 /* Special case for SYN proxy: when the SYN to the server or
952 * the SYN/ACK from the server is lost, the client may transmit
953 * a keep-alive packet while in SYN_SENT state. This needs to
954 * be associated with the original conntrack entry in order to
955 * generate a new SYN with the correct sequence number.
956 */
957 if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
958 index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
959 ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
960 ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
961 pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
962 spin_unlock_bh(&ct->lock);
963 return NF_ACCEPT;
964 }
965
966 /* Invalid packet */
967 pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
968 dir, get_conntrack_index(th), old_state);
969 spin_unlock_bh(&ct->lock);
970 nf_ct_l4proto_log_invalid(skb, ct, "invalid state");
971 return -NF_ACCEPT;
972 case TCP_CONNTRACK_TIME_WAIT:
973 /* RFC5961 compliance cause stack to send "challenge-ACK"
974 * e.g. in response to spurious SYNs. Conntrack MUST
975 * not believe this ACK is acking last FIN.
976 */
977 if (old_state == TCP_CONNTRACK_LAST_ACK &&
978 index == TCP_ACK_SET &&
979 ct->proto.tcp.last_dir != dir &&
980 ct->proto.tcp.last_index == TCP_SYN_SET &&
981 (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
982 /* Detected RFC5961 challenge ACK */
983 ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
984 spin_unlock_bh(&ct->lock);
985 nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored");
986 return NF_ACCEPT; /* Don't change state */
987 }
988 break;
989 case TCP_CONNTRACK_CLOSE:
990 if (index != TCP_RST_SET)
991 break;
992
993 if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
994 u32 seq = ntohl(th->seq);
995
996 if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
997 /* Invalid RST */
998 spin_unlock_bh(&ct->lock);
999 nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
1000 return -NF_ACCEPT;
1001 }
1002
1003 if (!nf_conntrack_tcp_established(ct) ||
1004 seq == ct->proto.tcp.seen[!dir].td_maxack)
1005 break;
1006
1007 /* Check if rst is part of train, such as
1008 * foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
1009 * foo:80 > bar:4379: R, 235946602:235946602(0) ack 42
1010 */
1011 if (ct->proto.tcp.last_index == TCP_ACK_SET &&
1012 ct->proto.tcp.last_dir == dir &&
1013 seq == ct->proto.tcp.last_end)
1014 break;
1015
1016 /* ... RST sequence number doesn't match exactly, keep
1017 * established state to allow a possible challenge ACK.
1018 */
1019 new_state = old_state;
1020 }
1021 if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
1022 && ct->proto.tcp.last_index == TCP_SYN_SET)
1023 || (!test_bit(IPS_ASSURED_BIT, &ct->status)
1024 && ct->proto.tcp.last_index == TCP_ACK_SET))
1025 && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1026 /* RST sent to invalid SYN or ACK we had let through
1027 * at a) and c) above:
1028 *
1029 * a) SYN was in window then
1030 * c) we hold a half-open connection.
1031 *
1032 * Delete our connection entry.
1033 * We skip window checking, because packet might ACK
1034 * segments we ignored. */
1035 goto in_window;
1036 }
1037 break;
1038 default:
1039 /* Keep compilers happy. */
1040 break;
1041 }
1042
1043 if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
1044 skb, dataoff, th)) {
1045 spin_unlock_bh(&ct->lock);
1046 return -NF_ACCEPT;
1047 }
1048 in_window:
1049 /* From now on we have got in-window packets */
1050 ct->proto.tcp.last_index = index;
1051 ct->proto.tcp.last_dir = dir;
1052
1053 pr_debug("tcp_conntracks: ");
1054 nf_ct_dump_tuple(tuple);
1055 pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
1056 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
1057 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
1058 old_state, new_state);
1059
1060 ct->proto.tcp.state = new_state;
1061 if (old_state != new_state
1062 && new_state == TCP_CONNTRACK_FIN_WAIT)
1063 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1064
1065 if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1066 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1067 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1068 else if (unlikely(index == TCP_RST_SET))
1069 timeout = timeouts[TCP_CONNTRACK_CLOSE];
1070 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1071 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1072 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1073 timeout = timeouts[TCP_CONNTRACK_UNACK];
1074 else if (ct->proto.tcp.last_win == 0 &&
1075 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1076 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1077 else
1078 timeout = timeouts[new_state];
1079 spin_unlock_bh(&ct->lock);
1080
1081 if (new_state != old_state)
1082 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
1083
1084 if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1085 /* If only reply is a RST, we can consider ourselves not to
1086 have an established connection: this is a fairly common
1087 problem case, so we can delete the conntrack
1088 immediately. --RR */
1089 if (th->rst) {
1090 nf_ct_kill_acct(ct, ctinfo, skb);
1091 return NF_ACCEPT;
1092 }
1093 /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
1094 * pickup with loose=1. Avoid large ESTABLISHED timeout.
1095 */
1096 if (new_state == TCP_CONNTRACK_ESTABLISHED &&
1097 timeout > timeouts[TCP_CONNTRACK_UNACK])
1098 timeout = timeouts[TCP_CONNTRACK_UNACK];
1099 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
1100 && (old_state == TCP_CONNTRACK_SYN_RECV
1101 || old_state == TCP_CONNTRACK_ESTABLISHED)
1102 && new_state == TCP_CONNTRACK_ESTABLISHED) {
1103 /* Set ASSURED if we see see valid ack in ESTABLISHED
1104 after SYN_RECV or a valid answer for a picked up
1105 connection. */
1106 set_bit(IPS_ASSURED_BIT, &ct->status);
1107 nf_conntrack_event_cache(IPCT_ASSURED, ct);
1108 }
1109 nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1110
1111 return NF_ACCEPT;
1112 }
1113
1114 /* Called when a new connection for this protocol found. */
1115 static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1116 unsigned int dataoff, unsigned int *timeouts)
1117 {
1118 enum tcp_conntrack new_state;
1119 const struct tcphdr *th;
1120 struct tcphdr _tcph;
1121 struct net *net = nf_ct_net(ct);
1122 struct nf_tcp_net *tn = tcp_pernet(net);
1123 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
1124 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
1125
1126 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
1127 BUG_ON(th == NULL);
1128
1129 /* Don't need lock here: this conntrack not in circulation yet */
1130 new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
1131
1132 /* Invalid: delete conntrack */
1133 if (new_state >= TCP_CONNTRACK_MAX) {
1134 pr_debug("nf_ct_tcp: invalid new deleting.\n");
1135 return false;
1136 }
1137
1138 if (new_state == TCP_CONNTRACK_SYN_SENT) {
1139 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
1140 /* SYN packet */
1141 ct->proto.tcp.seen[0].td_end =
1142 segment_seq_plus_len(ntohl(th->seq), skb->len,
1143 dataoff, th);
1144 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
1145 if (ct->proto.tcp.seen[0].td_maxwin == 0)
1146 ct->proto.tcp.seen[0].td_maxwin = 1;
1147 ct->proto.tcp.seen[0].td_maxend =
1148 ct->proto.tcp.seen[0].td_end;
1149
1150 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
1151 } else if (tn->tcp_loose == 0) {
1152 /* Don't try to pick up connections. */
1153 return false;
1154 } else {
1155 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
1156 /*
1157 * We are in the middle of a connection,
1158 * its history is lost for us.
1159 * Let's try to use the data from the packet.
1160 */
1161 ct->proto.tcp.seen[0].td_end =
1162 segment_seq_plus_len(ntohl(th->seq), skb->len,
1163 dataoff, th);
1164 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
1165 if (ct->proto.tcp.seen[0].td_maxwin == 0)
1166 ct->proto.tcp.seen[0].td_maxwin = 1;
1167 ct->proto.tcp.seen[0].td_maxend =
1168 ct->proto.tcp.seen[0].td_end +
1169 ct->proto.tcp.seen[0].td_maxwin;
1170
1171 /* We assume SACK and liberal window checking to handle
1172 * window scaling */
1173 ct->proto.tcp.seen[0].flags =
1174 ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
1175 IP_CT_TCP_FLAG_BE_LIBERAL;
1176 }
1177
1178 /* tcp_packet will set them */
1179 ct->proto.tcp.last_index = TCP_NONE_SET;
1180
1181 pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
1182 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
1183 sender->td_end, sender->td_maxend, sender->td_maxwin,
1184 sender->td_scale,
1185 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
1186 receiver->td_scale);
1187 return true;
1188 }
1189
1190 static bool tcp_can_early_drop(const struct nf_conn *ct)
1191 {
1192 switch (ct->proto.tcp.state) {
1193 case TCP_CONNTRACK_FIN_WAIT:
1194 case TCP_CONNTRACK_LAST_ACK:
1195 case TCP_CONNTRACK_TIME_WAIT:
1196 case TCP_CONNTRACK_CLOSE:
1197 case TCP_CONNTRACK_CLOSE_WAIT:
1198 return true;
1199 default:
1200 break;
1201 }
1202
1203 return false;
1204 }
1205
1206 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1207
1208 #include <linux/netfilter/nfnetlink.h>
1209 #include <linux/netfilter/nfnetlink_conntrack.h>
1210
1211 static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1212 struct nf_conn *ct)
1213 {
1214 struct nlattr *nest_parms;
1215 struct nf_ct_tcp_flags tmp = {};
1216
1217 spin_lock_bh(&ct->lock);
1218 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP | NLA_F_NESTED);
1219 if (!nest_parms)
1220 goto nla_put_failure;
1221
1222 if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
1223 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1224 ct->proto.tcp.seen[0].td_scale) ||
1225 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1226 ct->proto.tcp.seen[1].td_scale))
1227 goto nla_put_failure;
1228
1229 tmp.flags = ct->proto.tcp.seen[0].flags;
1230 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1231 sizeof(struct nf_ct_tcp_flags), &tmp))
1232 goto nla_put_failure;
1233
1234 tmp.flags = ct->proto.tcp.seen[1].flags;
1235 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1236 sizeof(struct nf_ct_tcp_flags), &tmp))
1237 goto nla_put_failure;
1238 spin_unlock_bh(&ct->lock);
1239
1240 nla_nest_end(skb, nest_parms);
1241
1242 return 0;
1243
1244 nla_put_failure:
1245 spin_unlock_bh(&ct->lock);
1246 return -1;
1247 }
1248
/* Netlink policy for the nested CTA_PROTOINFO_TCP attributes accepted
 * by nlattr_to_tcp().  The two FLAGS attributes carry a binary
 * struct nf_ct_tcp_flags (value + mask pair), hence the .len check.
 */
static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
	[CTA_PROTOINFO_TCP_STATE]	    = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_WSCALE_REPLY]    = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]  = { .len = sizeof(struct nf_ct_tcp_flags) },
	[CTA_PROTOINFO_TCP_FLAGS_REPLY]	    = { .len = sizeof(struct nf_ct_tcp_flags) },
};
1256
1257 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1258 {
1259 struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
1260 struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
1261 int err;
1262
1263 /* updates could not contain anything about the private
1264 * protocol info, in that case skip the parsing */
1265 if (!pattr)
1266 return 0;
1267
1268 err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr,
1269 tcp_nla_policy, NULL);
1270 if (err < 0)
1271 return err;
1272
1273 if (tb[CTA_PROTOINFO_TCP_STATE] &&
1274 nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1275 return -EINVAL;
1276
1277 spin_lock_bh(&ct->lock);
1278 if (tb[CTA_PROTOINFO_TCP_STATE])
1279 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1280
1281 if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
1282 struct nf_ct_tcp_flags *attr =
1283 nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
1284 ct->proto.tcp.seen[0].flags &= ~attr->mask;
1285 ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
1286 }
1287
1288 if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
1289 struct nf_ct_tcp_flags *attr =
1290 nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
1291 ct->proto.tcp.seen[1].flags &= ~attr->mask;
1292 ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
1293 }
1294
1295 if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
1296 tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
1297 ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
1298 ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1299 ct->proto.tcp.seen[0].td_scale =
1300 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
1301 ct->proto.tcp.seen[1].td_scale =
1302 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
1303 }
1304 spin_unlock_bh(&ct->lock);
1305
1306 return 0;
1307 }
1308
1309 static int tcp_nlattr_size(void)
1310 {
1311 return nla_total_size(0) /* CTA_PROTOINFO_TCP */
1312 + nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
1313 }
1314
/* Netlink space needed to dump a TCP tuple (port attributes).
 * Computed lazily on first use and cached — the policy is immutable,
 * so the value never changes afterwards.
 */
static unsigned int tcp_nlattr_tuple_size(void)
{
	static unsigned int size __read_mostly;

	if (!size)
		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);

	return size;
}
1324 #endif
1325
1326 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1327
1328 #include <linux/netfilter/nfnetlink.h>
1329 #include <linux/netfilter/nfnetlink_cttimeout.h>
1330
1331 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1332 struct net *net, void *data)
1333 {
1334 unsigned int *timeouts = data;
1335 struct nf_tcp_net *tn = tcp_pernet(net);
1336 int i;
1337
1338 /* set default TCP timeouts. */
1339 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
1340 timeouts[i] = tn->timeouts[i];
1341
1342 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1343 timeouts[TCP_CONNTRACK_SYN_SENT] =
1344 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
1345 }
1346 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1347 timeouts[TCP_CONNTRACK_SYN_RECV] =
1348 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
1349 }
1350 if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1351 timeouts[TCP_CONNTRACK_ESTABLISHED] =
1352 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
1353 }
1354 if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1355 timeouts[TCP_CONNTRACK_FIN_WAIT] =
1356 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
1357 }
1358 if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1359 timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1360 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
1361 }
1362 if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1363 timeouts[TCP_CONNTRACK_LAST_ACK] =
1364 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
1365 }
1366 if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1367 timeouts[TCP_CONNTRACK_TIME_WAIT] =
1368 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
1369 }
1370 if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1371 timeouts[TCP_CONNTRACK_CLOSE] =
1372 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
1373 }
1374 if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1375 timeouts[TCP_CONNTRACK_SYN_SENT2] =
1376 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
1377 }
1378 if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1379 timeouts[TCP_CONNTRACK_RETRANS] =
1380 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
1381 }
1382 if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1383 timeouts[TCP_CONNTRACK_UNACK] =
1384 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
1385 }
1386 return 0;
1387 }
1388
1389 static int
1390 tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1391 {
1392 const unsigned int *timeouts = data;
1393
1394 if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1395 htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1396 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1397 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1398 nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1399 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1400 nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1401 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1402 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1403 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1404 nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1405 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1406 nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1407 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1408 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1409 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1410 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1411 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1412 nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1413 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1414 nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1415 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1416 goto nla_put_failure;
1417 return 0;
1418
1419 nla_put_failure:
1420 return -ENOSPC;
1421 }
1422
/* Netlink policy for nfnetlink_cttimeout TCP attributes: one u32
 * timeout value (in seconds) per TCP conntrack state.
 */
static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
	[CTA_TIMEOUT_TCP_SYN_SENT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_SYN_RECV]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_ESTABLISHED]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_FIN_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_CLOSE_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_LAST_ACK]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_TIME_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_CLOSE]		= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_SYN_SENT2]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_RETRANS]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_UNACK]		= { .type = NLA_U32 },
};
1436 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1437
1438 #ifdef CONFIG_SYSCTL
/* Template for the net.netfilter.nf_conntrack_tcp_* sysctls.  The
 * .data pointers are deliberately left NULL: tcp_kmemdup_sysctl_table()
 * duplicates this table per network namespace and points each entry at
 * the corresponding nf_tcp_net field — so the ORDER of entries here
 * must stay in sync with the index assignments made there.
 */
static struct ctl_table tcp_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_tcp_timeout_syn_sent",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_syn_recv",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_established",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_fin_wait",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_close_wait",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_last_ack",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_time_wait",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_close",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_max_retrans",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_unacknowledged",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_loose",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_tcp_be_liberal",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_tcp_max_retrans",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
1520 #endif /* CONFIG_SYSCTL */
1521
/* Duplicate the sysctl template for one netns and wire each entry's
 * .data pointer to the matching nf_tcp_net field.  Idempotent: a
 * second call for the same pernet state is a no-op.
 */
static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
				    struct nf_tcp_net *tn)
{
#ifdef CONFIG_SYSCTL
	/* Timeout states in the exact order of tcp_sysctl_table[]. */
	static const enum tcp_conntrack sysctl_order[] = {
		TCP_CONNTRACK_SYN_SENT,
		TCP_CONNTRACK_SYN_RECV,
		TCP_CONNTRACK_ESTABLISHED,
		TCP_CONNTRACK_FIN_WAIT,
		TCP_CONNTRACK_CLOSE_WAIT,
		TCP_CONNTRACK_LAST_ACK,
		TCP_CONNTRACK_TIME_WAIT,
		TCP_CONNTRACK_CLOSE,
		TCP_CONNTRACK_RETRANS,
		TCP_CONNTRACK_UNACK,
	};
	unsigned int i;

	if (pn->ctl_table)
		return 0;

	pn->ctl_table = kmemdup(tcp_sysctl_table,
				sizeof(tcp_sysctl_table),
				GFP_KERNEL);
	if (!pn->ctl_table)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(sysctl_order); i++)
		pn->ctl_table[i].data = &tn->timeouts[sysctl_order[i]];

	pn->ctl_table[10].data = &tn->tcp_loose;
	pn->ctl_table[11].data = &tn->tcp_be_liberal;
	pn->ctl_table[12].data = &tn->tcp_max_retrans;
#endif
	return 0;
}
1551
1552 static int tcp_init_net(struct net *net, u_int16_t proto)
1553 {
1554 struct nf_tcp_net *tn = tcp_pernet(net);
1555 struct nf_proto_net *pn = &tn->pn;
1556
1557 if (!pn->users) {
1558 int i;
1559
1560 for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1561 tn->timeouts[i] = tcp_timeouts[i];
1562
1563 tn->tcp_loose = nf_ct_tcp_loose;
1564 tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
1565 tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
1566 }
1567
1568 return tcp_kmemdup_sysctl_table(pn, tn);
1569 }
1570
/* Return the TCP per-netns protocol state embedded in @net. */
static struct nf_proto_net *tcp_get_net_proto(struct net *net)
{
	return &net->ct.nf_ct_proto.tcp.pn;
}
1575
/* IPv4 flavour of the TCP conntrack tracker.  Identical to the IPv6
 * variant below except for .l3proto; both share the same handlers and
 * the same per-netns state via tcp_get_net_proto().
 */
struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
{
	.l3proto		= PF_INET,
	.l4proto 		= IPPROTO_TCP,
	.pkt_to_tuple 		= tcp_pkt_to_tuple,
	.invert_tuple 		= tcp_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	.print_conntrack 	= tcp_print_conntrack,
#endif
	.packet 		= tcp_packet,
	.get_timeouts		= tcp_get_timeouts,
	.new 			= tcp_new,
	.error			= tcp_error,
	.can_early_drop		= tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.to_nlattr		= tcp_to_nlattr,
	.nlattr_size		= tcp_nlattr_size,
	.from_nlattr		= nlattr_to_tcp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
	.ctnl_timeout		= {
		.nlattr_to_obj	= tcp_timeout_nlattr_to_obj,
		.obj_to_nlattr	= tcp_timeout_obj_to_nlattr,
		.nlattr_max	= CTA_TIMEOUT_TCP_MAX,
		.obj_size	= sizeof(unsigned int) *
					TCP_CONNTRACK_TIMEOUT_MAX,
		.nla_policy	= tcp_timeout_nla_policy,
	},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
	.init_net		= tcp_init_net,
	.get_net_proto		= tcp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
1613
/* IPv6 flavour of the TCP conntrack tracker; shares every handler
 * with the IPv4 variant above, only .l3proto differs.
 */
struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
{
	.l3proto		= PF_INET6,
	.l4proto 		= IPPROTO_TCP,
	.pkt_to_tuple 		= tcp_pkt_to_tuple,
	.invert_tuple 		= tcp_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	.print_conntrack 	= tcp_print_conntrack,
#endif
	.packet 		= tcp_packet,
	.get_timeouts		= tcp_get_timeouts,
	.new 			= tcp_new,
	.error			= tcp_error,
	.can_early_drop		= tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.to_nlattr		= tcp_to_nlattr,
	.nlattr_size		= tcp_nlattr_size,
	.from_nlattr		= nlattr_to_tcp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
	.ctnl_timeout		= {
		.nlattr_to_obj	= tcp_timeout_nlattr_to_obj,
		.obj_to_nlattr	= tcp_timeout_obj_to_nlattr,
		.nlattr_max	= CTA_TIMEOUT_TCP_MAX,
		.obj_size	= sizeof(unsigned int) *
					TCP_CONNTRACK_TIMEOUT_MAX,
		.nla_policy	= tcp_timeout_nla_policy,
	},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
	.init_net		= tcp_init_net,
	.get_net_proto		= tcp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);