]>
Commit | Line | Data |
---|---|---|
a489b168 DDP |
1 | /*- |
2 | * Copyright (c) 2001 Daniel Hartmeier | |
3 | * Copyright (c) 2002 - 2008 Henning Brauer | |
4 | * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org> | |
5 | * Copyright (c) 2015, 2016 Nicira, Inc. | |
6 | * All rights reserved. | |
7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | |
11 | * | |
12 | * - Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions and the following disclaimer. | |
14 | * - Redistributions in binary form must reproduce the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer in the documentation and/or other materials provided | |
17 | * with the distribution. | |
18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
23 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
26 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |
27 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | |
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
30 | * POSSIBILITY OF SUCH DAMAGE. | |
31 | * | |
32 | * Effort sponsored in part by the Defense Advanced Research Projects | |
33 | * Agency (DARPA) and Air Force Research Laboratory, Air Force | |
34 | * Materiel Command, USAF, under agreement number F30602-01-2-0537. | |
35 | * | |
36 | * $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $ | |
37 | */ | |
38 | ||
39 | #include <config.h> | |
40 | ||
41 | #include "conntrack-private.h" | |
64207120 | 42 | #include "coverage.h" |
a489b168 DDP |
43 | #include "ct-dpif.h" |
44 | #include "dp-packet.h" | |
45 | #include "util.h" | |
46 | ||
/* Coverage counters for the rare/abnormal paths in TCP tracking:
 * sequence checking administratively bypassed, a segment rejected by the
 * sequence-window checks, and segments with illegal flag combinations. */
COVERAGE_DEFINE(conntrack_tcp_seq_chk_bypass);
COVERAGE_DEFINE(conntrack_tcp_seq_chk_failed);
COVERAGE_DEFINE(conntrack_invalid_tcp_flags);
50 | ||
/* Per-direction TCP tracking state.  One instance exists for each end of
 * the connection; tcp_conn_update() maintains the sequence window that
 * segments from this peer must fall into. */
struct tcp_peer {
    uint32_t seqlo;          /* Max sequence number sent */
    uint32_t seqhi;          /* Max the other end ACKd + win */
    uint16_t max_win;        /* largest window (pre scaling) */
    uint8_t wscale;          /* window scaling factor */
    enum ct_dpif_tcp_state state;
};
58 | ||
/* TCP connection: the generic conntrack entry plus one tcp_peer per
 * direction.  peer[0] tracks the original-direction sender, peer[1] the
 * reply-direction sender (see tcp_conn_update()). */
struct conn_tcp {
    struct conn up;
    struct tcp_peer peer[2]; /* 'conn' lock protected. */
};
63 | ||
/* TCP option kinds this module cares about (standard TCP numbering). */
enum {
    TCPOPT_EOL,         /* 0: end of option list. */
    TCPOPT_NOP,         /* 1: padding. */
    TCPOPT_WINDOW = 3,  /* 3: window scale. */
};
69 | ||
/* TCP sequence numbers are 32 bit integers operated
 * on with modular arithmetic.  These macros can be
 * used to compare such integers. */
#define SEQ_LT(a,b)     INT_MOD_LT(a, b)
#define SEQ_LEQ(a,b)    INT_MOD_LEQ(a, b)
#define SEQ_GT(a,b)     INT_MOD_GT(a, b)
#define SEQ_GEQ(a,b)    INT_MOD_GEQ(a, b)

#define SEQ_MIN(a, b)   INT_MOD_MIN(a, b)
#define SEQ_MAX(a, b)   INT_MOD_MAX(a, b)
80 | ||
/* Downcasts the generic 'conn' to its enclosing struct conn_tcp.
 * Valid only for connections created by tcp_new_conn(), which embeds
 * the generic entry as 'up'. */
static struct conn_tcp*
conn_tcp_cast(const struct conn* conn)
{
    return CONTAINER_OF(conn, struct conn_tcp, up);
}
86 | ||
87 | /* pf does this in in pf_normalize_tcp(), and it is called only if scrub | |
88 | * is enabled. We're not scrubbing, but this check seems reasonable. */ | |
89 | static bool | |
90 | tcp_invalid_flags(uint16_t flags) | |
91 | { | |
92 | ||
93 | if (flags & TCP_SYN) { | |
94 | if (flags & TCP_RST || flags & TCP_FIN) { | |
95 | return true; | |
96 | } | |
97 | } else { | |
98 | /* Illegal packet */ | |
99 | if (!(flags & (TCP_ACK|TCP_RST))) { | |
100 | return true; | |
101 | } | |
102 | } | |
103 | ||
104 | if (!(flags & TCP_ACK)) { | |
105 | /* These flags are only valid if ACK is set */ | |
106 | if ((flags & TCP_FIN) || (flags & TCP_PSH) || (flags & TCP_URG)) { | |
107 | return true; | |
108 | } | |
109 | } | |
110 | ||
111 | return false; | |
112 | } | |
113 | ||
#define TCP_MAX_WSCALE 14       /* Largest shift count accepted (the TCP
                                 * window-scale option's standard limit). */
#define CT_WSCALE_FLAG 0x80     /* Peer advertised a window-scale option. */
#define CT_WSCALE_UNKNOWN 0x40  /* Connection picked up mid-stream; the
                                 * negotiated scale was never seen. */
#define CT_WSCALE_MASK 0xf      /* Low bits hold the shift count itself. */
118 | ||
119 | static uint8_t | |
120 | tcp_get_wscale(const struct tcp_header *tcp) | |
121 | { | |
122 | int len = TCP_OFFSET(tcp->tcp_ctl) * 4 - sizeof *tcp; | |
123 | const uint8_t *opt = (const uint8_t *)(tcp + 1); | |
124 | uint8_t wscale = 0; | |
125 | uint8_t optlen; | |
126 | ||
127 | while (len >= 3) { | |
128 | switch (*opt) { | |
129 | case TCPOPT_EOL: | |
130 | return wscale; | |
131 | case TCPOPT_NOP: | |
132 | opt++; | |
133 | len--; | |
134 | break; | |
135 | case TCPOPT_WINDOW: | |
136 | wscale = MIN(opt[2], TCP_MAX_WSCALE); | |
137 | wscale |= CT_WSCALE_FLAG; | |
138 | /* fall through */ | |
139 | default: | |
140 | optlen = opt[1]; | |
141 | if (optlen < 2) { | |
142 | optlen = 2; | |
143 | } | |
144 | len -= optlen; | |
145 | opt += optlen; | |
146 | } | |
147 | } | |
148 | ||
149 | return wscale; | |
150 | } | |
151 | ||
64207120 DB |
152 | static bool |
153 | tcp_bypass_seq_chk(struct conntrack *ct) | |
154 | { | |
155 | if (!conntrack_get_tcp_seq_chk(ct)) { | |
156 | COVERAGE_INC(conntrack_tcp_seq_chk_bypass); | |
157 | return true; | |
158 | } | |
159 | return false; | |
160 | } | |
161 | ||
/* Updates the tracking state of TCP connection 'conn_' with segment 'pkt',
 * seen in the original ('reply' == false) or reply ('reply' == true)
 * direction at time 'now'.
 *
 * Each peer's seqlo/seqhi/max_win define the window of sequence and ack
 * numbers acceptable from that peer (van Rooij's algorithm, see the
 * reference below).
 *
 * Returns:
 *   CT_UPDATE_NEW     - plain SYN on a connection where both ends are in
 *                       FIN_WAIT_2 or later: restart tracking as new.
 *   CT_UPDATE_INVALID - illegal flag combination, or segment outside both
 *                       the strict and the permissive sequence windows.
 *   CT_UPDATE_VALID   - segment accepted; peer state and connection
 *                       expiration updated. */
static enum ct_update_res
tcp_conn_update(struct conntrack *ct, struct conn *conn_,
                struct dp_packet *pkt, bool reply, long long now)
{
    struct conn_tcp *conn = conn_tcp_cast(conn_);
    struct tcp_header *tcp = dp_packet_l4(pkt);
    /* The peer that sent 'pkt' */
    struct tcp_peer *src = &conn->peer[reply ? 1 : 0];
    /* The peer that should receive 'pkt' */
    struct tcp_peer *dst = &conn->peer[reply ? 0 : 1];
    uint8_t sws = 0, dws = 0;    /* Effective window-scale shift counts. */
    uint16_t tcp_flags = TCP_FLAGS(tcp->tcp_ctl);
    uint16_t win = ntohs(tcp->tcp_winsz);
    uint32_t ack, end, seq, orig_seq;
    uint32_t p_len = tcp_payload_length(pkt);

    if (tcp_invalid_flags(tcp_flags)) {
        COVERAGE_INC(conntrack_invalid_tcp_flags);
        return CT_UPDATE_INVALID;
    }

    /* A plain SYN while both ends are past FIN_WAIT_2 means the old
     * connection is done and a new one is starting on the same tuple. */
    if (((tcp_flags & (TCP_SYN | TCP_ACK)) == TCP_SYN)
        && dst->state >= CT_DPIF_TCPS_FIN_WAIT_2
        && src->state >= CT_DPIF_TCPS_FIN_WAIT_2) {
        src->state = dst->state = CT_DPIF_TCPS_CLOSED;
        return CT_UPDATE_NEW;
    }

    /* Window scaling only applies after the handshake (never on SYNs).
     * If both scales are known, use them; if the connection was picked
     * up mid-stream (both unknown), be maximally liberal. */
    if (src->wscale & CT_WSCALE_FLAG
        && dst->wscale & CT_WSCALE_FLAG
        && !(tcp_flags & TCP_SYN)) {

        sws = src->wscale & CT_WSCALE_MASK;
        dws = dst->wscale & CT_WSCALE_MASK;

    } else if (src->wscale & CT_WSCALE_UNKNOWN
               && dst->wscale & CT_WSCALE_UNKNOWN
               && !(tcp_flags & TCP_SYN)) {

        sws = TCP_MAX_WSCALE;
        dws = TCP_MAX_WSCALE;
    }

    /*
     * Sequence tracking algorithm from Guido van Rooij's paper:
     * http://www.madison-gurkha.com/publications/tcp_filtering/
     * tcp_filtering.ps
     */

    orig_seq = seq = ntohl(get_16aligned_be32(&tcp->tcp_seq));
    bool check_ackskew = true;
    if (src->state < CT_DPIF_TCPS_SYN_SENT) {
        /* First packet from this end. Set its state */

        ack = ntohl(get_16aligned_be32(&tcp->tcp_ack));

        /* 'end' is the sequence number just past this segment; SYN and
         * FIN each consume one sequence number. */
        end = seq + p_len;
        if (tcp_flags & TCP_SYN) {
            end++;
            if (dst->wscale & CT_WSCALE_FLAG) {
                src->wscale = tcp_get_wscale(tcp);
                if (src->wscale & CT_WSCALE_FLAG) {
                    /* Remove scale factor from initial window */
                    sws = src->wscale & CT_WSCALE_MASK;
                    win = DIV_ROUND_UP((uint32_t) win, 1 << sws);
                    dws = dst->wscale & CT_WSCALE_MASK;
                } else {
                    /* fixup other window */
                    dst->max_win <<= dst->wscale & CT_WSCALE_MASK;
                    /* in case of a retrans SYN|ACK */
                    dst->wscale = 0;
                }
            }
        }
        if (tcp_flags & TCP_FIN) {
            end++;
        }

        src->seqlo = seq;
        src->state = CT_DPIF_TCPS_SYN_SENT;
        /*
         * May need to slide the window (seqhi may have been set by
         * the crappy stack check or if we picked up the connection
         * after establishment)
         */
        if (src->seqhi == 1
            || SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)) {
            src->seqhi = end + MAX(1, dst->max_win << dws);
            /* We are either picking up a new connection or a connection which
             * was already in place.  We are more permissive in terms of
             * ackskew checking in these cases.
             */
            check_ackskew = false;
        }
        if (win > src->max_win) {
            src->max_win = win;
        }

    } else {
        ack = ntohl(get_16aligned_be32(&tcp->tcp_ack));
        end = seq + p_len;
        if (tcp_flags & TCP_SYN) {
            end++;
        }
        if (tcp_flags & TCP_FIN) {
            end++;
        }
    }

    if ((tcp_flags & TCP_ACK) == 0) {
        /* Let it pass through the ack skew check */
        ack = dst->seqlo;
    } else if ((ack == 0
                && (tcp_flags & (TCP_ACK|TCP_RST)) == (TCP_ACK|TCP_RST))
               /* broken tcp stacks do not set ack */) {
        /* Many stacks (ours included) will set the ACK number in an
         * FIN|ACK if the SYN times out -- no sequence to ACK. */
        ack = dst->seqlo;
    }

    if (seq == end) {
        /* Ease sequencing restrictions on no data packets */
        seq = src->seqlo;
        end = seq;
    }

    /* Skew between what this peer is acking and what we have seen the
     * other peer send; forced to 0 (always acceptable) when we are
     * picking up a connection (see check_ackskew above). */
    int ackskew = check_ackskew ? dst->seqlo - ack : 0;
#define MAXACKWINDOW (0xffff + 1500)    /* 1500 is an arbitrary fudge factor */
    if ((SEQ_GEQ(src->seqhi, end)
        /* Last octet inside other's window space */
        && SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws))
        /* Retrans: not more than one window back */
        && (ackskew >= -MAXACKWINDOW)
        /* Acking not more than one reassembled fragment backwards */
        && (ackskew <= (MAXACKWINDOW << sws))
        /* Acking not more than one window forward */
        && ((tcp_flags & TCP_RST) == 0 || orig_seq == src->seqlo
            || (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo)))
        || tcp_bypass_seq_chk(ct)) {
        /* Require an exact/+1 sequence match on resets when possible */

        /* update max window */
        if (src->max_win < win) {
            src->max_win = win;
        }
        /* synchronize sequencing */
        if (SEQ_GT(end, src->seqlo)) {
            src->seqlo = end;
        }
        /* slide the window of what the other end can send */
        if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) {
            dst->seqhi = ack + MAX((win << sws), 1);
        }

        /* update states */
        if (tcp_flags & TCP_SYN && src->state < CT_DPIF_TCPS_SYN_SENT) {
            src->state = CT_DPIF_TCPS_SYN_SENT;
        }
        if (tcp_flags & TCP_FIN && src->state < CT_DPIF_TCPS_CLOSING) {
            src->state = CT_DPIF_TCPS_CLOSING;
        }
        if (tcp_flags & TCP_ACK) {
            if (dst->state == CT_DPIF_TCPS_SYN_SENT) {
                dst->state = CT_DPIF_TCPS_ESTABLISHED;
            } else if (dst->state == CT_DPIF_TCPS_CLOSING) {
                dst->state = CT_DPIF_TCPS_FIN_WAIT_2;
            }
        }
        if (tcp_flags & TCP_RST) {
            src->state = dst->state = CT_DPIF_TCPS_TIME_WAIT;
        }

        /* Pick the expiration timer from the combined peer states:
         * fully closed < half closed < still opening < closing < fully
         * established. */
        if (src->state >= CT_DPIF_TCPS_FIN_WAIT_2
            && dst->state >= CT_DPIF_TCPS_FIN_WAIT_2) {
            conn_update_expiration(ct, &conn->up, CT_TM_TCP_CLOSED, now);
        } else if (src->state >= CT_DPIF_TCPS_CLOSING
                   && dst->state >= CT_DPIF_TCPS_CLOSING) {
            conn_update_expiration(ct, &conn->up, CT_TM_TCP_FIN_WAIT, now);
        } else if (src->state < CT_DPIF_TCPS_ESTABLISHED
                   || dst->state < CT_DPIF_TCPS_ESTABLISHED) {
            conn_update_expiration(ct, &conn->up, CT_TM_TCP_OPENING, now);
        } else if (src->state >= CT_DPIF_TCPS_CLOSING
                   || dst->state >= CT_DPIF_TCPS_CLOSING) {
            conn_update_expiration(ct, &conn->up, CT_TM_TCP_CLOSING, now);
        } else {
            conn_update_expiration(ct, &conn->up, CT_TM_TCP_ESTABLISHED, now);
        }
    } else if ((dst->state < CT_DPIF_TCPS_SYN_SENT
                || dst->state >= CT_DPIF_TCPS_FIN_WAIT_2
                || src->state >= CT_DPIF_TCPS_FIN_WAIT_2)
               && SEQ_GEQ(src->seqhi + MAXACKWINDOW, end)
               /* Within a window forward of the originating packet */
               && SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
        /* Within a window backward of the originating packet */

        /*
         * This currently handles three situations:
         *  1) Stupid stacks will shotgun SYNs before their peer
         *     replies.
         *  2) When PF catches an already established stream (the
         *     firewall rebooted, the state table was flushed, routes
         *     changed...)
         *  3) Packets get funky immediately after the connection
         *     closes (this should catch Solaris spurious ACK|FINs
         *     that web servers like to spew after a close)
         *
         * This must be a little more careful than the above code
         * since packet floods will also be caught here. We don't
         * update the TTL here to mitigate the damage of a packet
         * flood and so the same code can handle awkward establishment
         * and a loosened connection close.
         * In the establishment case, a correct peer response will
         * validate the connection, go through the normal state code
         * and keep updating the state TTL.
         */

        /* update max window */
        if (src->max_win < win) {
            src->max_win = win;
        }
        /* synchronize sequencing */
        if (SEQ_GT(end, src->seqlo)) {
            src->seqlo = end;
        }
        /* slide the window of what the other end can send */
        if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) {
            dst->seqhi = ack + MAX((win << sws), 1);
        }

        /*
         * Cannot set dst->seqhi here since this could be a shotgunned
         * SYN and not an already established connection.
         */

        if (tcp_flags & TCP_FIN && src->state < CT_DPIF_TCPS_CLOSING) {
            src->state = CT_DPIF_TCPS_CLOSING;
        }

        if (tcp_flags & TCP_RST) {
            src->state = dst->state = CT_DPIF_TCPS_TIME_WAIT;
        }
    } else {
        COVERAGE_INC(conntrack_tcp_seq_chk_failed);
        return CT_UPDATE_INVALID;
    }

    return CT_UPDATE_VALID;
}
411 | ||
412 | static bool | |
413 | tcp_valid_new(struct dp_packet *pkt) | |
414 | { | |
415 | struct tcp_header *tcp = dp_packet_l4(pkt); | |
416 | uint16_t tcp_flags = TCP_FLAGS(tcp->tcp_ctl); | |
417 | ||
418 | if (tcp_invalid_flags(tcp_flags)) { | |
419 | return false; | |
420 | } | |
421 | ||
422 | /* A syn+ack is not allowed to create a connection. We want to allow | |
423 | * totally new connections (syn) or already established, not partially | |
424 | * open (syn+ack). */ | |
425 | if ((tcp_flags & TCP_SYN) && (tcp_flags & TCP_ACK)) { | |
426 | return false; | |
427 | } | |
428 | ||
429 | return true; | |
430 | } | |
431 | ||
432 | static struct conn * | |
967bb5c5 | 433 | tcp_new_conn(struct conntrack *ct, struct dp_packet *pkt, long long now) |
a489b168 DDP |
434 | { |
435 | struct conn_tcp* newconn = NULL; | |
436 | struct tcp_header *tcp = dp_packet_l4(pkt); | |
437 | struct tcp_peer *src, *dst; | |
438 | uint16_t tcp_flags = TCP_FLAGS(tcp->tcp_ctl); | |
439 | ||
440 | newconn = xzalloc(sizeof *newconn); | |
441 | ||
442 | src = &newconn->peer[0]; | |
443 | dst = &newconn->peer[1]; | |
444 | ||
445 | src->seqlo = ntohl(get_16aligned_be32(&tcp->tcp_seq)); | |
446 | src->seqhi = src->seqlo + tcp_payload_length(pkt) + 1; | |
447 | ||
448 | if (tcp_flags & TCP_SYN) { | |
449 | src->seqhi++; | |
450 | src->wscale = tcp_get_wscale(tcp); | |
451 | } else { | |
452 | src->wscale = CT_WSCALE_UNKNOWN; | |
453 | dst->wscale = CT_WSCALE_UNKNOWN; | |
454 | } | |
455 | src->max_win = MAX(ntohs(tcp->tcp_winsz), 1); | |
456 | if (src->wscale & CT_WSCALE_MASK) { | |
457 | /* Remove scale factor from initial window */ | |
458 | uint8_t sws = src->wscale & CT_WSCALE_MASK; | |
459 | src->max_win = DIV_ROUND_UP((uint32_t) src->max_win, 1 << sws); | |
460 | } | |
461 | if (tcp_flags & TCP_FIN) { | |
462 | src->seqhi++; | |
463 | } | |
464 | dst->seqhi = 1; | |
465 | dst->max_win = 1; | |
466 | src->state = CT_DPIF_TCPS_SYN_SENT; | |
467 | dst->state = CT_DPIF_TCPS_CLOSED; | |
468 | ||
967bb5c5 | 469 | conn_init_expiration(ct, &newconn->up, CT_TM_TCP_FIRST_PACKET, now); |
a489b168 DDP |
470 | |
471 | return &newconn->up; | |
472 | } | |
473 | ||
4d4e68ed DDP |
474 | static uint8_t |
475 | tcp_peer_to_protoinfo_flags(const struct tcp_peer *peer) | |
476 | { | |
477 | uint8_t res = 0; | |
478 | ||
479 | if (peer->wscale & CT_WSCALE_FLAG) { | |
480 | res |= CT_DPIF_TCPF_WINDOW_SCALE; | |
481 | } | |
482 | ||
483 | if (peer->wscale & CT_WSCALE_UNKNOWN) { | |
484 | res |= CT_DPIF_TCPF_BE_LIBERAL; | |
485 | } | |
486 | ||
487 | return res; | |
488 | } | |
489 | ||
490 | static void | |
491 | tcp_conn_get_protoinfo(const struct conn *conn_, | |
492 | struct ct_dpif_protoinfo *protoinfo) | |
493 | { | |
494 | const struct conn_tcp *conn = conn_tcp_cast(conn_); | |
495 | ||
496 | protoinfo->proto = IPPROTO_TCP; | |
497 | protoinfo->tcp.state_orig = conn->peer[0].state; | |
498 | protoinfo->tcp.state_reply = conn->peer[1].state; | |
499 | ||
500 | protoinfo->tcp.wscale_orig = conn->peer[0].wscale & CT_WSCALE_MASK; | |
501 | protoinfo->tcp.wscale_reply = conn->peer[1].wscale & CT_WSCALE_MASK; | |
502 | ||
503 | protoinfo->tcp.flags_orig = tcp_peer_to_protoinfo_flags(&conn->peer[0]); | |
504 | protoinfo->tcp.flags_reply = tcp_peer_to_protoinfo_flags(&conn->peer[1]); | |
505 | } | |
506 | ||
/* TCP operations vector registered with the conntrack core. */
struct ct_l4_proto ct_proto_tcp = {
    .new_conn = tcp_new_conn,
    .valid_new = tcp_valid_new,
    .conn_update = tcp_conn_update,
    .conn_get_protoinfo = tcp_conn_get_protoinfo,
};