/* net/ipv4/tcp_metrics.c */

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>

int sysctl_tcp_nometrics_save __read_mostly;

enum tcp_metric_index {
        TCP_METRIC_RTT,
        TCP_METRIC_RTTVAR,
        TCP_METRIC_SSTHRESH,
        TCP_METRIC_CWND,
        TCP_METRIC_REORDERING,

        /* Always last. */
        TCP_METRIC_MAX,
};

struct tcp_fastopen_metrics {
        u16 mss;
        struct tcp_fastopen_cookie cookie;
};

struct tcp_metrics_block {
        struct tcp_metrics_block __rcu *tcpm_next;
        struct inetpeer_addr tcpm_addr;
        unsigned long tcpm_stamp;
        u32 tcpm_ts;
        u32 tcpm_ts_stamp;
        u32 tcpm_lock;
        u32 tcpm_vals[TCP_METRIC_MAX];
        struct tcp_fastopen_metrics tcpm_fastopen;
};

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
        return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
        tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

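/* Seed (or refresh) a cache entry from the route: remember which RTAX_*
 * metrics the route has locked, copy in the raw metric values, and clear
 * the cached timestamp and TCP Fast Open state.
 */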
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        u32 val;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        tm->tcpm_fastopen.mss = 0;
        tm->tcpm_fastopen.cookie.len = 0;
}

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *addr,
                                          unsigned int hash,
                                          bool reclaim)
{
        struct tcp_metrics_block *tm;
        struct net *net;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);
        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        tm->tcpm_addr = *addr;
        tm->tcpm_stamp = jiffies;

        tcpm_suck_dst(tm, dst);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

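/* Entries not updated for an hour are considered stale and are re-seeded
 * from the route the next time they are looked up.
 */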
#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst);
}

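/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH entries without
 * finding a match returns TCP_METRICS_RECLAIM_PTR instead of NULL, telling
 * tcp_get_metrics() to recycle the oldest entry in the bucket rather than
 * grow the chain any further.
 */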
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, addr))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_rsk(req)->rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
                hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
                break;
        default:
                return NULL;
        }

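        /* Fold the higher-order bytes into the low bits, which are all
         * that survive the hash mask applied below.
         */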
        hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

        net = dev_net(dst->dev);
        hash &= net->ipv4.tcp_metrics_hash_mask;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = tw->tw_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                tw6 = inet6_twsk((struct sock *)tw);
                *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
                hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
                break;
        default:
                return NULL;
        }

        hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

        net = twsk_net(tw);
        hash &= net->ipv4.tcp_metrics_hash_mask;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;
        bool reclaim;

        addr.family = sk->sk_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
                hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
                break;
        default:
                return NULL;
        }

        hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

        net = dev_net(dst->dev);
        hash &= net->ipv4.tcp_metrics_hash_mask;

        tm = __tcp_get_metrics(&addr, net, hash);
        reclaim = false;
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (!tm && create)
                tm = tcpm_new(dst, &addr, hash, reclaim);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time. Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt;

        /* If the newly calculated rtt is larger than the stored one,
         * store the new one. Otherwise, use EWMA. Remember, rtt
         * overestimation is always better than underestimation.
         */
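        /* Concretely, "rtt -= m >> 3" below is an EWMA with gain 1/8:
         * rtt = 7/8 * rtt + 1/8 * tp->srtt (integer arithmetic).
         */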
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt;
                else
                        rtt -= (m >> 3);
                tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
        }

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev)
                        m = tp->mdev;

                var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish, cwnd is nonsense,
                 * and ssthresh may also be invalid.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val;

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * the 3WHS. Restore it to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }

        val = tcp_metric_get(tm, TCP_METRIC_RTT);
        if (val == 0 || tp->srtt == 0) {
                rcu_read_unlock();
                goto reset;
        }
        /* Initial rtt is determined from SYN,SYN-ACK.
         * The segment is small and rtt may appear much
         * less than the real one. Use per-dst memory
         * to make it more realistic.
         *
         * A bit of theory. RTT is the time passed after a "normal" sized
         * packet is sent until it is ACKed. In normal circumstances sending
         * small packets forces the peer to delay ACKs and the calculation is
         * correct too. The algorithm is adaptive and, provided we follow
         * specs, it NEVER underestimates RTT. BUT! If the peer tries some
         * clever trick such as "quick acks" for long enough to decrease RTT
         * to a low value, and then abruptly stops doing it and starts to
         * delay ACKs, expect trouble.
         */
        val = msecs_to_jiffies(val);
        if (val > tp->srtt) {
                tp->srtt = val;
                tp->rtt_seq = tp->snd_nxt;
        }
        val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
        if (val > tp->mdev) {
                tp->mdev = val;
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        rcu_read_unlock();

        tcp_set_rto(sk);
reset:
        if (tp->srtt == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * the 3WHS. This is most likely due to retransmission,
                 * including a spurious one. Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
         * retransmitted. In light of RFC6298's more aggressive 1sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}

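/* With paws_check, the request is rejected only if the timestamp cached for
 * this peer was recorded within TCP_PAWS_MSL and is ahead of the timestamp
 * the peer sent by more than TCP_PAWS_WINDOW; otherwise the peer is "proven"
 * only if both an RTT sample and a timestamp are on record.
 */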
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }
        return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

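/* The cached Fast Open mss/cookie pair is read and written under
 * fastopen_seqlock, so tcp_fastopen_cache_get() retries its read if it
 * races with a concurrent tcp_fastopen_cache_set().
 */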
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                tfom->mss = mss;
                if (cookie->len > 0)
                        tfom->cookie = *cookie;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}

static unsigned long tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtoul(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
        int slots, size;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        size = slots * sizeof(struct tcpm_hash_bucket);

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

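        /* Note: the mask-based indexing below assumes slots is a power of
         * two, which the built-in defaults above are; a boot-time
         * tcpmhash_entries value is used as given.
         */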
        net->ipv4.tcp_metrics_hash_mask = (slots - 1);

        return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init = tcp_net_metrics_init,
        .exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        register_pernet_subsys(&tcp_net_metrics_ops);
}