// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
        u16 mss;
        u16 syn_loss:10,                /* Recurring Fast Open SYN losses */
            try_exp:2;                  /* Request w/ exp. option (once) */
        unsigned long last_syn_loss;    /* Last Fast Open SYN loss */
        struct tcp_fastopen_cookie cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
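
/* For reference, in this kernel's uapi enum the kernel-maintained slots
 * are TCP_METRIC_RTT, TCP_METRIC_RTTVAR, TCP_METRIC_SSTHRESH,
 * TCP_METRIC_CWND and TCP_METRIC_REORDERING; the two trailing entries,
 * TCP_METRIC_RTT_US and TCP_METRIC_RTTVAR_US, are synthesized for
 * userspace in tcp_metrics_fill_info() below rather than stored, which
 * is why tcpm_vals[] is sized with TCP_METRIC_MAX_KERNEL.
 */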

struct tcp_metrics_block {
        struct tcp_metrics_block __rcu *tcpm_next;
        possible_net_t                  tcpm_net;
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;

        struct rcu_head                 rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
        return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu *chain;
};

static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly;
static unsigned int tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
                          const struct dst_entry *dst,
                          bool fastopen_clear)
{
        u32 msval;
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        msval = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

        msval = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
                tm->tcpm_fastopen.try_exp = 0;
                tm->tcpm_fastopen.cookie.exp = false;
                tm->tcpm_fastopen.cookie.len = 0;
        }
}
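
/* A worked example of the locking above, assuming iproute2 syntax:
 *
 *      ip route add 10.0.0.0/24 via 10.0.0.1 rtt lock 100
 *
 * locks RTAX_RTT on the dst, so tcpm_lock carries the TCP_METRIC_RTT
 * bit and tcp_update_metrics() will leave that slot alone.  Note the
 * unit change: route metrics keep RTT/RTTVAR in milliseconds while this
 * cache works in microseconds, hence the msval * USEC_PER_MSEC
 * conversions above.
 */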

#define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL

#define deref_locked(p) \
        rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *saddr,
                                          struct inetpeer_addr *daddr,
                                          unsigned int hash)
{
        struct tcp_metrics_block *tm;
        struct net *net;
        bool reclaim = false;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);

        /* While waiting for the spin-lock the cache might have been populated
         * with this entry and so we have to check again.
         */
        tm = __tcp_get_metrics(saddr, daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (tm) {
                tcpm_check_stamp(tm, dst);
                goto out_unlock;
        }

        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = deref_locked(tcp_metrics_hash[hash].chain);
                for (tm = deref_locked(oldest->tcpm_next); tm;
                     tm = deref_locked(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        write_pnet(&tm->tcpm_net, net);
        tm->tcpm_saddr = *saddr;
        tm->tcpm_daddr = *daddr;

        tcpm_suck_dst(tm, dst, true);

        if (likely(!reclaim)) {
                tm->tcpm_next = tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}
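
/* In outline, the lookup/creation protocol built on the sentinel above:
 * __tcp_get_metrics() returns a real block on a hit, NULL on a miss in
 * a short chain, or TCP_METRICS_RECLAIM_PTR once the chain is deeper
 * than TCP_METRICS_RECLAIM_DEPTH; in the latter case tcpm_new()
 * recycles the stalest block in the bucket instead of growing the
 * chain further.
 */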

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, saddr) &&
                    addr_same(&tm->tcpm_daddr, daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        saddr.family = req->rsk_ops->family;
        daddr.family = req->rsk_ops->family;
        switch (daddr.family) {
        case AF_INET:
                inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
                inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
                hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
                inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
                hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
                break;
#endif
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);

        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
                    addr_same(&tm->tcpm_daddr, &daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        if (sk->sk_family == AF_INET) {
                inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
                inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
                hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
                if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
                        inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
                        inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
                        hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
                } else {
                        inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
                        inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
                        hash = ipv6_addr_hash(&sk->sk_v6_daddr);
                }
        }
#endif
        else
                return NULL;

        net = dev_net(dst->dev);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR)
                tm = NULL;
        if (!tm && create)
                tm = tcpm_new(dst, &saddr, &daddr, hash);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        sk_dst_confirm(sk);
        if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
                return;

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt_us) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time. Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt_us;

        /* If the newly calculated rtt is larger than the stored one,
         * store the new one. Otherwise, use EWMA. Remember, rtt
         * overestimation is always better than underestimation.
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt_us;
                else
                        rtt -= (m >> 3);
                tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
        }
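
        /* The RTT update above with illustrative numbers: a stored rtt
         * of 80000 usec and a session srtt_us of 40000 give m = 40000,
         * so rtt becomes 80000 - (40000 >> 3) = 75000 usec: a gain-1/8
         * EWMA that only decays downwards, while any larger sample
         * (m <= 0) replaces the cached value outright.
         */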

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev_us)
                        m = tp->mdev_us;

                var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
        }
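
        /* Same idea for the deviation, with gain 1/4 (again
         * illustrative numbers): var = 20000 usec and m = 8000 give
         * 20000 - ((20000 - 8000) >> 2) = 17000 usec, while any
         * m >= var is adopted as the new deviation immediately.
         */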

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish: cwnd is meaningless
                 * and ssthresh may be invalid as well.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != net->ipv4.sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */

        sk_dst_confirm(sk);
        if (!dst)
                goto reset;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * 3WHS. Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val)
                tp->reordering = val;

        crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        rcu_read_unlock();
reset:
        /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
         * to seed the RTO for later data packets because SYN packets are
         * small. Use the per-dst cached values to seed the RTO but keep
         * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
         * Later the RTO will be updated immediately upon obtaining the first
         * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
         * influences the first RTO but not later RTT estimation.
         *
         * But if RTT is not available from the SYN (due to retransmits or
         * syn cookies) or the cache, force a conservative 3 second timeout.
         *
         * A bit of theory. RTT is the time that passes after a "normal"
         * sized packet is sent until it is ACKed. In normal circumstances
         * sending small packets forces the peer to delay ACKs, so the
         * calculation remains correct. The algorithm is adaptive and,
         * provided we follow the specs, it NEVER underestimates RTT. BUT!
         * If the peer plays clever tricks, sending "quick acks" for long
         * enough to drive the measured RTT down to a low value and then
         * abruptly switching to delayed ACKs, expect trouble.
         */
        if (crtt > tp->srtt_us) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
                crtt /= 8 * USEC_PER_SEC / HZ;
                inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
        } else if (tp->srtt_us == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * the 3WHS. This is most likely due to retransmission,
                 * including a spurious one. Reset the RTO back to 3 seconds
                 * from the more aggressive 1 second to avoid further
                 * spurious retransmission.
                 */
                tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
                tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
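
        /* To make the seeding above concrete (illustrative values,
         * assuming HZ == 1000): a cached TCP_METRIC_RTT of 800000
         * corresponds to a 100 msec smoothed RTT, since srtt_us is
         * scaled by 8.  Then crtt /= 8 * USEC_PER_SEC / HZ yields 100
         * jiffies, and icsk_rto becomes 100 + max(200, tcp_rto_min(sk)),
         * i.e. an initial RTO of roughly 300 msec instead of the blind
         * 1 second default.
         */
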
        /* Cut cwnd down to 1 per RFC5681 if the SYN or SYN-ACK has been
         * retransmitted. In light of RFC6298's more aggressive 1 second
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_jiffies32;
}

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
                ret = true;
        else
                ret = false;
        rcu_read_unlock();

        return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);
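
/* The fast open fields are updated outside tcp_metrics_lock, so they
 * get their own seqlock: writers serialize with write_seqlock_bh()
 * while readers spin in a read_seqbegin()/read_seqretry() loop until
 * they observe a consistent snapshot, as tcp_fastopen_cache_get()
 * below does.
 */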

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        if (cookie->len <= 0 && tfom->try_exp == 1)
                                cookie->exp = true;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost,
                            u16 try_exp)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_metrics_block *tm;

        if (!dst)
                return;
        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                if (mss)
                        tfom->mss = mss;
                if (cookie && cookie->len > 0)
                        tfom->cookie = *cookie;
                else if (try_exp > tfom->try_exp &&
                         tfom->cookie.len <= 0 && !tfom->cookie.exp)
                        tfom->try_exp = try_exp;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* The following attributes are not received for GET/DEL;
         * we keep them for reference
         */
#if 0
        [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
                                            .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes; the caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_daddr.family) {
        case AF_INET:
                if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
                        goto nla_put_failure;
                if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
                                    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
                                     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
                        goto nla_put_failure;
                if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
                                     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - tm->tcpm_stamp,
                          TCP_METRICS_ATTR_PAD) < 0)
                goto nla_put_failure;

        {
                int n = 0;

                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
                        u32 val = tm->tcpm_vals[i];

                        if (!val)
                                continue;
                        if (i == TCP_METRIC_RTT) {
                                if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
                                                val) < 0)
                                        goto nla_put_failure;
                                n++;
                                /* legacy attribute carries msec, at least 1 */
                                val = max(val / 1000, 1U);
                        }
                        if (i == TCP_METRIC_RTTVAR) {
                                if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
                                                val) < 0)
                                        goto nla_put_failure;
                                n++;
                                /* legacy attribute carries msec, at least 1 */
                                val = max(val / 1000, 1U);
                        }
                        /* nested metric attributes are 1-based: idx + 1 */
                        if (nla_put_u32(msg, i + 1, val) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                jiffies - tfom->last_syn_loss,
                                TCP_METRICS_ATTR_PAD) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

        /* cb->args[] carries the (row, col) resume point between
         * successive dump calls.
         */
        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (!net_eq(tm_net(tm), net))
                                continue;
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                           unsigned int *hash, int optional, int v4, int v6)
{
        struct nlattr *a;

        a = info->attrs[v4];
        if (a) {
                inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
                if (hash)
                        *hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
                return 0;
        }
        a = info->attrs[v6];
        if (a) {
                struct in6_addr in6;

                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                in6 = nla_get_in6_addr(a);
                inetpeer_set_addr_v6(addr, &in6);
                if (hash)
                        *hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        return __parse_nl_addr(info, addr, hash, optional,
                               TCP_METRICS_ATTR_ADDR_IPV4,
                               TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
        return __parse_nl_addr(info, addr, NULL, 0,
                               TCP_METRICS_ATTR_SADDR_IPV4,
                               TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;
        bool src = true;

        ret = parse_nl_addr(info, &daddr, &hash, 0);
        if (ret < 0)
                return ret;

        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
                    net_eq(tm_net(tm), net)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                struct tcp_metrics_block __rcu **pp;
                bool match;

                spin_lock_bh(&tcp_metrics_lock);
                pp = &hb->chain;
                for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                        match = net ? net_eq(tm_net(tm), net) :
                                      !atomic_read(&tm_net(tm)->count);
                        if (match) {
                                *pp = tm->tcpm_next;
                                kfree_rcu(tm, rcu_head);
                        } else {
                                pp = &tm->tcpm_next;
                        }
                }
                spin_unlock_bh(&tcp_metrics_lock);
        }
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;
        bool src = true, found = false;

        ret = parse_nl_addr(info, &daddr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0) {
                tcp_metrics_flush_all(net);
                return 0;
        }
        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);
        hb = tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
                    net_eq(tm_net(tm), net)) {
                        *pp = tm->tcpm_next;
                        kfree_rcu(tm, rcu_head);
                        found = true;
                } else {
                        pp = &tm->tcpm_next;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!found)
                return -ESRCH;
        return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .doit = tcp_metrics_nl_cmd_del,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .netnsok        = true,
        .module         = THIS_MODULE,
        .ops            = tcp_metrics_nl_ops,
        .n_ops          = ARRAY_SIZE(tcp_metrics_nl_ops),
};
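
/* Userspace reaches this family over generic netlink; with a recent
 * iproute2 (an assumption about the tooling, not something this file
 * mandates):
 *
 *      ip tcp_metrics show             <- TCP_METRICS_CMD_GET dump
 *      ip tcp_metrics delete 192.0.2.1 <- TCP_METRICS_CMD_DEL,
 *                                         needs CAP_NET_ADMIN
 */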

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
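
/* __setup() makes this a boot-time parameter, e.g. on the kernel
 * command line:
 *
 *      tcpmhash_entries=16384
 *
 * which overrides the memory-based sizing in tcp_net_metrics_init()
 * below.
 */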

static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        if (!net_eq(net, &init_net))
                return 0;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

        tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
        if (!tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}
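
/* Sizing example for the above: 16K slots give order_base_2() == 14,
 * and 1 << 14 buckets of a single RCU pointer each come to 128 KiB on
 * a 64-bit build; the metrics blocks themselves are allocated on
 * demand in tcpm_new().
 */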

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
        tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init           =       tcp_net_metrics_init,
        .exit_batch     =       tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                panic("Could not allocate the tcp_metrics hash table\n");

        ret = genl_register_family(&tcp_metrics_nl_family);
        if (ret < 0)
                panic("Could not register tcp_metrics generic netlink\n");
}