#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

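/* Refresh a cache entry from the route: copy the locked-metric bits and
 * the dst's RTT/RTTVAR (converted from msec to usec resolution), ssthresh,
 * cwnd and reordering values, and optionally wipe the Fast Open state.
 */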
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

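/* Hash chains longer than this are eligible for reclaim; the sentinel
 * pointer below is how a lookup reports "not found, chain too deep" so
 * the caller reuses the oldest entry instead of allocating a new one.
 */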
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

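/* Create (or recycle) a cache entry for saddr/daddr under tcp_metrics_lock.
 * If the lookup reports the chain is too deep, the oldest entry in the
 * bucket is reused in place; otherwise a new block is allocated with
 * GFP_ATOMIC and linked at the head of the chain.
 */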
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

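/* Encode a failed lookup: NULL means "not found, room to add more", the
 * reclaim sentinel means "not found and the chain is already too long".
 */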
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

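/* Walk one hash chain under rcu_read_lock() looking for an exact
 * (saddr, daddr, netns) match, counting depth for the reclaim decision.
 */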
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

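/* Look up the entry for a request sock, deriving the address pair and
 * hash from the request itself (v4 or v6); refresh it if it has expired.
 */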
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

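/* Main lookup by socket: v4-mapped v6 sockets are stored under their
 * IPv4 addresses. If no entry exists and @create is set, one is made.
 */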
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (sysctl_tcp_nometrics_save || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time. Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one. Otherwise, use EWMA. Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is nonsense and
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory: RTT is the time from when a "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets forces the peer to delay ACKs, so the calculation is still
	 * correct. The algorithm is adaptive and, provided we follow the
	 * specs, it NEVER underestimates RTT. But if the peer plays tricks,
	 * sending "quick acks" for long enough to drive RTT down to a low
	 * value and then abruptly starting to delay ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including a spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298's more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_jiffies32;
}

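/* A peer is "proven" if we already hold a cached RTT sample for this
 * address pair, i.e. a previous connection to it completed normally.
 */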
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

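/* Fast Open state inside a metrics block is protected by this seqlock:
 * readers retry with read_seqbegin()/read_seqretry() so they always see
 * a consistent (mss, cookie, syn_loss) snapshot without taking
 * tcp_metrics_lock.
 */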
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

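/* Generic netlink interface for inspecting and flushing the cache; this
 * is the family queried by userspace tools (e.g. iproute2's
 * "ip tcp_metrics" command).
 */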
static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes; the caller cancels its header on failure. */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

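/* Dump the whole table for the caller's netns. cb->args[0] and
 * cb->args[1] record the hash row and chain position, so a multi-part
 * dump resumes where the previous skb filled up.
 */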
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

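/* Parse a v4 or v6 address attribute into an inetpeer_addr, optionally
 * computing its hash. Returns 1 when neither attribute is present and
 * @optional is set, which the DEL command treats as "flush everything".
 */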
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

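/* Remove every entry belonging to @net. Each chain is unlinked under
 * tcp_metrics_lock and the blocks are freed after an RCU grace period,
 * so concurrent lockless readers stay safe.
 */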
static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};

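/* "tcpmhash_entries=" on the kernel command line overrides the
 * RAM-based default hash table size chosen at boot.
 */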
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

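/* The hash table is global: entries carry their own netns tag, so the
 * table itself is allocated only once, for init_net. Without a boot
 * override, machines with at least 128K pages of RAM (512MB assuming
 * 4KB pages) get 16K slots, smaller ones 8K.
 */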
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}