net: annotate lockless accesses to sk->sk_pacing_shift
author    Eric Dumazet <edumazet@google.com>
          Tue, 17 Dec 2019 02:51:03 +0000 (18:51 -0800)
committer Khalid Elmously <khalid.elmously@canonical.com>
          Fri, 14 Feb 2020 06:00:53 +0000 (01:00 -0500)
BugLink: https://bugs.launchpad.net/bugs/1861710
[ Upstream commit 7c68fa2bddda6d942bd387c9ba5b4300737fd991 ]

sk->sk_pacing_shift can be read and written without lock
synchronization. This patch adds annotations to
document this fact and avoid future syzbot complaints.

This might also avoid unexpected false sharing
in sk_pacing_shift_update(), as the compiler
could remove the conditional check and always
write over sk->sk_pacing_shift:

	if (sk->sk_pacing_shift != val)
		sk->sk_pacing_shift = val;
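
For illustration only (not part of this patch): a minimal kernel-style
sketch of the annotated pattern, using a hypothetical helper;
READ_ONCE()/WRITE_ONCE() are the kernel accessors from
<linux/compiler.h>. The annotations document the lockless access and
keep the compiler from collapsing the check-then-store into an
unconditional store:

	#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */

	/* Hypothetical helper mirroring sk_pacing_shift_update().
	 * The annotated load/store pair tells the compiler the field
	 * may be accessed concurrently, so the conditional store
	 * cannot be rewritten as an unconditional one that would
	 * dirty the cache line even when the value is unchanged.
	 */
	static inline void lockless_field_update(int *field, int val)
	{
		if (READ_ONCE(*field) == val)
			return;
		WRITE_ONCE(*field, val);
	}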

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
include/net/sock.h
net/core/sock.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_output.c

diff --git a/include/net/sock.h b/include/net/sock.h
index 74e39e6af98f69977c245110969159b44b978826..5bcca88ee06776fb039f5b94d8f1b4549a940c64 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2578,9 +2578,9 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
  */
 static inline void sk_pacing_shift_update(struct sock *sk, int val)
 {
-       if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+       if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
                return;
-       sk->sk_pacing_shift = val;
+       WRITE_ONCE(sk->sk_pacing_shift, val);
 }
 
 /* if a socket is bound to a device, check that the given device
diff --git a/net/core/sock.c b/net/core/sock.c
index b4247635c4a2d5ea1098e575bb86ee94de542e26..2fae3a6e66cef2b62739d02d48911a1d98ce2682 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2911,7 +2911,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
        sk->sk_max_pacing_rate = ~0UL;
        sk->sk_pacing_rate = ~0UL;
-       sk->sk_pacing_shift = 10;
+       WRITE_ONCE(sk->sk_pacing_shift, 10);
        sk->sk_incoming_cpu = -1;
 
        sk_rx_queue_clear(sk);
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 00ade9c185ea4f29af1a01917737ba7796c445e8..ccfd5d432c6d7f004fededa9900902125f170c69 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -306,7 +306,8 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
        /* Sort of tcp_tso_autosize() but ignoring
         * driver provided sk_gso_max_size.
         */
-       bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift,
+       bytes = min_t(unsigned long,
+                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
                      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
        segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bd94d80011fb4bfdfd4c76e5cae5608377791f08..c2095382a80036c3c6c05efd42c344bf68e5a4b8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1717,7 +1717,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
        u32 bytes, segs;
 
        bytes = min_t(unsigned long,
-                     sk->sk_pacing_rate >> sk->sk_pacing_shift,
+                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
                      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
 
        /* Goal is to send at least one packet per ms,
@@ -2252,7 +2252,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 
        limit = max_t(unsigned long,
                      2 * skb->truesize,
-                     sk->sk_pacing_rate >> sk->sk_pacing_shift);
+                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
        if (sk->sk_pacing_status == SK_PACING_NONE)
                limit = min_t(unsigned long, limit,
                              sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
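
A quick worked example (illustration only, not part of the patch) of
what the annotated read feeds into: with the default sk_pacing_shift
of 10 set in sock_init_data(), tcp_tso_autosize() and
tcp_small_queue_check() cap queued data at
sk_pacing_rate >> 10 == rate / 1024 bytes, i.e. roughly one
millisecond worth of data at the current pacing rate:

	#include <stdio.h>

	int main(void)
	{
		unsigned long pacing_rate = 125000000UL; /* ~1 Gbit/s in bytes/sec */
		int pacing_shift = 10;                   /* kernel default */

		/* Same shift as the kernel code above:
		 * rate / 1024 ~= one millisecond of payload.
		 */
		unsigned long bytes = pacing_rate >> pacing_shift;

		printf("TSQ-style limit: %lu bytes (~1 ms at this rate)\n", bytes);
		return 0;
	}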