mptcp: call tcp_cleanup_rbuf on subflows
author     Paolo Abeni <pabeni@redhat.com>
           Mon, 14 Sep 2020 08:01:18 +0000 (10:01 +0200)
committer  David S. Miller <davem@davemloft.net>
           Mon, 14 Sep 2020 20:28:02 +0000 (13:28 -0700)
That is needed to let the subflows announce promptly when new
space is available in the receive buffer.

tcp_cleanup_rbuf() is currently a static function; drop the
scope modifier and add a declaration in the TCP header.

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
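
As a rough illustration of the pattern this patch introduces (not part of the
commit itself): once data has been consumed from a subflow, tcp_cleanup_rbuf()
is called on the subflow's TCP socket so it can send a window-update ACK right
away instead of waiting for the next receive. The helper name
example_announce_space() below is hypothetical, and the sketch assumes the
caller does not already own the subflow socket lock.

#include <net/tcp.h>

/*
 * Minimal sketch of the call pattern added by this patch; only
 * tcp_cleanup_rbuf(), lock_sock_fast() and unlock_sock_fast() are
 * existing kernel APIs, the helper itself is hypothetical.
 */
static void example_announce_space(struct sock *ssk, int copied)
{
	bool slow = lock_sock_fast(ssk);

	if (copied)
		/* May emit an ACK advertising the newly freed receive space. */
		tcp_cleanup_rbuf(ssk, copied);
	unlock_sock_fast(ssk, slow);
}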
include/net/tcp.h
net/ipv4/tcp.c
net/mptcp/protocol.c
net/mptcp/subflow.c

diff --git a/include/net/tcp.h b/include/net/tcp.h
index e85d564446c651680dee37ab9f5a1c430eb26377..852f0d71dd40be00a91a24cfc8dd311fbf327811 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1414,6 +1414,8 @@ static inline int tcp_full_space(const struct sock *sk)
        return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+
 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
  * If 87.5 % (7/8) of the space has been consumed, we want to override
  * SO_RCVLOWAT constraint, since we are receiving skbs with too small
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 57a5688755391de9e7651af0a8af8b212a34ebaf..d3781b6087cb1d69b85fe9f533cf7ead04f0e2dd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1527,7 +1527,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-static void tcp_cleanup_rbuf(struct sock *sk, int copied)
+void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        bool time_to_ack = false;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index d7af96a900c4551e0cb40538fee34582f7fa516a..ef0dd2f2348277292527e5821e7bd295a680ce68 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -515,6 +515,8 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
        } while (more_data_avail);
 
        *bytes += moved;
+       if (moved)
+               tcp_cleanup_rbuf(ssk, moved);
 
        return done;
 }
@@ -1424,10 +1426,14 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
                         */
                        mptcp_for_each_subflow(msk, subflow) {
                                struct sock *ssk;
+                               bool slow;
 
                                ssk = mptcp_subflow_tcp_sock(subflow);
+                               slow = lock_sock_fast(ssk);
                                WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
                                tcp_sk(ssk)->window_clamp = window_clamp;
+                               tcp_cleanup_rbuf(ssk, 1);
+                               unlock_sock_fast(ssk, slow);
                        }
                }
        }
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 8be401349d9f11e1cbb483c85df2710ee53c3986..a2ae3087e24d84036948a7cea399c0f47bd30c48 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -823,6 +823,8 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
                sk_eat_skb(ssk, skb);
        if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
                subflow->map_valid = 0;
+       if (incr)
+               tcp_cleanup_rbuf(ssk, incr);
 }
 
 static bool subflow_check_data_avail(struct sock *ssk)