git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
mptcp: use mptcp_schedule_work instead of open-coding it
author: Paolo Abeni <pabeni@redhat.com>
Tue, 11 Apr 2023 20:42:09 +0000 (22:42 +0200)
committer: Jakub Kicinski <kuba@kernel.org>
Thu, 13 Apr 2023 16:58:55 +0000 (09:58 -0700)
Beyond reducing code duplication this also avoids scheduling
the mptcp_worker on a closed socket on some edge scenarios.

The addressed issue is actually older than the blamed commit
below, but this fix needs it as a pre-requisite.

Fixes: ba8f48f7a4d7 ("mptcp: introduce mptcp_schedule_work")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/mptcp/options.c
net/mptcp/subflow.c

index b30cea2fbf3fd0d4706573c4c41624ca3d9cfe26..355f798d575a40542195fcc724819e1de9f2e4e7 100644 (file)
@@ -1192,9 +1192,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
         */
        if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
                if (mp_opt.data_fin && mp_opt.data_len == 1 &&
-                   mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
-                   schedule_work(&msk->work))
-                       sock_hold(subflow->conn);
+                   mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
+                       mptcp_schedule_work((struct sock *)msk);
 
                return true;
        }
index a0041360ee9d95b0cf85845e98c0f157a578e59d..d34588850545703deea6b1b7864c9c90c5d63e96 100644 (file)
@@ -408,9 +408,8 @@ void mptcp_subflow_reset(struct sock *ssk)
 
        tcp_send_active_reset(ssk, GFP_ATOMIC);
        tcp_done(ssk);
-       if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
-           schedule_work(&mptcp_sk(sk)->work))
-               return; /* worker will put sk for us */
+       if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+               mptcp_schedule_work(sk);
 
        sock_put(sk);
 }
@@ -1118,8 +1117,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                                skb_ext_del(skb, SKB_EXT_MPTCP);
                                return MAPPING_OK;
                        } else {
-                               if (updated && schedule_work(&msk->work))
-                                       sock_hold((struct sock *)msk);
+                               if (updated)
+                                       mptcp_schedule_work((struct sock *)msk);
 
                                return MAPPING_DATA_FIN;
                        }
@@ -1222,17 +1221,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 /* sched mptcp worker to remove the subflow if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
-       struct sock *sk = (struct sock *)msk;
-
        if (likely(ssk->sk_state != TCP_CLOSE))
                return;
 
        if (skb_queue_empty(&ssk->sk_receive_queue) &&
-           !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
-               sock_hold(sk);
-               if (!schedule_work(&msk->work))
-                       sock_put(sk);
-       }
+           !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+               mptcp_schedule_work((struct sock *)msk);
 }
 
 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)