net_sched: fq: take care of throttled flows before reuse
author Eric Dumazet <edumazet@google.com>
Wed, 2 May 2018 17:03:30 +0000 (10:03 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 2 May 2018 20:37:38 +0000 (16:37 -0400)
Normally, a socket cannot be freed/reused until all its TX packets
have left the qdisc and been TX-completed. However, connect(AF_UNSPEC)
allows this to happen.
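
[Editor's note: the trigger is the datagram-socket idiom below. Passing
an address whose family is AF_UNSPEC to connect() dissolves the
association, after which the socket (and the fq flow state keyed to it)
can be reused while earlier packets may still sit in the qdisc. A
minimal userspace sketch, not part of the patch; addresses are
illustrative and error handling is omitted:]

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		struct sockaddr_in dst = {
			.sin_family = AF_INET,
			.sin_port   = htons(9),	/* discard port, illustrative */
			.sin_addr   = { htonl(INADDR_LOOPBACK) },
		};

		connect(fd, (struct sockaddr *)&dst, sizeof(dst));
		send(fd, "x", 1, 0);	/* may still be queued in the qdisc */

		/* Disconnect: only sa_family matters for AF_UNSPEC. */
		struct sockaddr unspec = { .sa_family = AF_UNSPEC };
		connect(fd, &unspec, sizeof(unspec));	/* socket reusable now */

		close(fd);
		return 0;
	}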

With commit fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for
reused flows"), we cleared f->time_next_packet but took no special
action if the flow was still in the throttled rb-tree.

Since f->time_next_packet is the key used in rb-tree searches,
blindly clearing it can break rb-tree integrity; we must make sure
the flow is no longer in the rb-tree before resetting the key.
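
[Editor's note: an rb-tree, like any ordered binary tree, locates a
node by comparing keys along a root-to-leaf path. If a node's key is
rewritten while the node stays in place, searches follow the wrong
branch and miss it. A toy userspace sketch, with a plain unbalanced BST
standing in for the kernel rb-tree:]

	#include <stdio.h>

	struct node { unsigned long key; struct node *l, *r; };

	static struct node *insert(struct node *root, struct node *n)
	{
		if (!root)
			return n;
		if (n->key < root->key)
			root->l = insert(root->l, n);
		else
			root->r = insert(root->r, n);
		return root;
	}

	static struct node *search(struct node *root, unsigned long key)
	{
		while (root && root->key != key)
			root = key < root->key ? root->l : root->r;
		return root;
	}

	int main(void)
	{
		struct node a = { 100 }, b = { 200 }, c = { 300 };
		struct node *root = NULL;

		root = insert(root, &b);
		root = insert(root, &a);
		root = insert(root, &c);

		c.key = 0;	/* key cleared in place, node not re-inserted */

		/* The node sits to the right of key 200 but now holds key 0:
		 * the ordered search goes left and never finds it. */
		printf("search(0)   -> %p\n", (void *)search(root, 0));
		printf("search(300) -> %p\n", (void *)search(root, 300));
		return 0;
	}

[Both lookups print NULL: the node is unreachable under its new key and
under its old one, and any later insertion that compares against the
stale-keyed node can land on the wrong side of it.]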

Fixes: fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for reused flows")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/sch_fq.c

index a366e4c9413ab4fe4dfb16f0255cb7632ade7f1c..4808713c73b988cc3e536cff866cf18de05375fa 100644
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
        return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+       return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+       if (head->first)
+               head->last->next = flow;
+       else
+               head->first = flow;
+       head->last = flow;
+       flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+       rb_erase(&f->rate_node, &q->delayed);
+       q->throttled_flows--;
+       fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
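
[Editor's note: the new fq_flow_is_throttled() mirrors the existing
fq_flow_is_detached() above it. sch_fq overloads a flow's ->next
pointer as a state tag, pointing it at one of two static sentinel nodes
when the flow is not linked on a real list. A standalone userspace
sketch of that encoding, with a simplified struct:]

	#include <stdio.h>

	struct flow { struct flow *next; };

	/* Never dereferenced; their addresses serve as state tags. */
	static struct flow detached, throttled;

	static int flow_is_detached(const struct flow *f)
	{
		return f->next == &detached;
	}

	static int flow_is_throttled(const struct flow *f)
	{
		return f->next == &throttled;
	}

	int main(void)
	{
		struct flow f = { .next = &throttled };

		printf("throttled=%d detached=%d\n",
		       flow_is_throttled(&f), flow_is_detached(&f));
		return 0;
	}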
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-       if (head->first)
-               head->last->next = flow;
-       else
-               head->first = flow;
-       head->last = flow;
-       flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
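
[Editor's note: fq_flow_add_tail() itself is unchanged; the hunk above
only removes the old copy because the function was moved earlier in the
file so the new fq_flow_unset_throttled() can call it. In isolation it
maintains a singly linked FIFO with first/last pointers and O(1)
append. A compilable userspace rendition, simplified struct assumed:]

	#include <stdio.h>

	struct flow { struct flow *next; int id; };
	struct flow_head { struct flow *first, *last; };

	static void flow_add_tail(struct flow_head *head, struct flow *flow)
	{
		if (head->first)
			head->last->next = flow;
		else
			head->first = flow;
		head->last = flow;
		flow->next = NULL;
	}

	int main(void)
	{
		struct flow_head q = { 0 };
		struct flow a = { .id = 1 }, b = { .id = 2 };

		flow_add_tail(&q, &a);
		flow_add_tail(&q, &b);
		for (struct flow *f = q.first; f; f = f->next)
			printf("flow %d\n", f->id);	/* 1 then 2 */
		return 0;
	}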
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
+                               if (fq_flow_is_throttled(f))
+                                       fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
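
[Editor's note: the reuse path keys off the cached socket hash. When
sk->sk_hash no longer matches f->socket_hash, the socket slot behind
this flow was reused, so the flow state is reset; with this fix, the
flow is first pulled out of the throttled rb-tree before
time_next_packet, its rb-tree key, is cleared. A condensed sketch of
that ordering; field names follow the patch, but the surrounding lookup
is omitted and a plain int flag stands in for throttled-tree
membership:]

	#include <stdio.h>

	struct flow { unsigned int socket_hash; int throttled; };
	struct sock { unsigned int sk_hash; };

	static void reuse_check(struct flow *f, const struct sock *sk)
	{
		if (f->socket_hash != sk->sk_hash) {
			/* socket slot was reused by a new socket */
			f->socket_hash = sk->sk_hash;
			if (f->throttled)
				f->throttled = 0;	/* leave the "tree" first */
			/* only now is it safe to clear the rb-tree key:
			 * f->time_next_packet = 0; */
		}
	}

	int main(void)
	{
		struct flow f = { .socket_hash = 0xabcu, .throttled = 1 };
		struct sock s = { .sk_hash = 0xdefu };

		reuse_check(&f, &s);
		printf("hash=%x throttled=%d\n", f.socket_hash, f.throttled);
		return 0;
	}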
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
-               rb_erase(p, &q->delayed);
-               q->throttled_flows--;
-               fq_flow_add_tail(&q->old_flows, f);
+               fq_flow_unset_throttled(q, f);
        }
 }
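
[Editor's note: fq_check_throttled() walks the delayed rb-tree in
increasing time_next_packet order, releasing every flow whose release
time has passed and stopping at the first one still in the future,
which becomes the next timer deadline; the fix simply routes the
release through the shared fq_flow_unset_throttled() helper. A
userspace sketch of the loop, with a sorted array standing in for the
rb_first() iteration and illustrative timestamps:]

	#include <stdio.h>

	int main(void)
	{
		unsigned long long now = 150;
		unsigned long long time_next_packet[] = { 100, 120, 200, 300 };
		unsigned long long time_next_delayed_flow = ~0ULL;

		for (int i = 0; i < 4; i++) {
			if (time_next_packet[i] > now) {
				/* earliest still-throttled flow drives the timer */
				time_next_delayed_flow = time_next_packet[i];
				break;
			}
			/* expired: would move the flow to old_flows */
			printf("unthrottle flow %d\n", i);
		}
		printf("next wakeup at %llu\n", time_next_delayed_flow);
		return 0;
	}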