git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
net: IP defrag: encapsulate rbtree defrag code into callable functions
authorPeter Oskolkov <posk@google.com>
Tue, 23 Apr 2019 17:25:31 +0000 (10:25 -0700)
committerKleber Sacilotto de Souza <kleber.souza@canonical.com>
Wed, 14 Aug 2019 09:18:49 +0000 (11:18 +0200)
BugLink: https://bugs.launchpad.net/bugs/1838349
[ Upstream commit c23f35d19db3b36ffb9e04b08f1d91565d15f84f ]

This is a refactoring patch: without changing runtime behavior,
it moves rbtree-related code from IPv4-specific files/functions
into .h/.c defrag files shared with IPv6 defragmentation code.

v2: make handling of overlapping packets match upstream.

Signed-off-by: Peter Oskolkov <posk@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Cc: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
include/linux/skbuff.h
include/net/inet_frag.h
net/ipv4/inet_fragment.c
net/ipv4/ip_fragment.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/reassembly.c

index a16a63f0ed863bd76273a143e3915ff8446233fd..3c2987e9563b9aefd5cc903d18119a6b221736a8 100644 (file)
@@ -670,12 +670,16 @@ struct sk_buff {
                                 * UDP receive path is one user.
                                 */
                                unsigned long           dev_scratch;
-                               int                     ip_defrag_offset;
                        };
                };
-               struct rb_node  rbnode; /* used in netem & tcp stack */
+               struct rb_node          rbnode; /* used in netem, ip4 defrag, and tcp stack */
+               struct list_head        list;
+       };
+
+       union {
+               struct sock             *sk;
+               int                     ip_defrag_offset;
        };
-       struct sock             *sk;
 
        union {
                ktime_t         tstamp;
index 0a4cadea92db9779a870c7aecb19e7bc4c0dd2be..008f64823c4187e3ea72f6ab90d6134ea82168c1 100644 (file)
@@ -77,7 +77,8 @@ struct inet_frag_queue {
        struct timer_list       timer;
        spinlock_t              lock;
        refcount_t              refcnt;
-       struct sk_buff          *fragments;
+       struct sk_buff          *fragments;  /* used in 6lopwpan IPv6. */
+       struct rb_root          rb_fragments; /* Used in IPv4/IPv6. */
        struct sk_buff          *fragments_tail;
        struct sk_buff          *last_run_head;
        ktime_t                 stamp;
@@ -152,4 +153,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
 
 extern const u8 ip_frag_ecn_table[16];
 
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK      0
+#define IPFRAG_DUP     1
+#define IPFRAG_OVERLAP 2
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+                          int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+                             struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+                           void *reasm_data);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
 #endif
index 89644edab8aa3ee5f4b03c9e04e3e159a7c580bc..0f2516cf88361625e33d79ac1776b4c7de4d6b3c 100644 (file)
 #include <net/sock.h>
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
+       union {
+               struct inet_skb_parm    h4;
+               struct inet6_skb_parm   h6;
+       };
+       struct sk_buff          *next_frag;
+       int                     frag_run_len;
+};
+
+#define FRAG_CB(skb)           ((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void fragcb_clear(struct sk_buff *skb)
+{
+       RB_CLEAR_NODE(&skb->rbnode);
+       FRAG_CB(skb)->next_frag = NULL;
+       FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void fragrun_append_to_last(struct inet_frag_queue *q,
+                                  struct sk_buff *skb)
+{
+       fragcb_clear(skb);
+
+       FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+       FRAG_CB(q->fragments_tail)->next_frag = skb;
+       q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+       fragcb_clear(skb);
+
+       if (q->last_run_head)
+               rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+                            &q->last_run_head->rbnode.rb_right);
+       else
+               rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+       rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+       q->fragments_tail = skb;
+       q->last_run_head = skb;
+}
 
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
@@ -134,12 +190,16 @@ void inet_frag_destroy(struct inet_frag_queue *q)
        fp = q->fragments;
        nf = q->net;
        f = nf->f;
-       while (fp) {
-               struct sk_buff *xp = fp->next;
-
-               sum_truesize += fp->truesize;
-               kfree_skb(fp);
-               fp = xp;
+       if (fp) {
+               do {
+                       struct sk_buff *xp = fp->next;
+
+                       sum_truesize += fp->truesize;
+                       kfree_skb(fp);
+                       fp = xp;
+               } while (fp);
+       } else {
+               sum_truesize = skb_rbtree_purge(&q->rb_fragments);
        }
        sum = sum_truesize + f->qsize;
 
@@ -218,3 +278,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
        return fq;
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+                          int offset, int end)
+{
+       struct sk_buff *last = q->fragments_tail;
+
+       /* RFC5722, Section 4, amended by Errata ID : 3089
+        *                          When reassembling an IPv6 datagram, if
+        *   one or more its constituent fragments is determined to be an
+        *   overlapping fragment, the entire datagram (and any constituent
+        *   fragments) MUST be silently discarded.
+        *
+        * Duplicates, however, should be ignored (i.e. skb dropped, but the
+        * queue/fragments kept for later reassembly).
+        */
+       if (!last)
+               fragrun_create(q, skb);  /* First fragment. */
+       else if (last->ip_defrag_offset + last->len < end) {
+               /* This is the common case: skb goes to the end. */
+               /* Detect and discard overlaps. */
+               if (offset < last->ip_defrag_offset + last->len)
+                       return IPFRAG_OVERLAP;
+               if (offset == last->ip_defrag_offset + last->len)
+                       fragrun_append_to_last(q, skb);
+               else
+                       fragrun_create(q, skb);
+       } else {
+               /* Binary search. Note that skb can become the first fragment,
+                * but not the last (covered above).
+                */
+               struct rb_node **rbn, *parent;
+
+               rbn = &q->rb_fragments.rb_node;
+               do {
+                       struct sk_buff *curr;
+                       int curr_run_end;
+
+                       parent = *rbn;
+                       curr = rb_to_skb(parent);
+                       curr_run_end = curr->ip_defrag_offset +
+                                       FRAG_CB(curr)->frag_run_len;
+                       if (end <= curr->ip_defrag_offset)
+                               rbn = &parent->rb_left;
+                       else if (offset >= curr_run_end)
+                               rbn = &parent->rb_right;
+                       else if (offset >= curr->ip_defrag_offset &&
+                                end <= curr_run_end)
+                               return IPFRAG_DUP;
+                       else
+                               return IPFRAG_OVERLAP;
+               } while (*rbn);
+               /* Here we have parent properly set, and rbn pointing to
+                * one of its NULL left/right children. Insert skb.
+                */
+               fragcb_clear(skb);
+               rb_link_node(&skb->rbnode, parent, rbn);
+               rb_insert_color(&skb->rbnode, &q->rb_fragments);
+       }
+
+       skb->ip_defrag_offset = offset;
+
+       return IPFRAG_OK;
+}
+EXPORT_SYMBOL(inet_frag_queue_insert);
+
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+                             struct sk_buff *parent)
+{
+       struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+       struct sk_buff **nextp;
+       int delta;
+
+       if (head != skb) {
+               fp = skb_clone(skb, GFP_ATOMIC);
+               if (!fp)
+                       return NULL;
+               FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+               if (RB_EMPTY_NODE(&skb->rbnode))
+                       FRAG_CB(parent)->next_frag = fp;
+               else
+                       rb_replace_node(&skb->rbnode, &fp->rbnode,
+                                       &q->rb_fragments);
+               if (q->fragments_tail == skb)
+                       q->fragments_tail = fp;
+               skb_morph(skb, head);
+               FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+               rb_replace_node(&head->rbnode, &skb->rbnode,
+                               &q->rb_fragments);
+               consume_skb(head);
+               head = skb;
+       }
+       WARN_ON(head->ip_defrag_offset != 0);
+
+       delta = -head->truesize;
+
+       /* Head of list must not be cloned. */
+       if (skb_unclone(head, GFP_ATOMIC))
+               return NULL;
+
+       delta += head->truesize;
+       if (delta)
+               add_frag_mem_limit(q->net, delta);
+
+       /* If the first fragment is fragmented itself, we split
+        * it to two chunks: the first with data and paged part
+        * and the second, holding only fragments.
+        */
+       if (skb_has_frag_list(head)) {
+               struct sk_buff *clone;
+               int i, plen = 0;
+
+               clone = alloc_skb(0, GFP_ATOMIC);
+               if (!clone)
+                       return NULL;
+               skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+               skb_frag_list_init(head);
+               for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+                       plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+               clone->data_len = head->data_len - plen;
+               clone->len = clone->data_len;
+               head->truesize += clone->truesize;
+               clone->csum = 0;
+               clone->ip_summed = head->ip_summed;
+               add_frag_mem_limit(q->net, clone->truesize);
+               skb_shinfo(head)->frag_list = clone;
+               nextp = &clone->next;
+       } else {
+               nextp = &skb_shinfo(head)->frag_list;
+       }
+
+       return nextp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_prepare);
+
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+                           void *reasm_data)
+{
+       struct sk_buff **nextp = (struct sk_buff **)reasm_data;
+       struct rb_node *rbn;
+       struct sk_buff *fp;
+
+       skb_push(head, head->data - skb_network_header(head));
+
+       /* Traverse the tree in order, to build frag_list. */
+       fp = FRAG_CB(head)->next_frag;
+       rbn = rb_next(&head->rbnode);
+       rb_erase(&head->rbnode, &q->rb_fragments);
+       while (rbn || fp) {
+               /* fp points to the next sk_buff in the current run;
+                * rbn points to the next run.
+                */
+               /* Go through the current run. */
+               while (fp) {
+                       *nextp = fp;
+                       nextp = &fp->next;
+                       fp->prev = NULL;
+                       memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+                       fp->sk = NULL;
+                       head->data_len += fp->len;
+                       head->len += fp->len;
+                       if (head->ip_summed != fp->ip_summed)
+                               head->ip_summed = CHECKSUM_NONE;
+                       else if (head->ip_summed == CHECKSUM_COMPLETE)
+                               head->csum = csum_add(head->csum, fp->csum);
+                       head->truesize += fp->truesize;
+                       fp = FRAG_CB(fp)->next_frag;
+               }
+               /* Move to the next run. */
+               if (rbn) {
+                       struct rb_node *rbnext = rb_next(rbn);
+
+                       fp = rb_to_skb(rbn);
+                       rb_erase(rbn, &q->rb_fragments);
+                       rbn = rbnext;
+               }
+       }
+       sub_frag_mem_limit(q->net, head->truesize);
+
+       *nextp = NULL;
+       head->next = NULL;
+       head->prev = NULL;
+       head->tstamp = q->stamp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_finish);
+
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
+{
+       struct sk_buff *head;
+
+       if (q->fragments) {
+               head = q->fragments;
+               q->fragments = head->next;
+       } else {
+               struct sk_buff *skb;
+
+               head = skb_rb_first(&q->rb_fragments);
+               if (!head)
+                       return NULL;
+               skb = FRAG_CB(head)->next_frag;
+               if (skb)
+                       rb_replace_node(&head->rbnode, &skb->rbnode,
+                                       &q->rb_fragments);
+               else
+                       rb_erase(&head->rbnode, &q->rb_fragments);
+               memset(&head->rbnode, 0, sizeof(head->rbnode));
+               barrier();
+       }
+       if (head == q->fragments_tail)
+               q->fragments_tail = NULL;
+
+       sub_frag_mem_limit(q->net, head->truesize);
+
+       return head;
+}
+EXPORT_SYMBOL(inet_frag_pull_head);
index 4c5d5f3c7806b11767827982a0e7459f4631d9d4..37a0b6e5a9a27b9d2bfc803c906607e6bc722a68 100644 (file)
@@ -187,7 +187,7 @@ static void ip_expire(struct timer_list *t)
 {
        struct inet_frag_queue *frag = from_timer(frag, t, timer);
        const struct iphdr *iph;
-       struct sk_buff *head;
+       struct sk_buff *head = NULL;
        struct net *net;
        struct ipq *qp;
        int err;
@@ -203,14 +203,31 @@ static void ip_expire(struct timer_list *t)
 
        ipq_kill(qp);
        __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
-
-       head = qp->q.fragments;
-
        __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
 
-       if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head)
+       if (!(qp->q.flags & INET_FRAG_FIRST_IN))
                goto out;
 
+       /* sk_buff::dev and sk_buff::rbnode are unionized. So we
+        * pull the head out of the tree in order to be able to
+        * deal with head->dev.
+        */
+       if (qp->q.fragments) {
+               head = qp->q.fragments;
+               qp->q.fragments = head->next;
+       } else {
+               head = skb_rb_first(&qp->q.rb_fragments);
+               if (!head)
+                       goto out;
+               rb_erase(&head->rbnode, &qp->q.rb_fragments);
+               memset(&head->rbnode, 0, sizeof(head->rbnode));
+               barrier();
+       }
+       if (head == qp->q.fragments_tail)
+               qp->q.fragments_tail = NULL;
+
+       sub_frag_mem_limit(qp->q.net, head->truesize);
+
        head->dev = dev_get_by_index_rcu(net, qp->iif);
        if (!head->dev)
                goto out;
@@ -230,16 +247,16 @@ static void ip_expire(struct timer_list *t)
            (skb_rtable(head)->rt_type != RTN_LOCAL))
                goto out;
 
-       skb_get(head);
        spin_unlock(&qp->q.lock);
        icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-       kfree_skb(head);
        goto out_rcu_unlock;
 
 out:
        spin_unlock(&qp->q.lock);
 out_rcu_unlock:
        rcu_read_unlock();
+       if (head)
+               kfree_skb(head);
        ipq_put(qp);
 }
 
@@ -282,7 +299,7 @@ static int ip_frag_too_far(struct ipq *qp)
        end = atomic_inc_return(&peer->rid);
        qp->rid = end;
 
-       rc = qp->q.fragments && (end - start) > max;
+       rc = qp->q.fragments_tail && (end - start) > max;
 
        if (rc) {
                struct net *net;
@@ -296,7 +313,6 @@ static int ip_frag_too_far(struct ipq *qp)
 
 static int ip_frag_reinit(struct ipq *qp)
 {
-       struct sk_buff *fp;
        unsigned int sum_truesize = 0;
 
        if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
@@ -304,20 +320,14 @@ static int ip_frag_reinit(struct ipq *qp)
                return -ETIMEDOUT;
        }
 
-       fp = qp->q.fragments;
-       do {
-               struct sk_buff *xp = fp->next;
-
-               sum_truesize += fp->truesize;
-               kfree_skb(fp);
-               fp = xp;
-       } while (fp);
+       sum_truesize = skb_rbtree_purge(&qp->q.rb_fragments);
        sub_frag_mem_limit(qp->q.net, sum_truesize);
 
        qp->q.flags = 0;
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
+       qp->q.rb_fragments = RB_ROOT;
        qp->q.fragments_tail = NULL;
        qp->iif = 0;
        qp->ecn = 0;
@@ -329,7 +339,8 @@ static int ip_frag_reinit(struct ipq *qp)
 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
        struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
-       struct sk_buff *prev, *next;
+       struct rb_node **rbn, *parent;
+       struct sk_buff *skb1;
        struct net_device *dev;
        unsigned int fragsize;
        int flags, offset;
@@ -392,58 +403,58 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        if (err)
                goto err;
 
-       /* Find out which fragments are in front and at the back of us
-        * in the chain of fragments so far.  We must know where to put
-        * this fragment, right?
-        */
-       prev = qp->q.fragments_tail;
-       if (!prev || prev->ip_defrag_offset < offset) {
-               next = NULL;
-               goto found;
-       }
-       prev = NULL;
-       for (next = qp->q.fragments; next != NULL; next = next->next) {
-               if (next->ip_defrag_offset >= offset)
-                       break;  /* bingo! */
-               prev = next;
-       }
+       /* Note : skb->rbnode and skb->dev share the same location. */
+       dev = skb->dev;
+       /* Makes sure compiler wont do silly aliasing games */
+       barrier();
 
-found:
        /* RFC5722, Section 4, amended by Errata ID : 3089
         *                          When reassembling an IPv6 datagram, if
         *   one or more its constituent fragments is determined to be an
         *   overlapping fragment, the entire datagram (and any constituent
         *   fragments) MUST be silently discarded.
         *
-        * We do the same here for IPv4.
+        * We do the same here for IPv4 (and increment an snmp counter).
         */
 
-       /* Is there an overlap with the previous fragment? */
-       if (prev &&
-           (prev->ip_defrag_offset + prev->len) > offset)
-               goto discard_qp;
-
-       /* Is there an overlap with the next fragment? */
-       if (next && next->ip_defrag_offset < end)
-               goto discard_qp;
+       /* Find out where to put this fragment.  */
+       skb1 = qp->q.fragments_tail;
+       if (!skb1) {
+               /* This is the first fragment we've received. */
+               rb_link_node(&skb->rbnode, NULL, &qp->q.rb_fragments.rb_node);
+               qp->q.fragments_tail = skb;
+       } else if ((skb1->ip_defrag_offset + skb1->len) < end) {
+               /* This is the common/special case: skb goes to the end. */
+               /* Detect and discard overlaps. */
+               if (offset < (skb1->ip_defrag_offset + skb1->len))
+                       goto discard_qp;
+               /* Insert after skb1. */
+               rb_link_node(&skb->rbnode, &skb1->rbnode, &skb1->rbnode.rb_right);
+               qp->q.fragments_tail = skb;
+       } else {
+               /* Binary search. Note that skb can become the first fragment, but
+                * not the last (covered above). */
+               rbn = &qp->q.rb_fragments.rb_node;
+               do {
+                       parent = *rbn;
+                       skb1 = rb_to_skb(parent);
+                       if (end <= skb1->ip_defrag_offset)
+                               rbn = &parent->rb_left;
+                       else if (offset >= skb1->ip_defrag_offset + skb1->len)
+                               rbn = &parent->rb_right;
+                       else /* Found an overlap with skb1. */
+                               goto discard_qp;
+               } while (*rbn);
+               /* Here we have parent properly set, and rbn pointing to
+                * one of its NULL left/right children. Insert skb. */
+               rb_link_node(&skb->rbnode, parent, rbn);
+       }
+       rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
 
-       /* Note : skb->ip_defrag_offset and skb->dev share the same location */
-       dev = skb->dev;
        if (dev)
                qp->iif = dev->ifindex;
-       /* Makes sure compiler wont do silly aliasing games */
-       barrier();
        skb->ip_defrag_offset = offset;
 
-       /* Insert this fragment in the chain of fragments. */
-       skb->next = next;
-       if (!next)
-               qp->q.fragments_tail = skb;
-       if (prev)
-               prev->next = skb;
-       else
-               qp->q.fragments = skb;
-
        qp->q.stamp = skb->tstamp;
        qp->q.meat += skb->len;
        qp->ecn |= ecn;
@@ -465,7 +476,7 @@ found:
                unsigned long orefdst = skb->_skb_refdst;
 
                skb->_skb_refdst = 0UL;
-               err = ip_frag_reasm(qp, prev, dev);
+               err = ip_frag_reasm(qp, skb, dev);
                skb->_skb_refdst = orefdst;
                return err;
        }
@@ -482,15 +493,15 @@ err:
        return err;
 }
 
-
 /* Build a new IP datagram from all its fragments. */
-
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
                         struct net_device *dev)
 {
        struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
        struct iphdr *iph;
-       struct sk_buff *fp, *head = qp->q.fragments;
+       struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
+       struct sk_buff **nextp; /* To build frag_list. */
+       struct rb_node *rbn;
        int len;
        int ihlen;
        int delta;
@@ -505,25 +516,20 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                goto out_fail;
        }
        /* Make the one we just received the head. */
-       if (prev) {
-               head = prev->next;
-               fp = skb_clone(head, GFP_ATOMIC);
+       if (head != skb) {
+               fp = skb_clone(skb, GFP_ATOMIC);
                if (!fp)
                        goto out_nomem;
-
-               fp->next = head->next;
-               if (!fp->next)
+               rb_replace_node(&skb->rbnode, &fp->rbnode, &qp->q.rb_fragments);
+               if (qp->q.fragments_tail == skb)
                        qp->q.fragments_tail = fp;
-               prev->next = fp;
-
-               skb_morph(head, qp->q.fragments);
-               head->next = qp->q.fragments->next;
-
-               consume_skb(qp->q.fragments);
-               qp->q.fragments = head;
+               skb_morph(skb, head);
+               rb_replace_node(&head->rbnode, &skb->rbnode,
+                               &qp->q.rb_fragments);
+               consume_skb(head);
+               head = skb;
        }
 
-       WARN_ON(!head);
        WARN_ON(head->ip_defrag_offset != 0);
 
        /* Allocate a new buffer for the datagram. */
@@ -554,24 +560,35 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        goto out_nomem;
-               clone->next = head->next;
-               head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = clone->data_len = head->data_len - plen;
-               head->data_len -= clone->len;
-               head->len -= clone->len;
+               skb->truesize += clone->truesize;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                add_frag_mem_limit(qp->q.net, clone->truesize);
+               skb_shinfo(head)->frag_list = clone;
+               nextp = &clone->next;
+       } else {
+               nextp = &skb_shinfo(head)->frag_list;
        }
 
-       skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - skb_network_header(head));
 
-       for (fp=head->next; fp; fp = fp->next) {
+       /* Traverse the tree in order, to build frag_list. */
+       rbn = rb_next(&head->rbnode);
+       rb_erase(&head->rbnode, &qp->q.rb_fragments);
+       while (rbn) {
+               struct rb_node *rbnext = rb_next(rbn);
+               fp = rb_to_skb(rbn);
+               rb_erase(rbn, &qp->q.rb_fragments);
+               rbn = rbnext;
+               *nextp = fp;
+               nextp = &fp->next;
+               fp->prev = NULL;
+               memset(&fp->rbnode, 0, sizeof(fp->rbnode));
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
@@ -582,7 +599,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
        }
        sub_frag_mem_limit(qp->q.net, head->truesize);
 
+       *nextp = NULL;
        head->next = NULL;
+       head->prev = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;
        IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
@@ -610,6 +629,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 
        __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
+       qp->q.rb_fragments = RB_ROOT;
        qp->q.fragments_tail = NULL;
        return 0;
 
index c27fbaf995db7dc7980487c5cb65cf0ec53ddb97..8c510ee00b8b268c29b9c3c37e8a6741b1483058 100644 (file)
@@ -477,6 +477,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_devic
                                          head->csum);
 
        fq->q.fragments = NULL;
+       fq->q.rb_fragments = RB_ROOT;
        fq->q.fragments_tail = NULL;
 
        return true;
index dc3ce7895538e20e205d6e0a61674c354cc73456..70fe9a6e5d32aa31791151726789ad640591e9fe 100644 (file)
@@ -473,6 +473,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
        __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
+       fq->q.rb_fragments = RB_ROOT;
        fq->q.fragments_tail = NULL;
        return 1;