Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 082a2fd8d85b1529455cfe843f6d121fb3844a3a..8c97d062b1aea0d63600e2b72cce0a11bd7efb91 100644
@@ -329,20 +329,18 @@ nf_ct_get_tuple(const struct sk_buff *skb,
                return gre_pkt_to_tuple(skb, dataoff, net, tuple);
 #endif
        case IPPROTO_TCP:
-       case IPPROTO_UDP: /* fallthrough */
-               return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+       case IPPROTO_UDP:
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
        case IPPROTO_UDPLITE:
-               return nf_ct_get_tuple_ports(skb, dataoff, tuple);
 #endif
 #ifdef CONFIG_NF_CT_PROTO_SCTP
        case IPPROTO_SCTP:
-               return nf_ct_get_tuple_ports(skb, dataoff, tuple);
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
        case IPPROTO_DCCP:
-               return nf_ct_get_tuple_ports(skb, dataoff, tuple);
 #endif
+               /* fallthrough */
+               return nf_ct_get_tuple_ports(skb, dataoff, tuple);
        default:
                break;
        }
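
With the hunk applied, the switch funnels every port-based protocol into a
single call, with the optional protocols' case labels compiled in only when
the corresponding CONFIG_NF_CT_PROTO_* option is set.  Assembled from the
added lines above, the result reads:

        case IPPROTO_TCP:
        case IPPROTO_UDP:
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
        case IPPROTO_UDPLITE:
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
        case IPPROTO_SCTP:
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
        case IPPROTO_DCCP:
#endif
                /* fallthrough */
                return nf_ct_get_tuple_ports(skb, dataoff, tuple);
        default:
                break;
        }
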
@@ -729,6 +727,9 @@ static void nf_ct_gc_expired(struct nf_conn *ct)
        if (!refcount_inc_not_zero(&ct->ct_general.use))
                return;
 
+       /* load ->status after refcount increase */
+       smp_acquire__after_ctrl_dep();
+
        if (nf_ct_should_gc(ct))
                nf_ct_kill(ct);
 
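
refcount_inc_not_zero() only gives the caller a control dependency on the
refcount load; the added smp_acquire__after_ctrl_dep() upgrades that to
acquire ordering, so the ct->status read inside nf_ct_should_gc() cannot be
reordered before the successful refcount check.  The same barrier is added
after every refcount_inc_not_zero() in the hunks below.  A minimal
user-space sketch of the ordering idea, using C11 atomics in place of the
kernel primitives (struct obj, try_get_ref(), put_ref() and
ref_and_check_expired() are illustrative names, not part of the patch):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
        atomic_uint refs;       /* stands in for ct->ct_general.use */
        atomic_uint status;     /* stands in for ct->status */
        atomic_uint key;        /* stands in for the flow tuple (used below) */
};

/* refcount_inc_not_zero() analogue: take a reference only if one is held. */
static bool try_get_ref(struct obj *o)
{
        unsigned int old = atomic_load_explicit(&o->refs, memory_order_relaxed);

        while (old != 0) {
                if (atomic_compare_exchange_weak_explicit(&o->refs, &old,
                                                          old + 1,
                                                          memory_order_relaxed,
                                                          memory_order_relaxed))
                        return true;
        }
        return false;
}

static void put_ref(struct obj *o)
{
        atomic_fetch_sub_explicit(&o->refs, 1, memory_order_release);
}

static bool ref_and_check_expired(struct obj *o)
{
        if (!try_get_ref(o))
                return false;

        /* smp_acquire__after_ctrl_dep() analogue: the status load below must
         * not be reordered before the successful refcount check above.
         */
        atomic_thread_fence(memory_order_acquire);

        return atomic_load_explicit(&o->status, memory_order_relaxed) != 0;
        /* caller eventually drops the reference with put_ref() */
}
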
@@ -795,6 +796,9 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                 */
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
+                       /* re-check key after refcount */
+                       smp_acquire__after_ctrl_dep();
+
                        if (likely(nf_ct_key_equal(h, tuple, zone, net)))
                                goto found;
 
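
The acquire also matters for the key re-check that follows: the conntrack
slab uses SLAB_TYPESAFE_BY_RCU, so between the RCU hash walk and the
refcount increment the entry may have been freed and recycled for a
different flow, which is why nf_ct_key_equal() is evaluated again only once
the reference is held.  Continuing the sketch above (find_get() and
want_key are illustrative, not kernel interfaces):

/* Validate after taking the reference: under SLAB_TYPESAFE_BY_RCU the
 * object may have been freed and recycled for a different flow between the
 * RCU lookup and the refcount increment. */
static struct obj *find_get(struct obj *candidate, unsigned int want_key)
{
        if (!try_get_ref(candidate))
                return NULL;

        /* acquire: the key load must not move before the refcount check */
        atomic_thread_fence(memory_order_acquire);

        if (atomic_load_explicit(&candidate->key,
                                 memory_order_relaxed) != want_key) {
                put_ref(candidate);     /* recycled; caller retries the lookup */
                return NULL;
        }
        return candidate;
}
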
@@ -1387,6 +1391,9 @@ static unsigned int early_drop_list(struct net *net,
                if (!refcount_inc_not_zero(&tmp->ct_general.use))
                        continue;
 
+               /* load ->ct_net and ->status after refcount increase */
+               smp_acquire__after_ctrl_dep();
+
                /* kill only if still in same netns -- might have moved due to
                 * SLAB_TYPESAFE_BY_RCU rules.
                 *
@@ -1536,6 +1543,9 @@ static void gc_worker(struct work_struct *work)
                        if (!refcount_inc_not_zero(&tmp->ct_general.use))
                                continue;
 
+                       /* load ->status after refcount increase */
+                       smp_acquire__after_ctrl_dep();
+
                        if (gc_worker_skip_ct(tmp)) {
                                nf_ct_put(tmp);
                                continue;
@@ -1775,6 +1785,16 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        if (!exp)
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
 
+       /* Other CPU might have obtained a pointer to this object before it was
+        * released.  Because refcount is 0, refcount_inc_not_zero() will fail.
+        *
+        * After refcount_set(1) it will succeed; ensure that zeroing of
+        * ct->status and the correct ct->net pointer are visible; else other
+        * core might observe CONFIRMED bit which means the entry is valid and
+        * in the hash table, but it's not (anymore).
+        */
+       smp_wmb();
+
        /* Now it is going to be associated with an sk_buff, set refcount to 1. */
        refcount_set(&ct->ct_general.use, 1);
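
The smp_wmb() is the writer-side half of the protocol the earlier hunks
complete on the reader side: every field of the (possibly recycled) object
must be visible before the refcount becomes non-zero, because a non-zero
refcount is exactly what allows a concurrent refcount_inc_not_zero() to
succeed.  In the same user-space sketch, with a C11 release fence standing
in for smp_wmb() (smp_wmb() itself only orders stores; the release fence is
the closest portable analogue, and publish() is an illustrative name, not a
kernel function):

/* Writer side: initialise the object, then make the refcount non-zero.
 * The release fence keeps the field stores from being reordered after the
 * refcount store; it pairs with the acquire fence on the reader side. */
static void publish(struct obj *o, unsigned int key)
{
        atomic_store_explicit(&o->key, key, memory_order_relaxed);
        atomic_store_explicit(&o->status, 0, memory_order_relaxed);

        atomic_thread_fence(memory_order_release);      /* smp_wmb() analogue */

        atomic_store_explicit(&o->refs, 1, memory_order_relaxed);
}
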