net: convert sk_buff_fclones.fclone_ref from atomic_t to refcount_t
author     Reshetova, Elena <elena.reshetova@intel.com>
           Fri, 30 Jun 2017 10:07:59 +0000 (13:07 +0300)
committer  David S. Miller <davem@davemloft.net>
           Sat, 1 Jul 2017 14:39:08 +0000 (07:39 -0700)
The refcount_t type and its corresponding API should be used
instead of atomic_t when the variable is used as a reference
counter. This makes it possible to avoid accidental refcounter
overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
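
For illustration only (a hypothetical object and helpers, not part of this
patch), the usual refcount_t pattern looks roughly like this; the refcount
API saturates and WARNs on misuse instead of silently wrapping like a raw
atomic_t counter can:

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical example object, not taken from this patch. */
struct foo {
	refcount_t refs;
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (f)
		refcount_set(&f->refs, 1);	/* caller holds the first reference */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refs);		/* saturates/WARNs rather than overflowing */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))
		kfree(f);		/* last reference dropped */
}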
include/linux/skbuff.h
net/core/skbuff.c

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 90cbd86152dabb2a194bfe5601f16619f2f916d0..d0b9f3846eabfceaab04359d796733e74ff3fd54 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -915,7 +915,7 @@ struct sk_buff_fclones {
 
        struct sk_buff  skb2;
 
-       atomic_t        fclone_ref;
+       refcount_t      fclone_ref;
 };
 
 /**
@@ -935,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
        fclones = container_of(skb, struct sk_buff_fclones, skb1);
 
        return skb->fclone == SKB_FCLONE_ORIG &&
-              atomic_read(&fclones->fclone_ref) > 1 &&
+              refcount_read(&fclones->fclone_ref) > 1 &&
               fclones->skb2.sk == sk;
 }
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 45dc6620dd748afcc1cd229e9631dd69dd0efa83..659dfc0494c5b3cc9849de329e2b02c0a136a891 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
                kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
                skb->fclone = SKB_FCLONE_ORIG;
-               atomic_set(&fclones->fclone_ref, 1);
+               refcount_set(&fclones->fclone_ref, 1);
 
                fclones->skb2.fclone = SKB_FCLONE_CLONE;
        }
@@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
                 * This test would have no chance to be true for the clone,
                 * while here, branch prediction will be good.
                 */
-               if (atomic_read(&fclones->fclone_ref) == 1)
+               if (refcount_read(&fclones->fclone_ref) == 1)
                        goto fastpath;
                break;
 
@@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
                fclones = container_of(skb, struct sk_buff_fclones, skb2);
                break;
        }
-       if (!atomic_dec_and_test(&fclones->fclone_ref))
+       if (!refcount_dec_and_test(&fclones->fclone_ref))
                return;
 fastpath:
        kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                return NULL;
 
        if (skb->fclone == SKB_FCLONE_ORIG &&
-           atomic_read(&fclones->fclone_ref) == 1) {
+           refcount_read(&fclones->fclone_ref) == 1) {
                n = &fclones->skb2;
-               atomic_set(&fclones->fclone_ref, 2);
+               refcount_set(&fclones->fclone_ref, 2);
        } else {
                if (skb_pfmemalloc(skb))
                        gfp_mask |= __GFP_MEMALLOC;