git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
bpf: move memory size checks to bpf_map_charge_init()
authorRoman Gushchin <guro@fb.com>
Thu, 30 May 2019 01:03:59 +0000 (18:03 -0700)
committerAlexei Starovoitov <ast@kernel.org>
Fri, 31 May 2019 23:52:56 +0000 (16:52 -0700)
Most bpf map types do similar checks and bytes-to-pages
conversion during memory allocation and charging.

Let's unify these checks by moving them into bpf_map_charge_init().

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
14 files changed:
include/linux/bpf.h
kernel/bpf/arraymap.c
kernel/bpf/cpumap.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/local_storage.c
kernel/bpf/lpm_trie.c
kernel/bpf/queue_stack_maps.c
kernel/bpf/reuseport_array.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/xskmap.c
net/core/bpf_sk_storage.c
net/core/sock_map.c

index 3c8f24f402bfd61089f5940570132bb69fb26b2d..e5a309e6a400577824fe46568629a8f97118dd52 100644 (file)
@@ -652,7 +652,7 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages);
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
                         struct bpf_map_memory *src);
index 3552da4407d9cd8e2a6cb5328d6745b9d7fe3329..0349cbf23cdb8da6647fba24beba8ad709bf6ec2 100644 (file)
@@ -117,14 +117,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        /* make sure there is no u32 overflow later in round_up() */
        cost = array_size;
-       if (cost >= U32_MAX - PAGE_SIZE)
-               return ERR_PTR(-ENOMEM);
-       if (percpu) {
+       if (percpu)
                cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
-               if (cost >= U32_MAX - PAGE_SIZE)
-                       return ERR_PTR(-ENOMEM);
-       }
-       cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
        ret = bpf_map_charge_init(&mem, cost);
        if (ret < 0)
index c633c8d68023698bc53adbb05b49c74c6ab152e6..b31a71909307a3145bf6bad52f59fc2c33552999 100644 (file)
@@ -106,12 +106,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
        /* make sure page count doesn't overflow */
        cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
        cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
-       if (cost >= U32_MAX - PAGE_SIZE)
-               goto free_cmap;
 
        /* Notice returns -EPERM on if map size is larger than memlock limit */
-       ret = bpf_map_charge_init(&cmap->map.memory,
-                                 round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+       ret = bpf_map_charge_init(&cmap->map.memory, cost);
        if (ret) {
                err = ret;
                goto free_cmap;
index 371bd880ed589d1a3f95565473a246516049369d..5ae7cce5ef16d00728dd9a0b3f69adbaeb9a8c2e 100644 (file)
@@ -108,12 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
        /* make sure page count doesn't overflow */
        cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
        cost += dev_map_bitmap_size(attr) * num_possible_cpus();
-       if (cost >= U32_MAX - PAGE_SIZE)
-               goto free_dtab;
 
        /* if map size is larger than memlock limit, reject it */
-       err = bpf_map_charge_init(&dtab->map.memory,
-                                 round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+       err = bpf_map_charge_init(&dtab->map.memory, cost);
        if (err)
                goto free_dtab;
 
index b0bdc7b040adc55c5cd445db264b365686041606..d92e05d9979bec6bdd5571bc81a7410b73a715b1 100644 (file)
@@ -360,13 +360,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
        else
               cost += (u64) htab->elem_size * num_possible_cpus();
 
-       if (cost >= U32_MAX - PAGE_SIZE)
-               /* make sure page count doesn't overflow */
-               goto free_htab;
-
        /* if map size is larger than memlock limit, reject it */
-       err = bpf_map_charge_init(&htab->map.memory,
-                                 round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+       err = bpf_map_charge_init(&htab->map.memory, cost);
        if (err)
                goto free_htab;
 
index e49bfd4f4f6d5467db80d18cded8febbdb47c49e..addd6fdceec814ffe233720362405f77cef3a347 100644 (file)
@@ -273,7 +273,6 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_cgroup_storage_map *map;
        struct bpf_map_memory mem;
-       u32 pages;
        int ret;
 
        if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
@@ -293,9 +292,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
                /* max_entries is not used and enforced to be 0 */
                return ERR_PTR(-EINVAL);
 
-       pages = round_up(sizeof(struct bpf_cgroup_storage_map), PAGE_SIZE) >>
-               PAGE_SHIFT;
-       ret = bpf_map_charge_init(&mem, pages);
+       ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
        if (ret < 0)
                return ERR_PTR(ret);
 
index 6345a8d2dcd012e6cf4922cf6e070c929cd1e027..09334f13a8a0cb16c647ef4080ef93939936d00d 100644 (file)
@@ -573,13 +573,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
        cost_per_node = sizeof(struct lpm_trie_node) +
                        attr->value_size + trie->data_size;
        cost += (u64) attr->max_entries * cost_per_node;
-       if (cost >= U32_MAX - PAGE_SIZE) {
-               ret = -E2BIG;
-               goto out_err;
-       }
 
-       ret = bpf_map_charge_init(&trie->map.memory,
-                                 round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+       ret = bpf_map_charge_init(&trie->map.memory, cost);
        if (ret)
                goto out_err;
 
index 224cb0fd8f0349a1febed4a0f1894444377d9800..f697647ceb547a615152f3f86868780e4b6194a2 100644 (file)
@@ -73,10 +73,6 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
        size = (u64) attr->max_entries + 1;
        cost = queue_size = sizeof(*qs) + size * attr->value_size;
-       if (cost >= U32_MAX - PAGE_SIZE)
-               return ERR_PTR(-E2BIG);
-
-       cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
        ret = bpf_map_charge_init(&mem, cost);
        if (ret < 0)
index 5c6e25b1b9b146bcc6afc326214f21566f62aad7..50c083ba978c85dd399699bb3ad61710da842f37 100644 (file)
@@ -152,7 +152,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
        int err, numa_node = bpf_map_attr_numa_node(attr);
        struct reuseport_array *array;
        struct bpf_map_memory mem;
-       u64 cost, array_size;
+       u64 array_size;
 
        if (!capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
@@ -160,13 +160,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
        array_size = sizeof(*array);
        array_size += (u64)attr->max_entries * sizeof(struct sock *);
 
-       /* make sure there is no u32 overflow later in round_up() */
-       cost = array_size;
-       if (cost >= U32_MAX - PAGE_SIZE)
-               return ERR_PTR(-ENOMEM);
-       cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
-       err = bpf_map_charge_init(&mem, cost);
+       err = bpf_map_charge_init(&mem, array_size);
        if (err)
                return ERR_PTR(err);
 
index 8da24ca65d97c81e67597d3c3f3d41bec2b6b98f..3d86072d8e32a5065ee0477e9a1e1204afc6d301 100644 (file)
@@ -117,14 +117,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        n_buckets = roundup_pow_of_two(attr->max_entries);
 
        cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
-       if (cost >= U32_MAX - PAGE_SIZE)
-               return ERR_PTR(-E2BIG);
        cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
-       if (cost >= U32_MAX - PAGE_SIZE)
-               return ERR_PTR(-E2BIG);
-
-       err = bpf_map_charge_init(&mem,
-                                 round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+       err = bpf_map_charge_init(&mem, cost);
        if (err)
                return ERR_PTR(err);
 
index 4a5ebad9915442c4b446be709c7c52acfc77f955..4c53cbd3329d295cd2f8ce7a0a4b65fc5afe17eb 100644 (file)
@@ -205,11 +205,16 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
                atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages)
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
 {
-       struct user_struct *user = get_current_user();
+       u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
+       struct user_struct *user;
        int ret;
 
+       if (size >= U32_MAX - PAGE_SIZE)
+               return -E2BIG;
+
+       user = get_current_user();
        ret = bpf_charge_memlock(user, pages);
        if (ret) {
                free_uid(user);
index a329dab7c7a489dda2094b3d2b5600fc1412bdcc..22066c28ba614a97ad5032dc9b4bbe0d603087e6 100644 (file)
@@ -37,12 +37,9 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 
        cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
        cost += sizeof(struct list_head) * num_possible_cpus();
-       if (cost >= U32_MAX - PAGE_SIZE)
-               goto free_m;
 
        /* Notice returns -EPERM on if map size is larger than memlock limit */
-       err = bpf_map_charge_init(&m->map.memory,
-                                 round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+       err = bpf_map_charge_init(&m->map.memory, cost);
        if (err)
                goto free_m;
 
index 621a0b07ff1114544fca0ac4f750ef8acc92cfcc..f40e3d35fd9c2f88e6ced6bc0992c276f177d29c 100644 (file)
@@ -626,7 +626,6 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
        struct bpf_sk_storage_map *smap;
        unsigned int i;
        u32 nbuckets;
-       u32 pages;
        u64 cost;
        int ret;
 
@@ -638,9 +637,8 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
        smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
        nbuckets = 1U << smap->bucket_log;
        cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
-       pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-       ret = bpf_map_charge_init(&smap->map.memory, pages);
+       ret = bpf_map_charge_init(&smap->map.memory, cost);
        if (ret < 0) {
                kfree(smap);
                return ERR_PTR(ret);
index 1028c922a1498f2752d179f966eaeb7093d91321..52d4faeee18b0cecc8432ee71db16efb852b8644 100644 (file)
@@ -44,13 +44,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
        /* Make sure page count doesn't overflow. */
        cost = (u64) stab->map.max_entries * sizeof(struct sock *);
-       if (cost >= U32_MAX - PAGE_SIZE) {
-               err = -EINVAL;
-               goto free_stab;
-       }
-
-       err = bpf_map_charge_init(&stab->map.memory,
-                                 round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+       err = bpf_map_charge_init(&stab->map.memory, cost);
        if (err)
                goto free_stab;