git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
bpf: add memlock precharge for socket local storage
author Roman Gushchin <guro@fb.com>
Thu, 30 May 2019 01:03:56 +0000 (18:03 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 31 May 2019 23:52:56 +0000 (16:52 -0700)
Socket local storage maps lack the memlock precharge check,
which is performed before the memory allocation for
most other bpf map types.

Let's add it in order to unify all map types.

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
net/core/bpf_sk_storage.c

index cc9597a877707f3de6e7bbfdec3d3c7ae9f56fbc..9a8aaf8e235d3cd5021c381e41c2ce27ea6e9591 100644
@@ -626,7 +626,9 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
        struct bpf_sk_storage_map *smap;
        unsigned int i;
        u32 nbuckets;
+       u32 pages;
        u64 cost;
+       int ret;
 
        smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
        if (!smap)
@@ -635,13 +637,19 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 
        smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
        nbuckets = 1U << smap->bucket_log;
+       cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
+       pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+       ret = bpf_map_precharge_memlock(pages);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
        smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
                                 GFP_USER | __GFP_NOWARN);
        if (!smap->buckets) {
                kfree(smap);
                return ERR_PTR(-ENOMEM);
        }
-       cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
 
        for (i = 0; i < nbuckets; i++) {
                INIT_HLIST_HEAD(&smap->buckets[i].list);
@@ -651,7 +659,7 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
        smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
        smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
                BPF_SK_STORAGE_CACHE_SIZE;
-       smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+       smap->map.pages = pages;
 
        return &smap->map;
 }
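
For reference (not part of the patch): the precharge amount is simply the map's byte
cost rounded up to whole pages, matching round_up(cost, PAGE_SIZE) >> PAGE_SHIFT in
the hunk above, and those pages are then passed to bpf_map_precharge_memlock() before
any allocation happens. Below is a minimal userspace sketch of that arithmetic only,
assuming a 4 KiB page size; the bucket count and struct sizes are placeholders, not
the kernel's real values.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096UL   /* assumed page size; the kernel uses the arch value */
#define PAGE_SHIFT 12

/* Same arithmetic as the hunk above: byte cost -> whole pages to precharge. */
static uint32_t cost_to_pages(uint64_t cost)
{
	return (uint32_t)(((cost + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT);
}

int main(void)
{
	/* Hypothetical sizes standing in for sizeof(*smap->buckets) and sizeof(*smap). */
	uint64_t bucket_size = 48, smap_size = 320;
	uint64_t nbuckets = 8;   /* 1U << bucket_log on an 8-CPU machine */
	uint64_t cost = bucket_size * nbuckets + smap_size;

	printf("cost=%llu bytes -> precharge %u page(s)\n",
	       (unsigned long long)cost, (unsigned)cost_to_pages(cost));
	return 0;
}

With these placeholder numbers the cost is 704 bytes, so a single page is precharged
against the RLIMIT_MEMLOCK budget; the same page count is later stored in
smap->map.pages so the charge can be released when the map is freed.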