/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	struct bpf_array *array;
	u64 array_size, mask64;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
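	/* Example: attr->max_entries == 5 gives fls_long(4) == 3, so
	 * mask64 == 0b111 and index_mask == 7; an unprivileged map is
	 * then rounded up to 8 entries, so 'index & index_mask' can
	 * never reach past the allocation, even speculatively.
	 */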

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.map_flags = attr->map_flags;
	array->map.numa_node = numa_node;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
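
/* Illustrative usage sketch (not part of the kernel sources): user space
 * reaches this allocator through the bpf(2) syscall, roughly:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,	(must be exactly 4, checked above)
 *		.value_size  = 64,	(arbitrary example size)
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */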

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
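/* In pseudo code the emitted program is:
 *
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	(unprivileged maps only)
 *	return array->value + index * round_up(map->value_size, 8);
 */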
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
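
/* Note: the caller-supplied 'value' buffer is expected to hold
 * round_up(value_size, 8) * num_possible_cpus() bytes, with the
 * per-cpu values laid out back to back in possible-cpu order.
 */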

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
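
/* Iteration protocol: a NULL or out-of-range key restarts the walk at
 * index 0; -ENOENT after the last slot tells the BPF_MAP_GET_NEXT_KEY
 * syscall that the map has been fully traversed.
 */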

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding programs
	 * to complete and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}
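
/* value_size is pinned to sizeof(u32) because, seen from user space,
 * the values stored in an fd-array are file descriptors; the update
 * path translates them into kernel pointers via ->map_fd_get_ptr().
 */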

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
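
/* Note: the xchg() above publishes the new pointer atomically, so BPF
 * programs running concurrently see either the old or the new value,
 * never a torn one. The old reference is dropped via ->map_fd_put_ptr(),
 * whose callbacks defer the actual free behind an RCU grace period
 * (e.g. bpf_event_entry_free_rcu() below).
 */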

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
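
/* The fput() of the perf file is deferred behind an RCU grace period:
 * a BPF program running under rcu_read_lock() may still be looking at
 * an entry that has just been replaced or deleted in the array.
 */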

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
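
/* The READ_ONCE() pairs with the xchg() in bpf_fd_array_map_update_elem():
 * a syscall may replace the inner map concurrently, so the pointer is
 * loaded exactly once before being dereferenced by the caller.
 */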

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
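
/* Relative to array_map_gen_lookup(), the extra BPF_LDX_MEM(BPF_DW, ...)
 * dereferences the stored inner-map pointer and the BPF_JEQ skips NULL
 * slots, which is why the out-of-range jumps are 5/6 here instead of 3/4.
 */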

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};