/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

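/* Illustrative expansion note: each BPF_MAP_TYPE(id, ops) entry in
 * <linux/bpf_types.h>, e.g.
 *
 *	BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
 *
 * expands to "[BPF_MAP_TYPE_ARRAY] = &array_map_ops," in the table above,
 * while BPF_PROG_TYPE() entries expand to nothing here.
 */
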
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

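/* Usage sketch (illustrative, not taken from this file): a map backend
 * typically sizes its element storage up front and allocates it with the
 * helper above, falling back transparently from kmalloc to vmalloc:
 *
 *	elems = bpf_map_area_alloc((u64) max_entries * round_up(value_size, 8));
 *	if (!elems)
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	bpf_map_area_free(elems);
 *
 * The variable names here are hypothetical; see the array and hash map
 * implementations for real callers.
 */
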
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

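/* Worked example (illustrative): with 4 KiB pages and RLIMIT_MEMLOCK set to
 * 64 MiB, memlock_limit above is 64 MiB >> PAGE_SHIFT = 16384 pages, so a
 * precharge request for 20000 pages fails with -EPERM before the map does
 * any allocation.
 */
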
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	if (do_idr_lock)
		spin_lock_bh(&map_idr_lock);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);

	if (do_idr_lock)
		spin_unlock_bh(&map_idr_lock);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

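/* Illustrative expansion: for BPF_MAP_CREATE, whose last used field is
 * inner_map_fd (see below), CHECK_ATTR(BPF_MAP_CREATE) is true iff any byte
 * between the end of attr->inner_map_fd and the end of union bpf_attr is
 * non-zero, i.e. userspace set an attribute this command does not know about.
 */
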
#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

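/* Userspace view (illustrative sketch, not part of this file): a map is
 * created by filling the relevant bpf_attr fields and calling bpf(2), e.g.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * All unused fields must stay zero or CHECK_ATTR() above rejects the call
 * with -EINVAL.
 */
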
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

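/* Illustrative layout note: for the per-cpu map types handled above, the
 * value copied back to userspace is an array with one slot per possible CPU,
 * each slot padded to 8 bytes. A 12-byte value on a system with 4 possible
 * CPUs is therefore returned as round_up(12, 8) * 4 = 64 bytes, with CPU 0's
 * copy first.
 */
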
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		err = -ENOMEM;
		key = kmalloc(map->key_size, GFP_USER);
		if (!key)
			goto err_put;

		err = -EFAULT;
		if (copy_from_user(key, ukey, map->key_size) != 0)
			goto free_key;
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_prog_show_fdinfo,
#endif
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_flags

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

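/* Userspace view (illustrative sketch, not part of this file): loading a
 * program mirrors map creation, e.g.
 *
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = ptr_to_u64(insns),
 *		.insn_cnt  = insn_cnt,
 *		.license   = ptr_to_u64("GPL"),
 *	};
 *	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * ptr_to_u64() stands for the usual (__u64)(unsigned long) cast; the helper
 * name is only illustrative.
 */
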
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

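/* Illustrative iteration pattern: a privileged userspace tool can walk all
 * loaded programs by repeatedly issuing BPF_PROG_GET_NEXT_ID with start_id
 * set to the id returned by the previous call (starting from 0) until it
 * gets -ENOENT, and can then turn each id into a usable fd via
 * BPF_PROG_GET_FD_BY_ID below. The same pattern works for maps with the
 * corresponding BPF_MAP_* commands.
 */
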
#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}