/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}
void bpf_map_area_free(void *area)
{
	kvfree(area);
}
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
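/* Illustrative example of the accounting above (not part of the original
 * source, numbers are arbitrary): with RLIMIT_MEMLOCK at 64 MiB and 4 KiB
 * pages, memlock_limit is 64 MiB >> PAGE_SHIFT = 16384 pages, so a
 * precharge of `pages` succeeds only while the user's locked_vm plus
 * `pages` stays at or below 16384.
 */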
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}
static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif
static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
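/* Illustrative sketch (not part of the original source): for BPF_MAP_CREATE,
 * whose last used field is inner_map_fd, CHECK_ATTR expands roughly to
 *
 *	memchr_inv((void *) &attr->inner_map_fd + sizeof(attr->inner_map_fd),
 *		   0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, inner_map_fd) -
 *		   sizeof(attr->inner_map_fd)) != NULL
 *
 * i.e. it is true iff any byte between the end of the command's last known
 * field and the end of the union is non-zero, which lets the kernel reject
 * attributes filled in by a newer user space it does not understand.
 */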
#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
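/* Illustrative user-space sketch (not part of this file): creating an array
 * map through the bpf(2) syscall path handled by map_create() above. The
 * field values are arbitrary example choices.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	if (map_fd < 0)
 *		perror("BPF_MAP_CREATE");
 */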
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
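/* Illustrative user-space sketch (not part of this file): looking up a value
 * through the path above; map_fd is assumed to be the fd returned by the
 * earlier BPF_MAP_CREATE example. Note that for the per-CPU map types the
 * caller must supply round_up(value_size, 8) * num-possible-CPUs bytes,
 * matching the value_size computation in map_lookup_elem().
 *
 *	__u32 key = 0;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	if (syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)))
 *		perror("BPF_MAP_LOOKUP_ELEM");
 */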
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete, otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
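/* Illustrative user-space sketch (not part of this file): updating an
 * element through the path above; map_fd, key and value follow the earlier
 * lookup example, and BPF_ANY requests create-or-update semantics.
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *		.flags  = BPF_ANY,
 *	};
 *	if (syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
 *		perror("BPF_MAP_UPDATE_ELEM");
 */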
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
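/* Illustrative user-space sketch (not part of this file): walking a map's
 * keys by repeatedly calling BPF_MAP_GET_NEXT_KEY until it fails with
 * ENOENT. map_fd and the 32-bit key width follow the earlier examples; the
 * starting key is an arbitrary choice.
 *
 *	__u32 key = 0, next_key;
 *	union bpf_attr attr = {
 *		.map_fd   = map_fd,
 *		.key      = (__u64)(unsigned long)&key,
 *		.next_key = (__u64)(unsigned long)&next_key,
 *	};
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0) {
 *		// process next_key here
 *		key = next_key;
 *	}
 */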
static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}
void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}
static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}
void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif
static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);
void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_flags

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
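/* Illustrative user-space sketch (not part of this file): loading a minimal
 * socket filter through the path above. The two-instruction program simply
 * sets r0 to 0 and exits; all values are example choices.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt  = 2,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	if (prog_fd < 0)
 *		perror("BPF_PROG_LOAD");
 */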
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}
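/* Illustrative user-space sketch (not part of this file): pinning the
 * program fd from the previous example under a bpffs mount so it outlives
 * the loading process. The path is an arbitrary example.
 *
 *	union bpf_attr attr = {
 *		.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog",
 *		.bpf_fd   = prog_fd,
 *	};
 *	if (syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr)))
 *		perror("BPF_OBJ_PIN");
 */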
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}
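/* Illustrative user-space sketch (not part of this file): attaching a
 * BPF_PROG_TYPE_CGROUP_SKB program to a cgroup's ingress hook. cgroup_fd is
 * assumed to come from open()ing a cgroup v2 directory, prog_fd from the
 * earlier load example.
 *
 *	union bpf_attr attr = {
 *		.target_fd     = cgroup_fd,
 *		.attach_bpf_fd = prog_fd,
 *		.attach_type   = BPF_CGROUP_INET_INGRESS,
 *		.attach_flags  = BPF_F_ALLOW_OVERRIDE,
 *	};
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)))
 *		perror("BPF_PROG_ATTACH");
 */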
#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */
#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);