/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
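
/* Illustrative sketch (not part of this file) of how a map implementation
 * uses the registration hook above; the exact initializer fields shown
 * here are an assumption modeled on in-tree map types such as hashtab.c:
 *
 *	static struct bpf_map_type_list htab_type __ro_after_init = {
 *		.ops	= &htab_ops,
 *		.type	= BPF_MAP_TYPE_HASH,
 *	};
 *
 *	static int __init register_htab_map(void)
 *	{
 *		bpf_register_map_type(&htab_type);
 *		return 0;
 *	}
 *	late_initcall(register_htab_map);
 *
 * find_and_alloc_map() then matches attr->map_type against this list.
 */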
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
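
/* Sizing note: with 4 KiB pages and PAGE_ALLOC_COSTLY_ORDER == 3, the
 * kmalloc() fast path above covers allocations up to 32 KiB; anything
 * larger (or a failed kmalloc()) goes to vmalloc space, so large maps do
 * not depend on high-order contiguous pages being available.
 */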
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}
static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
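
/* Worked example of the accounting above: with RLIMIT_MEMLOCK at 64 MiB
 * and 4 KiB pages, memlock_limit is 16384 pages. Charging a map that needs
 * 20000 pages pushes locked_vm past the limit, so the charge is rolled
 * back and -EPERM returned; the charge-then-check order keeps concurrent
 * chargers from both slipping under the limit.
 */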
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
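
/* A map thus carries two counters: refcnt keeps the object itself alive,
 * while usercnt tracks user-space references (fds and pinned paths). When
 * the last usercnt drops, prog-array maps are cleared so that programs
 * stored in the array cannot keep the map (and themselves) alive in a
 * reference cycle.
 */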
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif
static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
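
/* For example, with BPF_MAP_CREATE_LAST_FIELD defined as map_flags,
 * CHECK_ATTR(BPF_MAP_CREATE) expands (roughly) to:
 *
 *	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * i.e. it scans every byte of the union past the command's last used
 * field and evaluates to true if any of them is non-zero, which lets the
 * attr layout grow in the future without old kernels silently ignoring
 * bits they don't understand.
 */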
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
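
/* Minimal user-space sketch of this command (illustrative, not part of
 * this file; assumes <linux/bpf.h> and a raw syscall(2) wrapper):
 *
 *	union bpf_attr attr = {
 *		.map_type	= BPF_MAP_TYPE_HASH,
 *		.key_size	= 4,
 *		.value_size	= 8,
 *		.max_entries	= 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success fd is a new O_CLOEXEC file descriptor backed by bpf_map_fops
 * above; closing it drops the usercnt taken in map_create().
 */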
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
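
/* The __weak stub above is overridden by the real bpf_stackmap_copy()
 * when the stack map implementation is built in; otherwise lookups on
 * BPF_MAP_TYPE_STACK_TRACE maps fail gracefully with -ENOTSUPP instead
 * of requiring an #ifdef at the call site below.
 */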
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
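
/* Example of the per-CPU sizing above: for a BPF_MAP_TYPE_PERCPU_ARRAY
 * with value_size == 12 on a machine with 4 possible CPUs, each per-CPU
 * slot is rounded up to 16 bytes and user space must supply (and gets
 * back) a 64-byte value buffer, one aligned slot per possible CPU.
 */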
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete, otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
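
/* Illustrative user-space iteration over all keys (not part of this file;
 * KEY_SIZE is a hypothetical constant matching the map's key_size):
 *
 *	char key[KEY_SIZE], next_key[KEY_SIZE];
 *	union bpf_attr attr = {
 *		.map_fd   = fd,
 *		.key      = (__u64)(unsigned long)key,
 *		.next_key = (__u64)(unsigned long)next_key,
 *	};
 *
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *		       sizeof(attr)) == 0) {
 *		// process next_key ...
 *		memcpy(key, next_key, KEY_SIZE);
 *	}
 *
 * The loop terminates when map_get_next_key() reports no key after the
 * current one (typically -ENOENT past the last entry).
 */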
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_xdp_adjust_head)
				prog->xdp_adjust_head = 1;
			if (insn->imm == BPF_FUNC_tail_call) {
				/* If we tail call into other programs, we
				 * cannot make any assumptions since they
				 * can be replaced dynamically during runtime
				 * in the program array.
				 */
				prog->cb_access = 1;
				prog->xdp_adjust_head = 1;

				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
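
/* Concrete effect of the fixup above: at load time a helper call carries
 * its helper ID in insn->imm (e.g. BPF_FUNC_map_lookup_elem); after this
 * pass imm holds the helper's address relative to __bpf_call_base, so the
 * interpreter can dispatch via "__bpf_call_base + imm" without a per-call
 * table lookup. bpf_tail_call is the exception: it is re-marked as
 * BPF_JMP | BPF_CALL | BPF_X and handled as a distinct opcode.
 */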
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif
static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);
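
/* BPF_MAX_REFCNT caps both map and prog reference counts far below the
 * atomic_t overflow point, so user space repeatedly acquiring references
 * (e.g. via repeated fd duplication or attachment) gets -EBUSY rather
 * than wrapping the counter into a use-after-free.
 */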
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
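
/* Minimal user-space sketch of BPF_PROG_LOAD (illustrative, not part of
 * this file): the smallest valid program just sets r0 and exits.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns	   = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * SOCKET_FILTER is the one type loadable without CAP_SYS_ADMIN (subject
 * to the sysctl_unprivileged_bpf_disabled check in the syscall below).
 */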
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}
#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	default:
		err = -EINVAL;
		break;
	}

	return err;
}