[mirror_ubuntu-bionic-kernel.git] / kernel / bpf / syscall.c
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
11 */
12 #include <linux/bpf.h>
13 #include <linux/bpf_trace.h>
14 #include <linux/syscalls.h>
15 #include <linux/slab.h>
16 #include <linux/sched/signal.h>
17 #include <linux/vmalloc.h>
18 #include <linux/mmzone.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/file.h>
21 #include <linux/license.h>
22 #include <linux/filter.h>
23 #include <linux/version.h>
24 #include <linux/kernel.h>
25
26 DEFINE_PER_CPU(int, bpf_prog_active);
27
28 int sysctl_unprivileged_bpf_disabled __read_mostly;
29
30 static LIST_HEAD(bpf_map_types);
31
32 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
33 {
34 struct bpf_map_type_list *tl;
35 struct bpf_map *map;
36
37 list_for_each_entry(tl, &bpf_map_types, list_node) {
38 if (tl->type == attr->map_type) {
39 map = tl->ops->map_alloc(attr);
40 if (IS_ERR(map))
41 return map;
42 map->ops = tl->ops;
43 map->map_type = attr->map_type;
44 return map;
45 }
46 }
47 return ERR_PTR(-EINVAL);
48 }
49
50 /* boot time registration of different map implementations */
51 void bpf_register_map_type(struct bpf_map_type_list *tl)
52 {
53 list_add(&tl->list_node, &bpf_map_types);
54 }
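/* Illustrative sketch (not part of this file): map implementations register
 * themselves from an initcall in their own source file, roughly like
 * arraymap.c does for BPF_MAP_TYPE_ARRAY:
 *
 *	static struct bpf_map_type_list array_type = {
 *		.ops	= &array_ops,
 *		.type	= BPF_MAP_TYPE_ARRAY,
 *	};
 *
 *	static int __init register_array_map(void)
 *	{
 *		bpf_register_map_type(&array_type);
 *		return 0;
 *	}
 *	late_initcall(register_array_map);
 */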
55
56 void *bpf_map_area_alloc(size_t size)
57 {
58 /* We definitely need __GFP_NORETRY, so the OOM killer doesn't
59 * trigger under memory pressure, as we really just want to
60 * fail instead.
61 */
62 const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
63 void *area;
64
65 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
66 area = kmalloc(size, GFP_USER | flags);
67 if (area != NULL)
68 return area;
69 }
70
71 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
72 PAGE_KERNEL);
73 }
74
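/* Counterpart to bpf_map_area_alloc(): kvfree() picks the right release path
 * for both kmalloc()ed and vmalloc()ed areas, so callers need not remember
 * which allocation path was taken.
 */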
75 void bpf_map_area_free(void *area)
76 {
77 kvfree(area);
78 }
79
80 int bpf_map_precharge_memlock(u32 pages)
81 {
82 struct user_struct *user = get_current_user();
83 unsigned long memlock_limit, cur;
84
85 memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
86 cur = atomic_long_read(&user->locked_vm);
87 free_uid(user);
88 if (cur + pages > memlock_limit)
89 return -EPERM;
90 return 0;
91 }
92
93 static int bpf_map_charge_memlock(struct bpf_map *map)
94 {
95 struct user_struct *user = get_current_user();
96 unsigned long memlock_limit;
97
98 memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
99
100 atomic_long_add(map->pages, &user->locked_vm);
101
102 if (atomic_long_read(&user->locked_vm) > memlock_limit) {
103 atomic_long_sub(map->pages, &user->locked_vm);
104 free_uid(user);
105 return -EPERM;
106 }
107 map->user = user;
108 return 0;
109 }
110
111 static void bpf_map_uncharge_memlock(struct bpf_map *map)
112 {
113 struct user_struct *user = map->user;
114
115 atomic_long_sub(map->pages, &user->locked_vm);
116 free_uid(user);
117 }
118
119 /* called from workqueue */
120 static void bpf_map_free_deferred(struct work_struct *work)
121 {
122 struct bpf_map *map = container_of(work, struct bpf_map, work);
123
124 bpf_map_uncharge_memlock(map);
125 /* implementation dependent freeing */
126 map->ops->map_free(map);
127 }
128
129 static void bpf_map_put_uref(struct bpf_map *map)
130 {
131 if (atomic_dec_and_test(&map->usercnt)) {
132 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
133 bpf_fd_array_map_clear(map);
134 }
135 }
136
137 /* decrement map refcnt and schedule it for freeing via workqueue
138 * (underlying map implementation ops->map_free() might sleep)
139 */
140 void bpf_map_put(struct bpf_map *map)
141 {
142 if (atomic_dec_and_test(&map->refcnt)) {
143 INIT_WORK(&map->work, bpf_map_free_deferred);
144 schedule_work(&map->work);
145 }
146 }
147
148 void bpf_map_put_with_uref(struct bpf_map *map)
149 {
150 bpf_map_put_uref(map);
151 bpf_map_put(map);
152 }
153
154 static int bpf_map_release(struct inode *inode, struct file *filp)
155 {
156 struct bpf_map *map = filp->private_data;
157
158 if (map->ops->map_release)
159 map->ops->map_release(map, filp);
160
161 bpf_map_put_with_uref(map);
162 return 0;
163 }
164
165 #ifdef CONFIG_PROC_FS
166 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
167 {
168 const struct bpf_map *map = filp->private_data;
169 const struct bpf_array *array;
170 u32 owner_prog_type = 0;
171
172 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
173 array = container_of(map, struct bpf_array, map);
174 owner_prog_type = array->owner_prog_type;
175 }
176
177 seq_printf(m,
178 "map_type:\t%u\n"
179 "key_size:\t%u\n"
180 "value_size:\t%u\n"
181 "max_entries:\t%u\n"
182 "map_flags:\t%#x\n"
183 "memlock:\t%llu\n",
184 map->map_type,
185 map->key_size,
186 map->value_size,
187 map->max_entries,
188 map->map_flags,
189 map->pages * 1ULL << PAGE_SHIFT);
190
191 if (owner_prog_type)
192 seq_printf(m, "owner_prog_type:\t%u\n",
193 owner_prog_type);
194 }
195 #endif
196
197 static const struct file_operations bpf_map_fops = {
198 #ifdef CONFIG_PROC_FS
199 .show_fdinfo = bpf_map_show_fdinfo,
200 #endif
201 .release = bpf_map_release,
202 };
203
204 int bpf_map_new_fd(struct bpf_map *map)
205 {
206 return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
207 O_RDWR | O_CLOEXEC);
208 }
209
210 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
211 #define CHECK_ATTR(CMD) \
212 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
213 sizeof(attr->CMD##_LAST_FIELD), 0, \
214 sizeof(*attr) - \
215 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
216 sizeof(attr->CMD##_LAST_FIELD)) != NULL
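/* Example: with BPF_MAP_CREATE_LAST_FIELD defined as map_flags below,
 * CHECK_ATTR(BPF_MAP_CREATE) is true (and the command is rejected) whenever
 * any byte of 'union bpf_attr' past attr->map_flags is non-zero, i.e. when
 * user space set fields this command does not know about.
 */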
217
218 #define BPF_MAP_CREATE_LAST_FIELD map_flags
219 /* called via syscall */
220 static int map_create(union bpf_attr *attr)
221 {
222 struct bpf_map *map;
223 int err;
224
225 err = CHECK_ATTR(BPF_MAP_CREATE);
226 if (err)
227 return -EINVAL;
228
229 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
230 map = find_and_alloc_map(attr);
231 if (IS_ERR(map))
232 return PTR_ERR(map);
233
234 atomic_set(&map->refcnt, 1);
235 atomic_set(&map->usercnt, 1);
236
237 err = bpf_map_charge_memlock(map);
238 if (err)
239 goto free_map_nouncharge;
240
241 err = bpf_map_new_fd(map);
242 if (err < 0)
243 /* failed to allocate fd */
244 goto free_map;
245
246 trace_bpf_map_create(map, err);
247 return err;
248
249 free_map:
250 bpf_map_uncharge_memlock(map);
251 free_map_nouncharge:
252 map->ops->map_free(map);
253 return err;
254 }
255
256 /* If an error is returned, the fd is released.
257 * On success the caller should complete fd access with a matching fdput()
258 */
259 struct bpf_map *__bpf_map_get(struct fd f)
260 {
261 if (!f.file)
262 return ERR_PTR(-EBADF);
263 if (f.file->f_op != &bpf_map_fops) {
264 fdput(f);
265 return ERR_PTR(-EINVAL);
266 }
267
268 return f.file->private_data;
269 }
270
271 /* prog's and map's refcnt limit */
272 #define BPF_MAX_REFCNT 32768
273
274 struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
275 {
276 if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
277 atomic_dec(&map->refcnt);
278 return ERR_PTR(-EBUSY);
279 }
280 if (uref)
281 atomic_inc(&map->usercnt);
282 return map;
283 }
284
285 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
286 {
287 struct fd f = fdget(ufd);
288 struct bpf_map *map;
289
290 map = __bpf_map_get(f);
291 if (IS_ERR(map))
292 return map;
293
294 map = bpf_map_inc(map, true);
295 fdput(f);
296
297 return map;
298 }
299
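/* __weak default for kernels where the stack map implementation is not built
 * in; kernel/bpf/stackmap.c overrides this stub with the real copy helper.
 */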
300 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
301 {
302 return -ENOTSUPP;
303 }
304
305 /* last field in 'union bpf_attr' used by this command */
306 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
307
308 static int map_lookup_elem(union bpf_attr *attr)
309 {
310 void __user *ukey = u64_to_user_ptr(attr->key);
311 void __user *uvalue = u64_to_user_ptr(attr->value);
312 int ufd = attr->map_fd;
313 struct bpf_map *map;
314 void *key, *value, *ptr;
315 u32 value_size;
316 struct fd f;
317 int err;
318
319 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
320 return -EINVAL;
321
322 f = fdget(ufd);
323 map = __bpf_map_get(f);
324 if (IS_ERR(map))
325 return PTR_ERR(map);
326
327 err = -ENOMEM;
328 key = kmalloc(map->key_size, GFP_USER);
329 if (!key)
330 goto err_put;
331
332 err = -EFAULT;
333 if (copy_from_user(key, ukey, map->key_size) != 0)
334 goto free_key;
335
336 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
337 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
338 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
339 value_size = round_up(map->value_size, 8) * num_possible_cpus();
340 else
341 value_size = map->value_size;
342
343 err = -ENOMEM;
344 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
345 if (!value)
346 goto free_key;
347
348 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
349 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
350 err = bpf_percpu_hash_copy(map, key, value);
351 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
352 err = bpf_percpu_array_copy(map, key, value);
353 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
354 err = bpf_stackmap_copy(map, key, value);
355 } else {
356 rcu_read_lock();
357 ptr = map->ops->map_lookup_elem(map, key);
358 if (ptr)
359 memcpy(value, ptr, value_size);
360 rcu_read_unlock();
361 err = ptr ? 0 : -ENOENT;
362 }
363
364 if (err)
365 goto free_value;
366
367 err = -EFAULT;
368 if (copy_to_user(uvalue, value, value_size) != 0)
369 goto free_value;
370
371 trace_bpf_map_lookup_elem(map, ufd, key, value);
372 err = 0;
373
374 free_value:
375 kfree(value);
376 free_key:
377 kfree(key);
378 err_put:
379 fdput(f);
380 return err;
381 }
382
383 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
384
385 static int map_update_elem(union bpf_attr *attr)
386 {
387 void __user *ukey = u64_to_user_ptr(attr->key);
388 void __user *uvalue = u64_to_user_ptr(attr->value);
389 int ufd = attr->map_fd;
390 struct bpf_map *map;
391 void *key, *value;
392 u32 value_size;
393 struct fd f;
394 int err;
395
396 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
397 return -EINVAL;
398
399 f = fdget(ufd);
400 map = __bpf_map_get(f);
401 if (IS_ERR(map))
402 return PTR_ERR(map);
403
404 err = -ENOMEM;
405 key = kmalloc(map->key_size, GFP_USER);
406 if (!key)
407 goto err_put;
408
409 err = -EFAULT;
410 if (copy_from_user(key, ukey, map->key_size) != 0)
411 goto free_key;
412
413 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
414 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
415 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
416 value_size = round_up(map->value_size, 8) * num_possible_cpus();
417 else
418 value_size = map->value_size;
419
420 err = -ENOMEM;
421 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
422 if (!value)
423 goto free_key;
424
425 err = -EFAULT;
426 if (copy_from_user(value, uvalue, value_size) != 0)
427 goto free_value;
428
429 /* must increment bpf_prog_active to avoid kprobe+bpf triggering from
430 * inside bpf map update or delete, otherwise deadlocks are possible
431 */
432 preempt_disable();
433 __this_cpu_inc(bpf_prog_active);
434 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
435 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
436 err = bpf_percpu_hash_update(map, key, value, attr->flags);
437 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
438 err = bpf_percpu_array_update(map, key, value, attr->flags);
439 } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
440 map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
441 map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
442 rcu_read_lock();
443 err = bpf_fd_array_map_update_elem(map, f.file, key, value,
444 attr->flags);
445 rcu_read_unlock();
446 } else {
447 rcu_read_lock();
448 err = map->ops->map_update_elem(map, key, value, attr->flags);
449 rcu_read_unlock();
450 }
451 __this_cpu_dec(bpf_prog_active);
452 preempt_enable();
453
454 if (!err)
455 trace_bpf_map_update_elem(map, ufd, key, value);
456 free_value:
457 kfree(value);
458 free_key:
459 kfree(key);
460 err_put:
461 fdput(f);
462 return err;
463 }
464
465 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
466
467 static int map_delete_elem(union bpf_attr *attr)
468 {
469 void __user *ukey = u64_to_user_ptr(attr->key);
470 int ufd = attr->map_fd;
471 struct bpf_map *map;
472 struct fd f;
473 void *key;
474 int err;
475
476 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
477 return -EINVAL;
478
479 f = fdget(ufd);
480 map = __bpf_map_get(f);
481 if (IS_ERR(map))
482 return PTR_ERR(map);
483
484 err = -ENOMEM;
485 key = kmalloc(map->key_size, GFP_USER);
486 if (!key)
487 goto err_put;
488
489 err = -EFAULT;
490 if (copy_from_user(key, ukey, map->key_size) != 0)
491 goto free_key;
492
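/* same recursion guard as in map_update_elem(): bumping bpf_prog_active keeps
 * kprobe-attached BPF programs from re-entering map code and deadlocking
 */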
493 preempt_disable();
494 __this_cpu_inc(bpf_prog_active);
495 rcu_read_lock();
496 err = map->ops->map_delete_elem(map, key);
497 rcu_read_unlock();
498 __this_cpu_dec(bpf_prog_active);
499 preempt_enable();
500
501 if (!err)
502 trace_bpf_map_delete_elem(map, ufd, key);
503 free_key:
504 kfree(key);
505 err_put:
506 fdput(f);
507 return err;
508 }
509
510 /* last field in 'union bpf_attr' used by this command */
511 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
512
513 static int map_get_next_key(union bpf_attr *attr)
514 {
515 void __user *ukey = u64_to_user_ptr(attr->key);
516 void __user *unext_key = u64_to_user_ptr(attr->next_key);
517 int ufd = attr->map_fd;
518 struct bpf_map *map;
519 void *key, *next_key;
520 struct fd f;
521 int err;
522
523 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
524 return -EINVAL;
525
526 f = fdget(ufd);
527 map = __bpf_map_get(f);
528 if (IS_ERR(map))
529 return PTR_ERR(map);
530
531 err = -ENOMEM;
532 key = kmalloc(map->key_size, GFP_USER);
533 if (!key)
534 goto err_put;
535
536 err = -EFAULT;
537 if (copy_from_user(key, ukey, map->key_size) != 0)
538 goto free_key;
539
540 err = -ENOMEM;
541 next_key = kmalloc(map->key_size, GFP_USER);
542 if (!next_key)
543 goto free_key;
544
545 rcu_read_lock();
546 err = map->ops->map_get_next_key(map, key, next_key);
547 rcu_read_unlock();
548 if (err)
549 goto free_next_key;
550
551 err = -EFAULT;
552 if (copy_to_user(unext_key, next_key, map->key_size) != 0)
553 goto free_next_key;
554
555 trace_bpf_map_next_key(map, ufd, key, next_key);
556 err = 0;
557
558 free_next_key:
559 kfree(next_key);
560 free_key:
561 kfree(key);
562 err_put:
563 fdput(f);
564 return err;
565 }
566
567 static LIST_HEAD(bpf_prog_types);
568
569 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
570 {
571 struct bpf_prog_type_list *tl;
572
573 list_for_each_entry(tl, &bpf_prog_types, list_node) {
574 if (tl->type == type) {
575 prog->aux->ops = tl->ops;
576 prog->type = type;
577 return 0;
578 }
579 }
580
581 return -EINVAL;
582 }
583
584 void bpf_register_prog_type(struct bpf_prog_type_list *tl)
585 {
586 list_add(&tl->list_node, &bpf_prog_types);
587 }
588
589 /* fixup insn->imm field of bpf_call instructions:
590 * if (insn->imm == BPF_FUNC_map_lookup_elem)
591 * insn->imm = bpf_map_lookup_elem - __bpf_call_base;
592 * else if (insn->imm == BPF_FUNC_map_update_elem)
593 * insn->imm = bpf_map_update_elem - __bpf_call_base;
594 * else ...
595 *
596 * this function is called after the eBPF program has passed verification
597 */
598 static void fixup_bpf_calls(struct bpf_prog *prog)
599 {
600 const struct bpf_func_proto *fn;
601 int i;
602
603 for (i = 0; i < prog->len; i++) {
604 struct bpf_insn *insn = &prog->insnsi[i];
605
606 if (insn->code == (BPF_JMP | BPF_CALL)) {
607 /* we reach here when the program has bpf_call instructions
608 * and it passed bpf_check(), which means that
609 * ops->get_func_proto must have been supplied; check it
610 */
611 BUG_ON(!prog->aux->ops->get_func_proto);
612
613 if (insn->imm == BPF_FUNC_get_route_realm)
614 prog->dst_needed = 1;
615 if (insn->imm == BPF_FUNC_get_prandom_u32)
616 bpf_user_rnd_init_once();
617 if (insn->imm == BPF_FUNC_xdp_adjust_head)
618 prog->xdp_adjust_head = 1;
619 if (insn->imm == BPF_FUNC_tail_call) {
620 /* If we tail call into other programs, we
621 * cannot make any assumptions since they
622 * can be replaced dynamically during runtime
623 * in the program array.
624 */
625 prog->cb_access = 1;
626 prog->xdp_adjust_head = 1;
627
628 /* mark bpf_tail_call as different opcode
629 * to avoid conditional branch in
630 * interpreter for every normal call
631 * and to prevent accidental JITing by
632 * JIT compiler that doesn't support
633 * bpf_tail_call yet
634 */
635 insn->imm = 0;
636 insn->code |= BPF_X;
637 continue;
638 }
639
640 fn = prog->aux->ops->get_func_proto(insn->imm);
641 /* all functions that have a prototype and that the verifier allowed
642 * programs to call must be real in-kernel functions
643 */
644 BUG_ON(!fn->func);
645 insn->imm = fn->func - __bpf_call_base;
646 }
647 }
648 }
649
650 /* drop refcnt on maps used by eBPF program and free auxiliary data */
651 static void free_used_maps(struct bpf_prog_aux *aux)
652 {
653 int i;
654
655 for (i = 0; i < aux->used_map_cnt; i++)
656 bpf_map_put(aux->used_maps[i]);
657
658 kfree(aux->used_maps);
659 }
660
661 int __bpf_prog_charge(struct user_struct *user, u32 pages)
662 {
663 unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
664 unsigned long user_bufs;
665
666 if (user) {
667 user_bufs = atomic_long_add_return(pages, &user->locked_vm);
668 if (user_bufs > memlock_limit) {
669 atomic_long_sub(pages, &user->locked_vm);
670 return -EPERM;
671 }
672 }
673
674 return 0;
675 }
676
677 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
678 {
679 if (user)
680 atomic_long_sub(pages, &user->locked_vm);
681 }
682
683 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
684 {
685 struct user_struct *user = get_current_user();
686 int ret;
687
688 ret = __bpf_prog_charge(user, prog->pages);
689 if (ret) {
690 free_uid(user);
691 return ret;
692 }
693
694 prog->aux->user = user;
695 return 0;
696 }
697
698 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
699 {
700 struct user_struct *user = prog->aux->user;
701
702 __bpf_prog_uncharge(user, prog->pages);
703 free_uid(user);
704 }
705
706 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
707 {
708 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
709
710 free_used_maps(aux);
711 bpf_prog_uncharge_memlock(aux->prog);
712 bpf_prog_free(aux->prog);
713 }
714
715 void bpf_prog_put(struct bpf_prog *prog)
716 {
717 if (atomic_dec_and_test(&prog->aux->refcnt)) {
718 trace_bpf_prog_put_rcu(prog);
719 bpf_prog_kallsyms_del(prog);
720 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
721 }
722 }
723 EXPORT_SYMBOL_GPL(bpf_prog_put);
724
725 static int bpf_prog_release(struct inode *inode, struct file *filp)
726 {
727 struct bpf_prog *prog = filp->private_data;
728
729 bpf_prog_put(prog);
730 return 0;
731 }
732
733 #ifdef CONFIG_PROC_FS
734 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
735 {
736 const struct bpf_prog *prog = filp->private_data;
737 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
738
739 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
740 seq_printf(m,
741 "prog_type:\t%u\n"
742 "prog_jited:\t%u\n"
743 "prog_tag:\t%s\n"
744 "memlock:\t%llu\n",
745 prog->type,
746 prog->jited,
747 prog_tag,
748 prog->pages * 1ULL << PAGE_SHIFT);
749 }
750 #endif
751
752 static const struct file_operations bpf_prog_fops = {
753 #ifdef CONFIG_PROC_FS
754 .show_fdinfo = bpf_prog_show_fdinfo,
755 #endif
756 .release = bpf_prog_release,
757 };
758
759 int bpf_prog_new_fd(struct bpf_prog *prog)
760 {
761 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
762 O_RDWR | O_CLOEXEC);
763 }
764
765 static struct bpf_prog *____bpf_prog_get(struct fd f)
766 {
767 if (!f.file)
768 return ERR_PTR(-EBADF);
769 if (f.file->f_op != &bpf_prog_fops) {
770 fdput(f);
771 return ERR_PTR(-EINVAL);
772 }
773
774 return f.file->private_data;
775 }
776
777 struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
778 {
779 if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
780 atomic_sub(i, &prog->aux->refcnt);
781 return ERR_PTR(-EBUSY);
782 }
783 return prog;
784 }
785 EXPORT_SYMBOL_GPL(bpf_prog_add);
786
787 void bpf_prog_sub(struct bpf_prog *prog, int i)
788 {
789 /* Only to be used for undoing previous bpf_prog_add() in some
790 * error path. We still know that another entity in our call
791 * path holds a reference to the program, thus atomic_sub() can
792 * be safely used in such cases!
793 */
794 WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
795 }
796 EXPORT_SYMBOL_GPL(bpf_prog_sub);
797
798 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
799 {
800 return bpf_prog_add(prog, 1);
801 }
802 EXPORT_SYMBOL_GPL(bpf_prog_inc);
803
804 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
805 {
806 struct fd f = fdget(ufd);
807 struct bpf_prog *prog;
808
809 prog = ____bpf_prog_get(f);
810 if (IS_ERR(prog))
811 return prog;
812 if (type && prog->type != *type) {
813 prog = ERR_PTR(-EINVAL);
814 goto out;
815 }
816
817 prog = bpf_prog_inc(prog);
818 out:
819 fdput(f);
820 return prog;
821 }
822
823 struct bpf_prog *bpf_prog_get(u32 ufd)
824 {
825 return __bpf_prog_get(ufd, NULL);
826 }
827
828 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
829 {
830 struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
831
832 if (!IS_ERR(prog))
833 trace_bpf_prog_get_type(prog);
834 return prog;
835 }
836 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
837
838 /* last field in 'union bpf_attr' used by this command */
839 #define BPF_PROG_LOAD_LAST_FIELD kern_version
840
841 static int bpf_prog_load(union bpf_attr *attr)
842 {
843 enum bpf_prog_type type = attr->prog_type;
844 struct bpf_prog *prog;
845 int err;
846 char license[128];
847 bool is_gpl;
848
849 if (CHECK_ATTR(BPF_PROG_LOAD))
850 return -EINVAL;
851
852 /* copy eBPF program license from user space */
853 if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
854 sizeof(license) - 1) < 0)
855 return -EFAULT;
856 license[sizeof(license) - 1] = 0;
857
858 /* eBPF programs must be GPL compatible to use GPL-ed functions */
859 is_gpl = license_is_gpl_compatible(license);
860
861 if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
862 return -E2BIG;
863
864 if (type == BPF_PROG_TYPE_KPROBE &&
865 attr->kern_version != LINUX_VERSION_CODE)
866 return -EINVAL;
867
868 if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
869 return -EPERM;
870
871 /* plain bpf_prog allocation */
872 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
873 if (!prog)
874 return -ENOMEM;
875
876 err = bpf_prog_charge_memlock(prog);
877 if (err)
878 goto free_prog_nouncharge;
879
880 prog->len = attr->insn_cnt;
881
882 err = -EFAULT;
883 if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
884 bpf_prog_insn_size(prog)) != 0)
885 goto free_prog;
886
887 prog->orig_prog = NULL;
888 prog->jited = 0;
889
890 atomic_set(&prog->aux->refcnt, 1);
891 prog->gpl_compatible = is_gpl ? 1 : 0;
892
893 /* find program type: socket_filter vs tracing_filter */
894 err = find_prog_type(type, prog);
895 if (err < 0)
896 goto free_prog;
897
898 /* run eBPF verifier */
899 err = bpf_check(&prog, attr);
900 if (err < 0)
901 goto free_used_maps;
902
903 /* fixup BPF_CALL->imm field */
904 fixup_bpf_calls(prog);
905
906 /* eBPF program is ready to be JITed */
907 prog = bpf_prog_select_runtime(prog, &err);
908 if (err < 0)
909 goto free_used_maps;
910
911 err = bpf_prog_new_fd(prog);
912 if (err < 0)
913 /* failed to allocate fd */
914 goto free_used_maps;
915
916 bpf_prog_kallsyms_add(prog);
917 trace_bpf_prog_load(prog, err);
918 return err;
919
920 free_used_maps:
921 free_used_maps(prog->aux);
922 free_prog:
923 bpf_prog_uncharge_memlock(prog);
924 free_prog_nouncharge:
925 bpf_prog_free(prog);
926 return err;
927 }
928
929 #define BPF_OBJ_LAST_FIELD bpf_fd
930
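/* For both BPF_OBJ_PIN and BPF_OBJ_GET, 'pathname' is expected to point into
 * a mount of the BPF filesystem (conventionally /sys/fs/bpf); the actual
 * pinning logic lives in kernel/bpf/inode.c.
 */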
931 static int bpf_obj_pin(const union bpf_attr *attr)
932 {
933 if (CHECK_ATTR(BPF_OBJ))
934 return -EINVAL;
935
936 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
937 }
938
939 static int bpf_obj_get(const union bpf_attr *attr)
940 {
941 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
942 return -EINVAL;
943
944 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
945 }
946
947 #ifdef CONFIG_CGROUP_BPF
948
949 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
950
951 static int bpf_prog_attach(const union bpf_attr *attr)
952 {
953 enum bpf_prog_type ptype;
954 struct bpf_prog *prog;
955 struct cgroup *cgrp;
956 int ret;
957
958 if (!capable(CAP_NET_ADMIN))
959 return -EPERM;
960
961 if (CHECK_ATTR(BPF_PROG_ATTACH))
962 return -EINVAL;
963
964 if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
965 return -EINVAL;
966
967 switch (attr->attach_type) {
968 case BPF_CGROUP_INET_INGRESS:
969 case BPF_CGROUP_INET_EGRESS:
970 ptype = BPF_PROG_TYPE_CGROUP_SKB;
971 break;
972 case BPF_CGROUP_INET_SOCK_CREATE:
973 ptype = BPF_PROG_TYPE_CGROUP_SOCK;
974 break;
975 default:
976 return -EINVAL;
977 }
978
979 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
980 if (IS_ERR(prog))
981 return PTR_ERR(prog);
982
983 cgrp = cgroup_get_from_fd(attr->target_fd);
984 if (IS_ERR(cgrp)) {
985 bpf_prog_put(prog);
986 return PTR_ERR(cgrp);
987 }
988
989 ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
990 attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
991 if (ret)
992 bpf_prog_put(prog);
993 cgroup_put(cgrp);
994
995 return ret;
996 }
997
998 #define BPF_PROG_DETACH_LAST_FIELD attach_type
999
1000 static int bpf_prog_detach(const union bpf_attr *attr)
1001 {
1002 struct cgroup *cgrp;
1003 int ret;
1004
1005 if (!capable(CAP_NET_ADMIN))
1006 return -EPERM;
1007
1008 if (CHECK_ATTR(BPF_PROG_DETACH))
1009 return -EINVAL;
1010
1011 switch (attr->attach_type) {
1012 case BPF_CGROUP_INET_INGRESS:
1013 case BPF_CGROUP_INET_EGRESS:
1014 case BPF_CGROUP_INET_SOCK_CREATE:
1015 cgrp = cgroup_get_from_fd(attr->target_fd);
1016 if (IS_ERR(cgrp))
1017 return PTR_ERR(cgrp);
1018
1019 ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
1020 cgroup_put(cgrp);
1021 break;
1022
1023 default:
1024 return -EINVAL;
1025 }
1026
1027 return ret;
1028 }
1029 #endif /* CONFIG_CGROUP_BPF */
1030
1031 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
1032 {
1033 union bpf_attr attr = {};
1034 int err;
1035
1036 if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
1037 return -EPERM;
1038
1039 if (!access_ok(VERIFY_READ, uattr, 1))
1040 return -EFAULT;
1041
1042 if (size > PAGE_SIZE) /* silly large */
1043 return -E2BIG;
1044
1045 /* If we're handed a bigger struct than we know of,
1046 * ensure all the unknown bits are 0 - i.e. new
1047 * user-space does not rely on any kernel feature
1048 * extensions we don't know about yet.
1049 */
1050 if (size > sizeof(attr)) {
1051 unsigned char __user *addr;
1052 unsigned char __user *end;
1053 unsigned char val;
1054
1055 addr = (void __user *)uattr + sizeof(attr);
1056 end = (void __user *)uattr + size;
1057
1058 for (; addr < end; addr++) {
1059 err = get_user(val, addr);
1060 if (err)
1061 return err;
1062 if (val)
1063 return -E2BIG;
1064 }
1065 size = sizeof(attr);
1066 }
1067
1068 /* copy attributes from user space; may be less than sizeof(bpf_attr) */
1069 if (copy_from_user(&attr, uattr, size) != 0)
1070 return -EFAULT;
1071
1072 switch (cmd) {
1073 case BPF_MAP_CREATE:
1074 err = map_create(&attr);
1075 break;
1076 case BPF_MAP_LOOKUP_ELEM:
1077 err = map_lookup_elem(&attr);
1078 break;
1079 case BPF_MAP_UPDATE_ELEM:
1080 err = map_update_elem(&attr);
1081 break;
1082 case BPF_MAP_DELETE_ELEM:
1083 err = map_delete_elem(&attr);
1084 break;
1085 case BPF_MAP_GET_NEXT_KEY:
1086 err = map_get_next_key(&attr);
1087 break;
1088 case BPF_PROG_LOAD:
1089 err = bpf_prog_load(&attr);
1090 break;
1091 case BPF_OBJ_PIN:
1092 err = bpf_obj_pin(&attr);
1093 break;
1094 case BPF_OBJ_GET:
1095 err = bpf_obj_get(&attr);
1096 break;
1097
1098 #ifdef CONFIG_CGROUP_BPF
1099 case BPF_PROG_ATTACH:
1100 err = bpf_prog_attach(&attr);
1101 break;
1102 case BPF_PROG_DETACH:
1103 err = bpf_prog_detach(&attr);
1104 break;
1105 #endif
1106
1107 default:
1108 err = -EINVAL;
1109 break;
1110 }
1111
1112 return err;
1113 }
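
For reference, a minimal user-space sketch of exercising this syscall. The
bpf_create_map() wrapper below is illustrative only (not part of the kernel
sources) and assumes the uapi <linux/bpf.h> matching this tree; note that the
whole attr is zeroed first so unused fields satisfy CHECK_ATTR().

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Create a hash map with 4-byte keys and 8-byte values via bpf(2).
 * Returns the new map fd, or -1 with errno set on failure. */
static int bpf_create_map(enum bpf_map_type type, unsigned int key_size,
			  unsigned int value_size, unsigned int max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));	/* unused fields must stay zero */
	attr.map_type    = type;
	attr.key_size    = key_size;
	attr.value_size  = value_size;
	attr.max_entries = max_entries;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

int main(void)
{
	int map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(uint32_t),
				    sizeof(uint64_t), 64);

	return map_fd < 0 ? 1 : 0;
}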