git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - kernel/bpf/syscall.c
bpf: decrease usercnt if bpf_map_new_fd() fails in bpf_map_get_fd_by_id()
99c55f7d
AS
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
11 */
12#include <linux/bpf.h>
a67edbf4 13#include <linux/bpf_trace.h>
99c55f7d
AS
14#include <linux/syscalls.h>
15#include <linux/slab.h>
3f07c014 16#include <linux/sched/signal.h>
d407bd25
DB
17#include <linux/vmalloc.h>
18#include <linux/mmzone.h>
99c55f7d 19#include <linux/anon_inodes.h>
db20fd2b 20#include <linux/file.h>
09756af4
AS
21#include <linux/license.h>
22#include <linux/filter.h>
2541517c 23#include <linux/version.h>
535e7b4b 24#include <linux/kernel.h>
dc4bb0e2 25#include <linux/idr.h>
cb4d2b3f
MKL
26#include <linux/cred.h>
27#include <linux/timekeeping.h>
28#include <linux/ctype.h>
38c61037 29#include <linux/nospec.h>
99c55f7d 30
14dc6f04
MKL
31#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
32 (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
33 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
34 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
35#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
36#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
37
6e71b04a
CF
38#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
39
b121d1e7 40DEFINE_PER_CPU(int, bpf_prog_active);
dc4bb0e2
MKL
41static DEFINE_IDR(prog_idr);
42static DEFINE_SPINLOCK(prog_idr_lock);
f3f1c054
MKL
43static DEFINE_IDR(map_idr);
44static DEFINE_SPINLOCK(map_idr_lock);
b121d1e7 45
1be7f75d
AS
46int sysctl_unprivileged_bpf_disabled __read_mostly;
47
40077e0c
JB
48static const struct bpf_map_ops * const bpf_map_types[] = {
49#define BPF_PROG_TYPE(_id, _ops)
50#define BPF_MAP_TYPE(_id, _ops) \
51 [_id] = &_ops,
52#include <linux/bpf_types.h>
53#undef BPF_PROG_TYPE
54#undef BPF_MAP_TYPE
55};
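For orientation, a sketch of what the X-macro table above expands to; the entries shown are assumed and abridged, the authoritative list being the BPF_MAP_TYPE() lines in <linux/bpf_types.h>:

/* Illustrative expansion only:
 *
 *	static const struct bpf_map_ops * const bpf_map_types[] = {
 *		[BPF_MAP_TYPE_ARRAY]		= &array_map_ops,
 *		[BPF_MAP_TYPE_HASH]		= &htab_map_ops,
 *		[BPF_MAP_TYPE_PROG_ARRAY]	= &prog_array_map_ops,
 *		...
 *	};
 *
 * so find_and_alloc_map() below can index the ops table directly by
 * attr->map_type.
 */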
99c55f7d 56
752ba56f
MS
57/*
58 * If we're handed a bigger struct than we know of, ensure all the unknown bits
59 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
60 * we don't know about yet.
61 *
 62 * There is a time-of-check to time-of-use (ToCToU) race between this call
 63 * and the following copy_from_user() call. However, this is not a concern
 64 * since this function is only meant to future-proof against unknown bits.
65 */
58291a74
MS
66static int check_uarg_tail_zero(void __user *uaddr,
67 size_t expected_size,
68 size_t actual_size)
69{
70 unsigned char __user *addr;
71 unsigned char __user *end;
72 unsigned char val;
73 int err;
74
752ba56f
MS
75 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
76 return -E2BIG;
77
78 if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
79 return -EFAULT;
80
58291a74
MS
81 if (actual_size <= expected_size)
82 return 0;
83
84 addr = uaddr + expected_size;
85 end = uaddr + actual_size;
86
87 for (; addr < end; addr++) {
88 err = get_user(val, addr);
89 if (err)
90 return err;
91 if (val)
92 return -E2BIG;
93 }
94
95 return 0;
96}
97
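A hedged userspace-side sketch (not part of this file; the helper name and field choices are illustrative) of the contract check_uarg_tail_zero() enforces: any bytes of a grown union bpf_attr past the fields the kernel knows about must be zero, which a memset() of the whole union guarantees.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch: zeroing the entire attr keeps the "unknown tail must
 * be zero" check above happy even when userspace headers are newer than
 * the running kernel.
 */
static int bpf_create_array_map(unsigned int max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(int);
	attr.value_size  = sizeof(long long);
	attr.max_entries = max_entries;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}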
99c55f7d
AS
98static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
99{
144bc489 100 const struct bpf_map_ops *ops;
38c61037 101 u32 type = attr->map_type;
99c55f7d 102 struct bpf_map *map;
144bc489 103 int err;
99c55f7d 104
38c61037 105 if (type >= ARRAY_SIZE(bpf_map_types))
144bc489 106 return ERR_PTR(-EINVAL);
38c61037
MR
107 type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
108 ops = bpf_map_types[type];
144bc489 109 if (!ops)
40077e0c 110 return ERR_PTR(-EINVAL);
99c55f7d 111
144bc489
JK
112 if (ops->map_alloc_check) {
113 err = ops->map_alloc_check(attr);
114 if (err)
115 return ERR_PTR(err);
116 }
117 map = ops->map_alloc(attr);
40077e0c
JB
118 if (IS_ERR(map))
119 return map;
144bc489 120 map->ops = ops;
38c61037 121 map->map_type = type;
40077e0c 122 return map;
99c55f7d
AS
123}
124
96eabe7a 125void *bpf_map_area_alloc(size_t size, int numa_node)
d407bd25
DB
126{
127 /* We definitely need __GFP_NORETRY, so OOM killer doesn't
128 * trigger under memory pressure as we really just want to
129 * fail instead.
130 */
131 const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
132 void *area;
133
134 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
96eabe7a 135 area = kmalloc_node(size, GFP_USER | flags, numa_node);
d407bd25
DB
136 if (area != NULL)
137 return area;
138 }
139
96eabe7a
MKL
140 return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
141 __builtin_return_address(0));
d407bd25
DB
142}
143
144void bpf_map_area_free(void *area)
145{
146 kvfree(area);
147}
148
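A hypothetical caller (alloc_example is not a symbol in this tree), showing the intended division of labor of the two helpers above:

/* Hypothetical caller, for illustration only: small maps come from the
 * physically contiguous kmalloc() path, larger ones transparently fall
 * back to vmalloc(); bpf_map_area_free()/kvfree() handles either kind
 * of pointer.
 */
static void *alloc_example(u32 nelems, u32 elem_size)
{
	return bpf_map_area_alloc((u64) nelems * elem_size, NUMA_NO_NODE);
}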
6c905981
AS
149int bpf_map_precharge_memlock(u32 pages)
150{
151 struct user_struct *user = get_current_user();
152 unsigned long memlock_limit, cur;
153
154 memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
155 cur = atomic_long_read(&user->locked_vm);
156 free_uid(user);
157 if (cur + pages > memlock_limit)
158 return -EPERM;
159 return 0;
160}
161
aaac3ba9
AS
162static int bpf_map_charge_memlock(struct bpf_map *map)
163{
164 struct user_struct *user = get_current_user();
165 unsigned long memlock_limit;
166
167 memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
168
169 atomic_long_add(map->pages, &user->locked_vm);
170
171 if (atomic_long_read(&user->locked_vm) > memlock_limit) {
172 atomic_long_sub(map->pages, &user->locked_vm);
173 free_uid(user);
174 return -EPERM;
175 }
176 map->user = user;
177 return 0;
178}
179
180static void bpf_map_uncharge_memlock(struct bpf_map *map)
181{
182 struct user_struct *user = map->user;
183
184 atomic_long_sub(map->pages, &user->locked_vm);
185 free_uid(user);
186}
187
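A worked example of the accounting above (limit value assumed):

/* Worked example (numbers assumed): with RLIMIT_MEMLOCK = 64 MiB and
 * 4 KiB pages, memlock_limit = 64 MiB >> PAGE_SHIFT = 16384 pages. A map
 * with map->pages = 20000 then fails bpf_map_charge_memlock() with
 * -EPERM even if locked_vm started at zero, and the speculative
 * atomic_long_add() is rolled back before returning.
 */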
f3f1c054
MKL
188static int bpf_map_alloc_id(struct bpf_map *map)
189{
190 int id;
191
192 spin_lock_bh(&map_idr_lock);
193 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
194 if (id > 0)
195 map->id = id;
196 spin_unlock_bh(&map_idr_lock);
197
198 if (WARN_ON_ONCE(!id))
199 return -ENOSPC;
200
201 return id > 0 ? 0 : id;
202}
203
bd5f5f4e 204static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
f3f1c054 205{
930651a7
ED
206 unsigned long flags;
207
bd5f5f4e 208 if (do_idr_lock)
930651a7 209 spin_lock_irqsave(&map_idr_lock, flags);
bd5f5f4e
MKL
210 else
211 __acquire(&map_idr_lock);
212
f3f1c054 213 idr_remove(&map_idr, map->id);
bd5f5f4e
MKL
214
215 if (do_idr_lock)
930651a7 216 spin_unlock_irqrestore(&map_idr_lock, flags);
bd5f5f4e
MKL
217 else
218 __release(&map_idr_lock);
f3f1c054
MKL
219}
220
99c55f7d
AS
221/* called from workqueue */
222static void bpf_map_free_deferred(struct work_struct *work)
223{
224 struct bpf_map *map = container_of(work, struct bpf_map, work);
225
aaac3ba9 226 bpf_map_uncharge_memlock(map);
afdb09c7 227 security_bpf_map_free(map);
99c55f7d
AS
228 /* implementation dependent freeing */
229 map->ops->map_free(map);
230}
231
c9da161c
DB
232static void bpf_map_put_uref(struct bpf_map *map)
233{
234 if (atomic_dec_and_test(&map->usercnt)) {
458397ab
JF
235 if (map->ops->map_release_uref)
236 map->ops->map_release_uref(map);
c9da161c
DB
237 }
238}
239
99c55f7d
AS
240/* decrement map refcnt and schedule it for freeing via workqueue
241 * (underlying map implementation ops->map_free() might sleep)
242 */
bd5f5f4e 243static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
99c55f7d
AS
244{
245 if (atomic_dec_and_test(&map->refcnt)) {
34ad5580 246 /* bpf_map_free_id() must be called first */
bd5f5f4e 247 bpf_map_free_id(map, do_idr_lock);
99c55f7d
AS
248 INIT_WORK(&map->work, bpf_map_free_deferred);
249 schedule_work(&map->work);
250 }
251}
252
bd5f5f4e
MKL
253void bpf_map_put(struct bpf_map *map)
254{
255 __bpf_map_put(map, true);
256}
257
c9da161c 258void bpf_map_put_with_uref(struct bpf_map *map)
99c55f7d 259{
c9da161c 260 bpf_map_put_uref(map);
99c55f7d 261 bpf_map_put(map);
c9da161c
DB
262}
263
264static int bpf_map_release(struct inode *inode, struct file *filp)
265{
61d1b6a4
DB
266 struct bpf_map *map = filp->private_data;
267
268 if (map->ops->map_release)
269 map->ops->map_release(map, filp);
270
271 bpf_map_put_with_uref(map);
99c55f7d
AS
272 return 0;
273}
274
f99bf205
DB
275#ifdef CONFIG_PROC_FS
276static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
277{
278 const struct bpf_map *map = filp->private_data;
21116b70
DB
279 const struct bpf_array *array;
280 u32 owner_prog_type = 0;
9780c0ab 281 u32 owner_jited = 0;
21116b70
DB
282
283 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
284 array = container_of(map, struct bpf_array, map);
285 owner_prog_type = array->owner_prog_type;
9780c0ab 286 owner_jited = array->owner_jited;
21116b70 287 }
f99bf205
DB
288
289 seq_printf(m,
290 "map_type:\t%u\n"
291 "key_size:\t%u\n"
292 "value_size:\t%u\n"
322cea2f 293 "max_entries:\t%u\n"
21116b70
DB
294 "map_flags:\t%#x\n"
295 "memlock:\t%llu\n",
f99bf205
DB
296 map->map_type,
297 map->key_size,
298 map->value_size,
322cea2f 299 map->max_entries,
21116b70
DB
300 map->map_flags,
301 map->pages * 1ULL << PAGE_SHIFT);
302
9780c0ab 303 if (owner_prog_type) {
21116b70
DB
304 seq_printf(m, "owner_prog_type:\t%u\n",
305 owner_prog_type);
9780c0ab
DB
306 seq_printf(m, "owner_jited:\t%u\n",
307 owner_jited);
308 }
f99bf205
DB
309}
310#endif
311
6e71b04a
CF
312static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
313 loff_t *ppos)
314{
315 /* We need this handler such that alloc_file() enables
316 * f_mode with FMODE_CAN_READ.
317 */
318 return -EINVAL;
319}
320
321static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
322 size_t siz, loff_t *ppos)
323{
324 /* We need this handler such that alloc_file() enables
325 * f_mode with FMODE_CAN_WRITE.
326 */
327 return -EINVAL;
328}
329
f66e448c 330const struct file_operations bpf_map_fops = {
f99bf205
DB
331#ifdef CONFIG_PROC_FS
332 .show_fdinfo = bpf_map_show_fdinfo,
333#endif
334 .release = bpf_map_release,
6e71b04a
CF
335 .read = bpf_dummy_read,
336 .write = bpf_dummy_write,
99c55f7d
AS
337};
338
6e71b04a 339int bpf_map_new_fd(struct bpf_map *map, int flags)
aa79781b 340{
afdb09c7
CF
341 int ret;
342
343 ret = security_bpf_map(map, OPEN_FMODE(flags));
344 if (ret < 0)
345 return ret;
346
aa79781b 347 return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
6e71b04a
CF
348 flags | O_CLOEXEC);
349}
350
351int bpf_get_file_flag(int flags)
352{
353 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
354 return -EINVAL;
355 if (flags & BPF_F_RDONLY)
356 return O_RDONLY;
357 if (flags & BPF_F_WRONLY)
358 return O_WRONLY;
359 return O_RDWR;
aa79781b
DB
360}
361
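A hedged userspace sketch of these flags in action: opening a pinned map read-only via BPF_OBJ_GET, which feeds attr->file_flags into bpf_get_file_flag() above (the pin path is an assumption for illustration).

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch; the pinned path below is assumed to exist. */
static int open_pinned_map_rdonly(void)
{
	static const char path[] = "/sys/fs/bpf/example_map";
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname   = (unsigned long long)(unsigned long)path;
	attr.file_flags = BPF_F_RDONLY;	/* mapped to O_RDONLY above */

	return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}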
99c55f7d
AS
362/* helper macro to check that unused fields of 'union bpf_attr' are zero */
363#define CHECK_ATTR(CMD) \
364 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
365 sizeof(attr->CMD##_LAST_FIELD), 0, \
366 sizeof(*attr) - \
367 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
368 sizeof(attr->CMD##_LAST_FIELD)) != NULL
369
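A worked expansion of the macro, sketched for BPF_MAP_CREATE (see BPF_MAP_CREATE_LAST_FIELD below):

/* Hedged expansion sketch: with CMD = BPF_MAP_CREATE, whose last used
 * field is map_name, the macro becomes roughly:
 *
 *	memchr_inv((void *)&attr->map_name + sizeof(attr->map_name), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_name) -
 *		   sizeof(attr->map_name)) != NULL
 *
 * i.e. it evaluates to true (and the command returns -EINVAL) if any
 * byte after the last field the command understands is non-zero.
 */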
cb4d2b3f
MKL
370/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
371 * Return 0 on success and < 0 on error.
372 */
373static int bpf_obj_name_cpy(char *dst, const char *src)
374{
375 const char *end = src + BPF_OBJ_NAME_LEN;
376
473d9734
MKL
377 memset(dst, 0, BPF_OBJ_NAME_LEN);
378
cb4d2b3f
MKL
379 /* Copy all isalnum() and '_' char */
380 while (src < end && *src) {
381 if (!isalnum(*src) && *src != '_')
382 return -EINVAL;
383 *dst++ = *src++;
384 }
385
386 /* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
387 if (src == end)
388 return -EINVAL;
389
cb4d2b3f
MKL
390 return 0;
391}
392
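Behavior sketch for the helper above, under the assumption BPF_OBJ_NAME_LEN == 16:

/* Examples (inputs assumed):
 *	"my_map\0..."     -> copied, returns 0
 *	"bad-name\0..."   -> '-' is neither alnum nor '_', returns -EINVAL
 *	16 non-NUL bytes  -> no terminator within the buffer, returns -EINVAL
 */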
ad5b177b 393#define BPF_MAP_CREATE_LAST_FIELD map_name
99c55f7d
AS
394/* called via syscall */
395static int map_create(union bpf_attr *attr)
396{
96eabe7a 397 int numa_node = bpf_map_attr_numa_node(attr);
99c55f7d 398 struct bpf_map *map;
6e71b04a 399 int f_flags;
99c55f7d
AS
400 int err;
401
402 err = CHECK_ATTR(BPF_MAP_CREATE);
403 if (err)
404 return -EINVAL;
405
6e71b04a
CF
406 f_flags = bpf_get_file_flag(attr->map_flags);
407 if (f_flags < 0)
408 return f_flags;
409
96eabe7a 410 if (numa_node != NUMA_NO_NODE &&
96e5ae4e
ED
411 ((unsigned int)numa_node >= nr_node_ids ||
412 !node_online(numa_node)))
96eabe7a
MKL
413 return -EINVAL;
414
99c55f7d
AS
415 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
416 map = find_and_alloc_map(attr);
417 if (IS_ERR(map))
418 return PTR_ERR(map);
419
ad5b177b
MKL
420 err = bpf_obj_name_cpy(map->name, attr->map_name);
421 if (err)
422 goto free_map_nouncharge;
423
99c55f7d 424 atomic_set(&map->refcnt, 1);
c9da161c 425 atomic_set(&map->usercnt, 1);
99c55f7d 426
afdb09c7 427 err = security_bpf_map_alloc(map);
aaac3ba9 428 if (err)
20b2b24f 429 goto free_map_nouncharge;
aaac3ba9 430
afdb09c7
CF
431 err = bpf_map_charge_memlock(map);
432 if (err)
433 goto free_map_sec;
434
f3f1c054
MKL
435 err = bpf_map_alloc_id(map);
436 if (err)
437 goto free_map;
438
6e71b04a 439 err = bpf_map_new_fd(map, f_flags);
bd5f5f4e
MKL
440 if (err < 0) {
441 /* failed to allocate fd.
442 * bpf_map_put() is needed because the above
443 * bpf_map_alloc_id() has published the map
444 * to the userspace and the userspace may
445 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
446 */
447 bpf_map_put(map);
448 return err;
449 }
99c55f7d 450
a67edbf4 451 trace_bpf_map_create(map, err);
99c55f7d
AS
452 return err;
453
454free_map:
20b2b24f 455 bpf_map_uncharge_memlock(map);
afdb09c7
CF
456free_map_sec:
457 security_bpf_map_free(map);
20b2b24f 458free_map_nouncharge:
99c55f7d
AS
459 map->ops->map_free(map);
460 return err;
461}
462
db20fd2b
AS
463/* if error is returned, fd is released.
464 * On success caller should complete fd access with matching fdput()
465 */
c2101297 466struct bpf_map *__bpf_map_get(struct fd f)
db20fd2b 467{
db20fd2b
AS
468 if (!f.file)
469 return ERR_PTR(-EBADF);
db20fd2b
AS
470 if (f.file->f_op != &bpf_map_fops) {
471 fdput(f);
472 return ERR_PTR(-EINVAL);
473 }
474
c2101297
DB
475 return f.file->private_data;
476}
477
92117d84
AS
478/* prog's and map's refcnt limit */
479#define BPF_MAX_REFCNT 32768
480
481struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
c9da161c 482{
92117d84
AS
483 if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
484 atomic_dec(&map->refcnt);
485 return ERR_PTR(-EBUSY);
486 }
c9da161c
DB
487 if (uref)
488 atomic_inc(&map->usercnt);
92117d84 489 return map;
c9da161c
DB
490}
491
492struct bpf_map *bpf_map_get_with_uref(u32 ufd)
c2101297
DB
493{
494 struct fd f = fdget(ufd);
495 struct bpf_map *map;
496
497 map = __bpf_map_get(f);
498 if (IS_ERR(map))
499 return map;
500
92117d84 501 map = bpf_map_inc(map, true);
c2101297 502 fdput(f);
db20fd2b
AS
503
504 return map;
505}
506
bd5f5f4e
MKL
507/* map_idr_lock should have been held */
508static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
509 bool uref)
510{
511 int refold;
512
513 refold = __atomic_add_unless(&map->refcnt, 1, 0);
514
515 if (refold >= BPF_MAX_REFCNT) {
516 __bpf_map_put(map, false);
517 return ERR_PTR(-EBUSY);
518 }
519
520 if (!refold)
521 return ERR_PTR(-ENOENT);
522
523 if (uref)
524 atomic_inc(&map->usercnt);
525
526 return map;
527}
528
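Usage sketch, mirroring bpf_map_get_fd_by_id() further below:

/* Usage sketch: the GET_FD_BY_ID path pairs this helper with an
 * idr_find() under map_idr_lock, i.e. roughly:
 *
 *	spin_lock_bh(&map_idr_lock);
 *	map = idr_find(&map_idr, id);
 *	map = map ? bpf_map_inc_not_zero(map, true) : ERR_PTR(-ENOENT);
 *	spin_unlock_bh(&map_idr_lock);
 *
 * The "not zero" part is what makes the race with a concurrent final
 * bpf_map_put() safe: a refcnt that has already hit zero is never
 * revived.
 */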
b8cdc051
AS
529int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
530{
531 return -ENOTSUPP;
532}
533
db20fd2b
AS
534/* last field in 'union bpf_attr' used by this command */
535#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
536
537static int map_lookup_elem(union bpf_attr *attr)
538{
535e7b4b
MS
539 void __user *ukey = u64_to_user_ptr(attr->key);
540 void __user *uvalue = u64_to_user_ptr(attr->value);
db20fd2b 541 int ufd = attr->map_fd;
db20fd2b 542 struct bpf_map *map;
8ebe667c 543 void *key, *value, *ptr;
15a07b33 544 u32 value_size;
592867bf 545 struct fd f;
db20fd2b
AS
546 int err;
547
548 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
549 return -EINVAL;
550
592867bf 551 f = fdget(ufd);
c2101297 552 map = __bpf_map_get(f);
db20fd2b
AS
553 if (IS_ERR(map))
554 return PTR_ERR(map);
555
6e71b04a
CF
556 if (!(f.file->f_mode & FMODE_CAN_READ)) {
557 err = -EPERM;
558 goto err_put;
559 }
560
e4448ed8
AV
561 key = memdup_user(ukey, map->key_size);
562 if (IS_ERR(key)) {
563 err = PTR_ERR(key);
db20fd2b 564 goto err_put;
e4448ed8 565 }
db20fd2b 566
15a07b33 567 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
8f844938 568 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
15a07b33
AS
569 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
570 value_size = round_up(map->value_size, 8) * num_possible_cpus();
14dc6f04
MKL
571 else if (IS_FD_MAP(map))
572 value_size = sizeof(u32);
15a07b33
AS
573 else
574 value_size = map->value_size;
575
8ebe667c 576 err = -ENOMEM;
15a07b33 577 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
db20fd2b 578 if (!value)
8ebe667c
AS
579 goto free_key;
580
8f844938
MKL
581 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
582 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
15a07b33
AS
583 err = bpf_percpu_hash_copy(map, key, value);
584 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
585 err = bpf_percpu_array_copy(map, key, value);
557c0c6e
AS
586 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
587 err = bpf_stackmap_copy(map, key, value);
14dc6f04
MKL
588 } else if (IS_FD_ARRAY(map)) {
589 err = bpf_fd_array_map_lookup_elem(map, key, value);
590 } else if (IS_FD_HASH(map)) {
591 err = bpf_fd_htab_map_lookup_elem(map, key, value);
15a07b33
AS
592 } else {
593 rcu_read_lock();
597e8d96
DB
594 if (map->ops->map_lookup_elem_sys_only)
595 ptr = map->ops->map_lookup_elem_sys_only(map, key);
596 else
597 ptr = map->ops->map_lookup_elem(map, key);
15a07b33
AS
598 if (ptr)
599 memcpy(value, ptr, value_size);
600 rcu_read_unlock();
601 err = ptr ? 0 : -ENOENT;
602 }
8ebe667c 603
15a07b33 604 if (err)
8ebe667c 605 goto free_value;
db20fd2b
AS
606
607 err = -EFAULT;
15a07b33 608 if (copy_to_user(uvalue, value, value_size) != 0)
8ebe667c 609 goto free_value;
db20fd2b 610
a67edbf4 611 trace_bpf_map_lookup_elem(map, ufd, key, value);
db20fd2b
AS
612 err = 0;
613
8ebe667c
AS
614free_value:
615 kfree(value);
db20fd2b
AS
616free_key:
617 kfree(key);
618err_put:
619 fdput(f);
620 return err;
621}
622
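A hedged userspace counterpart for the plain-map case (function name illustrative; the per-CPU sizing rule is noted in the comment):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch for a plain (non-per-CPU) map. For per-CPU map types
 * the kernel above copies out round_up(value_size, 8) *
 * num_possible_cpus() bytes, so the value buffer must be sized
 * accordingly.
 */
static int array_map_lookup(int map_fd, int key, long long *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (unsigned long long)(unsigned long)&key;
	attr.value  = (unsigned long long)(unsigned long)value;

	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}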
cea851b5
DC
623static void maybe_wait_bpf_programs(struct bpf_map *map)
624{
625 /* Wait for any running BPF programs to complete so that
626 * userspace, when we return to it, knows that all programs
627 * that could be running use the new map value.
628 */
629 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
630 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
631 synchronize_rcu();
632}
633
3274f520 634#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
db20fd2b
AS
635
636static int map_update_elem(union bpf_attr *attr)
637{
535e7b4b
MS
638 void __user *ukey = u64_to_user_ptr(attr->key);
639 void __user *uvalue = u64_to_user_ptr(attr->value);
db20fd2b 640 int ufd = attr->map_fd;
db20fd2b
AS
641 struct bpf_map *map;
642 void *key, *value;
15a07b33 643 u32 value_size;
592867bf 644 struct fd f;
db20fd2b
AS
645 int err;
646
647 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
648 return -EINVAL;
649
592867bf 650 f = fdget(ufd);
c2101297 651 map = __bpf_map_get(f);
db20fd2b
AS
652 if (IS_ERR(map))
653 return PTR_ERR(map);
654
6e71b04a
CF
655 if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
656 err = -EPERM;
657 goto err_put;
658 }
659
e4448ed8
AV
660 key = memdup_user(ukey, map->key_size);
661 if (IS_ERR(key)) {
662 err = PTR_ERR(key);
db20fd2b 663 goto err_put;
e4448ed8 664 }
db20fd2b 665
15a07b33 666 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
8f844938 667 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
15a07b33
AS
668 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
669 value_size = round_up(map->value_size, 8) * num_possible_cpus();
670 else
671 value_size = map->value_size;
672
db20fd2b 673 err = -ENOMEM;
15a07b33 674 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
db20fd2b
AS
675 if (!value)
676 goto free_key;
677
678 err = -EFAULT;
15a07b33 679 if (copy_from_user(value, uvalue, value_size) != 0)
db20fd2b
AS
680 goto free_value;
681
6710e112
JDB
682 /* Need to create a kthread, thus must support schedule */
683 if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
684 err = map->ops->map_update_elem(map, key, value, attr->flags);
685 goto out;
686 }
687
b121d1e7
AS
688 /* must increment bpf_prog_active to avoid kprobe+bpf triggering from
 689 * inside a bpf map update or delete; otherwise deadlocks are possible
690 */
691 preempt_disable();
692 __this_cpu_inc(bpf_prog_active);
8f844938
MKL
693 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
694 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
15a07b33
AS
695 err = bpf_percpu_hash_update(map, key, value, attr->flags);
696 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
697 err = bpf_percpu_array_update(map, key, value, attr->flags);
d056a788 698 } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
4ed8ec52 699 map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
56f668df
MKL
700 map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
701 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
d056a788
DB
702 rcu_read_lock();
703 err = bpf_fd_array_map_update_elem(map, f.file, key, value,
704 attr->flags);
705 rcu_read_unlock();
bcc6b1b7
MKL
706 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
707 rcu_read_lock();
708 err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
709 attr->flags);
710 rcu_read_unlock();
15a07b33
AS
711 } else {
712 rcu_read_lock();
713 err = map->ops->map_update_elem(map, key, value, attr->flags);
714 rcu_read_unlock();
715 }
b121d1e7
AS
716 __this_cpu_dec(bpf_prog_active);
717 preempt_enable();
cea851b5 718 maybe_wait_bpf_programs(map);
6710e112 719out:
a67edbf4
DB
720 if (!err)
721 trace_bpf_map_update_elem(map, ufd, key, value);
db20fd2b
AS
722free_value:
723 kfree(value);
724free_key:
725 kfree(key);
726err_put:
727 fdput(f);
728 return err;
729}
730
731#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
732
733static int map_delete_elem(union bpf_attr *attr)
734{
535e7b4b 735 void __user *ukey = u64_to_user_ptr(attr->key);
db20fd2b 736 int ufd = attr->map_fd;
db20fd2b 737 struct bpf_map *map;
592867bf 738 struct fd f;
db20fd2b
AS
739 void *key;
740 int err;
741
742 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
743 return -EINVAL;
744
592867bf 745 f = fdget(ufd);
c2101297 746 map = __bpf_map_get(f);
db20fd2b
AS
747 if (IS_ERR(map))
748 return PTR_ERR(map);
749
6e71b04a
CF
750 if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
751 err = -EPERM;
752 goto err_put;
753 }
754
e4448ed8
AV
755 key = memdup_user(ukey, map->key_size);
756 if (IS_ERR(key)) {
757 err = PTR_ERR(key);
db20fd2b 758 goto err_put;
e4448ed8 759 }
db20fd2b 760
b121d1e7
AS
761 preempt_disable();
762 __this_cpu_inc(bpf_prog_active);
db20fd2b
AS
763 rcu_read_lock();
764 err = map->ops->map_delete_elem(map, key);
765 rcu_read_unlock();
b121d1e7
AS
766 __this_cpu_dec(bpf_prog_active);
767 preempt_enable();
cea851b5 768 maybe_wait_bpf_programs(map);
db20fd2b 769
a67edbf4
DB
770 if (!err)
771 trace_bpf_map_delete_elem(map, ufd, key);
db20fd2b
AS
772 kfree(key);
773err_put:
774 fdput(f);
775 return err;
776}
777
778/* last field in 'union bpf_attr' used by this command */
779#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
780
781static int map_get_next_key(union bpf_attr *attr)
782{
535e7b4b
MS
783 void __user *ukey = u64_to_user_ptr(attr->key);
784 void __user *unext_key = u64_to_user_ptr(attr->next_key);
db20fd2b 785 int ufd = attr->map_fd;
db20fd2b
AS
786 struct bpf_map *map;
787 void *key, *next_key;
592867bf 788 struct fd f;
db20fd2b
AS
789 int err;
790
791 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
792 return -EINVAL;
793
592867bf 794 f = fdget(ufd);
c2101297 795 map = __bpf_map_get(f);
db20fd2b
AS
796 if (IS_ERR(map))
797 return PTR_ERR(map);
798
6e71b04a
CF
799 if (!(f.file->f_mode & FMODE_CAN_READ)) {
800 err = -EPERM;
801 goto err_put;
802 }
803
8fe45924 804 if (ukey) {
e4448ed8
AV
805 key = memdup_user(ukey, map->key_size);
806 if (IS_ERR(key)) {
807 err = PTR_ERR(key);
8fe45924 808 goto err_put;
e4448ed8 809 }
8fe45924
TQ
810 } else {
811 key = NULL;
812 }
db20fd2b
AS
813
814 err = -ENOMEM;
815 next_key = kmalloc(map->key_size, GFP_USER);
816 if (!next_key)
817 goto free_key;
818
819 rcu_read_lock();
820 err = map->ops->map_get_next_key(map, key, next_key);
821 rcu_read_unlock();
822 if (err)
823 goto free_next_key;
824
825 err = -EFAULT;
826 if (copy_to_user(unext_key, next_key, map->key_size) != 0)
827 goto free_next_key;
828
a67edbf4 829 trace_bpf_map_next_key(map, ufd, key, next_key);
db20fd2b
AS
830 err = 0;
831
832free_next_key:
833 kfree(next_key);
834free_key:
835 kfree(key);
836err_put:
837 fdput(f);
838 return err;
839}
840
7de16e3a
JK
841static const struct bpf_prog_ops * const bpf_prog_types[] = {
842#define BPF_PROG_TYPE(_id, _name) \
843 [_id] = & _name ## _prog_ops,
844#define BPF_MAP_TYPE(_id, _ops)
845#include <linux/bpf_types.h>
846#undef BPF_PROG_TYPE
847#undef BPF_MAP_TYPE
848};
849
09756af4
AS
850static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
851{
6a9d2be7
DB
852 const struct bpf_prog_ops *ops;
853
854 if (type >= ARRAY_SIZE(bpf_prog_types))
855 return -EINVAL;
856 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
857 ops = bpf_prog_types[type];
858 if (!ops)
be9370a7 859 return -EINVAL;
09756af4 860
ab3f0063 861 if (!bpf_prog_is_dev_bound(prog->aux))
6a9d2be7 862 prog->aux->ops = ops;
ab3f0063
JK
863 else
864 prog->aux->ops = &bpf_offload_prog_ops;
be9370a7
JB
865 prog->type = type;
866 return 0;
09756af4
AS
867}
868
 869/* drop refcnt on maps used by eBPF program and free auxiliary data */
870static void free_used_maps(struct bpf_prog_aux *aux)
871{
872 int i;
873
874 for (i = 0; i < aux->used_map_cnt; i++)
875 bpf_map_put(aux->used_maps[i]);
876
877 kfree(aux->used_maps);
878}
879
5ccb071e
DB
880int __bpf_prog_charge(struct user_struct *user, u32 pages)
881{
882 unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
883 unsigned long user_bufs;
884
885 if (user) {
886 user_bufs = atomic_long_add_return(pages, &user->locked_vm);
887 if (user_bufs > memlock_limit) {
888 atomic_long_sub(pages, &user->locked_vm);
889 return -EPERM;
890 }
891 }
892
893 return 0;
894}
895
896void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
897{
898 if (user)
899 atomic_long_sub(pages, &user->locked_vm);
900}
901
aaac3ba9
AS
902static int bpf_prog_charge_memlock(struct bpf_prog *prog)
903{
904 struct user_struct *user = get_current_user();
5ccb071e 905 int ret;
aaac3ba9 906
5ccb071e
DB
907 ret = __bpf_prog_charge(user, prog->pages);
908 if (ret) {
aaac3ba9 909 free_uid(user);
5ccb071e 910 return ret;
aaac3ba9 911 }
5ccb071e 912
aaac3ba9
AS
913 prog->aux->user = user;
914 return 0;
915}
916
917static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
918{
919 struct user_struct *user = prog->aux->user;
920
5ccb071e 921 __bpf_prog_uncharge(user, prog->pages);
aaac3ba9
AS
922 free_uid(user);
923}
924
dc4bb0e2
MKL
925static int bpf_prog_alloc_id(struct bpf_prog *prog)
926{
927 int id;
928
929 spin_lock_bh(&prog_idr_lock);
930 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
931 if (id > 0)
932 prog->aux->id = id;
933 spin_unlock_bh(&prog_idr_lock);
934
935 /* id is in [1, INT_MAX) */
936 if (WARN_ON_ONCE(!id))
937 return -ENOSPC;
938
939 return id > 0 ? 0 : id;
940}
941
b16d9aa4 942static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
dc4bb0e2
MKL
943{
944 /* cBPF to eBPF migrations are currently not in the idr store. */
945 if (!prog->aux->id)
946 return;
947
b16d9aa4
MKL
948 if (do_idr_lock)
949 spin_lock_bh(&prog_idr_lock);
950 else
951 __acquire(&prog_idr_lock);
952
dc4bb0e2 953 idr_remove(&prog_idr, prog->aux->id);
b16d9aa4
MKL
954
955 if (do_idr_lock)
956 spin_unlock_bh(&prog_idr_lock);
957 else
958 __release(&prog_idr_lock);
dc4bb0e2
MKL
959}
960
1aacde3d 961static void __bpf_prog_put_rcu(struct rcu_head *rcu)
abf2e7d6
AS
962{
963 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
964
965 free_used_maps(aux);
aaac3ba9 966 bpf_prog_uncharge_memlock(aux->prog);
afdb09c7 967 security_bpf_prog_free(aux);
abf2e7d6
AS
968 bpf_prog_free(aux->prog);
969}
970
b16d9aa4 971static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
09756af4 972{
a67edbf4
DB
973 if (atomic_dec_and_test(&prog->aux->refcnt)) {
974 trace_bpf_prog_put_rcu(prog);
34ad5580 975 /* bpf_prog_free_id() must be called first */
b16d9aa4 976 bpf_prog_free_id(prog, do_idr_lock);
74451e66 977 bpf_prog_kallsyms_del(prog);
1aacde3d 978 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
a67edbf4 979 }
09756af4 980}
b16d9aa4
MKL
981
982void bpf_prog_put(struct bpf_prog *prog)
983{
984 __bpf_prog_put(prog, true);
985}
e2e9b654 986EXPORT_SYMBOL_GPL(bpf_prog_put);
09756af4
AS
987
988static int bpf_prog_release(struct inode *inode, struct file *filp)
989{
990 struct bpf_prog *prog = filp->private_data;
991
1aacde3d 992 bpf_prog_put(prog);
09756af4
AS
993 return 0;
994}
995
7bd509e3
DB
996#ifdef CONFIG_PROC_FS
997static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
998{
999 const struct bpf_prog *prog = filp->private_data;
f1f7714e 1000 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
7bd509e3 1001
f1f7714e 1002 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
7bd509e3
DB
1003 seq_printf(m,
1004 "prog_type:\t%u\n"
1005 "prog_jited:\t%u\n"
f1f7714e 1006 "prog_tag:\t%s\n"
7bd509e3
DB
1007 "memlock:\t%llu\n",
1008 prog->type,
1009 prog->jited,
f1f7714e 1010 prog_tag,
7bd509e3
DB
1011 prog->pages * 1ULL << PAGE_SHIFT);
1012}
1013#endif
1014
f66e448c 1015const struct file_operations bpf_prog_fops = {
7bd509e3
DB
1016#ifdef CONFIG_PROC_FS
1017 .show_fdinfo = bpf_prog_show_fdinfo,
1018#endif
1019 .release = bpf_prog_release,
6e71b04a
CF
1020 .read = bpf_dummy_read,
1021 .write = bpf_dummy_write,
09756af4
AS
1022};
1023
b2197755 1024int bpf_prog_new_fd(struct bpf_prog *prog)
aa79781b 1025{
afdb09c7
CF
1026 int ret;
1027
1028 ret = security_bpf_prog(prog);
1029 if (ret < 0)
1030 return ret;
1031
aa79781b
DB
1032 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1033 O_RDWR | O_CLOEXEC);
1034}
1035
113214be 1036static struct bpf_prog *____bpf_prog_get(struct fd f)
09756af4 1037{
09756af4
AS
1038 if (!f.file)
1039 return ERR_PTR(-EBADF);
09756af4
AS
1040 if (f.file->f_op != &bpf_prog_fops) {
1041 fdput(f);
1042 return ERR_PTR(-EINVAL);
1043 }
1044
c2101297 1045 return f.file->private_data;
09756af4
AS
1046}
1047
59d3656d 1048struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
92117d84 1049{
59d3656d
BB
1050 if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
1051 atomic_sub(i, &prog->aux->refcnt);
92117d84
AS
1052 return ERR_PTR(-EBUSY);
1053 }
1054 return prog;
1055}
59d3656d
BB
1056EXPORT_SYMBOL_GPL(bpf_prog_add);
1057
c540594f
DB
1058void bpf_prog_sub(struct bpf_prog *prog, int i)
1059{
1060 /* Only to be used for undoing previous bpf_prog_add() in some
1061 * error path. We still know that another entity in our call
1062 * path holds a reference to the program, thus atomic_sub() can
1063 * be safely used in such cases!
1064 */
1065 WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
1066}
1067EXPORT_SYMBOL_GPL(bpf_prog_sub);
1068
59d3656d
BB
1069struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
1070{
1071 return bpf_prog_add(prog, 1);
1072}
97bc402d 1073EXPORT_SYMBOL_GPL(bpf_prog_inc);
92117d84 1074
b16d9aa4 1075/* prog_idr_lock should have been held */
a6f6df69 1076struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
b16d9aa4
MKL
1077{
1078 int refold;
1079
1080 refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
1081
1082 if (refold >= BPF_MAX_REFCNT) {
1083 __bpf_prog_put(prog, false);
1084 return ERR_PTR(-EBUSY);
1085 }
1086
1087 if (!refold)
1088 return ERR_PTR(-ENOENT);
1089
1090 return prog;
1091}
a6f6df69 1092EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
b16d9aa4 1093
040ee692 1094bool bpf_prog_get_ok(struct bpf_prog *prog,
288b3de5 1095 enum bpf_prog_type *attach_type, bool attach_drv)
248f346f 1096{
288b3de5
JK
1097 /* not an attachment, just a refcount inc, always allow */
1098 if (!attach_type)
1099 return true;
248f346f
JK
1100
1101 if (prog->type != *attach_type)
1102 return false;
288b3de5 1103 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
248f346f
JK
1104 return false;
1105
1106 return true;
1107}
1108
1109static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
288b3de5 1110 bool attach_drv)
09756af4
AS
1111{
1112 struct fd f = fdget(ufd);
1113 struct bpf_prog *prog;
1114
113214be 1115 prog = ____bpf_prog_get(f);
09756af4
AS
1116 if (IS_ERR(prog))
1117 return prog;
288b3de5 1118 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
113214be
DB
1119 prog = ERR_PTR(-EINVAL);
1120 goto out;
1121 }
09756af4 1122
92117d84 1123 prog = bpf_prog_inc(prog);
113214be 1124out:
09756af4
AS
1125 fdput(f);
1126 return prog;
1127}
113214be
DB
1128
1129struct bpf_prog *bpf_prog_get(u32 ufd)
1130{
288b3de5 1131 return __bpf_prog_get(ufd, NULL, false);
113214be
DB
1132}
1133
248f346f 1134struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
288b3de5 1135 bool attach_drv)
248f346f 1136{
288b3de5 1137 struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);
248f346f
JK
1138
1139 if (!IS_ERR(prog))
1140 trace_bpf_prog_get_type(prog);
1141 return prog;
1142}
6c8dfe21 1143EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
248f346f 1144
09756af4 1145/* last field in 'union bpf_attr' used by this command */
1f6f4cb7 1146#define BPF_PROG_LOAD_LAST_FIELD prog_ifindex
09756af4
AS
1147
1148static int bpf_prog_load(union bpf_attr *attr)
1149{
1150 enum bpf_prog_type type = attr->prog_type;
1151 struct bpf_prog *prog;
1152 int err;
1153 char license[128];
1154 bool is_gpl;
1155
1156 if (CHECK_ATTR(BPF_PROG_LOAD))
1157 return -EINVAL;
1158
e07b98d9
DM
1159 if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
1160 return -EINVAL;
1161
09756af4 1162 /* copy eBPF program license from user space */
535e7b4b 1163 if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
09756af4
AS
1164 sizeof(license) - 1) < 0)
1165 return -EFAULT;
1166 license[sizeof(license) - 1] = 0;
1167
1168 /* eBPF programs must be GPL compatible to use GPL-ed functions */
1169 is_gpl = license_is_gpl_compatible(license);
1170
ef0915ca
DB
1171 if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
1172 return -E2BIG;
09756af4 1173
2541517c
AS
1174 if (type == BPF_PROG_TYPE_KPROBE &&
1175 attr->kern_version != LINUX_VERSION_CODE)
1176 return -EINVAL;
1177
80b7d819
CF
1178 if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
1179 type != BPF_PROG_TYPE_CGROUP_SKB &&
1180 !capable(CAP_SYS_ADMIN))
1be7f75d
AS
1181 return -EPERM;
1182
09756af4
AS
1183 /* plain bpf_prog allocation */
1184 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
1185 if (!prog)
1186 return -ENOMEM;
1187
afdb09c7 1188 err = security_bpf_prog_alloc(prog->aux);
aaac3ba9
AS
1189 if (err)
1190 goto free_prog_nouncharge;
1191
afdb09c7
CF
1192 err = bpf_prog_charge_memlock(prog);
1193 if (err)
1194 goto free_prog_sec;
1195
09756af4
AS
1196 prog->len = attr->insn_cnt;
1197
1198 err = -EFAULT;
535e7b4b 1199 if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
aafe6ae9 1200 bpf_prog_insn_size(prog)) != 0)
09756af4
AS
1201 goto free_prog;
1202
1203 prog->orig_prog = NULL;
a91263d5 1204 prog->jited = 0;
09756af4
AS
1205
1206 atomic_set(&prog->aux->refcnt, 1);
a91263d5 1207 prog->gpl_compatible = is_gpl ? 1 : 0;
09756af4 1208
1f6f4cb7 1209 if (attr->prog_ifindex) {
ab3f0063
JK
1210 err = bpf_prog_offload_init(prog, attr);
1211 if (err)
1212 goto free_prog;
1213 }
1214
09756af4
AS
1215 /* find program type: socket_filter vs tracing_filter */
1216 err = find_prog_type(type, prog);
1217 if (err < 0)
1218 goto free_prog;
1219
cb4d2b3f
MKL
1220 prog->aux->load_time = ktime_get_boot_ns();
1221 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
1222 if (err)
1223 goto free_prog;
1224
09756af4 1225 /* run eBPF verifier */
9bac3d6d 1226 err = bpf_check(&prog, attr);
09756af4
AS
1227 if (err < 0)
1228 goto free_used_maps;
1229
1230 /* eBPF program is ready to be JITed */
d1c55ab5 1231 prog = bpf_prog_select_runtime(prog, &err);
04fd61ab
AS
1232 if (err < 0)
1233 goto free_used_maps;
09756af4 1234
dc4bb0e2
MKL
1235 err = bpf_prog_alloc_id(prog);
1236 if (err)
1237 goto free_used_maps;
1238
60d1fd42
DB
1239 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
1240 * effectively publicly exposed. However, retrieving via
1241 * bpf_prog_get_fd_by_id() will take another reference,
1242 * therefore it cannot be gone underneath us.
1243 *
1244 * Only for the time /after/ successful bpf_prog_new_fd()
1245 * and before returning to userspace, we might just hold
1246 * one reference and any parallel close on that fd could
1247 * rip everything out. Hence, below notifications must
1248 * happen before bpf_prog_new_fd().
1249 *
1250 * Also, any failure handling from this point onwards must
1251 * be using bpf_prog_put() given the program is exposed.
1252 */
74451e66 1253 bpf_prog_kallsyms_add(prog);
a67edbf4 1254 trace_bpf_prog_load(prog, err);
60d1fd42
DB
1255
1256 err = bpf_prog_new_fd(prog);
1257 if (err < 0)
1258 bpf_prog_put(prog);
09756af4
AS
1259 return err;
1260
1261free_used_maps:
1262 free_used_maps(prog->aux);
1263free_prog:
aaac3ba9 1264 bpf_prog_uncharge_memlock(prog);
afdb09c7
CF
1265free_prog_sec:
1266 security_bpf_prog_free(prog->aux);
aaac3ba9 1267free_prog_nouncharge:
09756af4
AS
1268 bpf_prog_free(prog);
1269 return err;
1270}
1271
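A hedged userspace sketch of the smallest load this path accepts: "r0 = 0; exit" as a socket filter, so no kern_version check applies (helper name illustrative).

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch: a two-instruction program that just returns 0. */
static int load_trivial_prog(void)
{
	static const struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },		/* return r0 */
	};
	static const char license[] = "GPL";
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (unsigned long long)(unsigned long)insns;
	attr.insn_cnt  = 2;
	attr.license   = (unsigned long long)(unsigned long)license;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}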
6e71b04a 1272#define BPF_OBJ_LAST_FIELD file_flags
b2197755
DB
1273
1274static int bpf_obj_pin(const union bpf_attr *attr)
1275{
6e71b04a 1276 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
b2197755
DB
1277 return -EINVAL;
1278
535e7b4b 1279 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
b2197755
DB
1280}
1281
1282static int bpf_obj_get(const union bpf_attr *attr)
1283{
6e71b04a
CF
1284 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
1285 attr->file_flags & ~BPF_OBJ_FLAG_MASK)
b2197755
DB
1286 return -EINVAL;
1287
6e71b04a
CF
1288 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
1289 attr->file_flags);
b2197755
DB
1290}
1291
f4324551
DM
1292#ifdef CONFIG_CGROUP_BPF
1293
464bc0fd 1294#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
174a79ff 1295
5a67da2a 1296static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
174a79ff 1297{
5a67da2a 1298 struct bpf_prog *prog = NULL;
174a79ff
JF
1299 int ufd = attr->target_fd;
1300 struct bpf_map *map;
1301 struct fd f;
1302 int err;
1303
1304 f = fdget(ufd);
1305 map = __bpf_map_get(f);
1306 if (IS_ERR(map))
1307 return PTR_ERR(map);
1308
5a67da2a
JF
1309 if (attach) {
1310 prog = bpf_prog_get_type(attr->attach_bpf_fd,
1311 BPF_PROG_TYPE_SK_SKB);
1312 if (IS_ERR(prog)) {
1313 fdput(f);
1314 return PTR_ERR(prog);
1315 }
174a79ff
JF
1316 }
1317
5a67da2a 1318 err = sock_map_prog(map, prog, attr->attach_type);
174a79ff
JF
1319 if (err) {
1320 fdput(f);
5a67da2a
JF
1321 if (prog)
1322 bpf_prog_put(prog);
ae2b27b8 1323 return err;
174a79ff
JF
1324 }
1325
1326 fdput(f);
ae2b27b8 1327 return 0;
174a79ff 1328}
f4324551 1329
324bda9e
AS
1330#define BPF_F_ATTACH_MASK \
1331 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
1332
f4324551
DM
1333static int bpf_prog_attach(const union bpf_attr *attr)
1334{
7f677633 1335 enum bpf_prog_type ptype;
f4324551
DM
1336 struct bpf_prog *prog;
1337 struct cgroup *cgrp;
7f677633 1338 int ret;
f4324551
DM
1339
1340 if (!capable(CAP_NET_ADMIN))
1341 return -EPERM;
1342
1343 if (CHECK_ATTR(BPF_PROG_ATTACH))
1344 return -EINVAL;
1345
324bda9e 1346 if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
7f677633
AS
1347 return -EINVAL;
1348
f4324551
DM
1349 switch (attr->attach_type) {
1350 case BPF_CGROUP_INET_INGRESS:
1351 case BPF_CGROUP_INET_EGRESS:
b2cd1257 1352 ptype = BPF_PROG_TYPE_CGROUP_SKB;
f4324551 1353 break;
61023658
DA
1354 case BPF_CGROUP_INET_SOCK_CREATE:
1355 ptype = BPF_PROG_TYPE_CGROUP_SOCK;
1356 break;
40304b2a
LB
1357 case BPF_CGROUP_SOCK_OPS:
1358 ptype = BPF_PROG_TYPE_SOCK_OPS;
1359 break;
ebc614f6
RG
1360 case BPF_CGROUP_DEVICE:
1361 ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
1362 break;
464bc0fd
JF
1363 case BPF_SK_SKB_STREAM_PARSER:
1364 case BPF_SK_SKB_STREAM_VERDICT:
5a67da2a 1365 return sockmap_get_from_fd(attr, true);
f4324551
DM
1366 default:
1367 return -EINVAL;
1368 }
1369
b2cd1257
DA
1370 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1371 if (IS_ERR(prog))
1372 return PTR_ERR(prog);
1373
1374 cgrp = cgroup_get_from_fd(attr->target_fd);
1375 if (IS_ERR(cgrp)) {
1376 bpf_prog_put(prog);
1377 return PTR_ERR(cgrp);
1378 }
1379
324bda9e
AS
1380 ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
1381 attr->attach_flags);
7f677633
AS
1382 if (ret)
1383 bpf_prog_put(prog);
b2cd1257
DA
1384 cgroup_put(cgrp);
1385
7f677633 1386 return ret;
f4324551
DM
1387}
1388
1389#define BPF_PROG_DETACH_LAST_FIELD attach_type
1390
1391static int bpf_prog_detach(const union bpf_attr *attr)
1392{
324bda9e
AS
1393 enum bpf_prog_type ptype;
1394 struct bpf_prog *prog;
f4324551 1395 struct cgroup *cgrp;
7f677633 1396 int ret;
f4324551
DM
1397
1398 if (!capable(CAP_NET_ADMIN))
1399 return -EPERM;
1400
1401 if (CHECK_ATTR(BPF_PROG_DETACH))
1402 return -EINVAL;
1403
1404 switch (attr->attach_type) {
1405 case BPF_CGROUP_INET_INGRESS:
1406 case BPF_CGROUP_INET_EGRESS:
324bda9e
AS
1407 ptype = BPF_PROG_TYPE_CGROUP_SKB;
1408 break;
61023658 1409 case BPF_CGROUP_INET_SOCK_CREATE:
324bda9e
AS
1410 ptype = BPF_PROG_TYPE_CGROUP_SOCK;
1411 break;
40304b2a 1412 case BPF_CGROUP_SOCK_OPS:
324bda9e 1413 ptype = BPF_PROG_TYPE_SOCK_OPS;
f4324551 1414 break;
ebc614f6
RG
1415 case BPF_CGROUP_DEVICE:
1416 ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
1417 break;
5a67da2a
JF
1418 case BPF_SK_SKB_STREAM_PARSER:
1419 case BPF_SK_SKB_STREAM_VERDICT:
324bda9e 1420 return sockmap_get_from_fd(attr, false);
f4324551
DM
1421 default:
1422 return -EINVAL;
1423 }
1424
324bda9e
AS
1425 cgrp = cgroup_get_from_fd(attr->target_fd);
1426 if (IS_ERR(cgrp))
1427 return PTR_ERR(cgrp);
1428
1429 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1430 if (IS_ERR(prog))
1431 prog = NULL;
1432
1433 ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
1434 if (prog)
1435 bpf_prog_put(prog);
1436 cgroup_put(cgrp);
7f677633 1437 return ret;
f4324551 1438}
40304b2a 1439
468e2f64
AS
1440#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
1441
1442static int bpf_prog_query(const union bpf_attr *attr,
1443 union bpf_attr __user *uattr)
1444{
1445 struct cgroup *cgrp;
1446 int ret;
1447
1448 if (!capable(CAP_NET_ADMIN))
1449 return -EPERM;
1450 if (CHECK_ATTR(BPF_PROG_QUERY))
1451 return -EINVAL;
1452 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
1453 return -EINVAL;
1454
1455 switch (attr->query.attach_type) {
1456 case BPF_CGROUP_INET_INGRESS:
1457 case BPF_CGROUP_INET_EGRESS:
1458 case BPF_CGROUP_INET_SOCK_CREATE:
1459 case BPF_CGROUP_SOCK_OPS:
ebc614f6 1460 case BPF_CGROUP_DEVICE:
468e2f64
AS
1461 break;
1462 default:
1463 return -EINVAL;
1464 }
1465 cgrp = cgroup_get_from_fd(attr->query.target_fd);
1466 if (IS_ERR(cgrp))
1467 return PTR_ERR(cgrp);
1468 ret = cgroup_bpf_query(cgrp, attr, uattr);
1469 cgroup_put(cgrp);
1470 return ret;
1471}
f4324551
DM
1472#endif /* CONFIG_CGROUP_BPF */
1473
1cf1cae9
AS
1474#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
1475
1476static int bpf_prog_test_run(const union bpf_attr *attr,
1477 union bpf_attr __user *uattr)
1478{
1479 struct bpf_prog *prog;
1480 int ret = -ENOTSUPP;
1481
1482 if (CHECK_ATTR(BPF_PROG_TEST_RUN))
1483 return -EINVAL;
1484
1485 prog = bpf_prog_get(attr->test.prog_fd);
1486 if (IS_ERR(prog))
1487 return PTR_ERR(prog);
1488
1489 if (prog->aux->ops->test_run)
1490 ret = prog->aux->ops->test_run(prog, attr, uattr);
1491
1492 bpf_prog_put(prog);
1493 return ret;
1494}
1495
34ad5580
MKL
1496#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
1497
1498static int bpf_obj_get_next_id(const union bpf_attr *attr,
1499 union bpf_attr __user *uattr,
1500 struct idr *idr,
1501 spinlock_t *lock)
1502{
1503 u32 next_id = attr->start_id;
1504 int err = 0;
1505
1506 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
1507 return -EINVAL;
1508
1509 if (!capable(CAP_SYS_ADMIN))
1510 return -EPERM;
1511
1512 next_id++;
1513 spin_lock_bh(lock);
1514 if (!idr_get_next(idr, &next_id))
1515 err = -ENOENT;
1516 spin_unlock_bh(lock);
1517
1518 if (!err)
1519 err = put_user(next_id, &uattr->next_id);
1520
1521 return err;
1522}
1523
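A hedged userspace sketch of the ID-iteration protocol this implements (CAP_SYS_ADMIN assumed, per the check above):

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch: walk all loaded program IDs by repeatedly asking
 * for the next ID after the last one seen.
 */
static void list_prog_ids(void)
{
	union bpf_attr attr;
	unsigned int id = 0;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.start_id = id;
		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
			    sizeof(attr)))
			break;	/* typically -ENOENT: no more IDs */
		id = attr.next_id;
		printf("prog id %u\n", id);
	}
}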
b16d9aa4
MKL
1524#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
1525
1526static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
1527{
1528 struct bpf_prog *prog;
1529 u32 id = attr->prog_id;
1530 int fd;
1531
1532 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
1533 return -EINVAL;
1534
1535 if (!capable(CAP_SYS_ADMIN))
1536 return -EPERM;
1537
1538 spin_lock_bh(&prog_idr_lock);
1539 prog = idr_find(&prog_idr, id);
1540 if (prog)
1541 prog = bpf_prog_inc_not_zero(prog);
1542 else
1543 prog = ERR_PTR(-ENOENT);
1544 spin_unlock_bh(&prog_idr_lock);
1545
1546 if (IS_ERR(prog))
1547 return PTR_ERR(prog);
1548
1549 fd = bpf_prog_new_fd(prog);
1550 if (fd < 0)
1551 bpf_prog_put(prog);
1552
1553 return fd;
1554}
1555
6e71b04a 1556#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
bd5f5f4e
MKL
1557
1558static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
1559{
1560 struct bpf_map *map;
1561 u32 id = attr->map_id;
6e71b04a 1562 int f_flags;
bd5f5f4e
MKL
1563 int fd;
1564
6e71b04a
CF
1565 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
1566 attr->open_flags & ~BPF_OBJ_FLAG_MASK)
bd5f5f4e
MKL
1567 return -EINVAL;
1568
1569 if (!capable(CAP_SYS_ADMIN))
1570 return -EPERM;
1571
6e71b04a
CF
1572 f_flags = bpf_get_file_flag(attr->open_flags);
1573 if (f_flags < 0)
1574 return f_flags;
1575
bd5f5f4e
MKL
1576 spin_lock_bh(&map_idr_lock);
1577 map = idr_find(&map_idr, id);
1578 if (map)
1579 map = bpf_map_inc_not_zero(map, true);
1580 else
1581 map = ERR_PTR(-ENOENT);
1582 spin_unlock_bh(&map_idr_lock);
1583
1584 if (IS_ERR(map))
1585 return PTR_ERR(map);
1586
6e71b04a 1587 fd = bpf_map_new_fd(map, f_flags);
bd5f5f4e 1588 if (fd < 0)
327dbab3 1589 bpf_map_put_with_uref(map);
bd5f5f4e
MKL
1590
1591 return fd;
1592}
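A note tying the error path above to the commit subject at the top of this page:

/* bpf_map_inc_not_zero(map, true) took both a refcnt and a usercnt
 * reference, so a failed bpf_map_new_fd() must drop both via
 * bpf_map_put_with_uref() -- a plain bpf_map_put() would leak usercnt
 * and keep map_release_uref() from ever running.
 */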
1593
1e270976
MKL
1594static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
1595 const union bpf_attr *attr,
1596 union bpf_attr __user *uattr)
1597{
1598 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
1599 struct bpf_prog_info info = {};
1600 u32 info_len = attr->info.info_len;
1601 char __user *uinsns;
1602 u32 ulen;
1603 int err;
1604
1605 err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
1606 if (err)
1607 return err;
1608 info_len = min_t(u32, sizeof(info), info_len);
1609
1610 if (copy_from_user(&info, uinfo, info_len))
89b09689 1611 return -EFAULT;
1e270976
MKL
1612
1613 info.type = prog->type;
1614 info.id = prog->aux->id;
cb4d2b3f
MKL
1615 info.load_time = prog->aux->load_time;
1616 info.created_by_uid = from_kuid_munged(current_user_ns(),
1617 prog->aux->user->uid);
1e270976
MKL
1618
1619 memcpy(info.tag, prog->tag, sizeof(prog->tag));
cb4d2b3f
MKL
1620 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
1621
1622 ulen = info.nr_map_ids;
1623 info.nr_map_ids = prog->aux->used_map_cnt;
1624 ulen = min_t(u32, info.nr_map_ids, ulen);
1625 if (ulen) {
721e08da 1626 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
cb4d2b3f
MKL
1627 u32 i;
1628
1629 for (i = 0; i < ulen; i++)
1630 if (put_user(prog->aux->used_maps[i]->id,
1631 &user_map_ids[i]))
1632 return -EFAULT;
1633 }
1e270976
MKL
1634
1635 if (!capable(CAP_SYS_ADMIN)) {
1636 info.jited_prog_len = 0;
1637 info.xlated_prog_len = 0;
1638 goto done;
1639 }
1640
1641 ulen = info.jited_prog_len;
1642 info.jited_prog_len = prog->jited_len;
1643 if (info.jited_prog_len && ulen) {
1644 uinsns = u64_to_user_ptr(info.jited_prog_insns);
1645 ulen = min_t(u32, info.jited_prog_len, ulen);
1646 if (copy_to_user(uinsns, prog->bpf_func, ulen))
1647 return -EFAULT;
1648 }
1649
1650 ulen = info.xlated_prog_len;
9975a54b 1651 info.xlated_prog_len = bpf_prog_insn_size(prog);
1e270976
MKL
1652 if (info.xlated_prog_len && ulen) {
1653 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
1654 ulen = min_t(u32, info.xlated_prog_len, ulen);
1655 if (copy_to_user(uinsns, prog->insnsi, ulen))
1656 return -EFAULT;
1657 }
1658
1659done:
1660 if (copy_to_user(uinfo, &info, info_len) ||
1661 put_user(info_len, &uattr->info.info_len))
1662 return -EFAULT;
1663
1664 return 0;
1665}
1666
1667static int bpf_map_get_info_by_fd(struct bpf_map *map,
1668 const union bpf_attr *attr,
1669 union bpf_attr __user *uattr)
1670{
1671 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
1672 struct bpf_map_info info = {};
1673 u32 info_len = attr->info.info_len;
1674 int err;
1675
1676 err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
1677 if (err)
1678 return err;
1679 info_len = min_t(u32, sizeof(info), info_len);
1680
1681 info.type = map->map_type;
1682 info.id = map->id;
1683 info.key_size = map->key_size;
1684 info.value_size = map->value_size;
1685 info.max_entries = map->max_entries;
1686 info.map_flags = map->map_flags;
ad5b177b 1687 memcpy(info.name, map->name, sizeof(map->name));
1e270976
MKL
1688
1689 if (copy_to_user(uinfo, &info, info_len) ||
1690 put_user(info_len, &uattr->info.info_len))
1691 return -EFAULT;
1692
1693 return 0;
1694}
1695
1696#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
1697
1698static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
1699 union bpf_attr __user *uattr)
1700{
1701 int ufd = attr->info.bpf_fd;
1702 struct fd f;
1703 int err;
1704
1705 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
1706 return -EINVAL;
1707
1708 f = fdget(ufd);
1709 if (!f.file)
1710 return -EBADFD;
1711
1712 if (f.file->f_op == &bpf_prog_fops)
1713 err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
1714 uattr);
1715 else if (f.file->f_op == &bpf_map_fops)
1716 err = bpf_map_get_info_by_fd(f.file->private_data, attr,
1717 uattr);
1718 else
1719 err = -EINVAL;
1720
1721 fdput(f);
1722 return err;
1723}
1724
99c55f7d
AS
1725SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
1726{
1727 union bpf_attr attr = {};
1728 int err;
1729
4a7070b7 1730 if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
99c55f7d
AS
1731 return -EPERM;
1732
1e270976
MKL
1733 err = check_uarg_tail_zero(uattr, sizeof(attr), size);
1734 if (err)
1735 return err;
1736 size = min_t(u32, size, sizeof(attr));
99c55f7d
AS
1737
1738 /* copy attributes from user space, may be less than sizeof(bpf_attr) */
1739 if (copy_from_user(&attr, uattr, size) != 0)
1740 return -EFAULT;
1741
afdb09c7
CF
1742 err = security_bpf(cmd, &attr, size);
1743 if (err < 0)
1744 return err;
1745
99c55f7d
AS
1746 switch (cmd) {
1747 case BPF_MAP_CREATE:
1748 err = map_create(&attr);
1749 break;
db20fd2b
AS
1750 case BPF_MAP_LOOKUP_ELEM:
1751 err = map_lookup_elem(&attr);
1752 break;
1753 case BPF_MAP_UPDATE_ELEM:
1754 err = map_update_elem(&attr);
1755 break;
1756 case BPF_MAP_DELETE_ELEM:
1757 err = map_delete_elem(&attr);
1758 break;
1759 case BPF_MAP_GET_NEXT_KEY:
1760 err = map_get_next_key(&attr);
1761 break;
09756af4
AS
1762 case BPF_PROG_LOAD:
1763 err = bpf_prog_load(&attr);
1764 break;
b2197755
DB
1765 case BPF_OBJ_PIN:
1766 err = bpf_obj_pin(&attr);
1767 break;
1768 case BPF_OBJ_GET:
1769 err = bpf_obj_get(&attr);
1770 break;
f4324551
DM
1771#ifdef CONFIG_CGROUP_BPF
1772 case BPF_PROG_ATTACH:
1773 err = bpf_prog_attach(&attr);
1774 break;
1775 case BPF_PROG_DETACH:
1776 err = bpf_prog_detach(&attr);
1777 break;
468e2f64
AS
1778 case BPF_PROG_QUERY:
1779 err = bpf_prog_query(&attr, uattr);
1780 break;
f4324551 1781#endif
1cf1cae9
AS
1782 case BPF_PROG_TEST_RUN:
1783 err = bpf_prog_test_run(&attr, uattr);
1784 break;
34ad5580
MKL
1785 case BPF_PROG_GET_NEXT_ID:
1786 err = bpf_obj_get_next_id(&attr, uattr,
1787 &prog_idr, &prog_idr_lock);
1788 break;
1789 case BPF_MAP_GET_NEXT_ID:
1790 err = bpf_obj_get_next_id(&attr, uattr,
1791 &map_idr, &map_idr_lock);
1792 break;
b16d9aa4
MKL
1793 case BPF_PROG_GET_FD_BY_ID:
1794 err = bpf_prog_get_fd_by_id(&attr);
1795 break;
bd5f5f4e
MKL
1796 case BPF_MAP_GET_FD_BY_ID:
1797 err = bpf_map_get_fd_by_id(&attr);
1798 break;
1e270976
MKL
1799 case BPF_OBJ_GET_INFO_BY_FD:
1800 err = bpf_obj_get_info_by_fd(&attr, uattr);
1801 break;
99c55f7d
AS
1802 default:
1803 err = -EINVAL;
1804 break;
1805 }
1806
1807 return err;
1808}
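Finally, a hedged userspace sketch exercising the dispatch above end to end (map_fd assumed to come from an earlier BPF_MAP_CREATE with int keys and long long values; helper name illustrative):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch: write, then read back, one element of an existing
 * array map.
 */
static int map_set_then_get(int map_fd, int key, long long set,
			    long long *got)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (unsigned long long)(unsigned long)&key;
	attr.value  = (unsigned long long)(unsigned long)&set;
	attr.flags  = BPF_ANY;	/* create or update */
	if (syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
		return -1;

	/* Re-zero the attr: CHECK_ATTR() rejects stale non-zero bytes
	 * (here, flags) past the last field a command uses, and
	 * BPF_MAP_LOOKUP_ELEM's last field is value.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (unsigned long long)(unsigned long)&key;
	attr.value  = (unsigned long long)(unsigned long)got;
	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}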