/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

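/*
 * Illustrative note (added commentary): this check is what makes the
 * syscall forward- and backward-compatible.  An older binary passing a
 * smaller struct (actual_size <= expected_size) is accepted as-is, the
 * kernel treats the missing tail as zeroes.  A newer binary passing a
 * bigger struct is accepted only if every byte past the fields this
 * kernel knows about is zero; e.g. with expected_size = 48 and
 * actual_size = 64, bytes 48..63 must all be 0 or the call fails with
 * -E2BIG.
 */
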
const struct bpf_map_ops bpf_map_offload_ops = {
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
};

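/*
 * Added commentary on the array_index_nospec() use below: the bounds
 * check on the user-supplied attr->map_type can be bypassed under
 * speculative execution (Spectre variant 1), letting a mispredicted
 * branch index bpf_map_types[] out of bounds.  Clamping the index with
 * array_index_nospec() keeps the array access in range even on the
 * speculative path.
 */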
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

void *bpf_map_area_alloc(size_t size, int numa_node)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, GFP_USER | flags, numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
					   __builtin_return_address(0));
}

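/*
 * Added commentary: the allocator above prefers kmalloc_node() only up
 * to PAGE_ALLOC_COSTLY_ORDER pages, where physically contiguous memory
 * is usually cheap to get, and falls back to vmalloc for larger sizes
 * or when kmalloc fails; bpf_map_area_free() can then use kvfree(),
 * which handles both kinds of allocation.
 */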
void bpf_map_area_free(void *area)
{
	kvfree(area);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = attr->map_flags;
	map->numa_node = bpf_map_attr_numa_node(attr);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

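/*
 * Worked example (added commentary): for BPF_MAP_GET_NEXT_KEY the last
 * field is next_key, so CHECK_ATTR(BPF_MAP_GET_NEXT_KEY) runs
 * memchr_inv() over everything in 'union bpf_attr' that lies past
 * attr->next_key and evaluates to true (reject) if any of those
 * trailing bytes is non-zero.
 */
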
/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
 * Return 0 on success and < 0 on error.
 */
static int bpf_obj_name_cpy(char *dst, const char *src)
{
	const char *end = src + BPF_OBJ_NAME_LEN;

	memset(dst, 0, BPF_OBJ_NAME_LEN);

	/* Copy all isalnum() and '_' chars */
	while (src < end && *src) {
		if (!isalnum(*src) && *src != '_')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
	if (src == end)
		return -EINVAL;

	return 0;
}

#define BPF_MAP_CREATE_LAST_FIELD map_ifindex
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name);
	if (err)
		goto free_map_nouncharge;

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_sec:
	security_bpf_map_free(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

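/*
 * Userspace usage sketch (added commentary, not part of the original
 * source): creating an array map through this command boils down to
 * zeroing a 'union bpf_attr', filling the BPF_MAP_CREATE fields and
 * invoking the syscall:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_type = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size = sizeof(__u32);
 *	attr.value_size = sizeof(__u64);
 *	attr.max_entries = 256;
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success map_fd is a new O_CLOEXEC file descriptor for the map.
 */
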
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

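/*
 * Added commentary: a map carries two counters.  map->refcnt counts
 * every reference (kernel-internal and user-visible) and the map is
 * freed when it hits zero; map->usercnt tracks only user-visible
 * references such as fds, and when it drops to zero
 * map_release_uref() gives fd-holding map types (e.g. a prog array) a
 * chance to release the objects they reference even while the map
 * itself stays alive.
 */
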
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

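/*
 * Worked example (added commentary): for per-CPU maps the value buffer
 * spans all possible CPUs, so with value_size = 12 and 4 possible CPUs
 * userspace must supply round_up(12, 8) * 4 = 64 bytes; fd-based maps
 * (IS_FD_MAP) instead copy out a single u32 id.
 */
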
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
		goto out;
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		goto out;
	}

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside a bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

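/*
 * Usage sketch (added commentary, not part of the original source):
 * passing a NULL key starts iteration from the first key, so userspace
 * can walk a map with 4-byte keys roughly like this, where map_fd is a
 * hypothetical fd obtained from BPF_MAP_CREATE:
 *
 *	__u32 key, next_key;
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	attr.key = 0;
 *	attr.next_key = (__u64)(unsigned long)&next_key;
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *		       sizeof(attr)) == 0) {
 *		key = next_key;
 *		attr.key = (__u64)(unsigned long)&key;
 *	}
 *
 * The loop terminates with -ENOENT (via errno) once the last key has
 * been returned.
 */
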
static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = bpf_prog_types[type];
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		int i;

		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);

		for (i = 0; i < prog->aux->func_cnt; i++)
			bpf_prog_kallsyms_del(prog->aux->func[i]);
		bpf_prog_kallsyms_del(prog);

		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later for some of them specifying expected_attach_type
 * at load time became required so that the program could be validated properly.
 * Programs of types that are allowed to be loaded both w/ and w/o (for
 * backward compatibility) expected_attach_type should have the default attach
 * type assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	}
}

static int
bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
				enum bpf_attach_type expected_attach_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
			return 0;
		default:
			return -EINVAL;
		}
	default:
		return 0;
	}
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD expected_attach_type

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	bpf_prog_load_fixup_attach_type(attr);
	if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->expected_attach_type = attr->expected_attach_type;

	prog->aux->offload_requested = !!attr->prog_ifindex;

	err = security_bpf_prog_alloc(prog->aux);
	if (err)
		goto free_prog_nouncharge;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_sec;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_init(prog, attr);
		if (err)
			goto free_prog;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boot_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
	if (err)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	if (!prog->bpf_func)
		prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_sec:
	security_bpf_prog_free(prog->aux);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

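/*
 * Userspace usage sketch (added commentary, not part of the original
 * source): a minimal BPF_PROG_LOAD of a do-nothing socket filter,
 * using the BPF_MOV64_IMM()/BPF_EXIT_INSN() insn macros for brevity:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns = (__u64)(unsigned long)insns;
 *	attr.insn_cnt = 2;
 *	attr.license = (__u64)(unsigned long)"GPL";
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * BPF_PROG_TYPE_SOCKET_FILTER is one of the two types loadable without
 * CAP_SYS_ADMIN (see the capability check above).
 */
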
#define BPF_OBJ_LAST_FIELD file_flags

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

struct bpf_raw_tracepoint {
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
};

static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
{
	struct bpf_raw_tracepoint *raw_tp = filp->private_data;

	if (raw_tp->prog) {
		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
		bpf_prog_put(raw_tp->prog);
	}
	kfree(raw_tp);
	return 0;
}

static const struct file_operations bpf_raw_tp_fops = {
	.release	= bpf_raw_tracepoint_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd

static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
	struct bpf_raw_tracepoint *raw_tp;
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
	char tp_name[128];
	int tp_fd, err;

	if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
			      sizeof(tp_name) - 1) < 0)
		return -EFAULT;
	tp_name[sizeof(tp_name) - 1] = 0;

	btp = bpf_find_raw_tracepoint(tp_name);
	if (!btp)
		return -ENOENT;

	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
	if (!raw_tp)
		return -ENOMEM;
	raw_tp->btp = btp;

	prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
				 BPF_PROG_TYPE_RAW_TRACEPOINT);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out_free_tp;
	}

	err = bpf_probe_register(raw_tp->btp, prog);
	if (err)
		goto out_put_prog;

	raw_tp->prog = prog;
	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
				 O_CLOEXEC);
	if (tp_fd < 0) {
		bpf_probe_unregister(raw_tp->btp, prog);
		err = tp_fd;
		goto out_put_prog;
	}
	return tp_fd;

out_put_prog:
	bpf_prog_put(prog);
out_free_tp:
	kfree(raw_tp);
	return err;
}

#ifdef CONFIG_CGROUP_BPF

static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
					     enum bpf_attach_type attach_type)
{
	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
	default:
		return 0;
	}
}

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int sockmap_get_from_fd(const union bpf_attr *attr,
			       int type, bool attach)
{
	struct bpf_prog *prog = NULL;
	int ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int err;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (attach) {
		prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
		if (IS_ERR(prog)) {
			fdput(f);
			return PTR_ERR(prog);
		}
	}

	err = sock_map_prog(map, prog, attr->attach_type);
	if (err) {
		fdput(f);
		if (prog)
			bpf_prog_put(prog);
		return err;
	}

	fdput(f);
	return 0;
}

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
	default:
		return -EINVAL;
	}

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);
	cgroup_put(cgrp);
	return ret;
}

#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
		break;
	default:
		return -EINVAL;
	}
	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);
	ret = cgroup_bpf_query(cgrp, attr, uattr);
	cgroup_put(cgrp);
	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

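/*
 * Usage sketch (added commentary, not part of the original source):
 * attaching a loaded program to a cgroup pairs a cgroup directory fd
 * with a program fd; both fds below are hypothetical:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * BPF_PROG_ATTACH requires CAP_NET_ADMIN, and the program type must
 * match the attach type per the switch in bpf_prog_attach().
 */
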
#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

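/*
 * Added commentary: the *_GET_NEXT_ID and *_GET_FD_BY_ID commands
 * together let a CAP_SYS_ADMIN tool enumerate all loaded objects:
 * start with attr.start_id = 0, repeat BPF_PROG_GET_NEXT_ID (feeding
 * each returned id back in as start_id) until it fails with -ENOENT,
 * and convert any id of interest into an fd with
 * BPF_PROG_GET_FD_BY_ID for a subsequent BPF_OBJ_GET_INFO_BY_FD query.
 */
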
#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr)
{
	int i;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (prog->aux->used_maps[i] == (void *)addr)
			return prog->aux->used_maps[i];
	return NULL;
}

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u64 imm;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok())
				insns[i].imm = 0;
			continue;
		}

		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm);
		if (map) {
			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
			insns[i].imm = map->id;
			insns[i + 1].imm = 0;
			continue;
		}

		if (!bpf_dump_raw_ok() &&
		    imm == (unsigned long)prog->aux) {
			insns[i].imm = 0;
			insns[i + 1].imm = 0;
			continue;
		}
	}

	return insns;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i]))
				return -EFAULT;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok()) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok()) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);
			if (copy_to_user(uinsns, prog->bpf_func, ulen))
				return -EFAULT;
		} else {
			info.jited_prog_insns = 0;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}