kernel/bpf/syscall.c (at commit "bpf: Add BPF_PROG_GET_FD_BY_ID")

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
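/* For illustration: each BPF_MAP_TYPE(_id, _ops) entry in
 * <linux/bpf_types.h> expands to one initializer in the table above,
 * e.g. (assuming the usual registration of the array map):
 *
 *	BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
 *	==>	[BPF_MAP_TYPE_ARRAY] = &array_map_ops,
 *
 * while BPF_PROG_TYPE() entries expand to nothing here, so the same
 * header can also build the bpf_prog_types[] table later in this file.
 */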

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
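/* Note (illustrative): with 4 KiB pages and the default
 * PAGE_ALLOC_COSTLY_ORDER of 3, allocations up to 32 KiB try kmalloc()
 * first and fall back to __vmalloc() on failure; anything larger goes
 * straight to vmalloc space. bpf_map_area_free() can use kvfree()
 * precisely because it handles both cases.
 */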

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}
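/* Note (illustrative): idr_alloc_cyclic() returns the new ID (here in
 * [1, INT_MAX)) on success and a negative errno such as -ENOMEM or
 * -ENOSPC on failure; 0 is never a valid return for a range starting at
 * 1, hence the WARN_ON_ONCE() above.
 */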

static void bpf_map_free_id(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	idr_remove(&map_idr, map->id);
	spin_unlock_bh(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
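/* For illustration: with BPF_MAP_CREATE_LAST_FIELD defined as
 * inner_map_fd below, CHECK_ATTR(BPF_MAP_CREATE) scans every byte of
 * *attr that lies past attr->inner_map_fd and evaluates to true (reject)
 * if any of them is non-zero, i.e. if userspace set a field this kernel
 * does not know is valid for this command.
 */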

#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_id;

	trace_bpf_map_create(map, err);
	return err;

free_id:
	bpf_map_free_id(map);
free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
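/* For illustration (userspace, not part of this file): a minimal sketch
 * of driving this command through the raw syscall, assuming the uapi
 * definitions from <linux/bpf.h>:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the return value is the new map fd; unused tail fields of
 * attr must be zero or CHECK_ATTR() above fails the call with -EINVAL.
 */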

/* If an error is returned, the fd is released.
 * On success the caller should complete the fd access with a matching
 * fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
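/* Note (illustrative): capping the refcount at BPF_MAX_REFCNT is a
 * hardening measure; a count that could be driven arbitrarily high might
 * overflow and wrap, turning a later put into a premature free. Callers
 * see -EBUSY once the limit is reached.
 */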

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;
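	/* Example (illustrative): for a per-CPU map with value_size == 12
	 * on a machine with 4 possible CPUs, the copy buffer is
	 * round_up(12, 8) * 4 == 16 * 4 == 64 bytes, one 8-byte-aligned
	 * slot per possible CPU.
	 */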

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid a kprobe+bpf program
	 * triggering from inside this map update or delete; otherwise
	 * deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		err = -ENOMEM;
		key = kmalloc(map->key_size, GFP_USER);
		if (!key)
			goto err_put;

		err = -EFAULT;
		if (copy_from_user(key, ukey, map->key_size) != 0)
			goto free_key;
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_flags

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to userspace, and userspace may have already
		 * taken a reference on it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
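/* For illustration (userspace, not part of this file): a minimal sketch
 * of loading a trivial "return 0" program through this command, assuming
 * the uapi definitions from <linux/bpf.h>:
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt  = 2,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */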

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}
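/* For illustration (userspace, not part of this file): the intended
 * iteration pattern is to start from id 0 and keep feeding back the
 * returned next_id until the kernel reports -ENOENT, e.g.:
 *
 *	__u32 id = 0;
 *	for (;;) {
 *		union bpf_attr attr = { .start_id = id };
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			    sizeof(attr)) < 0)
 *			break;			// errno == ENOENT when done
 *		id = attr.next_id;
 *		// ... e.g. fetch an fd for id via BPF_PROG_GET_FD_BY_ID ...
 *	}
 */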

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}