/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License.  See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

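/* The static key above lets hot paths skip cgroup-BPF entirely while no
 * program is attached anywhere on the system. Callers are expected to test
 * it before entering the __cgroup_bpf_run_*() helpers below; a minimal
 * sketch of the wrapper pattern (see <linux/bpf-cgroup.h> for the real
 * macros):
 *
 *	#define cgroup_bpf_enabled \
 *		static_branch_unlikely(&cgroup_bpf_enabled_key)
 *
 *	if (cgroup_bpf_enabled)
 *		err = __cgroup_bpf_run_filter_skb(sk, skb,
 *						  BPF_CGROUP_INET_EGRESS);
 */
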
/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			for_each_cgroup_storage_type(stype) {
				bpf_cgroup_storage_unlink(pl->storage[stype]);
				bpf_cgroup_storage_free(pl->storage[stype]);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

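/* Worked example of the rule above: if /A has a program attached with no
 * flags, any attach to /A/B fails with -EPERM. If /A attached with
 * BPF_F_ALLOW_OVERRIDE, /A/B may attach its own program, which then
 * overrides /A's program in /A/B's effective array. A parent attached
 * with BPF_F_ALLOW_MULTI places no restriction on its descendants.
 */
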
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			for_each_cgroup_storage_type(stype)
				progs->items[cnt].cgroup_storage[stype] =
					pl->storage[stype];
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	rcu_assign_pointer(*array, progs);
	return 0;
}

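/* Worked example: with /A (BPF_F_ALLOW_MULTI, progs A1 and A2) and child
 * /A/B (BPF_F_ALLOW_MULTI, prog B1), the walk above builds /A/B's
 * effective array as [B1, A1, A2]: the cgroup's own programs fill the
 * front and each ancestor's multi-programs follow. A non-multi ancestor
 * contributes nothing once cnt > 0, which implements the overriding
 * described above.
 */
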
static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog) {
				/* disallow attaching the same prog twice */
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -EINVAL;
			}
		}

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}

		pl_was_allocated = true;
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl) {
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -ENOMEM;
			}
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			for_each_cgroup_storage_type(stype) {
				old_storage[stype] = pl->storage[stype];
				bpf_cgroup_storage_unlink(old_storage[stype]);
			}
			pl_was_allocated = false;
		}
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
	}

	cgrp->bpf.flags[type] = flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	static_branch_inc(&cgroup_bpf_enabled_key);
	for_each_cgroup_storage_type(stype) {
		if (!old_storage[stype])
			continue;
		bpf_cgroup_storage_free(old_storage[stype]);
	}
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storage[stype], cgrp, type);
	return 0;

cleanup:
	/* and cleanup the prog list */
	pl->prog = old_prog;
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_free(pl->storage[stype]);
		pl->storage[stype] = old_storage[stype];
		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
	}
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

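/* Example of driving the attach path above from user space (illustrative
 * sketch, not part of this file; assumes prog_fd holds a loaded program
 * of a matching type and cgroup_fd is an open cgroup v2 directory):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * With BPF_F_ALLOW_MULTI the call may be repeated with further programs
 * (up to BPF_CGROUP_MAX_PROGS); with no flags a second attach replaces
 * the existing program, and descendant cgroups cannot attach at all.
 */
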
/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	enum bpf_cgroup_storage_type stype;
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach MULTI prog the user has to specify valid FD
			 * of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_unlink(pl->storage[stype]);
		bpf_cgroup_storage_free(pl->storage[stype]);
	}
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}

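/* Example of the corresponding detach from user space (illustrative
 * sketch). For a BPF_F_ALLOW_MULTI cgroup, attach_bpf_fd must identify
 * the exact program; NONE/OVERRIDE cgroups also accept an invalid FD, as
 * implemented above:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_EGRESS;
 *	err = syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */
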
/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

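/* Example query from user space (illustrative sketch). A first call with
 * prog_ids left NULL returns only the count and flags, so the ID buffer
 * can be sized for a second call; BPF_F_QUERY_EFFECTIVE selects the
 * effective array instead of the locally attached list:
 *
 *	__u32 ids[BPF_CGROUP_MAX_PROGS];
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_ids = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt = BPF_CGROUP_MAX_PROGS;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */
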
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 __bpf_prog_run_save_cb);
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

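/* Example of a program this hook executes (illustrative sketch, compiled
 * separately with clang as BPF_PROG_TYPE_CGROUP_SKB; returning 1 allows
 * the packet, 0 makes the function above return -EPERM):
 *
 *	#include <linux/bpf.h>
 *	#include "bpf_helpers.h"
 *
 *	SEC("cgroup_skb/egress")
 *	int deny_large(struct __sk_buff *skb)
 *	{
 *		return skb->len > 1400 ? 0 : 1;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */
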
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

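/* Example consumer (illustrative sketch): a BPF_PROG_TYPE_CGROUP_SOCK_ADDR
 * program attached as BPF_CGROUP_INET4_CONNECT sees the sockaddr above as
 * struct bpf_sock_addr and can deny or rewrite the destination:
 *
 *	SEC("cgroup/connect4")
 *	int deny_smtp(struct bpf_sock_addr *ctx)
 *	{
 *		if (ctx->user_port == bpf_htons(25))
 *			return 0;	// connect(2) fails with -EPERM
 *		return 1;
 *	}
 */
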
/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);

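/* Example (illustrative sketch): a BPF_PROG_TYPE_CGROUP_DEVICE program
 * unpacking the access_type encoded above as (access << 16) | dev_type:
 *
 *	SEC("cgroup/dev")
 *	int allow_dev_null(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		int dev_type = ctx->access_type & 0xffff;
 *
 *		// permit only the /dev/null character device, 1:3
 *		return dev_type == BPF_DEVCG_DEV_CHAR &&
 *		       ctx->major == 1 && ctx->minor == 3;
 *	}
 */
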
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   void **new_buf, enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min(PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);

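/* Example (illustrative sketch): a BPF_PROG_TYPE_CGROUP_SYSCTL program
 * attached as BPF_CGROUP_SYSCTL that consumes the context built above.
 * Only the write field is directly readable (see sysctl_is_valid_access
 * below); everything else goes through the helpers:
 *
 *	SEC("cgroup/sysctl")
 *	int read_only_sysctl(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;	// deny all sysctl writes
 *	}
 */
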
static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;

	/* Avoid leading slash. */
	if (!ret)
		return tmp_ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

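/* Worked example of the semantics above: with dst_len == 4 and
 * src == "echo" (src_len == 4), dst ends up "ech\0" and -E2BIG is
 * returned; with src == "hi" (src_len == 2), dst ends up "hi\0\0" and 2
 * is returned.
 */
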
BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) ||
	    off % size || type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct bpf_sysctl, write):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       FIELD_SIZEOF(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};