/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

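/*
 * cgroup_bpf_enabled expands to a static branch: until the first cgroup
 * BPF program is attached (and cgroup_bpf_enabled_key is incremented),
 * the wrappers below compile down to a patched-out jump, so the feature
 * costs almost nothing on systems that never use it.
 */
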
struct cgroup_bpf {
	/*
	 * Store two sets of bpf_prog pointers: prog[] holds the programs
	 * attached directly to this cgroup, while effective[] holds the
	 * programs that actually run when this cgroup is accessed (the
	 * directly attached program, or one inherited from an ancestor).
	 */
	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
	struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
	/* true if descendants may not replace the attached program */
	bool disallow_override[MAX_BPF_ATTACH_TYPE];
};

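/*
 * Inheritance sketch (simplified): a newly created cgroup starts with its
 * parent's view of the world, roughly
 *
 *	for each attach type t:
 *		cgrp->bpf.effective[t] = parent->bpf.effective[t];
 *
 * and a later attach rewrites effective[t] across the subtree, except in
 * descendants that have their own program attached.
 */
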
/* Release this cgroup's program references; called on cgroup destruction. */
void cgroup_bpf_put(struct cgroup *cgrp);
/* Copy the parent's effective programs into a newly created cgroup. */
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);

int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
			struct bpf_prog *prog, enum bpf_attach_type type,
			bool overridable);

/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, bool overridable);

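/*
 * Attach-path sketch, simplified from bpf_prog_attach() in
 * kernel/bpf/syscall.c: the BPF_PROG_ATTACH command resolves the target
 * cgroup and the program from file descriptors and then calls the wrapper
 * above:
 *
 *	cgrp = cgroup_get_from_fd(attr->target_fd);
 *	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
 *	ret  = cgroup_bpf_update(cgrp, prog, attr->attach_type,
 *				 attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
 */
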
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

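/*
 * Each helper above returns 0 when the operation is allowed (including
 * the case where no program is attached) and -EPERM when the attached
 * program rejects it, so callers can propagate the result as an errno.
 */
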
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

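/*
 * Typical ingress call site in this kernel generation:
 * sk_filter_trim_cap() in net/core/filter.c runs this wrapper before the
 * socket filter and drops the skb when it returns nonzero.
 */
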
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		/* request socks are mapped to their full listener sock */    \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

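/*
 * Typical egress call site: ip_finish_output() (and its IPv6 counterpart)
 * runs this wrapper on the output path; the sk == skb->sk check above
 * ensures the program only runs for packets owned by the local sending
 * socket.
 */
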
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk,			       \
						 BPF_CGROUP_INET_SOCK_CREATE); \
	}								       \
	__ret;								       \
})

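/*
 * Typical call site: inet_create() runs this wrapper once a new AF_INET
 * socket has been set up, so a BPF_CGROUP_INET_SOCK_CREATE program can
 * veto socket creation by causing -EPERM to be returned.
 */
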
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
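
/*
 * Typical caller, simplified from tcp_call_bpf() in include/net/tcp.h:
 *
 *	struct bpf_sock_ops_kern sock_ops;
 *
 *	memset(&sock_ops, 0, sizeof(sock_ops));
 *	sock_ops.sk = sk;
 *	sock_ops.op = op;
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */
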
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
				      struct cgroup *parent) {}

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */