include/linux/bpf-cgroup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	struct bpf_storage_buffer *buf;
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	return BPF_CGROUP_STORAGE_SHARED;
}

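/*
 * Publish the storage buffers of the program about to run into the per-CPU
 * bpf_cgroup_storage pointers, one slot per storage type, so the buffers can
 * be reached while the program executes.
 */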
static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_storage_buffer *buf;

	for_each_cgroup_storage_type(stype) {
		if (!storage[stype])
			continue;

		buf = READ_ONCE(storage[stype]->buf);
		this_cpu_write(bpf_cgroup_storage[stype], &buf->data[0]);
	}
}

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)	\
({	\
	int __ret = 0;	\
	if (cgroup_bpf_enabled)	\
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,	\
						    BPF_CGROUP_INET_INGRESS); \
	\
	__ret;	\
})

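/*
 * Egress wrapper: only runs when the skb is owned by the socket the caller
 * passed in. sk_to_full_sk() maps a request socket back to its listener, and
 * sk_fullsock() skips the run when only a non-full socket is available.
 */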
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)	\
({	\
	int __ret = 0;	\
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {	\
		typeof(sk) __sk = sk_to_full_sk(sk);	\
		if (sk_fullsock(__sk))	\
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	\
						BPF_CGROUP_INET_EGRESS); \
	}	\
	__ret;	\
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)	\
({	\
	int __ret = 0;	\
	if (cgroup_bpf_enabled) {	\
		__ret = __cgroup_bpf_run_filter_sk(sk, type);	\
	}	\
	__ret;	\
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)	\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)	\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)	\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)	\
({	\
	int __ret = 0;	\
	if (cgroup_bpf_enabled)	\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,	\
							  NULL);	\
	__ret;	\
})

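/*
 * Variant of BPF_CGROUP_RUN_SA_PROG() that takes and releases the socket
 * lock around the program run and forwards an optional t_ctx to the
 * sock_addr program context.
 */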
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)	\
({	\
	int __ret = 0;	\
	if (cgroup_bpf_enabled) {	\
		lock_sock(sk);	\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,	\
							  t_ctx);	\
		release_sock(sk);	\
	}	\
	__ret;	\
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)	\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)	\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)	\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)	\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

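/*
 * Note: typeof(sk) below relies on a variable named 'sk' being in scope at
 * every expansion site; the macro itself only takes sock_ops. The program is
 * run against the full socket derived from sock_ops->sk.
 */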
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)	\
({	\
	int __ret = 0;	\
	if (cgroup_bpf_enabled && (sock_ops)->sk) {	\
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	\
		if (__sk && sk_fullsock(__sk))	\
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	\
								 sock_ops,	\
							 BPF_CGROUP_SOCK_OPS); \
	}	\
	__ret;	\
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	\
({	\
	int __ret = 0;	\
	if (cgroup_bpf_enabled)	\
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor,	\
							  access,	\
							  BPF_CGROUP_DEVICE);	\
	\
	__ret;	\
})
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

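/* CONFIG_CGROUP_BPF is not set: stub out the API so call sites need no #ifdefs. */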
struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */