/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

#define BPF_CGROUP_STORAGE_NEST_MAX 8

struct bpf_cgroup_storage_info {
        struct task_struct *task;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
 * to use bpf cgroup storage simultaneously.
 */
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
                bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list_map;
        struct list_head list_cg;
        struct rb_node node;
        struct rcu_head rcu;
};

struct bpf_cgroup_link {
        struct bpf_link link;
        struct cgroup *cgroup;
        enum bpf_attach_type type;
};

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
        struct bpf_cgroup_link *link;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* attached progs to this cgroup and attach flags
         * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
         * have either zero or one element
         * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* list of cgroup shared storages */
        struct list_head storages;

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array *inactive;

        /* reference counter used to detach bpf programs after cgroup removal */
        struct percpu_ref refcnt;

        /* cgroup_bpf is released using a work queue */
        struct work_struct release_work;
};
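
/* Usage sketch (illustrative, not part of the original header): the flags
 * semantics documented in struct cgroup_bpf above are chosen by user space
 * at attach time, e.g. through libbpf's bpf_prog_attach(). With
 * BPF_F_ALLOW_MULTI several programs may coexist on one attach point; with
 * no flags or BPF_F_ALLOW_OVERRIDE at most one program is kept:
 *
 *      int cg_fd = open("/sys/fs/cgroup/mygroup", O_RDONLY); // hypothetical cgroup path
 *      int err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS,
 *                                BPF_F_ALLOW_MULTI);
 */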

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp,
                        struct bpf_prog *prog, struct bpf_prog *replace_prog,
                        struct bpf_cgroup_link *link,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        struct bpf_cgroup_link *link,
                        enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
                      struct bpf_prog *prog, struct bpf_prog *replace_prog,
                      struct bpf_cgroup_link *link, enum bpf_attach_type type,
                      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
                                         *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
        enum bpf_cgroup_storage_type stype;
        int i, err = 0;

        preempt_disable();
        for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
                if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
                        continue;

                this_cpu_write(bpf_cgroup_storage_info[i].task, current);
                for_each_cgroup_storage_type(stype)
                        this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
                                       storage[stype]);
                goto out;
        }
        err = -EBUSY;
        WARN_ON_ONCE(1);

out:
        preempt_enable();
        return err;
}

static inline void bpf_cgroup_storage_unset(void)
{
        int i;

        for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
                if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;

                this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
                return;
        }
}
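
/* Usage sketch (illustrative, not part of the original header): a program
 * runner is expected to publish the matching storage pointers for the
 * current task before invoking a cgroup program and to clear the slot right
 * after it returns, roughly (item is a placeholder for a bpf_prog_list-style
 * element carrying the program's cgroup_storage array):
 *
 *      if (unlikely(bpf_cgroup_storage_set(item->cgroup_storage)))
 *              return 0;       // all BPF_CGROUP_STORAGE_NEST_MAX slots busy
 *      ret = BPF_PROG_RUN(prog, ctx);
 *      bpf_cgroup_storage_unset();
 *
 * bpf_get_local_storage() then picks the per-cpu slot whose ->task matches
 * current, which keeps nested or preempting runs on the same cpu from
 * observing each other's storage pointers.
 */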

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                        enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
                                                    BPF_CGROUP_INET_INGRESS); \
 \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
                typeof(sk) __sk = sk_to_full_sk(sk); \
                if (sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
                                                      BPF_CGROUP_INET_EGRESS); \
        } \
        __ret; \
})
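
/* Usage sketch (illustrative, not part of the original header): callers on
 * the receive and transmit paths invoke these wrappers directly and treat a
 * non-zero result as "drop this skb", roughly:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *      if (err)
 *              goto drop;      // typically surfaced to the caller as -EPERM
 *
 * When CONFIG_CGROUP_BPF is off, the stubs at the bottom of this file
 * compile to ({ 0; }), so such call sites need no #ifdef.
 */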

#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) { \
                __ret = __cgroup_bpf_run_filter_sk(sk, type); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
                                                          NULL); \
        __ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) { \
                lock_sock(sk); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
                                                          t_ctx); \
                release_sock(sk); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
                                            sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro with
 * the listener-sk so that the cgroup-bpf progs of the
 * listener-sk will be run.
 *
 * Regardless of syncookie mode,
 * calling bpf_setsockopt on the listener-sk makes no sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
                                                         sock_ops, \
                                                         BPF_CGROUP_SOCK_OPS); \
        __ret; \
})
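
/* Usage sketch (illustrative, not part of the original header): a caller in
 * the situation described above would set up a bpf_sock_ops_kern whose ->sk
 * is the request_sock and then run the listener's programs explicitly,
 * roughly (req_sk and listener_sk are placeholder names):
 *
 *      struct bpf_sock_ops_kern sock_ops = { .sk = req_sk };
 *      ret = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 *
 * The plain BPF_CGROUP_RUN_PROG_SOCK_OPS() below instead derives the
 * fullsock itself via sk_to_full_sk((sock_ops)->sk).
 */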

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled && (sock_ops)->sk) { \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
                if (__sk && sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
                                                                 sock_ops, \
                                                                 BPF_CGROUP_SOCK_OPS); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access, \
                                                          BPF_CGROUP_DEVICE); \
 \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
                                                       buf, count, pos, \
                                                       BPF_CGROUP_SYSCTL); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
                                                           optname, optval, \
                                                           optlen, \
                                                           kernel_optval); \
        __ret; \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                get_user(__ret, optlen); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
                                       max_optlen, retval) \
({ \
        int __ret = retval; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
                                                           optname, optval, \
                                                           optlen, max_optlen, \
                                                           retval); \
        __ret; \
})
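
/* Usage sketch (illustrative, not part of the original header): the
 * syscall-level setsockopt/getsockopt paths are expected to wrap the
 * protocol handlers with these macros, roughly:
 *
 *      err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sk, &level, &optname, optval,
 *                                           &optlen, &kernel_optval);
 *      // ... protocol ->setsockopt / ->getsockopt runs here ...
 *      max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *      err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sk, level, optname, optval,
 *                                           optlen, max_optlen, err);
 *
 * allowing BPF programs to rewrite the option value or override the return
 * value before it reaches user space.
 */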

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline int bpf_cgroup_storage_set(
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value) {
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                                   void *key, void *value, u64 flags) {
        return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */