/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
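
/*
 * cgroup_bpf_enabled is a static key: it is only switched on once a cgroup
 * BPF program has been attached, so the BPF_CGROUP_RUN_*() wrappers below
 * reduce to a patched-out branch on systems where the feature is unused.
 */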

DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	struct bpf_storage_buffer *buf;
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
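
/*
 * The attach-flags semantics above are what user space selects with the
 * bpf(2) BPF_PROG_ATTACH command.  A rough sketch (fd names are
 * illustrative, not part of this header):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;			// open()ed cgroup dir
 *	attr.attach_bpf_fd = prog_fd;			// from BPF_PROG_LOAD
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;		// allow several progs
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * With flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list above holds at
 * most one entry per attach type, as the comment in the struct says.
 */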

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex. */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
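
/*
 * As the comment above notes, these three wrappers take cgroup_mutex around
 * the corresponding __cgroup_bpf_*() helper; code paths that already hold
 * cgroup_mutex call the __-prefixed variants directly.
 */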

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_storage_buffer *buf;

	for_each_cgroup_storage_type(stype) {
		if (!storage[stype])
			continue;

		buf = READ_ONCE(storage[stype]->buf);
		this_cpu_write(bpf_cgroup_storage[stype], &buf->data[0]);
	}
}
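
/*
 * bpf_cgroup_storage_set() is meant to run immediately before a cgroup BPF
 * program is invoked: for each storage type it publishes, in the per-CPU
 * bpf_cgroup_storage[] slot declared above, the pointer that the program's
 * local-storage helper then reads while executing on this CPU.
 */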

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    BPF_CGROUP_INET_INGRESS); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
							    BPF_CGROUP_INET_EGRESS); \
	} \
	__ret; \
})
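
/*
 * Illustrative call-site sketch (not part of this header): the networking
 * code invokes these wrappers with the socket that owns the skb and treats
 * a non-zero return as a verdict to drop, roughly:
 *
 *	int err = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *	if (err)
 *		return err;
 *
 * Note that the egress variant first re-resolves request/timewait sockets
 * to the full socket via sk_to_full_sk() and only runs the program for a
 * full socket.
 */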

#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) { \
		__ret = __cgroup_bpf_run_filter_sk(sk, type); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  NULL); \
	__ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) { \
		lock_sock(sk); \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  t_ctx); \
		release_sock(sk); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && (sock_ops)->sk) { \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 sock_ops, \
								 BPF_CGROUP_SOCK_OPS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access, \
							  BPF_CGROUP_DEVICE); \
	__ret; \
})
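
/*
 * Illustrative only: the device-cgroup hook is expected to be called from
 * device open/mknod permission checks, with a non-zero verdict mapped to
 * -EPERM by the caller, e.g.:
 *
 *	if (BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(BPF_DEVCG_DEV_CHAR,
 *					      MAJOR(dev), MINOR(dev),
 *					      BPF_DEVCG_ACC_READ))
 *		return -EPERM;
 */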

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

#else /* CONFIG_CGROUP_BPF */

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */