include/linux/netfilter.h
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
	return -(verdict >> NF_VERDICT_QBITS);
}

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
				   const union nf_inet_addr *a2)
{
	return a1->all[0] == a2->all[0] &&
	       a1->all[1] == a2->all[1] &&
	       a1->all[2] == a2->all[2] &&
	       a1->all[3] == a2->all[3];
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
				     union nf_inet_addr *result,
				     const union nf_inet_addr *mask)
{
	result->all[0] = a1->all[0] & mask->all[0];
	result->all[1] = a1->all[1] & mask->all[1];
	result->all[2] = a1->all[2] & mask->all[2];
	result->all[3] = a1->all[3] & mask->all[3];
}

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;

struct sock;

struct nf_hook_state {
	unsigned int hook;
	int thresh;
	u_int8_t pf;
	struct net_device *in;
	struct net_device *out;
	struct sock *sk;
	struct net *net;
	struct nf_hook_entry __rcu *hook_entries;
	int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state);
struct nf_hook_ops {
	struct list_head list;

	/* User fills in from here down. */
	nf_hookfn *hook;
	struct net_device *dev;
	void *priv;
	u_int8_t pf;
	unsigned int hooknum;
	/* Hooks are ordered in ascending priority. */
	int priority;
};

struct nf_hook_entry {
	struct nf_hook_entry __rcu *next;
	struct nf_hook_ops ops;
	const struct nf_hook_ops *orig_ops;
};

static inline void nf_hook_state_init(struct nf_hook_state *p,
				      struct nf_hook_entry *hook_entry,
				      unsigned int hook,
				      int thresh, u_int8_t pf,
				      struct net_device *indev,
				      struct net_device *outdev,
				      struct sock *sk,
				      struct net *net,
				      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	p->hook = hook;
	p->thresh = thresh;
	p->pf = pf;
	p->in = indev;
	p->out = outdev;
	p->sk = sk;
	p->net = net;
	RCU_INIT_POINTER(p->hook_entries, hook_entry);
	p->okfn = okfn;
}

struct nf_sockopt_ops {
	struct list_head list;

	u_int8_t pf;

	/* Non-inclusive ranges: use 0/0/NULL to never get called. */
	int set_optmin;
	int set_optmax;
	int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
	int (*compat_set)(struct sock *sk, int optval,
			  void __user *user, unsigned int len);
#endif
	int get_optmin;
	int get_optmax;
	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
	int (*compat_get)(struct sock *sk, int optval,
			  void __user *user, int *len);
#endif
	/* Use the module struct to lock set/get code in place */
	struct module *owner;
};

/* Functions to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n);

int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);

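/*
 * Example (illustrative sketch only, not part of the original header): a
 * module would typically define an nf_hookfn plus an nf_hook_ops instance
 * and hand it to nf_register_net_hook().  Identifiers such as my_hook_fn and
 * my_hook_ops are hypothetical; NF_ACCEPT, NFPROTO_IPV4 and
 * NF_INET_PRE_ROUTING come from the UAPI netfilter definitions pulled in via
 * <linux/netfilter_defs.h>.
 *
 *	static unsigned int my_hook_fn(void *priv, struct sk_buff *skb,
 *				       const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;	 let the packet continue
 *	}
 *
 *	static const struct nf_hook_ops my_hook_ops = {
 *		.hook		= my_hook_fn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_PRE_ROUTING,
 *		.priority	= 0,
 *	};
 *
 *	In module init:  err = nf_register_net_hook(&init_net, &my_hook_ops);
 *	In module exit:  nf_unregister_net_hook(&init_net, &my_hook_ops);
 */
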
/* Functions to register get/setsockopt ranges (non-inclusive). You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);

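/*
 * Example (illustrative sketch, not part of the original header): a protocol
 * module can claim a range of socket option numbers by registering an
 * nf_sockopt_ops instance; the upper bound is exclusive, and the callbacks
 * must perform their own capability checks.  The identifiers my_sockopts,
 * my_set, my_get, MY_SO_BASE and MY_SO_MAX below are hypothetical.
 *
 *	static struct nf_sockopt_ops my_sockopts = {
 *		.pf		= PF_INET,
 *		.set_optmin	= MY_SO_BASE,
 *		.set_optmax	= MY_SO_MAX + 1,
 *		.set		= my_set,
 *		.get_optmin	= MY_SO_BASE,
 *		.get_optmax	= MY_SO_MAX + 1,
 *		.get		= my_get,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	Registered with nf_register_sockopt(&my_sockopts) and torn down with
 *	nf_unregister_sockopt(&my_sockopts).
 */
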
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);

/**
 * nf_hook_thresh - call a netfilter hook
 *
 * Returns 1 if the hook has allowed the packet to pass. The function
 * okfn must be invoked by the caller in this case. Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
				 struct net *net,
				 struct sock *sk,
				 struct sk_buff *skb,
				 struct net_device *indev,
				 struct net_device *outdev,
				 int (*okfn)(struct net *, struct sock *, struct sk_buff *),
				 int thresh)
{
	struct nf_hook_entry *hook_head;
	int ret = 1;

#ifdef HAVE_JUMP_LABEL
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return 1;
#endif

	rcu_read_lock();
	hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
	if (hook_head) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook_head, hook, thresh,
				   pf, indev, outdev, sk, net, okfn);

		ret = nf_hook_slow(skb, &state);
	}
	rcu_read_unlock();

	return ret;
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN);
}

/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped. Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	       struct sk_buff *skb, struct net_device *in,
	       struct net_device *out,
	       int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	       int thresh)
{
	int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh);
	if (ret == 1)
		ret = okfn(net, sk, skb);
	return ret;
}

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	int ret;

	if (!cond ||
	    ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1))
		ret = okfn(net, sk, skb);
	return ret;
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
	struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN);
}

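/*
 * Example (illustrative sketch, not part of the original header): a typical
 * caller is a protocol receive or output path, which runs the hook chain and
 * passes its continuation function as okfn.  The sketch below mirrors the
 * pattern used by the IPv4 input path; my_rcv_finish is hypothetical.
 *
 *	static int my_rcv_finish(struct net *net, struct sock *sk,
 *				 struct sk_buff *skb)
 *	{
 *		return dst_input(skb);
 *	}
 *
 *	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
 *		       net, NULL, skb, skb->dev, NULL,
 *		       my_rcv_finish);
 *
 * If every hook accepts the packet, NF_HOOK() ends up calling
 * my_rcv_finish(); if a hook drops or queues the packet, the corresponding
 * error code or 0 is returned instead and okfn is never called.
 */
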
/* Call getsockopt()/setsockopt() handlers registered for this protocol family. */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  int *len);
#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
			 char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
			 char __user *opt, int *len);
#endif

/* Call this before modifying an existing packet: ensures it is
   modifiable and linear to the point you care about (writable_len).
   Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);

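/*
 * Example (illustrative sketch, not part of the original header): a hook or
 * target that rewrites packet data should make the region writable first and
 * bail out if that fails, e.g. before editing the IPv4 header:
 *
 *	if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *		return NF_DROP;
 *
 *	iph = ip_hdr(skb);
 *	... modify iph, then fix up the affected checksums ...
 */
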
struct flowi;
struct nf_queue_entry;

struct nf_afinfo {
	unsigned short	family;
	__sum16		(*checksum)(struct sk_buff *skb, unsigned int hook,
				    unsigned int dataoff, u_int8_t protocol);
	__sum16		(*checksum_partial)(struct sk_buff *skb,
					    unsigned int hook,
					    unsigned int dataoff,
					    unsigned int len,
					    u_int8_t protocol);
	int		(*route)(struct net *net, struct dst_entry **dst,
				 struct flowi *fl, bool strict);
	void		(*saveroute)(const struct sk_buff *skb,
				     struct nf_queue_entry *entry);
	int		(*reroute)(struct net *net, struct sk_buff *skb,
				   const struct nf_queue_entry *entry);
	int		route_key_size;
};

extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
	return rcu_dereference(nf_afinfo[family]);
}

static inline __sum16
nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
	    u_int8_t protocol, unsigned short family)
{
	const struct nf_afinfo *afinfo;
	__sum16 csum = 0;

	rcu_read_lock();
	afinfo = nf_get_afinfo(family);
	if (afinfo)
		csum = afinfo->checksum(skb, hook, dataoff, protocol);
	rcu_read_unlock();
	return csum;
}

static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
		    unsigned int dataoff, unsigned int len,
		    u_int8_t protocol, unsigned short family)
{
	const struct nf_afinfo *afinfo;
	__sum16 csum = 0;

	rcu_read_lock();
	afinfo = nf_get_afinfo(family);
	if (afinfo)
		csum = afinfo->checksum_partial(skb, hook, dataoff, len,
						protocol);
	rcu_read_unlock();
	return csum;
}

int nf_register_afinfo(const struct nf_afinfo *afinfo);
void nf_unregister_afinfo(const struct nf_afinfo *afinfo);

#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);

static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
	void (*decodefn)(struct sk_buff *, struct flowi *);

	rcu_read_lock();
	decodefn = rcu_dereference(nf_nat_decode_session_hook);
	if (decodefn)
		decodefn(skb, fl);
	rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	struct sk_buff *skb, struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return okfn(net, sk, skb);
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return 1;
}

struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /*CONFIG_NETFILTER*/

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>

extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif

struct nf_conn;
enum ip_conntrack_info;
struct nlattr;

struct nfnl_ct_hook {
	struct nf_conn *(*get_ct)(const struct sk_buff *skb,
				  enum ip_conntrack_info *ctinfo);
	size_t (*build_size)(const struct nf_conn *ct);
	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     u_int16_t ct_attr, u_int16_t ct_info_attr);
	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report);
	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;

/**
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by the xtables TEE target to prevent the duplicated skb from
 * being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);

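/*
 * Example (illustrative sketch, not part of the original header): the packet
 * duplication helpers set this per-cpu flag around the nested transmit so
 * that a cloned packet re-entering the hooks is not duplicated a second
 * time, roughly:
 *
 *	if (this_cpu_read(nf_skb_duplicated))
 *		return;
 *	...
 *	__this_cpu_write(nf_skb_duplicated, true);
 *	ip_local_out(net, skb->sk, skb);
 *	__this_cpu_write(nf_skb_duplicated, false);
 */
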
#endif /*__LINUX_NETFILTER_H*/