#ifndef _X_TABLES_H
#define _X_TABLES_H


#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>

/* Test a struct->invflags and a boolean for inequality */
#define NF_INVF(ptr, flag, boolean) \
        ((boolean) ^ !!((ptr)->invflags & (flag)))
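
/*
 * Illustrative use of NF_INVF(), loosely following ip_packet_match() in
 * net/ipv4/netfilter/ip_tables.c: the rule matches when the comparison,
 * XOR'ed with the rule's inversion flag, comes out false.  Kept under
 * "#if 0" as a sketch only; 'ipinfo' is the rule's struct ipt_ip pattern.
 */
#if 0
        if (ipinfo->proto &&
            NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
                return false;   /* protocol check failed (or passed but was inverted) */
#endif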

/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:     the match extension
 * @target:    the target extension
 * @matchinfo: per-match data
 * @targinfo:  per-target data
 * @net:       network namespace through which the action was invoked
 * @in:        input netdevice
 * @out:       output netdevice
 * @fragoff:   packet is a fragment, this is the data offset
 * @thoff:     position of transport header relative to skb->data
 * @hooknum:   hook number the packet came from
 * @family:    actual NFPROTO_* through which the function is invoked
 *             (helpful when match->family == NFPROTO_UNSPEC)
 *
 * Fields written to by extensions:
 *
 * @hotdrop:   drop packet if we had inspection problems
 */
struct xt_action_param {
        union {
                const struct xt_match *match;
                const struct xt_target *target;
        };
        union {
                const void *matchinfo, *targinfo;
        };
        struct net *net;
        const struct net_device *in, *out;
        int fragoff;
        unsigned int thoff;
        unsigned int hooknum;
        u_int8_t family;
        bool hotdrop;
};
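
/*
 * Sketch of how a match callback typically consumes the fields above
 * (hypothetical extension; struct example_mtinfo and its port field are
 * made up for illustration).  skb_header_pointer() copes with non-linear
 * skbs; setting par->hotdrop makes the core drop the packet outright.
 */
#if 0
static bool example_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        const struct example_mtinfo *info = par->matchinfo;
        const struct tcphdr *th;
        struct tcphdr _tcph;

        if (par->fragoff != 0)          /* not the first fragment: no L4 header */
                return false;

        th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
        if (th == NULL) {
                par->hotdrop = true;    /* truncated/mangled packet: force a drop */
                return false;
        }

        return ntohs(th->dest) == info->port;
}
#endif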

/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:       network namespace through which the check was invoked
 * @table:     table the rule is being inserted into
 * @entryinfo: the family-specific rule data
 *             (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:     struct xt_match through which this function was invoked
 * @matchinfo: per-match data
 * @hook_mask: via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_match *match;
        void *matchinfo;
        unsigned int hook_mask;
        u_int8_t family;
        bool nft_compat;
};
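
/*
 * Sketch of a checkentry() implementation using the parameters above
 * (hypothetical names).  It validates the user-supplied per-match data
 * and refuses hooks the match cannot work from; returning 0 accepts the
 * rule, a negative errno rejects it.
 */
#if 0
static int example_mt_check(const struct xt_mtchk_param *par)
{
        const struct example_mtinfo *info = par->matchinfo;

        if (info->port == 0)
                return -EINVAL;
        if (par->hook_mask & ~((1 << NF_INET_LOCAL_IN) |
                               (1 << NF_INET_FORWARD)))
                return -EINVAL; /* only usable from INPUT/FORWARD */
        return 0;
}
#endif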

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
        struct net *net;
        const struct xt_match *match;
        void *matchinfo;
        u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo: the family-specific rule data
 *             (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields: see above.
 */
struct xt_tgchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_target *target;
        void *targinfo;
        unsigned int hook_mask;
        u_int8_t family;
        bool nft_compat;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
        struct net *net;
        const struct xt_target *target;
        void *targinfo;
        u_int8_t family;
};

struct xt_match {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Return true or false: return false and set par->hotdrop = true
           to force an immediate packet drop. */
        /* Arguments changed since 2.6.9, as this must now handle
           non-linear skbs, using skb_header_pointer and
           skb_make_writable. */
        bool (*match)(const struct sk_buff *skb,
                      struct xt_action_param *);

        /* Called when user tries to insert an entry of this type. */
        int (*checkentry)(const struct xt_mtchk_param *);

        /* Called when an entry of this type is deleted. */
        void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
        /* Called when the userspace alignment differs from the kernel's */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int matchsize;
#ifdef CONFIG_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};
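
/*
 * Minimal sketch of a match extension built around struct xt_match
 * (hypothetical names; real extensions live under net/netfilter/).
 * The ops are registered from module init with xt_register_match(),
 * declared further below, and unregistered on module exit.
 */
#if 0
static struct xt_match example_mt_reg __read_mostly = {
        .name           = "example",
        .revision       = 0,
        .family         = NFPROTO_UNSPEC,       /* usable from every family */
        .match          = example_mt,
        .checkentry     = example_mt_check,
        .matchsize      = sizeof(struct example_mtinfo),
        .me             = THIS_MODULE,
};

static int __init example_mt_init(void)
{
        return xt_register_match(&example_mt_reg);
}

static void __exit example_mt_exit(void)
{
        xt_unregister_match(&example_mt_reg);
}

module_init(example_mt_init);
module_exit(example_mt_exit);
#endif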

/* Registration hooks for targets. */
struct xt_target {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Returns verdict. Argument order changed since 2.6.9, as this
           must now handle non-linear skbs, using skb_copy_bits and
           skb_make_writable. */
        unsigned int (*target)(struct sk_buff *skb,
                               const struct xt_action_param *);

        /* Called when user tries to insert an entry of this type:
           hook_mask is a bitmask of hooks from which it can be
           called. */
        /* Should return 0 on success or an error code otherwise (-Exxxx). */
        int (*checkentry)(const struct xt_tgchk_param *);

        /* Called when an entry of this type is deleted. */
        void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
        /* Called when the userspace alignment differs from the kernel's */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int targetsize;
#ifdef CONFIG_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};
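
/*
 * Sketch of a target callback (hypothetical names): unlike matches,
 * targets return a verdict.  NF_ACCEPT or NF_DROP ends traversal for the
 * packet, while XT_CONTINUE lets evaluation continue with the next rule.
 */
#if 0
static unsigned int
example_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
        const struct example_tginfo *info = par->targinfo;

        if (info->flags & EXAMPLE_TG_DROP)
                return NF_DROP;

        return XT_CONTINUE;
}
#endif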

/* Furniture shopping... */
struct xt_table {
        struct list_head list;

        /* What hooks you will enter on */
        unsigned int valid_hooks;

        /* Man behind the curtain... */
        struct xt_table_info *private;

        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        u_int8_t af;            /* address/protocol family */
        int priority;           /* hook order */

        /* called when table is needed in the given netns */
        int (*table_init)(struct net *net);

        /* A unique name... */
        const char name[XT_TABLE_MAXNAMELEN];
};
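
/*
 * Sketch of a table definition, modelled on the IPv4 "filter" table in
 * net/ipv4/netfilter/iptable_filter.c (the table_init callback name is
 * illustrative).  The table is then registered per network namespace via
 * xt_register_table(), declared below.
 */
#if 0
static const struct xt_table packet_filter = {
        .name           = "filter",
        .valid_hooks    = (1 << NF_INET_LOCAL_IN) |
                          (1 << NF_INET_FORWARD) |
                          (1 << NF_INET_LOCAL_OUT),
        .me             = THIS_MODULE,
        .af             = NFPROTO_IPV4,
        .priority       = NF_IP_PRI_FILTER,
        .table_init     = iptable_filter_table_init,
};
#endif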

#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
        /* Size per table */
        unsigned int size;
        /* Number of entries: FIXME. --RR */
        unsigned int number;
        /* Initial number of entries. Needed for module usage count */
        unsigned int initial_entries;

        /* Entry points and underflows */
        unsigned int hook_entry[NF_INET_NUMHOOKS];
        unsigned int underflow[NF_INET_NUMHOOKS];

        /*
         * Number of user chains. Since tables cannot have loops, at most
         * @stacksize jumps (number of user chains) can possibly be made.
         */
        unsigned int stacksize;
        void ***jumpstack;

        unsigned char entries[0] __aligned(8);
};

int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);

int xt_check_entry_offsets(const void *base, const char *elems,
                           unsigned int target_offset,
                           unsigned int next_offset);

unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
                         unsigned int target, unsigned int size);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
                   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
                    bool inv_proto);

void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                                 struct xt_counters_info *info, bool compat);

struct xt_table *xt_register_table(struct net *net,
                                   const struct xt_table *table,
                                   struct xt_table_info *bootstrap,
                                   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
                                       unsigned int num_counters,
                                       struct xt_table_info *newinfo,
                                       int *error);

struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
                     int *err);

struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
                                    const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);

/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * the low order bit is set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);
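
/*
 * Reader-side sketch, in the spirit of get_counters() in ip_tables.c:
 * sample one CPU's counters and retry whenever a writer (the packet path)
 * was active in between.  'tmp' stands for that CPU's struct xt_counters
 * and is hypothetical here.
 */
#if 0
        seqcount_t *s = &per_cpu(xt_recseq, cpu);
        unsigned int start;
        u64 bcnt, pcnt;

        do {
                start = read_seqcount_begin(s);
                bcnt = tmp->bcnt;
                pcnt = tmp->pcnt;
        } while (read_seqcount_retry(s, start));
#endif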

/* xt_tee_enabled - true if x_tables needs to handle reentrancy
 *
 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
 */
extern struct static_key xt_tee_enabled;

/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait until the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
        unsigned int addend;

        /*
         * Low order bit of sequence is set if we already
         * called xt_write_recseq_begin().
         */
        addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

        /*
         * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
         * We don't check the addend value, to avoid a test and conditional
         * jump, since addend is most likely 1.
         */
        __this_cpu_add(xt_recseq.sequence, addend);
        smp_wmb();

        return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
        /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
        smp_wmb();
        __this_cpu_add(xt_recseq.sequence, addend);
}
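
/*
 * Typical write-side pairing, as in ipt_do_table(): in the packet path
 * softirqs are already disabled, otherwise bottom halves must be turned
 * off explicitly around the section (sketch only).
 */
#if 0
        unsigned int addend;

        local_bh_disable();
        addend = xt_write_recseq_begin();

        /* ... traverse the ruleset and bump counters ... */

        xt_write_recseq_end(addend);
        local_bh_enable();
#endif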

/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
                                                   const char *_b,
                                                   const char *_mask)
{
        const unsigned long *a = (const unsigned long *)_a;
        const unsigned long *b = (const unsigned long *)_b;
        const unsigned long *mask = (const unsigned long *)_mask;
        unsigned long ret;

        ret = (a[0] ^ b[0]) & mask[0];
        if (IFNAMSIZ > sizeof(unsigned long))
                ret |= (a[1] ^ b[1]) & mask[1];
        if (IFNAMSIZ > 2 * sizeof(unsigned long))
                ret |= (a[2] ^ b[2]) & mask[2];
        if (IFNAMSIZ > 3 * sizeof(unsigned long))
                ret |= (a[3] ^ b[3]) & mask[3];
        BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
        return ret;
}
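
/*
 * Illustrative use, close to ip_packet_match(): compare the input device
 * name against the rule's interface pattern and mask, then apply the
 * rule's inversion flag with NF_INVF().
 */
#if 0
        ret = ifname_compare_aligned(indev, ipinfo->iniface,
                                     ipinfo->iniface_mask);
        if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
                return false;   /* input interface does not match */
#endif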


/* On SMP, ip(6)t_entry->counters.pcnt holds the address of the
 * real (percpu) counter.  On !SMP, it's just the packet count,
 * so nothing needs to be done there.
 *
 * xt_percpu_counter_alloc returns the address of the percpu
 * counter, or 0 on !SMP.  We force an alignment of 16 bytes
 * so that bytes/packets share a common cache line.
 *
 * Hence the caller must use IS_ERR_VALUE to check for an error; this
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
static inline unsigned long xt_percpu_counter_alloc(void)
{
        if (nr_cpu_ids > 1) {
                void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
                                                    sizeof(struct xt_counters));

                if (res == NULL)
                        return -ENOMEM;

                return (__force unsigned long) res;
        }

        return 0;
}

static inline void xt_percpu_counter_free(u64 pcnt)
{
        if (nr_cpu_ids > 1)
                free_percpu((void __percpu *) (unsigned long) pcnt);
}

static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
        if (nr_cpu_ids > 1)
                return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);

        return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
        if (nr_cpu_ids > 1)
                return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);

        return cnt;
}
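
/*
 * Sketch of how rule counters use the helpers above, following the
 * pattern in ip_tables.c: allocate at rule-translation time, bump the
 * CPU-local copy per packet, free on teardown.  'e' is a hypothetical
 * rule entry whose 'counters' member is a struct xt_counters.
 */
#if 0
        /* at checkentry/translation time */
        e->counters.pcnt = xt_percpu_counter_alloc();
        if (IS_ERR_VALUE(e->counters.pcnt))
                return -ENOMEM;

        /* in the packet path */
        counter = xt_get_this_cpu_counter(&e->counters);
        ADD_COUNTER(*counter, skb->len, 1);

        /* on rule teardown */
        xt_percpu_counter_free(e->counters.pcnt);
#endif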

struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match {
        union {
                struct {
                        u_int16_t match_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t match_size;
                        compat_uptr_t match;
                } kernel;
                u_int16_t match_size;
        } u;
        unsigned char data[0];
};

struct compat_xt_entry_target {
        union {
                struct {
                        u_int16_t target_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t target_size;
                        compat_uptr_t target;
                } kernel;
                u_int16_t target_size;
        } u;
        unsigned char data[0];
};

/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
        compat_u64 pcnt, bcnt;          /* Packet and byte counters */
};

struct compat_xt_counters_info {
        char name[XT_TABLE_MAXNAMELEN];
        compat_uint_t num_counters;
        struct compat_xt_counters counters[0];
};

struct _compat_xt_align {
        __u8 u8;
        __u16 u16;
        __u32 u32;
        compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                               unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
                            void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                                unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
                             void __user **dstptr, unsigned int *size);
int xt_compat_check_entry_offsets(const void *base, const char *elems,
                                  unsigned int target_offset,
                                  unsigned int next_offset);

#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */