]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - include/linux/netfilter/x_tables.h
netfilter: xtables: add struct xt_mtchk_param::net
[mirror_ubuntu-artful-kernel.git] / include / linux / netfilter / x_tables.h
CommitLineData
2e4e6a17
HW
1#ifndef _X_TABLES_H
2#define _X_TABLES_H
3
60c195c7
AB
4#include <linux/types.h>
5
2e4e6a17
HW
6#define XT_FUNCTION_MAXNAMELEN 30
7#define XT_TABLE_MAXNAMELEN 32
8
d94d9fee 9struct xt_entry_match {
1e30a014
DM
10 union {
11 struct {
60c195c7 12 __u16 match_size;
1e30a014
DM
13
14 /* Used by userspace */
15 char name[XT_FUNCTION_MAXNAMELEN-1];
16
60c195c7 17 __u8 revision;
1e30a014
DM
18 } user;
19 struct {
60c195c7 20 __u16 match_size;
1e30a014
DM
21
22 /* Used inside the kernel */
23 struct xt_match *match;
24 } kernel;
25
26 /* Total length */
60c195c7 27 __u16 match_size;
1e30a014
DM
28 } u;
29
30 unsigned char data[0];
31};
32
d94d9fee 33struct xt_entry_target {
1e30a014
DM
34 union {
35 struct {
60c195c7 36 __u16 target_size;
1e30a014
DM
37
38 /* Used by userspace */
39 char name[XT_FUNCTION_MAXNAMELEN-1];
40
60c195c7 41 __u8 revision;
1e30a014
DM
42 } user;
43 struct {
60c195c7 44 __u16 target_size;
1e30a014
DM
45
46 /* Used inside the kernel */
47 struct xt_target *target;
48 } kernel;
49
50 /* Total length */
60c195c7 51 __u16 target_size;
1e30a014
DM
52 } u;
53
54 unsigned char data[0];
55};
56
3c2ad469
PM
57#define XT_TARGET_INIT(__name, __size) \
58{ \
59 .target.u.user = { \
60 .target_size = XT_ALIGN(__size), \
61 .name = __name, \
62 }, \
63}
64
d94d9fee 65struct xt_standard_target {
1e30a014
DM
66 struct xt_entry_target target;
67 int verdict;
68};
69
2e4e6a17
HW
70/* The argument to IPT_SO_GET_REVISION_*. Returns highest revision
71 * kernel supports, if >= revision. */
d94d9fee 72struct xt_get_revision {
2e4e6a17
HW
73 char name[XT_FUNCTION_MAXNAMELEN-1];
74
60c195c7 75 __u8 revision;
2e4e6a17
HW
76};
77
78/* CONTINUE verdict for targets */
79#define XT_CONTINUE 0xFFFFFFFF
80
81/* For standard target */
82#define XT_RETURN (-NF_REPEAT - 1)
83
6fbfc968
DM
84/* this is a dummy structure to find out the alignment requirement for a struct
85 * containing all the fundamental data types that are used in ipt_entry,
86 * ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my
87 * personal pleasure to remove it -HW
88 */
d94d9fee 89struct _xt_align {
60c195c7
AB
90 __u8 u8;
91 __u16 u16;
92 __u32 u32;
93 __u64 u64;
6fbfc968
DM
94};
95
96#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) \
97 & ~(__alignof__(struct _xt_align)-1))
2e4e6a17
HW
98
99/* Standard return verdict, or do jump. */
100#define XT_STANDARD_TARGET ""
101/* Error verdict. */
102#define XT_ERROR_TARGET "ERROR"
103
2e4e6a17
HW
104#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
105#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
106
d94d9fee 107struct xt_counters {
60c195c7 108 __u64 pcnt, bcnt; /* Packet and byte counters */
2e4e6a17
HW
109};
110
111/* The argument to IPT_SO_ADD_COUNTERS. */
d94d9fee 112struct xt_counters_info {
2e4e6a17
HW
113 /* Which table. */
114 char name[XT_TABLE_MAXNAMELEN];
115
116 unsigned int num_counters;
117
118 /* The counters (actually `number' of these). */
119 struct xt_counters counters[0];
120};
121
122#define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */
123
89c002d6
PM
124/* fn returns 0 to continue iteration */
125#define XT_MATCH_ITERATE(type, e, fn, args...) \
126({ \
127 unsigned int __i; \
128 int __ret = 0; \
129 struct xt_entry_match *__m; \
130 \
131 for (__i = sizeof(type); \
132 __i < (e)->target_offset; \
133 __i += __m->u.match_size) { \
134 __m = (void *)e + __i; \
135 \
136 __ret = fn(__m , ## args); \
137 if (__ret != 0) \
138 break; \
139 } \
140 __ret; \
141})
142
143/* fn returns 0 to continue iteration */
144#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
145({ \
146 unsigned int __i, __n; \
147 int __ret = 0; \
148 type *__entry; \
149 \
150 for (__i = 0, __n = 0; __i < (size); \
151 __i += __entry->next_offset, __n++) { \
152 __entry = (void *)(entries) + __i; \
153 if (__n < n) \
154 continue; \
155 \
156 __ret = fn(__entry , ## args); \
157 if (__ret != 0) \
158 break; \
159 } \
160 __ret; \
161})
162
163/* fn returns 0 to continue iteration */
164#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
165 XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)
166
2e4e6a17
HW
167#ifdef __KERNEL__
168
169#include <linux/netdevice.h>
170
f7108a20
JE
171/**
172 * struct xt_match_param - parameters for match extensions' match functions
173 *
174 * @in: input netdevice
175 * @out: output netdevice
176 * @match: struct xt_match through which this function was invoked
177 * @matchinfo: per-match data
178 * @fragoff: packet is a fragment, this is the data offset
179 * @thoff: position of transport header relative to skb->data
a5e78820 180 * @hook: hook number given packet came from
916a917d
JE
181 * @family: Actual NFPROTO_* through which the function is invoked
182 * (helpful when match->family == NFPROTO_UNSPEC)
a5e78820 183 * @hotdrop: drop packet if we had inspection problems
f7108a20
JE
184 */
185struct xt_match_param {
186 const struct net_device *in, *out;
187 const struct xt_match *match;
188 const void *matchinfo;
189 int fragoff;
190 unsigned int thoff;
a5e78820 191 unsigned int hooknum;
916a917d 192 u_int8_t family;
a5e78820 193 bool *hotdrop;
f7108a20
JE
194};
195
9b4fce7a
JE
196/**
197 * struct xt_mtchk_param - parameters for match extensions'
198 * checkentry functions
199 *
200 * @table: table the rule is tried to be inserted into
201 * @entryinfo: the family-specific rule data
202 * (struct ipt_ip, ip6t_ip, ebt_entry)
203 * @match: struct xt_match through which this function was invoked
204 * @matchinfo: per-match data
205 * @hook_mask: via which hooks the new rule is reachable
206 */
207struct xt_mtchk_param {
a83d8e8d 208 struct net *net;
9b4fce7a
JE
209 const char *table;
210 const void *entryinfo;
211 const struct xt_match *match;
212 void *matchinfo;
213 unsigned int hook_mask;
916a917d 214 u_int8_t family;
9b4fce7a
JE
215};
216
6be3d859
JE
217/* Match destructor parameters */
218struct xt_mtdtor_param {
219 const struct xt_match *match;
220 void *matchinfo;
916a917d 221 u_int8_t family;
6be3d859
JE
222};
223
7eb35586
JE
224/**
225 * struct xt_target_param - parameters for target extensions' target functions
226 *
227 * @hooknum: hook through which this target was invoked
228 * @target: struct xt_target through which this function was invoked
229 * @targinfo: per-target data
230 *
231 * Other fields see above.
232 */
233struct xt_target_param {
234 const struct net_device *in, *out;
7eb35586
JE
235 const struct xt_target *target;
236 const void *targinfo;
98d89b41 237 unsigned int hooknum;
916a917d 238 u_int8_t family;
7eb35586
JE
239};
240
af5d6dc2
JE
241/**
242 * struct xt_tgchk_param - parameters for target extensions'
243 * checkentry functions
244 *
245 * @entryinfo: the family-specific rule data
246 * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
247 *
248 * Other fields see above.
249 */
250struct xt_tgchk_param {
251 const char *table;
f79fca55 252 const void *entryinfo;
af5d6dc2
JE
253 const struct xt_target *target;
254 void *targinfo;
255 unsigned int hook_mask;
916a917d 256 u_int8_t family;
af5d6dc2
JE
257};
258
a2df1648
JE
259/* Target destructor parameters */
260struct xt_tgdtor_param {
261 const struct xt_target *target;
262 void *targinfo;
916a917d 263 u_int8_t family;
a2df1648
JE
264};
265
d94d9fee 266struct xt_match {
2e4e6a17
HW
267 struct list_head list;
268
269 const char name[XT_FUNCTION_MAXNAMELEN-1];
daaf83d2 270 u_int8_t revision;
2e4e6a17 271
2e4e6a17
HW
272 /* Return true or false: return FALSE and set *hotdrop = 1 to
273 force immediate packet drop. */
274 /* Arguments changed since 2.6.9, as this must now handle
275 non-linear skb, using skb_header_pointer and
276 skb_ip_make_writable. */
1d93a9cb 277 bool (*match)(const struct sk_buff *skb,
f7108a20 278 const struct xt_match_param *);
2e4e6a17
HW
279
280 /* Called when user tries to insert an entry of this type. */
9b4fce7a 281 bool (*checkentry)(const struct xt_mtchk_param *);
2e4e6a17
HW
282
283 /* Called when entry of this type deleted. */
6be3d859 284 void (*destroy)(const struct xt_mtdtor_param *);
2e4e6a17 285
2722971c 286 /* Called when userspace align differs from kernel space one */
9fa492cd
PM
287 void (*compat_from_user)(void *dst, void *src);
288 int (*compat_to_user)(void __user *dst, void *src);
2722971c 289
2e4e6a17
HW
290 /* Set this to THIS_MODULE if you are a module, otherwise NULL */
291 struct module *me;
37f9f733 292
91270cf8
PM
293 /* Free to use by each match */
294 unsigned long data;
295
ecb6f85e 296 const char *table;
37f9f733 297 unsigned int matchsize;
9fa492cd 298 unsigned int compatsize;
37f9f733
PM
299 unsigned int hooks;
300 unsigned short proto;
c4b88513
PM
301
302 unsigned short family;
2e4e6a17
HW
303};
304
305/* Registration hooks for targets. */
d94d9fee 306struct xt_target {
2e4e6a17
HW
307 struct list_head list;
308
309 const char name[XT_FUNCTION_MAXNAMELEN-1];
310
2e4e6a17
HW
311 /* Returns verdict. Argument order changed since 2.6.9, as this
312 must now handle non-linear skbs, using skb_copy_bits and
313 skb_ip_make_writable. */
3db05fea 314 unsigned int (*target)(struct sk_buff *skb,
7eb35586 315 const struct xt_target_param *);
2e4e6a17
HW
316
317 /* Called when user tries to insert an entry of this type:
318 hook_mask is a bitmask of hooks from which it can be
319 called. */
320 /* Should return true or false. */
af5d6dc2 321 bool (*checkentry)(const struct xt_tgchk_param *);
2e4e6a17
HW
322
323 /* Called when entry of this type deleted. */
a2df1648 324 void (*destroy)(const struct xt_tgdtor_param *);
2e4e6a17 325
2722971c 326 /* Called when userspace align differs from kernel space one */
9fa492cd
PM
327 void (*compat_from_user)(void *dst, void *src);
328 int (*compat_to_user)(void __user *dst, void *src);
2722971c 329
2e4e6a17
HW
330 /* Set this to THIS_MODULE if you are a module, otherwise NULL */
331 struct module *me;
37f9f733 332
ecb6f85e 333 const char *table;
37f9f733 334 unsigned int targetsize;
9fa492cd 335 unsigned int compatsize;
37f9f733
PM
336 unsigned int hooks;
337 unsigned short proto;
c4b88513
PM
338
339 unsigned short family;
37f9f733 340 u_int8_t revision;
2e4e6a17
HW
341};
342
343/* Furniture shopping... */
d94d9fee 344struct xt_table {
2e4e6a17
HW
345 struct list_head list;
346
2e4e6a17
HW
347 /* What hooks you will enter on */
348 unsigned int valid_hooks;
349
2e4e6a17 350 /* Man behind the curtain... */
4a2f965c 351 struct xt_table_info *private;
2e4e6a17
HW
352
353 /* Set this to THIS_MODULE if you are a module, otherwise NULL */
354 struct module *me;
355
76108cea 356 u_int8_t af; /* address/protocol family */
4a2f965c
SH
357
358 /* A unique name... */
359 const char name[XT_TABLE_MAXNAMELEN];
2e4e6a17
HW
360};
361
362#include <linux/netfilter_ipv4.h>
363
364/* The table itself */
d94d9fee 365struct xt_table_info {
2e4e6a17
HW
366 /* Size per table */
367 unsigned int size;
368 /* Number of entries: FIXME. --RR */
369 unsigned int number;
370 /* Initial number of entries. Needed for module usage count */
371 unsigned int initial_entries;
372
373 /* Entry points and underflows */
6e23ae2a
PM
374 unsigned int hook_entry[NF_INET_NUMHOOKS];
375 unsigned int underflow[NF_INET_NUMHOOKS];
2e4e6a17
HW
376
377 /* ipt_entry tables: one per CPU */
259d4e41 378 /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
78454473 379 void *entries[1];
2e4e6a17
HW
380};
381
259d4e41
ED
382#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
383 + nr_cpu_ids * sizeof(char *))
a45049c5
PNA
384extern int xt_register_target(struct xt_target *target);
385extern void xt_unregister_target(struct xt_target *target);
52d9c42e
PM
386extern int xt_register_targets(struct xt_target *target, unsigned int n);
387extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
388
a45049c5
PNA
389extern int xt_register_match(struct xt_match *target);
390extern void xt_unregister_match(struct xt_match *target);
52d9c42e
PM
391extern int xt_register_matches(struct xt_match *match, unsigned int n);
392extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
2e4e6a17 393
916a917d 394extern int xt_check_match(struct xt_mtchk_param *,
9b4fce7a 395 unsigned int size, u_int8_t proto, bool inv_proto);
916a917d 396extern int xt_check_target(struct xt_tgchk_param *,
af5d6dc2 397 unsigned int size, u_int8_t proto, bool inv_proto);
37f9f733 398
8d870052 399extern struct xt_table *xt_register_table(struct net *net,
35aad0ff 400 const struct xt_table *table,
a98da11d
AD
401 struct xt_table_info *bootstrap,
402 struct xt_table_info *newinfo);
2e4e6a17
HW
403extern void *xt_unregister_table(struct xt_table *table);
404
405extern struct xt_table_info *xt_replace_table(struct xt_table *table,
406 unsigned int num_counters,
407 struct xt_table_info *newinfo,
408 int *error);
409
76108cea
JE
410extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
411extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
412extern struct xt_target *xt_request_find_target(u8 af, const char *name,
2e4e6a17 413 u8 revision);
76108cea
JE
414extern int xt_find_revision(u8 af, const char *name, u8 revision,
415 int target, int *err);
2e4e6a17 416
76108cea 417extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
8d870052 418 const char *name);
2e4e6a17
HW
419extern void xt_table_unlock(struct xt_table *t);
420
76108cea
JE
421extern int xt_proto_init(struct net *net, u_int8_t af);
422extern void xt_proto_fini(struct net *net, u_int8_t af);
2e4e6a17
HW
423
424extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
425extern void xt_free_table_info(struct xt_table_info *info);
942e4a2b
SH
426
427/*
428 * Per-CPU spinlock associated with per-cpu table entries, and
429 * with a counter for the "reading" side that allows a recursive
430 * reader to avoid taking the lock and deadlocking.
431 *
432 * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
433 * It needs to ensure that the rules are not being changed while the packet
434 * is being processed. In some cases, the read lock will be acquired
435 * twice on the same CPU; this is okay because of the count.
436 *
437 * "writing" is used when reading counters.
438 * During replace any readers that are using the old tables have to complete
439 * before freeing the old table. This is handled by the write locking
440 * necessary for reading the counters.
441 */
442struct xt_info_lock {
443 spinlock_t lock;
444 unsigned char readers;
445};
446DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
447
448/*
449 * Note: we need to ensure that preemption is disabled before acquiring
450 * the per-cpu-variable, so we do it as a two step process rather than
451 * using "spin_lock_bh()".
452 *
453 * We _also_ need to disable bottom half processing before updating our
454 * nesting count, to make sure that the only kind of re-entrancy is this
455 * code being called by itself: since the count+lock is not an atomic
456 * operation, we can allow no races.
457 *
458 * _Only_ that special combination of being per-cpu and never getting
459 * re-entered asynchronously means that the count is safe.
460 */
461static inline void xt_info_rdlock_bh(void)
462{
463 struct xt_info_lock *lock;
464
465 local_bh_disable();
466 lock = &__get_cpu_var(xt_info_locks);
0f3d042e 467 if (likely(!lock->readers++))
942e4a2b
SH
468 spin_lock(&lock->lock);
469}
470
471static inline void xt_info_rdunlock_bh(void)
472{
473 struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
474
0f3d042e 475 if (likely(!--lock->readers))
942e4a2b
SH
476 spin_unlock(&lock->lock);
477 local_bh_enable();
478}
479
480/*
481 * The "writer" side needs to get exclusive access to the lock,
482 * regardless of readers. This must be called with bottom half
483 * processing (and thus also preemption) disabled.
484 */
485static inline void xt_info_wrlock(unsigned int cpu)
486{
487 spin_lock(&per_cpu(xt_info_locks, cpu).lock);
488}
489
490static inline void xt_info_wrunlock(unsigned int cpu)
491{
492 spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
493}
2e4e6a17 494
b8dfe498
ED
495/*
496 * This helper is performance critical and must be inlined
497 */
498static inline unsigned long ifname_compare_aligned(const char *_a,
499 const char *_b,
500 const char *_mask)
501{
502 const unsigned long *a = (const unsigned long *)_a;
503 const unsigned long *b = (const unsigned long *)_b;
504 const unsigned long *mask = (const unsigned long *)_mask;
505 unsigned long ret;
506
507 ret = (a[0] ^ b[0]) & mask[0];
508 if (IFNAMSIZ > sizeof(unsigned long))
509 ret |= (a[1] ^ b[1]) & mask[1];
510 if (IFNAMSIZ > 2 * sizeof(unsigned long))
511 ret |= (a[2] ^ b[2]) & mask[2];
512 if (IFNAMSIZ > 3 * sizeof(unsigned long))
513 ret |= (a[3] ^ b[3]) & mask[3];
514 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
515 return ret;
516}
517
2722971c
DM
518#ifdef CONFIG_COMPAT
519#include <net/compat.h>
520
d94d9fee 521struct compat_xt_entry_match {
2722971c
DM
522 union {
523 struct {
524 u_int16_t match_size;
525 char name[XT_FUNCTION_MAXNAMELEN - 1];
526 u_int8_t revision;
527 } user;
46c5ea3c
PM
528 struct {
529 u_int16_t match_size;
530 compat_uptr_t match;
531 } kernel;
2722971c
DM
532 u_int16_t match_size;
533 } u;
534 unsigned char data[0];
535};
536
d94d9fee 537struct compat_xt_entry_target {
2722971c
DM
538 union {
539 struct {
540 u_int16_t target_size;
541 char name[XT_FUNCTION_MAXNAMELEN - 1];
542 u_int8_t revision;
543 } user;
46c5ea3c
PM
544 struct {
545 u_int16_t target_size;
546 compat_uptr_t target;
547 } kernel;
2722971c
DM
548 u_int16_t target_size;
549 } u;
550 unsigned char data[0];
551};
552
553/* FIXME: this works only on 32 bit tasks
554 * need to change whole approach in order to calculate align as function of
555 * current task alignment */
556
d94d9fee 557struct compat_xt_counters {
55fe5866 558#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
2722971c 559 u_int32_t cnt[4];
55fe5866
PM
560#else
561 u_int64_t cnt[2];
562#endif
2722971c
DM
563};
564
d94d9fee 565struct compat_xt_counters_info {
2722971c
DM
566 char name[XT_TABLE_MAXNAMELEN];
567 compat_uint_t num_counters;
568 struct compat_xt_counters counters[0];
569};
570
571#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
572 & ~(__alignof__(struct compat_xt_counters)-1))
573
76108cea
JE
574extern void xt_compat_lock(u_int8_t af);
575extern void xt_compat_unlock(u_int8_t af);
9fa492cd 576
76108cea
JE
577extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
578extern void xt_compat_flush_offsets(u_int8_t af);
579extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset);
b386d9f5 580
5452e425 581extern int xt_compat_match_offset(const struct xt_match *match);
89566951 582extern int xt_compat_match_from_user(struct xt_entry_match *m,
b0a6363c 583 void **dstptr, unsigned int *size);
9fa492cd 584extern int xt_compat_match_to_user(struct xt_entry_match *m,
b0a6363c 585 void __user **dstptr, unsigned int *size);
9fa492cd 586
5452e425 587extern int xt_compat_target_offset(const struct xt_target *target);
9fa492cd 588extern void xt_compat_target_from_user(struct xt_entry_target *t,
b0a6363c 589 void **dstptr, unsigned int *size);
9fa492cd 590extern int xt_compat_target_to_user(struct xt_entry_target *t,
b0a6363c 591 void __user **dstptr, unsigned int *size);
2722971c
DM
592
593#endif /* CONFIG_COMPAT */
2e4e6a17
HW
594#endif /* __KERNEL__ */
595
596#endif /* _X_TABLES_H */