#ifndef _X_TABLES_H
#define _X_TABLES_H


#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <linux/netfilter.h>
#include <uapi/linux/netfilter/x_tables.h>

/* Test a struct->invflags and a boolean for inequality */
#define NF_INVF(ptr, flag, boolean)	\
	((boolean) ^ !!((ptr)->invflags & (flag)))

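/*
 * Illustrative sketch (not part of this header): a match that honours an
 * inversion flag typically feeds NF_INVF() the raw comparison result.
 * XT_FOO_INV_PROTO and struct xt_foo_info below are hypothetical names:
 *
 *	const struct xt_foo_info *info = par->matchinfo;
 *
 *	// Matches "proto == X" normally, "proto != X" when the
 *	// inversion bit is set in info->invflags.
 *	if (NF_INVF(info, XT_FOO_INV_PROTO,
 *		    ip_hdr(skb)->protocol != info->proto))
 *		return false;
 */
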
/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match: the match extension
 * @target: the target extension
 * @matchinfo: per-match data
 * @targinfo: per-target data
 * @state: pointer to hook state this packet came from
 * @fragoff: packet is a fragment, this is the data offset
 * @thoff: position of transport header relative to skb->data
 *
 * Fields written to by extensions:
 *
 * @hotdrop: drop packet if we had inspection problems
 */
struct xt_action_param {
	union {
		const struct xt_match *match;
		const struct xt_target *target;
	};
	union {
		const void *matchinfo, *targinfo;
	};
	const struct nf_hook_state *state;
	int fragoff;
	unsigned int thoff;
	bool hotdrop;
};

static inline struct net *xt_net(const struct xt_action_param *par)
{
	return par->state->net;
}

static inline struct net_device *xt_in(const struct xt_action_param *par)
{
	return par->state->in;
}

static inline const char *xt_inname(const struct xt_action_param *par)
{
	return par->state->in->name;
}

static inline struct net_device *xt_out(const struct xt_action_param *par)
{
	return par->state->out;
}

static inline const char *xt_outname(const struct xt_action_param *par)
{
	return par->state->out->name;
}

static inline unsigned int xt_hooknum(const struct xt_action_param *par)
{
	return par->state->hook;
}

static inline u_int8_t xt_family(const struct xt_action_param *par)
{
	return par->state->pf;
}

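/*
 * Illustrative sketch: a minimal ->match() callback wired to the accessors
 * above. The extension-specific struct xt_foo_info is hypothetical:
 *
 *	static bool foo_mt(const struct sk_buff *skb,
 *			   struct xt_action_param *par)
 *	{
 *		const struct xt_foo_info *info = par->matchinfo;
 *
 *		// Non-head fragments carry no transport header.
 *		if (par->fragoff != 0)
 *			return false;
 *		if (xt_family(par) == NFPROTO_IPV4 &&
 *		    ip_hdr(skb)->ttl != info->ttl)
 *			return false;
 *		return true;
 *	}
 */
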
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net: network namespace through which the check was invoked
 * @table: table the rule is to be inserted into
 * @entryinfo: the family-specific rule data
 *	(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match: struct xt_match through which this function was invoked
 * @matchinfo: per-match data
 * @hook_mask: via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
	bool nft_compat;
};

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
	struct net *net;
	const struct xt_match *match;
	void *matchinfo;
	u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo: the family-specific rule data
 *	(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields as above.
 */
struct xt_tgchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
	bool nft_compat;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
	struct net *net;
	const struct xt_target *target;
	void *targinfo;
	u_int8_t family;
};

struct xt_match {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Return true or false: return false and set par->hotdrop = true
	   to force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skbs, using skb_header_pointer and
	   skb_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      struct xt_action_param *);

	/* Called when user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when the userspace alignment differs from the kernel-space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int matchsize;
	unsigned int usersize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};

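/*
 * Illustrative sketch: how an extension module typically fills this in.
 * foo_mt, foo_mt_check and struct xt_foo_info are hypothetical names:
 *
 *	static struct xt_match foo_mt_reg __read_mostly = {
 *		.name       = "foo",
 *		.revision   = 0,
 *		.family     = NFPROTO_IPV4,
 *		.match      = foo_mt,
 *		.checkentry = foo_mt_check,
 *		.matchsize  = sizeof(struct xt_foo_info),
 *		.me         = THIS_MODULE,
 *	};
 */
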
/* Registration hooks for targets. */
struct xt_target {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_action_param *);

	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return 0 on success or an error code otherwise (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when the userspace alignment differs from the kernel-space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int targetsize;
	unsigned int usersize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};

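/*
 * Illustrative sketch: a ->target() hook returns a netfilter verdict
 * (NF_ACCEPT, NF_DROP, ...) or XT_CONTINUE to fall through to the next
 * rule. struct xt_foo_tginfo is a hypothetical name:
 *
 *	static unsigned int foo_tg(struct sk_buff *skb,
 *				   const struct xt_action_param *par)
 *	{
 *		const struct xt_foo_tginfo *info = par->targinfo;
 *
 *		if (info->drop)		// e.g. a user-configured flag
 *			return NF_DROP;
 *		return XT_CONTINUE;	// keep traversing the chain
 *	}
 */
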
/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* called when table is needed in the given netns */
	int (*table_init)(struct net *net);

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};

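/*
 * Illustrative sketch, modelled on iptable_filter: a table flavour
 * provides a mostly-constant template like this (FILTER_VALID_HOOKS and
 * the init callback live in the flavour's own source file):
 *
 *	static const struct xt_table packet_filter = {
 *		.name		= "filter",
 *		.valid_hooks	= FILTER_VALID_HOOKS,
 *		.me		= THIS_MODULE,
 *		.af		= NFPROTO_IPV4,
 *		.priority	= NF_IP_PRI_FILTER,
 *		.table_init	= iptable_filter_table_init,
 *	};
 */
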
#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;
	void ***jumpstack;

	unsigned char entries[0] __aligned(8);
};

int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);

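/*
 * Illustrative sketch: module init/exit pairing for the registration
 * calls above (foo_mt_reg as in the hypothetical example near
 * struct xt_match):
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_match(&foo_mt_reg);
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_match(&foo_mt_reg);
 *	}
 *	module_init(foo_mt_init);
 *	module_exit(foo_mt_exit);
 */
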
int xt_check_entry_offsets(const void *base, const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset);

unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
		    bool inv_proto);

int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u);
int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u);
int xt_data_to_user(void __user *dst, const void *src,
		    int usersize, int size, int aligned_size);

void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
				       unsigned int num_counters,
				       struct xt_table_info *newinfo,
				       int *error);

struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err);

struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);

/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * low order bit set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);

/* xt_tee_enabled - true if x_tables needs to handle reentrancy
 *
 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
 */
extern struct static_key xt_tee_enabled;

/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait for the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;

	/*
	 * Low order bit of sequence is set if we already
	 * called xt_write_recseq_begin().
	 */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

	/*
	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
	 * We don't check the addend value, to avoid a test and conditional
	 * jump, since addend is most likely 1.
	 */
	__this_cpu_add(xt_recseq.sequence, addend);
	smp_wmb();

	return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}

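/*
 * Illustrative sketch: the pattern the ipt_do_table()-style traversal
 * loops use around rule evaluation and counter updates:
 *
 *	unsigned int addend;
 *
 *	local_bh_disable();		// also keeps us on this cpu
 *	addend = xt_write_recseq_begin();
 *	// ... walk the ruleset, bump per-cpu counters ...
 *	xt_write_recseq_end(addend);
 *	local_bh_enable();
 */
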
/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}

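/*
 * Illustrative sketch: the ip(6)tables packet path uses this to match a
 * rule's interface pattern (names padded to IFNAMSIZ; the mask makes
 * "eth+"-style wildcards work), combined with NF_INVF() for "! -i":
 *
 *	ret = ifname_compare_aligned(indev, e->ip.iniface,
 *				     e->ip.iniface_mask);
 *	if (NF_INVF(&e->ip, IPT_INV_VIA_IN, ret != 0))
 *		return false;
 */
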
struct xt_percpu_counter_alloc_state {
	unsigned int off;
	const char __percpu *mem;
};

bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter);
void xt_percpu_counter_free(struct xt_counters *cnt);

static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
	if (nr_cpu_ids > 1)
		return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);

	return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
	if (nr_cpu_ids > 1)
		return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);

	return cnt;
}

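/*
 * Illustrative sketch: bumping a rule's counters from the packet path,
 * inside an xt_write_recseq_begin()/end() section. ADD_COUNTER() comes
 * from the uapi header included above:
 *
 *	struct xt_counters *counter;
 *
 *	counter = xt_get_this_cpu_counter(&e->counters);
 *	ADD_COUNTER(*counter, skb->len, 1);
 */
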
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match {
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	unsigned char data[0];
};

struct compat_xt_entry_target {
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};

/* FIXME: this works only for 32-bit tasks; the whole approach needs to
 * change in order to calculate the alignment as a function of the
 * current task's alignment. */

struct compat_xt_counters {
	compat_u64 pcnt, bcnt;		/* Packet and byte counters */
};

struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};

struct _compat_xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

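/*
 * Concrete example (assuming x86-64 running an i386 iptables binary):
 * compat_u64 carries 4-byte alignment there, so COMPAT_XT_ALIGN() rounds
 * sizes to multiples of 4, whereas the native XT_ALIGN() rounds to the
 * 8-byte alignment of aligned_u64. The compat helpers below translate
 * between those two layouts.
 */
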
void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size);
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset);

#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */