]>
Commit | Line | Data |
---|---|---|
2e4e6a17 HW |
1 | /* |
2 | * x_tables core - Backend for {ip,ip6,arp}_tables | |
3 | * | |
4 | * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> | |
f229f6ce | 5 | * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net> |
2e4e6a17 HW |
6 | * |
7 | * Based on existing ip_tables code which is | |
8 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | |
9 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or modify | |
12 | * it under the terms of the GNU General Public License version 2 as | |
13 | * published by the Free Software Foundation. | |
14 | * | |
15 | */ | |
be91fd5e | 16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2e4e6a17 | 17 | #include <linux/kernel.h> |
3a9a231d | 18 | #include <linux/module.h> |
2e4e6a17 HW |
19 | #include <linux/socket.h> |
20 | #include <linux/net.h> | |
21 | #include <linux/proc_fs.h> | |
22 | #include <linux/seq_file.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/vmalloc.h> | |
9e19bb6d | 25 | #include <linux/mutex.h> |
d7fe0f24 | 26 | #include <linux/mm.h> |
5a0e3ad6 | 27 | #include <linux/slab.h> |
fbabf31e | 28 | #include <linux/audit.h> |
f13f2aee | 29 | #include <linux/user_namespace.h> |
457c4cbc | 30 | #include <net/net_namespace.h> |
2e4e6a17 HW |
31 | |
32 | #include <linux/netfilter/x_tables.h> | |
33 | #include <linux/netfilter_arp.h> | |
e3eaa991 JE |
34 | #include <linux/netfilter_ipv4/ip_tables.h> |
35 | #include <linux/netfilter_ipv6/ip6_tables.h> | |
36 | #include <linux/netfilter_arp/arp_tables.h> | |
9e19bb6d | 37 | |
2e4e6a17 HW |
38 | MODULE_LICENSE("GPL"); |
39 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |
043ef46c | 40 | MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); |
2e4e6a17 HW |
41 | |
42 | #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) | |
ae0ac0ed | 43 | #define XT_PCPU_BLOCK_SIZE 4096 |
2e4e6a17 | 44 | |
/* One entry of the per-family offset-translation table used for
 * CONFIG_COMPAT: maps a rule's offset in the native (kernel) blob to
 * the cumulative size delta of the 32-bit userland layout.
 */
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};
49 | ||
/* Per-address-family extension registry; one instance per NFPROTO_*
 * in the xt[] array below.
 */
struct xt_af {
	struct mutex mutex;		/* protects the match and target lists */
	struct list_head match;		/* registered xt_match extensions */
	struct list_head target;	/* registered xt_target extensions */
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;	/* serializes compat translation state below */
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
61 | ||
62 | static struct xt_af *xt; | |
63 | ||
7e9c6eeb JE |
/* Per-family prefix used to build extension module names ("ipt_",
 * "ip6t_", "arpt_", "ebt_", "xt_") for request_module() and for
 * error-message prefixes ("%s_tables").
 */
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4] = "ip",
	[NFPROTO_ARP] = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6] = "ip6",
};
71 | ||
2e4e6a17 | 72 | /* Registration hooks for targets. */ |
7926dbfa | 73 | int xt_register_target(struct xt_target *target) |
2e4e6a17 | 74 | { |
76108cea | 75 | u_int8_t af = target->family; |
2e4e6a17 | 76 | |
7926dbfa | 77 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 78 | list_add(&target->list, &xt[af].target); |
9e19bb6d | 79 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 80 | return 0; |
2e4e6a17 HW |
81 | } |
82 | EXPORT_SYMBOL(xt_register_target); | |
83 | ||
84 | void | |
a45049c5 | 85 | xt_unregister_target(struct xt_target *target) |
2e4e6a17 | 86 | { |
76108cea | 87 | u_int8_t af = target->family; |
a45049c5 | 88 | |
9e19bb6d | 89 | mutex_lock(&xt[af].mutex); |
df0933dc | 90 | list_del(&target->list); |
9e19bb6d | 91 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
92 | } |
93 | EXPORT_SYMBOL(xt_unregister_target); | |
94 | ||
52d9c42e PM |
95 | int |
96 | xt_register_targets(struct xt_target *target, unsigned int n) | |
97 | { | |
98 | unsigned int i; | |
99 | int err = 0; | |
100 | ||
101 | for (i = 0; i < n; i++) { | |
102 | err = xt_register_target(&target[i]); | |
103 | if (err) | |
104 | goto err; | |
105 | } | |
106 | return err; | |
107 | ||
108 | err: | |
109 | if (i > 0) | |
110 | xt_unregister_targets(target, i); | |
111 | return err; | |
112 | } | |
113 | EXPORT_SYMBOL(xt_register_targets); | |
114 | ||
115 | void | |
116 | xt_unregister_targets(struct xt_target *target, unsigned int n) | |
117 | { | |
f68c5301 CG |
118 | while (n-- > 0) |
119 | xt_unregister_target(&target[n]); | |
52d9c42e PM |
120 | } |
121 | EXPORT_SYMBOL(xt_unregister_targets); | |
122 | ||
7926dbfa | 123 | int xt_register_match(struct xt_match *match) |
2e4e6a17 | 124 | { |
76108cea | 125 | u_int8_t af = match->family; |
2e4e6a17 | 126 | |
7926dbfa | 127 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 128 | list_add(&match->list, &xt[af].match); |
9e19bb6d | 129 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 130 | return 0; |
2e4e6a17 HW |
131 | } |
132 | EXPORT_SYMBOL(xt_register_match); | |
133 | ||
134 | void | |
a45049c5 | 135 | xt_unregister_match(struct xt_match *match) |
2e4e6a17 | 136 | { |
76108cea | 137 | u_int8_t af = match->family; |
a45049c5 | 138 | |
9e19bb6d | 139 | mutex_lock(&xt[af].mutex); |
df0933dc | 140 | list_del(&match->list); |
9e19bb6d | 141 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
142 | } |
143 | EXPORT_SYMBOL(xt_unregister_match); | |
144 | ||
52d9c42e PM |
145 | int |
146 | xt_register_matches(struct xt_match *match, unsigned int n) | |
147 | { | |
148 | unsigned int i; | |
149 | int err = 0; | |
150 | ||
151 | for (i = 0; i < n; i++) { | |
152 | err = xt_register_match(&match[i]); | |
153 | if (err) | |
154 | goto err; | |
155 | } | |
156 | return err; | |
157 | ||
158 | err: | |
159 | if (i > 0) | |
160 | xt_unregister_matches(match, i); | |
161 | return err; | |
162 | } | |
163 | EXPORT_SYMBOL(xt_register_matches); | |
164 | ||
165 | void | |
166 | xt_unregister_matches(struct xt_match *match, unsigned int n) | |
167 | { | |
f68c5301 CG |
168 | while (n-- > 0) |
169 | xt_unregister_match(&match[n]); | |
52d9c42e PM |
170 | } |
171 | EXPORT_SYMBOL(xt_unregister_matches); | |
172 | ||
2e4e6a17 HW |
173 | |
174 | /* | |
175 | * These are weird, but module loading must not be done with mutex | |
176 | * held (since they will register), and we have to have a single | |
adb00ae2 | 177 | * function to use. |
2e4e6a17 HW |
178 | */ |
179 | ||
180 | /* Find match, grabs ref. Returns ERR_PTR() on error. */ | |
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				/* take a module reference on behalf of the
				 * caller; drop the lock only on success
				 */
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
206 | EXPORT_SYMBOL(xt_find_match); | |
207 | ||
fd0ec0e6 JE |
208 | struct xt_match * |
209 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) | |
210 | { | |
211 | struct xt_match *match; | |
212 | ||
adb00ae2 SH |
213 | match = xt_find_match(nfproto, name, revision); |
214 | if (IS_ERR(match)) { | |
215 | request_module("%st_%s", xt_prefix[nfproto], name); | |
216 | match = xt_find_match(nfproto, name, revision); | |
217 | } | |
218 | ||
219 | return match; | |
fd0ec0e6 JE |
220 | } |
221 | EXPORT_SYMBOL_GPL(xt_request_find_match); | |
222 | ||
2e4e6a17 | 223 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ |
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				/* take a module reference on behalf of the
				 * caller; drop the lock only on success
				 */
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
249 | EXPORT_SYMBOL(xt_find_target); | |
250 | ||
76108cea | 251 | struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
252 | { |
253 | struct xt_target *target; | |
254 | ||
adb00ae2 SH |
255 | target = xt_find_target(af, name, revision); |
256 | if (IS_ERR(target)) { | |
257 | request_module("%st_%s", xt_prefix[af], name); | |
258 | target = xt_find_target(af, name, revision); | |
259 | } | |
260 | ||
261 | return target; | |
2e4e6a17 HW |
262 | } |
263 | EXPORT_SYMBOL_GPL(xt_request_find_target); | |
264 | ||
76108cea | 265 | static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 266 | { |
5452e425 | 267 | const struct xt_match *m; |
2e4e6a17 HW |
268 | int have_rev = 0; |
269 | ||
270 | list_for_each_entry(m, &xt[af].match, list) { | |
271 | if (strcmp(m->name, name) == 0) { | |
272 | if (m->revision > *bestp) | |
273 | *bestp = m->revision; | |
274 | if (m->revision == revision) | |
275 | have_rev = 1; | |
276 | } | |
277 | } | |
656caff2 PM |
278 | |
279 | if (af != NFPROTO_UNSPEC && !have_rev) | |
280 | return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
281 | ||
2e4e6a17 HW |
282 | return have_rev; |
283 | } | |
284 | ||
76108cea | 285 | static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 286 | { |
5452e425 | 287 | const struct xt_target *t; |
2e4e6a17 HW |
288 | int have_rev = 0; |
289 | ||
290 | list_for_each_entry(t, &xt[af].target, list) { | |
291 | if (strcmp(t->name, name) == 0) { | |
292 | if (t->revision > *bestp) | |
293 | *bestp = t->revision; | |
294 | if (t->revision == revision) | |
295 | have_rev = 1; | |
296 | } | |
297 | } | |
656caff2 PM |
298 | |
299 | if (af != NFPROTO_UNSPEC && !have_rev) | |
300 | return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
301 | ||
2e4e6a17 HW |
302 | return have_rev; |
303 | } | |
304 | ||
305 | /* Returns true or false (if no such extension at all) */ | |
/* @target: 1 to look up a target extension, otherwise a match.
 * On return *err holds the best (highest) revision found, or
 * -EPROTONOSUPPORT if the exact @revision is not registered, or
 * -ENOENT if no extension of that name exists at all.
 */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all? Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
329 | EXPORT_SYMBOL_GPL(xt_find_revision); | |
330 | ||
5b76c494 JE |
/* Render the hook bits set in @mask as a "/"-separated list of hook
 * names into @buf (e.g. "PREROUTING/OUTPUT") for error messages.
 * Returns @buf.  Assumes callers pass a buffer large enough for all
 * hook names of the family -- TODO confirm against call sites.
 */
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;	/* true once at least one name was emitted */
	int res;

	/* ARP has its own, shorter hook table; bridge/inet share one */
	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	      ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		/* prefix with "/" for every name after the first */
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}
364 | ||
/**
 * xt_check_match - run centralized sanity checks on a match extension
 * @par:       check parameters (match, family, table, hook mask, ...)
 * @size:      userspace-declared size of the match data
 * @proto:     protocol the rule applies to
 * @inv_proto: true if the protocol test is inverted
 *
 * Verifies size, table, hook and protocol restrictions of the match,
 * then runs the match's own checkentry() callback if present.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	/* reject use from any hook the match did not declare */
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
418 | EXPORT_SYMBOL_GPL(xt_check_match); | |
419 | ||
13631bfc FW |
420 | /** xt_check_entry_match - check that matches end before start of target |
421 | * | |
422 | * @match: beginning of xt_entry_match | |
423 | * @target: beginning of this rules target (alleged end of matches) | |
424 | * @alignment: alignment requirement of match structures | |
425 | * | |
426 | * Validates that all matches add up to the beginning of the target, | |
427 | * and that each match covers at least the base structure size. | |
428 | * | |
429 | * Return: 0 on success, negative errno on failure. | |
430 | */ | |
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	/* bytes remaining between the current match and the target */
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		/* every match struct must be properly aligned */
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		/* remaining space must hold at least the base header */
		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		/* declared size must cover at least the base header ... */
		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		/* ... and must not run past the target */
		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);
	/* loop exits with length == 0 exactly when matches end at target */

	return 0;
}
460 | ||
2722971c | 461 | #ifdef CONFIG_COMPAT |
255d0dc3 | 462 | int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) |
b386d9f5 | 463 | { |
255d0dc3 | 464 | struct xt_af *xp = &xt[af]; |
b386d9f5 | 465 | |
255d0dc3 ED |
466 | if (!xp->compat_tab) { |
467 | if (!xp->number) | |
468 | return -EINVAL; | |
469 | xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number); | |
470 | if (!xp->compat_tab) | |
471 | return -ENOMEM; | |
472 | xp->cur = 0; | |
473 | } | |
b386d9f5 | 474 | |
255d0dc3 ED |
475 | if (xp->cur >= xp->number) |
476 | return -EINVAL; | |
b386d9f5 | 477 | |
255d0dc3 ED |
478 | if (xp->cur) |
479 | delta += xp->compat_tab[xp->cur - 1].delta; | |
480 | xp->compat_tab[xp->cur].offset = offset; | |
481 | xp->compat_tab[xp->cur].delta = delta; | |
482 | xp->cur++; | |
b386d9f5 PM |
483 | return 0; |
484 | } | |
485 | EXPORT_SYMBOL_GPL(xt_compat_add_offset); | |
486 | ||
76108cea | 487 | void xt_compat_flush_offsets(u_int8_t af) |
b386d9f5 | 488 | { |
255d0dc3 ED |
489 | if (xt[af].compat_tab) { |
490 | vfree(xt[af].compat_tab); | |
491 | xt[af].compat_tab = NULL; | |
492 | xt[af].number = 0; | |
5a6351ee | 493 | xt[af].cur = 0; |
b386d9f5 PM |
494 | } |
495 | } | |
496 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | |
497 | ||
/* Binary-search the cumulative delta table for @offset and return the
 * total size delta of all rules that precede it (0 for the first rule).
 * If @offset is not recorded exactly, the delta accumulated up to the
 * insertion point is returned.
 */
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			/* exact hit: delta of everything before this rule */
			return mid ? tmp[mid - 1].delta : 0;
	}
	/* miss: delta accumulated up to the insertion point */
	return left ? tmp[left - 1].delta : 0;
}
514 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); | |
515 | ||
255d0dc3 ED |
516 | void xt_compat_init_offsets(u_int8_t af, unsigned int number) |
517 | { | |
518 | xt[af].number = number; | |
519 | xt[af].cur = 0; | |
520 | } | |
521 | EXPORT_SYMBOL(xt_compat_init_offsets); | |
522 | ||
5452e425 | 523 | int xt_compat_match_offset(const struct xt_match *match) |
2722971c | 524 | { |
9fa492cd PM |
525 | u_int16_t csize = match->compatsize ? : match->matchsize; |
526 | return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); | |
527 | } | |
528 | EXPORT_SYMBOL_GPL(xt_compat_match_offset); | |
529 | ||
0188346f FW |
530 | void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, |
531 | unsigned int *size) | |
9fa492cd | 532 | { |
5452e425 | 533 | const struct xt_match *match = m->u.kernel.match; |
9fa492cd PM |
534 | struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; |
535 | int pad, off = xt_compat_match_offset(match); | |
536 | u_int16_t msize = cm->u.user.match_size; | |
09d96860 | 537 | char name[sizeof(m->u.user.name)]; |
9fa492cd PM |
538 | |
539 | m = *dstptr; | |
540 | memcpy(m, cm, sizeof(*cm)); | |
541 | if (match->compat_from_user) | |
542 | match->compat_from_user(m->data, cm->data); | |
543 | else | |
544 | memcpy(m->data, cm->data, msize - sizeof(*cm)); | |
545 | pad = XT_ALIGN(match->matchsize) - match->matchsize; | |
546 | if (pad > 0) | |
547 | memset(m->data + match->matchsize, 0, pad); | |
548 | ||
549 | msize += off; | |
550 | m->u.user.match_size = msize; | |
09d96860 FW |
551 | strlcpy(name, match->name, sizeof(name)); |
552 | module_put(match->me); | |
553 | strncpy(m->u.user.name, name, sizeof(m->u.user.name)); | |
9fa492cd PM |
554 | |
555 | *size += off; | |
556 | *dstptr += msize; | |
557 | } | |
558 | EXPORT_SYMBOL_GPL(xt_compat_match_from_user); | |
559 | ||
739674fb JE |
560 | int xt_compat_match_to_user(const struct xt_entry_match *m, |
561 | void __user **dstptr, unsigned int *size) | |
9fa492cd | 562 | { |
5452e425 | 563 | const struct xt_match *match = m->u.kernel.match; |
9fa492cd PM |
564 | struct compat_xt_entry_match __user *cm = *dstptr; |
565 | int off = xt_compat_match_offset(match); | |
566 | u_int16_t msize = m->u.user.match_size - off; | |
567 | ||
568 | if (copy_to_user(cm, m, sizeof(*cm)) || | |
a18aa31b PM |
569 | put_user(msize, &cm->u.user.match_size) || |
570 | copy_to_user(cm->u.user.name, m->u.kernel.match->name, | |
571 | strlen(m->u.kernel.match->name) + 1)) | |
601e68e1 | 572 | return -EFAULT; |
9fa492cd PM |
573 | |
574 | if (match->compat_to_user) { | |
575 | if (match->compat_to_user((void __user *)cm->data, m->data)) | |
576 | return -EFAULT; | |
577 | } else { | |
578 | if (copy_to_user(cm->data, m->data, msize - sizeof(*cm))) | |
579 | return -EFAULT; | |
2722971c | 580 | } |
9fa492cd PM |
581 | |
582 | *size -= off; | |
583 | *dstptr += msize; | |
584 | return 0; | |
2722971c | 585 | } |
9fa492cd | 586 | EXPORT_SYMBOL_GPL(xt_compat_match_to_user); |
fc1221b3 | 587 | |
7ed2abdd FW |
/* non-compat version may have padding after verdict; this mirrors
 * struct xt_standard_target for the 32-bit layout and is used only
 * for the exact-size check in xt_compat_check_entry_offsets().
 */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};
593 | ||
/* Compat (32-bit layout) counterpart of xt_check_entry_offsets();
 * see that function for the full contract.  Validates target_offset /
 * next_offset and the chain of match sizes against the compat structs.
 */
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	/* target start must lie beyond the fixed entry header */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* a standard target must exactly fill the rest of the rule */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict aligment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
628 | EXPORT_SYMBOL(xt_compat_check_entry_offsets); | |
9fa492cd | 629 | #endif /* CONFIG_COMPAT */ |
2722971c | 630 | |
7d35812c FW |
631 | /** |
632 | * xt_check_entry_offsets - validate arp/ip/ip6t_entry | |
633 | * | |
634 | * @base: pointer to arp/ip/ip6t_entry | |
ce683e5f | 635 | * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems |
7d35812c FW |
636 | * @target_offset: the arp/ip/ip6_t->target_offset |
637 | * @next_offset: the arp/ip/ip6_t->next_offset | |
638 | * | |
13631bfc FW |
639 | * validates that target_offset and next_offset are sane and that all |
640 | * match sizes (if any) align with the target offset. | |
7d35812c | 641 | * |
ce683e5f | 642 | * This function does not validate the targets or matches themselves, it |
13631bfc FW |
643 | * only tests that all the offsets and sizes are correct, that all |
644 | * match structures are aligned, and that the last structure ends where | |
645 | * the target structure begins. | |
646 | * | |
647 | * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. | |
ce683e5f | 648 | * |
7d35812c FW |
649 | * The arp/ip/ip6t_entry structure @base must have passed following tests: |
650 | * - it must point to a valid memory location | |
651 | * - base to base + next_offset must be accessible, i.e. not exceed allocated | |
652 | * length. | |
653 | * | |
13631bfc FW |
654 | * A well-formed entry looks like this: |
655 | * | |
656 | * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry | |
657 | * e->elems[]-----' | | | |
658 | * matchsize | | | |
659 | * matchsize | | | |
660 | * | | | |
661 | * target_offset---------------------------------' | | |
662 | * next_offset---------------------------------------------------' | |
663 | * | |
664 | * elems[]: flexible array member at end of ip(6)/arpt_entry struct. | |
665 | * This is where matches (if any) and the target reside. | |
666 | * target_offset: beginning of target. | |
667 | * next_offset: start of the next rule; also: size of this rule. | |
668 | * Since targets have a minimum size, target_offset + minlen <= next_offset. | |
669 | * | |
670 | * Every match stores its size, sum of sizes must not exceed target_offset. | |
671 | * | |
7d35812c FW |
672 | * Return: 0 on success, negative errno on failure. |
673 | */ | |
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	/* at least the base target header must fit before next_offset */
	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	/* declared target size must cover its own header ... */
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	/* ... and must not run past the end of the rule */
	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* a standard target must exactly fill the rest of the rule */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
704 | EXPORT_SYMBOL(xt_check_entry_offsets); | |
705 | ||
f4dc7771 FW |
706 | /** |
707 | * xt_alloc_entry_offsets - allocate array to store rule head offsets | |
708 | * | |
709 | * @size: number of entries | |
710 | * | |
711 | * Return: NULL or kmalloc'd or vmalloc'd array | |
712 | */ | |
713 | unsigned int *xt_alloc_entry_offsets(unsigned int size) | |
714 | { | |
715 | unsigned int *off; | |
716 | ||
717 | off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN); | |
718 | ||
719 | if (off) | |
720 | return off; | |
721 | ||
722 | if (size < (SIZE_MAX / sizeof(unsigned int))) | |
723 | off = vmalloc(size * sizeof(unsigned int)); | |
724 | ||
725 | return off; | |
726 | } | |
727 | EXPORT_SYMBOL(xt_alloc_entry_offsets); | |
728 | ||
729 | /** | |
730 | * xt_find_jump_offset - check if target is a valid jump offset | |
731 | * | |
732 | * @offsets: array containing all valid rule start offsets of a rule blob | |
733 | * @target: the jump target to search for | |
734 | * @size: entries in @offset | |
735 | */ | |
/* Binary-search the sorted @offsets array of @size entries for
 * @target; true when @target is a recorded rule start offset.
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int lo = 0;
	int hi = size;

	while (lo < hi) {
		int mid = (lo + hi) / 2u;
		unsigned int cur = offsets[mid];

		if (cur == target)
			return true;
		if (cur > target)
			hi = mid;
		else
			lo = mid + 1;
	}

	return false;
}
754 | EXPORT_SYMBOL(xt_find_jump_offset); | |
755 | ||
/**
 * xt_check_target - run centralized sanity checks on a target extension
 * @par:       check parameters (target, family, table, hook mask, ...)
 * @size:      userspace-declared size of the target data
 * @proto:     protocol the rule applies to
 * @inv_proto: true if the protocol test is inverted
 *
 * Verifies size, table, hook and protocol restrictions of the target,
 * then runs the target's own checkentry() callback if present.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	/* reject use from any hook the target did not declare */
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
804 | EXPORT_SYMBOL_GPL(xt_check_target); | |
805 | ||
d7591f0c FW |
806 | /** |
807 | * xt_copy_counters_from_user - copy counters and metadata from userspace | |
808 | * | |
809 | * @user: src pointer to userspace memory | |
810 | * @len: alleged size of userspace memory | |
811 | * @info: where to store the xt_counters_info metadata | |
812 | * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel | |
813 | * | |
814 | * Copies counter meta data from @user and stores it in @info. | |
815 | * | |
816 | * vmallocs memory to hold the counters, then copies the counter data | |
817 | * from @user to the new memory and returns a pointer to it. | |
818 | * | |
819 | * If @compat is true, @info gets converted automatically to the 64bit | |
820 | * representation. | |
821 | * | |
822 | * The metadata associated with the counters is stored in @info. | |
823 | * | |
824 | * Return: returns pointer that caller has to test via IS_ERR(). | |
825 | * If IS_ERR is false, caller has to vfree the pointer. | |
826 | */ | |
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		/* convert to the native layout in @info */
		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		/* name came from userspace; force NUL termination */
		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	/* 64-bit multiply: the product must not be truncated before the
	 * comparison below
	 */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	/* remaining length must exactly match the declared counter count */
	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
878 | EXPORT_SYMBOL_GPL(xt_copy_counters_from_user); | |
879 | ||
2722971c | 880 | #ifdef CONFIG_COMPAT |
5452e425 | 881 | int xt_compat_target_offset(const struct xt_target *target) |
2722971c | 882 | { |
9fa492cd PM |
883 | u_int16_t csize = target->compatsize ? : target->targetsize; |
884 | return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); | |
885 | } | |
886 | EXPORT_SYMBOL_GPL(xt_compat_target_offset); | |
887 | ||
/* Expand a 32bit-compat target entry at @t into its native layout at
 * *@dstptr, advancing *@dstptr and growing *@size by the layout delta.
 * Drops the module reference taken while looking up the target. */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	/* zero the alignment padding so no stale bytes reach userspace
	 * when the entry is later copied back out */
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	/* stage the name in a local buffer first: after module_put() the
	 * module (and with it target->name) may go away, so it must not
	 * be dereferenced past that point */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
917 | ||
/* Shrink a native target entry at @t into its 32bit-compat layout at the
 * userspace buffer *@dstptr, advancing *@dstptr and reducing *@size by
 * the layout delta.  Returns 0 or -EFAULT on a failed userspace copy. */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	/* size as seen by a 32bit task: native size minus alignment delta */
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	/* let the target convert its private data, or do a raw copy when
	 * native and compat private layouts are identical */
	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
2722971c DM |
945 | #endif |
946 | ||
2e4e6a17 HW |
/* Allocate a table-info header plus @size bytes of rule storage.
 * Small requests go through kmalloc; larger ones (or kmalloc failure)
 * fall back to vmalloc.  Returns NULL on overflow or allocation failure;
 * free with xt_free_table_info(). */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	/* reject additions that wrapped size_t */
	if (sz < sizeof(*info))
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	/* try kmalloc first for modest sizes, but quietly and without
	 * retrying so we fall through to vmalloc under memory pressure */
	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!info) {
		info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN |
				     __GFP_NORETRY | __GFP_HIGHMEM,
				 PAGE_KERNEL);
		if (!info)
			return NULL;
	}
	/* only the header is zeroed; the rule area is for the caller to fill */
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
973 | ||
974 | void xt_free_table_info(struct xt_table_info *info) | |
975 | { | |
976 | int cpu; | |
977 | ||
f3c5c1bf | 978 | if (info->jumpstack != NULL) { |
f6b50824 ED |
979 | for_each_possible_cpu(cpu) |
980 | kvfree(info->jumpstack[cpu]); | |
981 | kvfree(info->jumpstack); | |
f3c5c1bf JE |
982 | } |
983 | ||
711bdde6 | 984 | kvfree(info); |
2e4e6a17 HW |
985 | } |
986 | EXPORT_SYMBOL(xt_free_table_info); | |
987 | ||
eb1a6bdc | 988 | /* Find table by name, grabs mutex & ref. Returns NULL on error. */ |
76108cea JE |
989 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, |
990 | const char *name) | |
2e4e6a17 | 991 | { |
b9e69e12 | 992 | struct xt_table *t, *found = NULL; |
2e4e6a17 | 993 | |
7926dbfa | 994 | mutex_lock(&xt[af].mutex); |
8d870052 | 995 | list_for_each_entry(t, &net->xt.tables[af], list) |
2e4e6a17 HW |
996 | if (strcmp(t->name, name) == 0 && try_module_get(t->me)) |
997 | return t; | |
b9e69e12 FW |
998 | |
999 | if (net == &init_net) | |
1000 | goto out; | |
1001 | ||
1002 | /* Table doesn't exist in this netns, re-try init */ | |
1003 | list_for_each_entry(t, &init_net.xt.tables[af], list) { | |
1004 | if (strcmp(t->name, name)) | |
1005 | continue; | |
1006 | if (!try_module_get(t->me)) | |
1007 | return NULL; | |
1008 | ||
1009 | mutex_unlock(&xt[af].mutex); | |
1010 | if (t->table_init(net) != 0) { | |
1011 | module_put(t->me); | |
1012 | return NULL; | |
1013 | } | |
1014 | ||
1015 | found = t; | |
1016 | ||
1017 | mutex_lock(&xt[af].mutex); | |
1018 | break; | |
1019 | } | |
1020 | ||
1021 | if (!found) | |
1022 | goto out; | |
1023 | ||
1024 | /* and once again: */ | |
1025 | list_for_each_entry(t, &net->xt.tables[af], list) | |
1026 | if (strcmp(t->name, name) == 0) | |
1027 | return t; | |
1028 | ||
1029 | module_put(found->me); | |
1030 | out: | |
9e19bb6d | 1031 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
1032 | return NULL; |
1033 | } | |
1034 | EXPORT_SYMBOL_GPL(xt_find_table_lock); | |
1035 | ||
/* Release the per-AF mutex taken by xt_find_table_lock(). */
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
1041 | ||
#ifdef CONFIG_COMPAT
/* Serialize compat (32bit-on-64bit) table translations for @af. */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

/* Counterpart to xt_compat_lock(). */
void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
2e4e6a17 | 1055 | |
7f5c6d4f ED |
/* Per-cpu sequence counter entered by packet-path rule traversal.
 * NOTE(review): xt_replace_table's comment says readers resynchronize
 * via the locking in get_counters() -- confirm against the per-AF
 * do_table implementations. */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

/* Static branch presumably flipped while a TEE target is registered;
 * see the reentrancy notes in xt_jumpstack_alloc() -- TODO confirm. */
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
1061 | ||
f3c5c1bf JE |
/* Allocate the per-cpu jump stacks used when a ruleset contains jumps.
 * Returns 0 or -ENOMEM; partially allocated stacks are NOT freed here
 * (see the comment at the bottom for the cleanup path). */
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	/* array of per-cpu stack pointers, sized for all possible cpus */
	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}
942e4a2b | 1108 | |
2e4e6a17 HW |
/* Swap in @newinfo as @table's ruleset.
 *
 * Verifies @num_counters matches the old ruleset under local_bh_disable,
 * publishes @newinfo, and returns the old table info (whose counters the
 * caller harvests and which the caller must free).  On failure returns
 * NULL and stores the errno in *@error. */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

	/* record the ruleset change for auditing (entry count of the
	 * ruleset being replaced) */
#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
1171 | ||
35aad0ff JE |
1172 | struct xt_table *xt_register_table(struct net *net, |
1173 | const struct xt_table *input_table, | |
a98da11d AD |
1174 | struct xt_table_info *bootstrap, |
1175 | struct xt_table_info *newinfo) | |
2e4e6a17 HW |
1176 | { |
1177 | int ret; | |
1178 | struct xt_table_info *private; | |
35aad0ff | 1179 | struct xt_table *t, *table; |
2e4e6a17 | 1180 | |
44d34e72 | 1181 | /* Don't add one object to multiple lists. */ |
35aad0ff | 1182 | table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); |
44d34e72 AD |
1183 | if (!table) { |
1184 | ret = -ENOMEM; | |
1185 | goto out; | |
1186 | } | |
1187 | ||
7926dbfa | 1188 | mutex_lock(&xt[table->af].mutex); |
2e4e6a17 | 1189 | /* Don't autoload: we'd eat our tail... */ |
8d870052 | 1190 | list_for_each_entry(t, &net->xt.tables[table->af], list) { |
df0933dc PM |
1191 | if (strcmp(t->name, table->name) == 0) { |
1192 | ret = -EEXIST; | |
1193 | goto unlock; | |
1194 | } | |
2e4e6a17 HW |
1195 | } |
1196 | ||
1197 | /* Simplifies replace_table code. */ | |
1198 | table->private = bootstrap; | |
78454473 | 1199 | |
2e4e6a17 HW |
1200 | if (!xt_replace_table(table, 0, newinfo, &ret)) |
1201 | goto unlock; | |
1202 | ||
1203 | private = table->private; | |
be91fd5e | 1204 | pr_debug("table->private->number = %u\n", private->number); |
2e4e6a17 HW |
1205 | |
1206 | /* save number of initial entries */ | |
1207 | private->initial_entries = private->number; | |
1208 | ||
8d870052 | 1209 | list_add(&table->list, &net->xt.tables[table->af]); |
a98da11d AD |
1210 | mutex_unlock(&xt[table->af].mutex); |
1211 | return table; | |
2e4e6a17 | 1212 | |
7926dbfa | 1213 | unlock: |
9e19bb6d | 1214 | mutex_unlock(&xt[table->af].mutex); |
44d34e72 | 1215 | kfree(table); |
a98da11d AD |
1216 | out: |
1217 | return ERR_PTR(ret); | |
2e4e6a17 HW |
1218 | } |
1219 | EXPORT_SYMBOL_GPL(xt_register_table); | |
1220 | ||
/* Unlink @table from its netns list and free the duplicated table struct.
 * Returns the table's private info; the caller owns and must free it
 * (typically via xt_free_table_info after harvesting counters). */
void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
1234 | ||
1235 | #ifdef CONFIG_PROC_FS | |
715cf35a AD |
/* seq_file state for the per-AF /proc/net/<af>_tables_names listing */
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;		/* address family whose tables are listed */
};
/* ->start takes the per-AF mutex; it is released in ->stop */
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

/* print one table name per line, skipping empty names */
static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		/* the AF was stashed as the proc entry's data pointer */
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
1304 | ||
eb132205 JE |
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;	/* list being walked + cursor */
	uint8_t class, nfproto;		/* state below + target AF */
};

/* traversal states: NFPROTO_UNSPEC list first, then the AF's own list */
enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

/* Advance the cursor one entry, handing over from the NFPROTO_UNSPEC
 * mutex/list to the AF-specific one at the boundary.  Whichever mutex
 * corresponds to trav->class is held on return (released in _stop). */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
 		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* UNSPEC list exhausted: swap mutexes and lists */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

/* Reposition to *pos by replaying _next from a fresh INIT state;
 * O(pos) per ->start, which is fine for these short lists. */
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

/* Drop whichever mutex the traversal state says is currently held. */
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}
2e4e6a17 | 1387 | |
eb132205 JE |
/* /proc listing of registered matches: thin wrappers over the shared
 * xt_mttg traversal with is_target == false. */
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

/* print one match name per line; list heads and empty names are skipped */
static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	/* AF stashed as the proc entry's data pointer */
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
2e4e6a17 | 1440 | |
025d93d1 AD |
/* /proc listing of registered targets: same shape as the match listing,
 * using the shared xt_mttg traversal with is_target == true. */
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

/* print one target name per line; list heads and empty names are skipped */
static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	/* AF stashed as the proc entry's data pointer */
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1493 | ||
1494 | #define FORMAT_TABLES "_tables_names" | |
1495 | #define FORMAT_MATCHES "_tables_matches" | |
1496 | #define FORMAT_TARGETS "_tables_targets" | |
1497 | ||
1498 | #endif /* CONFIG_PROC_FS */ | |
1499 | ||
2b95efe7 | 1500 | /** |
b9e69e12 | 1501 | * xt_hook_ops_alloc - set up hooks for a new table |
2b95efe7 JE |
1502 | * @table: table with metadata needed to set up hooks |
1503 | * @fn: Hook function | |
1504 | * | |
b9e69e12 FW |
1505 | * This function will create the nf_hook_ops that the x_table needs |
1506 | * to hand to xt_hook_link_net(). | |
2b95efe7 | 1507 | */ |
b9e69e12 FW |
1508 | struct nf_hook_ops * |
1509 | xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn) | |
2b95efe7 JE |
1510 | { |
1511 | unsigned int hook_mask = table->valid_hooks; | |
1512 | uint8_t i, num_hooks = hweight32(hook_mask); | |
1513 | uint8_t hooknum; | |
1514 | struct nf_hook_ops *ops; | |
2b95efe7 | 1515 | |
a6d0bae1 XL |
1516 | if (!num_hooks) |
1517 | return ERR_PTR(-EINVAL); | |
1518 | ||
1ecc281e | 1519 | ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL); |
2b95efe7 JE |
1520 | if (ops == NULL) |
1521 | return ERR_PTR(-ENOMEM); | |
1522 | ||
1523 | for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; | |
1524 | hook_mask >>= 1, ++hooknum) { | |
1525 | if (!(hook_mask & 1)) | |
1526 | continue; | |
1527 | ops[i].hook = fn; | |
2b95efe7 JE |
1528 | ops[i].pf = table->af; |
1529 | ops[i].hooknum = hooknum; | |
1530 | ops[i].priority = table->priority; | |
1531 | ++i; | |
1532 | } | |
1533 | ||
2b95efe7 JE |
1534 | return ops; |
1535 | } | |
b9e69e12 | 1536 | EXPORT_SYMBOL_GPL(xt_hook_ops_alloc); |
2b95efe7 | 1537 | |
/* Create the three per-AF proc entries (<af>_tables_names, _matches,
 * _targets) for @net, owned by the netns root user when mappable.
 * Returns 0, -EINVAL on a bad AF, or -1 if a proc entry failed (already
 * created entries are rolled back via the goto chain below). */
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	/* map uid/gid 0 into this netns' user namespace for chown below */
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	/* the AF rides along as the proc entry's data pointer */
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

	/* unwind in reverse order of creation */
#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
1600 | ||
76108cea | 1601 | void xt_proto_fini(struct net *net, u_int8_t af) |
2e4e6a17 HW |
1602 | { |
1603 | #ifdef CONFIG_PROC_FS | |
1604 | char buf[XT_FUNCTION_MAXNAMELEN]; | |
1605 | ||
ce18afe5 | 1606 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1607 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
ece31ffd | 1608 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1609 | |
ce18afe5 | 1610 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1611 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
ece31ffd | 1612 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1613 | |
ce18afe5 | 1614 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1615 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
ece31ffd | 1616 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1617 | #endif /*CONFIG_PROC_FS*/ |
1618 | } | |
1619 | EXPORT_SYMBOL_GPL(xt_proto_fini); | |
1620 | ||
f28e15ba FW |
/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	/* block must hold at least two counters, or the carve-out logic
	 * below could hand out an offset past the block */
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	/* UP systems keep counters inline in the entry itself */
	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		/* block-aligned so the free path can recognize block
		 * starts by their offset (see xt_percpu_counter_free) */
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	/* carve the next counter slot out of the current block */
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		/* block exhausted: next call allocates a fresh one */
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1664 | ||
4d31eef5 FW |
1665 | void xt_percpu_counter_free(struct xt_counters *counters) |
1666 | { | |
1667 | unsigned long pcnt = counters->pcnt; | |
1668 | ||
ae0ac0ed | 1669 | if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0) |
4d31eef5 FW |
1670 | free_percpu((void __percpu *)pcnt); |
1671 | } | |
1672 | EXPORT_SYMBOL_GPL(xt_percpu_counter_free); | |
1673 | ||
8d870052 AD |
1674 | static int __net_init xt_net_init(struct net *net) |
1675 | { | |
1676 | int i; | |
1677 | ||
7e9c6eeb | 1678 | for (i = 0; i < NFPROTO_NUMPROTO; i++) |
8d870052 AD |
1679 | INIT_LIST_HEAD(&net->xt.tables[i]); |
1680 | return 0; | |
1681 | } | |
1682 | ||
1683 | static struct pernet_operations xt_net_ops = { | |
1684 | .init = xt_net_init, | |
1685 | }; | |
2e4e6a17 HW |
1686 | |
/* Module init: set up per-cpu seqcounts, the per-AF registry array
 * (mutexes + match/target lists), and the pernet subsystem. */
static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);	/* don't leak the registry on failure */
	return rv;
}
1714 | ||
/* Module exit: tear down the pernet subsystem and free the registry. */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);
1723 |