]>
Commit | Line | Data |
---|---|---|
2e4e6a17 HW |
1 | /* |
2 | * x_tables core - Backend for {ip,ip6,arp}_tables | |
3 | * | |
4 | * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> | |
f229f6ce | 5 | * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net> |
2e4e6a17 HW |
6 | * |
7 | * Based on existing ip_tables code which is | |
8 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | |
9 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or modify | |
12 | * it under the terms of the GNU General Public License version 2 as | |
13 | * published by the Free Software Foundation. | |
14 | * | |
15 | */ | |
be91fd5e | 16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2e4e6a17 | 17 | #include <linux/kernel.h> |
3a9a231d | 18 | #include <linux/module.h> |
2e4e6a17 HW |
19 | #include <linux/socket.h> |
20 | #include <linux/net.h> | |
21 | #include <linux/proc_fs.h> | |
22 | #include <linux/seq_file.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/vmalloc.h> | |
9e19bb6d | 25 | #include <linux/mutex.h> |
d7fe0f24 | 26 | #include <linux/mm.h> |
5a0e3ad6 | 27 | #include <linux/slab.h> |
fbabf31e | 28 | #include <linux/audit.h> |
f13f2aee | 29 | #include <linux/user_namespace.h> |
457c4cbc | 30 | #include <net/net_namespace.h> |
2e4e6a17 HW |
31 | |
32 | #include <linux/netfilter/x_tables.h> | |
33 | #include <linux/netfilter_arp.h> | |
e3eaa991 JE |
34 | #include <linux/netfilter_ipv4/ip_tables.h> |
35 | #include <linux/netfilter_ipv6/ip6_tables.h> | |
36 | #include <linux/netfilter_arp/arp_tables.h> | |
9e19bb6d | 37 | |
2e4e6a17 HW |
38 | MODULE_LICENSE("GPL"); |
39 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |
043ef46c | 40 | MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); |
2e4e6a17 HW |
41 | |
42 | #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) | |
ae0ac0ed | 43 | #define XT_PCPU_BLOCK_SIZE 4096 |
2e4e6a17 | 44 | |
b386d9f5 | 45 | struct compat_delta { |
255d0dc3 ED |
46 | unsigned int offset; /* offset in kernel */ |
47 | int delta; /* delta in 32bit user land */ | |
b386d9f5 PM |
48 | }; |
49 | ||
2e4e6a17 | 50 | struct xt_af { |
9e19bb6d | 51 | struct mutex mutex; |
2e4e6a17 HW |
52 | struct list_head match; |
53 | struct list_head target; | |
b386d9f5 | 54 | #ifdef CONFIG_COMPAT |
2722971c | 55 | struct mutex compat_mutex; |
255d0dc3 ED |
56 | struct compat_delta *compat_tab; |
57 | unsigned int number; /* number of slots in compat_tab[] */ | |
58 | unsigned int cur; /* number of used slots in compat_tab[] */ | |
b386d9f5 | 59 | #endif |
2e4e6a17 HW |
60 | }; |
61 | ||
62 | static struct xt_af *xt; | |
63 | ||
7e9c6eeb JE |
64 | static const char *const xt_prefix[NFPROTO_NUMPROTO] = { |
65 | [NFPROTO_UNSPEC] = "x", | |
66 | [NFPROTO_IPV4] = "ip", | |
67 | [NFPROTO_ARP] = "arp", | |
68 | [NFPROTO_BRIDGE] = "eb", | |
69 | [NFPROTO_IPV6] = "ip6", | |
37f9f733 PM |
70 | }; |
71 | ||
2e4e6a17 | 72 | /* Registration hooks for targets. */ |
7926dbfa | 73 | int xt_register_target(struct xt_target *target) |
2e4e6a17 | 74 | { |
76108cea | 75 | u_int8_t af = target->family; |
2e4e6a17 | 76 | |
7926dbfa | 77 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 78 | list_add(&target->list, &xt[af].target); |
9e19bb6d | 79 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 80 | return 0; |
2e4e6a17 HW |
81 | } |
82 | EXPORT_SYMBOL(xt_register_target); | |
83 | ||
84 | void | |
a45049c5 | 85 | xt_unregister_target(struct xt_target *target) |
2e4e6a17 | 86 | { |
76108cea | 87 | u_int8_t af = target->family; |
a45049c5 | 88 | |
9e19bb6d | 89 | mutex_lock(&xt[af].mutex); |
df0933dc | 90 | list_del(&target->list); |
9e19bb6d | 91 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
92 | } |
93 | EXPORT_SYMBOL(xt_unregister_target); | |
94 | ||
52d9c42e PM |
95 | int |
96 | xt_register_targets(struct xt_target *target, unsigned int n) | |
97 | { | |
98 | unsigned int i; | |
99 | int err = 0; | |
100 | ||
101 | for (i = 0; i < n; i++) { | |
102 | err = xt_register_target(&target[i]); | |
103 | if (err) | |
104 | goto err; | |
105 | } | |
106 | return err; | |
107 | ||
108 | err: | |
109 | if (i > 0) | |
110 | xt_unregister_targets(target, i); | |
111 | return err; | |
112 | } | |
113 | EXPORT_SYMBOL(xt_register_targets); | |
114 | ||
115 | void | |
116 | xt_unregister_targets(struct xt_target *target, unsigned int n) | |
117 | { | |
f68c5301 CG |
118 | while (n-- > 0) |
119 | xt_unregister_target(&target[n]); | |
52d9c42e PM |
120 | } |
121 | EXPORT_SYMBOL(xt_unregister_targets); | |
122 | ||
7926dbfa | 123 | int xt_register_match(struct xt_match *match) |
2e4e6a17 | 124 | { |
76108cea | 125 | u_int8_t af = match->family; |
2e4e6a17 | 126 | |
7926dbfa | 127 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 128 | list_add(&match->list, &xt[af].match); |
9e19bb6d | 129 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 130 | return 0; |
2e4e6a17 HW |
131 | } |
132 | EXPORT_SYMBOL(xt_register_match); | |
133 | ||
134 | void | |
a45049c5 | 135 | xt_unregister_match(struct xt_match *match) |
2e4e6a17 | 136 | { |
76108cea | 137 | u_int8_t af = match->family; |
a45049c5 | 138 | |
9e19bb6d | 139 | mutex_lock(&xt[af].mutex); |
df0933dc | 140 | list_del(&match->list); |
9e19bb6d | 141 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
142 | } |
143 | EXPORT_SYMBOL(xt_unregister_match); | |
144 | ||
52d9c42e PM |
145 | int |
146 | xt_register_matches(struct xt_match *match, unsigned int n) | |
147 | { | |
148 | unsigned int i; | |
149 | int err = 0; | |
150 | ||
151 | for (i = 0; i < n; i++) { | |
152 | err = xt_register_match(&match[i]); | |
153 | if (err) | |
154 | goto err; | |
155 | } | |
156 | return err; | |
157 | ||
158 | err: | |
159 | if (i > 0) | |
160 | xt_unregister_matches(match, i); | |
161 | return err; | |
162 | } | |
163 | EXPORT_SYMBOL(xt_register_matches); | |
164 | ||
165 | void | |
166 | xt_unregister_matches(struct xt_match *match, unsigned int n) | |
167 | { | |
f68c5301 CG |
168 | while (n-- > 0) |
169 | xt_unregister_match(&match[n]); | |
52d9c42e PM |
170 | } |
171 | EXPORT_SYMBOL(xt_unregister_matches); | |
172 | ||
2e4e6a17 HW |
173 | |
174 | /* | |
175 | * These are weird, but module loading must not be done with mutex | |
176 | * held (since they will register), and we have to have a single | |
adb00ae2 | 177 | * function to use. |
2e4e6a17 HW |
178 | */ |
179 | ||
180 | /* Find match, grabs ref. Returns ERR_PTR() on error. */ | |
76108cea | 181 | struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
182 | { |
183 | struct xt_match *m; | |
42046e2e | 184 | int err = -ENOENT; |
2e4e6a17 | 185 | |
7926dbfa | 186 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
187 | list_for_each_entry(m, &xt[af].match, list) { |
188 | if (strcmp(m->name, name) == 0) { | |
189 | if (m->revision == revision) { | |
190 | if (try_module_get(m->me)) { | |
9e19bb6d | 191 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
192 | return m; |
193 | } | |
194 | } else | |
195 | err = -EPROTOTYPE; /* Found something. */ | |
196 | } | |
197 | } | |
9e19bb6d | 198 | mutex_unlock(&xt[af].mutex); |
55b69e91 JE |
199 | |
200 | if (af != NFPROTO_UNSPEC) | |
201 | /* Try searching again in the family-independent list */ | |
202 | return xt_find_match(NFPROTO_UNSPEC, name, revision); | |
203 | ||
2e4e6a17 HW |
204 | return ERR_PTR(err); |
205 | } | |
206 | EXPORT_SYMBOL(xt_find_match); | |
207 | ||
fd0ec0e6 JE |
208 | struct xt_match * |
209 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) | |
210 | { | |
211 | struct xt_match *match; | |
212 | ||
adb00ae2 SH |
213 | match = xt_find_match(nfproto, name, revision); |
214 | if (IS_ERR(match)) { | |
215 | request_module("%st_%s", xt_prefix[nfproto], name); | |
216 | match = xt_find_match(nfproto, name, revision); | |
217 | } | |
218 | ||
219 | return match; | |
fd0ec0e6 JE |
220 | } |
221 | EXPORT_SYMBOL_GPL(xt_request_find_match); | |
222 | ||
2e4e6a17 | 223 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ |
76108cea | 224 | struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
225 | { |
226 | struct xt_target *t; | |
42046e2e | 227 | int err = -ENOENT; |
2e4e6a17 | 228 | |
7926dbfa | 229 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
230 | list_for_each_entry(t, &xt[af].target, list) { |
231 | if (strcmp(t->name, name) == 0) { | |
232 | if (t->revision == revision) { | |
233 | if (try_module_get(t->me)) { | |
9e19bb6d | 234 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
235 | return t; |
236 | } | |
237 | } else | |
238 | err = -EPROTOTYPE; /* Found something. */ | |
239 | } | |
240 | } | |
9e19bb6d | 241 | mutex_unlock(&xt[af].mutex); |
55b69e91 JE |
242 | |
243 | if (af != NFPROTO_UNSPEC) | |
244 | /* Try searching again in the family-independent list */ | |
245 | return xt_find_target(NFPROTO_UNSPEC, name, revision); | |
246 | ||
2e4e6a17 HW |
247 | return ERR_PTR(err); |
248 | } | |
249 | EXPORT_SYMBOL(xt_find_target); | |
250 | ||
76108cea | 251 | struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
252 | { |
253 | struct xt_target *target; | |
254 | ||
adb00ae2 SH |
255 | target = xt_find_target(af, name, revision); |
256 | if (IS_ERR(target)) { | |
257 | request_module("%st_%s", xt_prefix[af], name); | |
258 | target = xt_find_target(af, name, revision); | |
259 | } | |
260 | ||
261 | return target; | |
2e4e6a17 HW |
262 | } |
263 | EXPORT_SYMBOL_GPL(xt_request_find_target); | |
264 | ||
f32815d2 WB |
265 | |
266 | static int xt_obj_to_user(u16 __user *psize, u16 size, | |
267 | void __user *pname, const char *name, | |
268 | u8 __user *prev, u8 rev) | |
269 | { | |
270 | if (put_user(size, psize)) | |
271 | return -EFAULT; | |
272 | if (copy_to_user(pname, name, strlen(name) + 1)) | |
273 | return -EFAULT; | |
274 | if (put_user(rev, prev)) | |
275 | return -EFAULT; | |
276 | ||
277 | return 0; | |
278 | } | |
279 | ||
280 | #define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \ | |
281 | xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \ | |
282 | U->u.user.name, K->u.kernel.TYPE->name, \ | |
283 | &U->u.user.revision, K->u.kernel.TYPE->revision) | |
284 | ||
285 | int xt_data_to_user(void __user *dst, const void *src, | |
286 | int usersize, int size) | |
287 | { | |
288 | usersize = usersize ? : size; | |
289 | if (copy_to_user(dst, src, usersize)) | |
290 | return -EFAULT; | |
291 | if (usersize != size && clear_user(dst + usersize, size - usersize)) | |
292 | return -EFAULT; | |
293 | ||
294 | return 0; | |
295 | } | |
296 | EXPORT_SYMBOL_GPL(xt_data_to_user); | |
297 | ||
298 | #define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ | |
299 | xt_data_to_user(U->data, K->data, \ | |
300 | K->u.kernel.TYPE->usersize, \ | |
301 | C_SIZE ? : K->u.kernel.TYPE->TYPE##size) | |
302 | ||
303 | int xt_match_to_user(const struct xt_entry_match *m, | |
304 | struct xt_entry_match __user *u) | |
305 | { | |
306 | return XT_OBJ_TO_USER(u, m, match, 0) || | |
307 | XT_DATA_TO_USER(u, m, match, 0); | |
308 | } | |
309 | EXPORT_SYMBOL_GPL(xt_match_to_user); | |
310 | ||
311 | int xt_target_to_user(const struct xt_entry_target *t, | |
312 | struct xt_entry_target __user *u) | |
313 | { | |
314 | return XT_OBJ_TO_USER(u, t, target, 0) || | |
315 | XT_DATA_TO_USER(u, t, target, 0); | |
316 | } | |
317 | EXPORT_SYMBOL_GPL(xt_target_to_user); | |
318 | ||
76108cea | 319 | static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 320 | { |
5452e425 | 321 | const struct xt_match *m; |
2e4e6a17 HW |
322 | int have_rev = 0; |
323 | ||
324 | list_for_each_entry(m, &xt[af].match, list) { | |
325 | if (strcmp(m->name, name) == 0) { | |
326 | if (m->revision > *bestp) | |
327 | *bestp = m->revision; | |
328 | if (m->revision == revision) | |
329 | have_rev = 1; | |
330 | } | |
331 | } | |
656caff2 PM |
332 | |
333 | if (af != NFPROTO_UNSPEC && !have_rev) | |
334 | return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
335 | ||
2e4e6a17 HW |
336 | return have_rev; |
337 | } | |
338 | ||
76108cea | 339 | static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 340 | { |
5452e425 | 341 | const struct xt_target *t; |
2e4e6a17 HW |
342 | int have_rev = 0; |
343 | ||
344 | list_for_each_entry(t, &xt[af].target, list) { | |
345 | if (strcmp(t->name, name) == 0) { | |
346 | if (t->revision > *bestp) | |
347 | *bestp = t->revision; | |
348 | if (t->revision == revision) | |
349 | have_rev = 1; | |
350 | } | |
351 | } | |
656caff2 PM |
352 | |
353 | if (af != NFPROTO_UNSPEC && !have_rev) | |
354 | return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
355 | ||
2e4e6a17 HW |
356 | return have_rev; |
357 | } | |
358 | ||
359 | /* Returns true or false (if no such extension at all) */ | |
76108cea | 360 | int xt_find_revision(u8 af, const char *name, u8 revision, int target, |
2e4e6a17 HW |
361 | int *err) |
362 | { | |
363 | int have_rev, best = -1; | |
364 | ||
7926dbfa | 365 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
366 | if (target == 1) |
367 | have_rev = target_revfn(af, name, revision, &best); | |
368 | else | |
369 | have_rev = match_revfn(af, name, revision, &best); | |
9e19bb6d | 370 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
371 | |
372 | /* Nothing at all? Return 0 to try loading module. */ | |
373 | if (best == -1) { | |
374 | *err = -ENOENT; | |
375 | return 0; | |
376 | } | |
377 | ||
378 | *err = best; | |
379 | if (!have_rev) | |
380 | *err = -EPROTONOSUPPORT; | |
381 | return 1; | |
382 | } | |
383 | EXPORT_SYMBOL_GPL(xt_find_revision); | |
384 | ||
5b76c494 JE |
385 | static char * |
386 | textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) | |
45185364 | 387 | { |
5b76c494 | 388 | static const char *const inetbr_names[] = { |
45185364 JE |
389 | "PREROUTING", "INPUT", "FORWARD", |
390 | "OUTPUT", "POSTROUTING", "BROUTING", | |
391 | }; | |
5b76c494 JE |
392 | static const char *const arp_names[] = { |
393 | "INPUT", "FORWARD", "OUTPUT", | |
394 | }; | |
395 | const char *const *names; | |
396 | unsigned int i, max; | |
45185364 JE |
397 | char *p = buf; |
398 | bool np = false; | |
399 | int res; | |
400 | ||
5b76c494 JE |
401 | names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names; |
402 | max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) : | |
403 | ARRAY_SIZE(inetbr_names); | |
45185364 | 404 | *p = '\0'; |
5b76c494 | 405 | for (i = 0; i < max; ++i) { |
45185364 JE |
406 | if (!(mask & (1 << i))) |
407 | continue; | |
408 | res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); | |
409 | if (res > 0) { | |
410 | size -= res; | |
411 | p += res; | |
412 | } | |
413 | np = true; | |
414 | } | |
415 | ||
416 | return buf; | |
417 | } | |
418 | ||
916a917d | 419 | int xt_check_match(struct xt_mtchk_param *par, |
9b4fce7a | 420 | unsigned int size, u_int8_t proto, bool inv_proto) |
37f9f733 | 421 | { |
bd414ee6 JE |
422 | int ret; |
423 | ||
9b4fce7a JE |
424 | if (XT_ALIGN(par->match->matchsize) != size && |
425 | par->match->matchsize != -1) { | |
043ef46c JE |
426 | /* |
427 | * ebt_among is exempt from centralized matchsize checking | |
428 | * because it uses a dynamic-size data set. | |
429 | */ | |
b402405d JE |
430 | pr_err("%s_tables: %s.%u match: invalid size " |
431 | "%u (kernel) != (user) %u\n", | |
916a917d | 432 | xt_prefix[par->family], par->match->name, |
b402405d | 433 | par->match->revision, |
9b4fce7a | 434 | XT_ALIGN(par->match->matchsize), size); |
37f9f733 PM |
435 | return -EINVAL; |
436 | } | |
9b4fce7a JE |
437 | if (par->match->table != NULL && |
438 | strcmp(par->match->table, par->table) != 0) { | |
3dd5d7e3 | 439 | pr_err("%s_tables: %s match: only valid in %s table, not %s\n", |
916a917d | 440 | xt_prefix[par->family], par->match->name, |
9b4fce7a | 441 | par->match->table, par->table); |
37f9f733 PM |
442 | return -EINVAL; |
443 | } | |
9b4fce7a | 444 | if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { |
45185364 JE |
445 | char used[64], allow[64]; |
446 | ||
3dd5d7e3 | 447 | pr_err("%s_tables: %s match: used from hooks %s, but only " |
45185364 | 448 | "valid from %s\n", |
916a917d | 449 | xt_prefix[par->family], par->match->name, |
5b76c494 JE |
450 | textify_hooks(used, sizeof(used), par->hook_mask, |
451 | par->family), | |
452 | textify_hooks(allow, sizeof(allow), par->match->hooks, | |
453 | par->family)); | |
37f9f733 PM |
454 | return -EINVAL; |
455 | } | |
9b4fce7a | 456 | if (par->match->proto && (par->match->proto != proto || inv_proto)) { |
3dd5d7e3 | 457 | pr_err("%s_tables: %s match: only valid for protocol %u\n", |
916a917d JE |
458 | xt_prefix[par->family], par->match->name, |
459 | par->match->proto); | |
37f9f733 PM |
460 | return -EINVAL; |
461 | } | |
bd414ee6 JE |
462 | if (par->match->checkentry != NULL) { |
463 | ret = par->match->checkentry(par); | |
464 | if (ret < 0) | |
465 | return ret; | |
466 | else if (ret > 0) | |
467 | /* Flag up potential errors. */ | |
468 | return -EIO; | |
469 | } | |
37f9f733 PM |
470 | return 0; |
471 | } | |
472 | EXPORT_SYMBOL_GPL(xt_check_match); | |
473 | ||
13631bfc FW |
474 | /** xt_check_entry_match - check that matches end before start of target |
475 | * | |
476 | * @match: beginning of xt_entry_match | |
477 | * @target: beginning of this rules target (alleged end of matches) | |
478 | * @alignment: alignment requirement of match structures | |
479 | * | |
480 | * Validates that all matches add up to the beginning of the target, | |
481 | * and that each match covers at least the base structure size. | |
482 | * | |
483 | * Return: 0 on success, negative errno on failure. | |
484 | */ | |
485 | static int xt_check_entry_match(const char *match, const char *target, | |
486 | const size_t alignment) | |
487 | { | |
488 | const struct xt_entry_match *pos; | |
489 | int length = target - match; | |
490 | ||
491 | if (length == 0) /* no matches */ | |
492 | return 0; | |
493 | ||
494 | pos = (struct xt_entry_match *)match; | |
495 | do { | |
496 | if ((unsigned long)pos % alignment) | |
497 | return -EINVAL; | |
498 | ||
499 | if (length < (int)sizeof(struct xt_entry_match)) | |
500 | return -EINVAL; | |
501 | ||
502 | if (pos->u.match_size < sizeof(struct xt_entry_match)) | |
503 | return -EINVAL; | |
504 | ||
505 | if (pos->u.match_size > length) | |
506 | return -EINVAL; | |
507 | ||
508 | length -= pos->u.match_size; | |
509 | pos = ((void *)((char *)(pos) + (pos)->u.match_size)); | |
510 | } while (length > 0); | |
511 | ||
512 | return 0; | |
513 | } | |
514 | ||
2722971c | 515 | #ifdef CONFIG_COMPAT |
255d0dc3 | 516 | int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) |
b386d9f5 | 517 | { |
255d0dc3 | 518 | struct xt_af *xp = &xt[af]; |
b386d9f5 | 519 | |
255d0dc3 ED |
520 | if (!xp->compat_tab) { |
521 | if (!xp->number) | |
522 | return -EINVAL; | |
523 | xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number); | |
524 | if (!xp->compat_tab) | |
525 | return -ENOMEM; | |
526 | xp->cur = 0; | |
527 | } | |
b386d9f5 | 528 | |
255d0dc3 ED |
529 | if (xp->cur >= xp->number) |
530 | return -EINVAL; | |
b386d9f5 | 531 | |
255d0dc3 ED |
532 | if (xp->cur) |
533 | delta += xp->compat_tab[xp->cur - 1].delta; | |
534 | xp->compat_tab[xp->cur].offset = offset; | |
535 | xp->compat_tab[xp->cur].delta = delta; | |
536 | xp->cur++; | |
b386d9f5 PM |
537 | return 0; |
538 | } | |
539 | EXPORT_SYMBOL_GPL(xt_compat_add_offset); | |
540 | ||
76108cea | 541 | void xt_compat_flush_offsets(u_int8_t af) |
b386d9f5 | 542 | { |
255d0dc3 ED |
543 | if (xt[af].compat_tab) { |
544 | vfree(xt[af].compat_tab); | |
545 | xt[af].compat_tab = NULL; | |
546 | xt[af].number = 0; | |
5a6351ee | 547 | xt[af].cur = 0; |
b386d9f5 PM |
548 | } |
549 | } | |
550 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | |
551 | ||
3e5e524f | 552 | int xt_compat_calc_jump(u_int8_t af, unsigned int offset) |
b386d9f5 | 553 | { |
255d0dc3 ED |
554 | struct compat_delta *tmp = xt[af].compat_tab; |
555 | int mid, left = 0, right = xt[af].cur - 1; | |
556 | ||
557 | while (left <= right) { | |
558 | mid = (left + right) >> 1; | |
559 | if (offset > tmp[mid].offset) | |
560 | left = mid + 1; | |
561 | else if (offset < tmp[mid].offset) | |
562 | right = mid - 1; | |
563 | else | |
564 | return mid ? tmp[mid - 1].delta : 0; | |
565 | } | |
5a6351ee | 566 | return left ? tmp[left - 1].delta : 0; |
b386d9f5 PM |
567 | } |
568 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); | |
569 | ||
255d0dc3 ED |
570 | void xt_compat_init_offsets(u_int8_t af, unsigned int number) |
571 | { | |
572 | xt[af].number = number; | |
573 | xt[af].cur = 0; | |
574 | } | |
575 | EXPORT_SYMBOL(xt_compat_init_offsets); | |
576 | ||
5452e425 | 577 | int xt_compat_match_offset(const struct xt_match *match) |
2722971c | 578 | { |
9fa492cd PM |
579 | u_int16_t csize = match->compatsize ? : match->matchsize; |
580 | return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); | |
581 | } | |
582 | EXPORT_SYMBOL_GPL(xt_compat_match_offset); | |
583 | ||
0188346f FW |
584 | void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, |
585 | unsigned int *size) | |
9fa492cd | 586 | { |
5452e425 | 587 | const struct xt_match *match = m->u.kernel.match; |
9fa492cd PM |
588 | struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; |
589 | int pad, off = xt_compat_match_offset(match); | |
590 | u_int16_t msize = cm->u.user.match_size; | |
09d96860 | 591 | char name[sizeof(m->u.user.name)]; |
9fa492cd PM |
592 | |
593 | m = *dstptr; | |
594 | memcpy(m, cm, sizeof(*cm)); | |
595 | if (match->compat_from_user) | |
596 | match->compat_from_user(m->data, cm->data); | |
597 | else | |
598 | memcpy(m->data, cm->data, msize - sizeof(*cm)); | |
599 | pad = XT_ALIGN(match->matchsize) - match->matchsize; | |
600 | if (pad > 0) | |
601 | memset(m->data + match->matchsize, 0, pad); | |
602 | ||
603 | msize += off; | |
604 | m->u.user.match_size = msize; | |
09d96860 FW |
605 | strlcpy(name, match->name, sizeof(name)); |
606 | module_put(match->me); | |
607 | strncpy(m->u.user.name, name, sizeof(m->u.user.name)); | |
9fa492cd PM |
608 | |
609 | *size += off; | |
610 | *dstptr += msize; | |
611 | } | |
612 | EXPORT_SYMBOL_GPL(xt_compat_match_from_user); | |
613 | ||
739674fb JE |
614 | int xt_compat_match_to_user(const struct xt_entry_match *m, |
615 | void __user **dstptr, unsigned int *size) | |
9fa492cd | 616 | { |
5452e425 | 617 | const struct xt_match *match = m->u.kernel.match; |
9fa492cd PM |
618 | struct compat_xt_entry_match __user *cm = *dstptr; |
619 | int off = xt_compat_match_offset(match); | |
620 | u_int16_t msize = m->u.user.match_size - off; | |
621 | ||
622 | if (copy_to_user(cm, m, sizeof(*cm)) || | |
a18aa31b PM |
623 | put_user(msize, &cm->u.user.match_size) || |
624 | copy_to_user(cm->u.user.name, m->u.kernel.match->name, | |
625 | strlen(m->u.kernel.match->name) + 1)) | |
601e68e1 | 626 | return -EFAULT; |
9fa492cd PM |
627 | |
628 | if (match->compat_to_user) { | |
629 | if (match->compat_to_user((void __user *)cm->data, m->data)) | |
630 | return -EFAULT; | |
631 | } else { | |
632 | if (copy_to_user(cm->data, m->data, msize - sizeof(*cm))) | |
633 | return -EFAULT; | |
2722971c | 634 | } |
9fa492cd PM |
635 | |
636 | *size -= off; | |
637 | *dstptr += msize; | |
638 | return 0; | |
2722971c | 639 | } |
9fa492cd | 640 | EXPORT_SYMBOL_GPL(xt_compat_match_to_user); |
fc1221b3 | 641 | |
7ed2abdd FW |
642 | /* non-compat version may have padding after verdict */ |
643 | struct compat_xt_standard_target { | |
644 | struct compat_xt_entry_target t; | |
645 | compat_uint_t verdict; | |
646 | }; | |
647 | ||
ce683e5f | 648 | int xt_compat_check_entry_offsets(const void *base, const char *elems, |
fc1221b3 FW |
649 | unsigned int target_offset, |
650 | unsigned int next_offset) | |
651 | { | |
ce683e5f | 652 | long size_of_base_struct = elems - (const char *)base; |
fc1221b3 FW |
653 | const struct compat_xt_entry_target *t; |
654 | const char *e = base; | |
655 | ||
ce683e5f FW |
656 | if (target_offset < size_of_base_struct) |
657 | return -EINVAL; | |
658 | ||
fc1221b3 FW |
659 | if (target_offset + sizeof(*t) > next_offset) |
660 | return -EINVAL; | |
661 | ||
662 | t = (void *)(e + target_offset); | |
663 | if (t->u.target_size < sizeof(*t)) | |
664 | return -EINVAL; | |
665 | ||
666 | if (target_offset + t->u.target_size > next_offset) | |
667 | return -EINVAL; | |
668 | ||
7ed2abdd | 669 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && |
7b7eba0f | 670 | COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset) |
7ed2abdd FW |
671 | return -EINVAL; |
672 | ||
13631bfc FW |
673 | /* compat_xt_entry match has less strict aligment requirements, |
674 | * otherwise they are identical. In case of padding differences | |
675 | * we need to add compat version of xt_check_entry_match. | |
676 | */ | |
677 | BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match)); | |
678 | ||
679 | return xt_check_entry_match(elems, base + target_offset, | |
680 | __alignof__(struct compat_xt_entry_match)); | |
fc1221b3 FW |
681 | } |
682 | EXPORT_SYMBOL(xt_compat_check_entry_offsets); | |
9fa492cd | 683 | #endif /* CONFIG_COMPAT */ |
2722971c | 684 | |
7d35812c FW |
685 | /** |
686 | * xt_check_entry_offsets - validate arp/ip/ip6t_entry | |
687 | * | |
688 | * @base: pointer to arp/ip/ip6t_entry | |
ce683e5f | 689 | * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems |
7d35812c FW |
690 | * @target_offset: the arp/ip/ip6_t->target_offset |
691 | * @next_offset: the arp/ip/ip6_t->next_offset | |
692 | * | |
13631bfc FW |
693 | * validates that target_offset and next_offset are sane and that all |
694 | * match sizes (if any) align with the target offset. | |
7d35812c | 695 | * |
ce683e5f | 696 | * This function does not validate the targets or matches themselves, it |
13631bfc FW |
697 | * only tests that all the offsets and sizes are correct, that all |
698 | * match structures are aligned, and that the last structure ends where | |
699 | * the target structure begins. | |
700 | * | |
701 | * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. | |
ce683e5f | 702 | * |
7d35812c FW |
703 | * The arp/ip/ip6t_entry structure @base must have passed following tests: |
704 | * - it must point to a valid memory location | |
705 | * - base to base + next_offset must be accessible, i.e. not exceed allocated | |
706 | * length. | |
707 | * | |
13631bfc FW |
708 | * A well-formed entry looks like this: |
709 | * | |
710 | * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry | |
711 | * e->elems[]-----' | | | |
712 | * matchsize | | | |
713 | * matchsize | | | |
714 | * | | | |
715 | * target_offset---------------------------------' | | |
716 | * next_offset---------------------------------------------------' | |
717 | * | |
718 | * elems[]: flexible array member at end of ip(6)/arpt_entry struct. | |
719 | * This is where matches (if any) and the target reside. | |
720 | * target_offset: beginning of target. | |
721 | * next_offset: start of the next rule; also: size of this rule. | |
722 | * Since targets have a minimum size, target_offset + minlen <= next_offset. | |
723 | * | |
724 | * Every match stores its size, sum of sizes must not exceed target_offset. | |
725 | * | |
7d35812c FW |
726 | * Return: 0 on success, negative errno on failure. |
727 | */ | |
728 | int xt_check_entry_offsets(const void *base, | |
ce683e5f | 729 | const char *elems, |
7d35812c FW |
730 | unsigned int target_offset, |
731 | unsigned int next_offset) | |
732 | { | |
ce683e5f | 733 | long size_of_base_struct = elems - (const char *)base; |
7d35812c FW |
734 | const struct xt_entry_target *t; |
735 | const char *e = base; | |
736 | ||
ce683e5f FW |
737 | /* target start is within the ip/ip6/arpt_entry struct */ |
738 | if (target_offset < size_of_base_struct) | |
739 | return -EINVAL; | |
740 | ||
7d35812c FW |
741 | if (target_offset + sizeof(*t) > next_offset) |
742 | return -EINVAL; | |
743 | ||
744 | t = (void *)(e + target_offset); | |
a08e4e19 FW |
745 | if (t->u.target_size < sizeof(*t)) |
746 | return -EINVAL; | |
747 | ||
7d35812c FW |
748 | if (target_offset + t->u.target_size > next_offset) |
749 | return -EINVAL; | |
750 | ||
7ed2abdd | 751 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && |
7b7eba0f | 752 | XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset) |
7ed2abdd FW |
753 | return -EINVAL; |
754 | ||
13631bfc FW |
755 | return xt_check_entry_match(elems, base + target_offset, |
756 | __alignof__(struct xt_entry_match)); | |
7d35812c FW |
757 | } |
758 | EXPORT_SYMBOL(xt_check_entry_offsets); | |
759 | ||
f4dc7771 FW |
760 | /** |
761 | * xt_alloc_entry_offsets - allocate array to store rule head offsets | |
762 | * | |
763 | * @size: number of entries | |
764 | * | |
765 | * Return: NULL or kmalloc'd or vmalloc'd array | |
766 | */ | |
767 | unsigned int *xt_alloc_entry_offsets(unsigned int size) | |
768 | { | |
769 | unsigned int *off; | |
770 | ||
771 | off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN); | |
772 | ||
773 | if (off) | |
774 | return off; | |
775 | ||
776 | if (size < (SIZE_MAX / sizeof(unsigned int))) | |
777 | off = vmalloc(size * sizeof(unsigned int)); | |
778 | ||
779 | return off; | |
780 | } | |
781 | EXPORT_SYMBOL(xt_alloc_entry_offsets); | |
782 | ||
783 | /** | |
784 | * xt_find_jump_offset - check if target is a valid jump offset | |
785 | * | |
786 | * @offsets: array containing all valid rule start offsets of a rule blob | |
787 | * @target: the jump target to search for | |
788 | * @size: entries in @offset | |
789 | */ | |
790 | bool xt_find_jump_offset(const unsigned int *offsets, | |
791 | unsigned int target, unsigned int size) | |
792 | { | |
793 | int m, low = 0, hi = size; | |
794 | ||
795 | while (hi > low) { | |
796 | m = (low + hi) / 2u; | |
797 | ||
798 | if (offsets[m] > target) | |
799 | hi = m; | |
800 | else if (offsets[m] < target) | |
801 | low = m + 1; | |
802 | else | |
803 | return true; | |
804 | } | |
805 | ||
806 | return false; | |
807 | } | |
808 | EXPORT_SYMBOL(xt_find_jump_offset); | |
809 | ||
/**
 * xt_check_target - validate a target before it is hooked into a ruleset
 * @par:       target-check parameters (family, table name, hook mask, ...)
 * @size:      size of the target payload as claimed by userspace
 * @proto:     protocol number the enclosing rule matches on
 * @inv_proto: true if the rule's protocol match is inverted
 *
 * Checks the declared constraints of the target (payload size, allowed
 * table, allowed hooks, required protocol) and finally runs the target's
 * own ->checkentry callback, if present.
 *
 * Return: 0 on success, negative errno on any validation failure.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	/* Userspace-supplied payload size must equal the aligned kernel size. */
	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	/* Some targets restrict themselves to one specific table. */
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	/* Reject use from any hook the target did not declare support for. */
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	/* Protocol-restricted targets need an exact, non-inverted match. */
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	/* Let the target perform its own, target-specific validation. */
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
859 | ||
/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		/* header must fit and leave room for at least one counter */
		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		/* strlcpy guarantees NUL-termination of the copied name */
		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		/* name came raw from userspace; force NUL-termination */
		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	/* compute expected payload in 64bit to avoid multiply overflow,
	 * then require that userspace's length matches it exactly */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
933 | ||
#ifdef CONFIG_COMPAT
/* Size delta between the native and the compat (32bit) representation
 * of a target's payload, both rounded to their respective alignment. */
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

/* Translate a compat target entry (at @t, really compat layout) into the
 * native layout at *@dstptr, advancing *@dstptr and growing *@size by the
 * compat/native size delta. */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	/* Zero the alignment padding so no stale kernel memory can leak
	 * back to userspace on a later read of the ruleset. */
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	/* Snapshot the name before module_put(); the target (and its name
	 * string) may go away once the module reference is dropped. */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

/* Translate a native target entry at @t into compat layout at the
 * userspace pointer *@dstptr, shrinking *@size by the size delta.
 * Returns 0 on success, -EFAULT on any failed userspace copy. */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
1000 | ||
/* Allocate an xt_table_info header plus @size bytes of rule blob.
 * Small blobs come from kmalloc, large ones fall back to vmalloc;
 * the matching xt_free_table_info() uses kvfree for both cases. */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	/* sz wrapped around: userspace asked for an absurd size */
	if (sz < sizeof(*info))
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	/* Quiet, non-retrying kmalloc attempt for modest sizes only;
	 * anything larger (or a kmalloc failure) goes to vmalloc. */
	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!info) {
		info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN |
				     __GFP_NORETRY | __GFP_HIGHMEM,
				 PAGE_KERNEL);
		if (!info)
			return NULL;
	}
	/* Only the header is zeroed; the rule blob is filled by the caller. */
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
1027 | ||
/* Free a table blob allocated by xt_alloc_table_info(), including any
 * per-cpu jump stacks attached by xt_jumpstack_alloc(). kvfree() handles
 * both the kmalloc and the vmalloc origin of each pointer. */
void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
1041 | ||
eb1a6bdc | 1042 | /* Find table by name, grabs mutex & ref. Returns NULL on error. */ |
76108cea JE |
1043 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, |
1044 | const char *name) | |
2e4e6a17 | 1045 | { |
b9e69e12 | 1046 | struct xt_table *t, *found = NULL; |
2e4e6a17 | 1047 | |
7926dbfa | 1048 | mutex_lock(&xt[af].mutex); |
8d870052 | 1049 | list_for_each_entry(t, &net->xt.tables[af], list) |
2e4e6a17 HW |
1050 | if (strcmp(t->name, name) == 0 && try_module_get(t->me)) |
1051 | return t; | |
b9e69e12 FW |
1052 | |
1053 | if (net == &init_net) | |
1054 | goto out; | |
1055 | ||
1056 | /* Table doesn't exist in this netns, re-try init */ | |
1057 | list_for_each_entry(t, &init_net.xt.tables[af], list) { | |
1058 | if (strcmp(t->name, name)) | |
1059 | continue; | |
1060 | if (!try_module_get(t->me)) | |
1061 | return NULL; | |
1062 | ||
1063 | mutex_unlock(&xt[af].mutex); | |
1064 | if (t->table_init(net) != 0) { | |
1065 | module_put(t->me); | |
1066 | return NULL; | |
1067 | } | |
1068 | ||
1069 | found = t; | |
1070 | ||
1071 | mutex_lock(&xt[af].mutex); | |
1072 | break; | |
1073 | } | |
1074 | ||
1075 | if (!found) | |
1076 | goto out; | |
1077 | ||
1078 | /* and once again: */ | |
1079 | list_for_each_entry(t, &net->xt.tables[af], list) | |
1080 | if (strcmp(t->name, name) == 0) | |
1081 | return t; | |
1082 | ||
1083 | module_put(found->me); | |
1084 | out: | |
9e19bb6d | 1085 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
1086 | return NULL; |
1087 | } | |
1088 | EXPORT_SYMBOL_GPL(xt_find_table_lock); | |
1089 | ||
/* Release the per-family mutex taken (and left held) by
 * xt_find_table_lock(). */
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
1095 | ||
#ifdef CONFIG_COMPAT
/* Serialize compat (32bit-on-64bit) offset bookkeeping for family @af. */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
2e4e6a17 | 1109 | |
/* Per-cpu sequence counter; table traversal bumps it so that counter
 * readout can detect and wait out concurrent rule evaluation. */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

/* Static key enabling the reentrancy handling for -j TEE (see the
 * two-callchain jumpstack comment in xt_jumpstack_alloc()); presumably
 * flipped on by the TEE target module — registration site not visible here. */
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
1115 | ||
f3c5c1bf JE |
1116 | static int xt_jumpstack_alloc(struct xt_table_info *i) |
1117 | { | |
1118 | unsigned int size; | |
1119 | int cpu; | |
1120 | ||
f3c5c1bf JE |
1121 | size = sizeof(void **) * nr_cpu_ids; |
1122 | if (size > PAGE_SIZE) | |
3dbd4439 | 1123 | i->jumpstack = vzalloc(size); |
f3c5c1bf | 1124 | else |
3dbd4439 | 1125 | i->jumpstack = kzalloc(size, GFP_KERNEL); |
f3c5c1bf JE |
1126 | if (i->jumpstack == NULL) |
1127 | return -ENOMEM; | |
f3c5c1bf | 1128 | |
98d1bd80 FW |
1129 | /* ruleset without jumps -- no stack needed */ |
1130 | if (i->stacksize == 0) | |
1131 | return 0; | |
1132 | ||
7814b6ec FW |
1133 | /* Jumpstack needs to be able to record two full callchains, one |
1134 | * from the first rule set traversal, plus one table reentrancy | |
1135 | * via -j TEE without clobbering the callchain that brought us to | |
1136 | * TEE target. | |
1137 | * | |
1138 | * This is done by allocating two jumpstacks per cpu, on reentry | |
1139 | * the upper half of the stack is used. | |
1140 | * | |
1141 | * see the jumpstack setup in ipt_do_table() for more details. | |
1142 | */ | |
1143 | size = sizeof(void *) * i->stacksize * 2u; | |
f3c5c1bf JE |
1144 | for_each_possible_cpu(cpu) { |
1145 | if (size > PAGE_SIZE) | |
1146 | i->jumpstack[cpu] = vmalloc_node(size, | |
1147 | cpu_to_node(cpu)); | |
1148 | else | |
1149 | i->jumpstack[cpu] = kmalloc_node(size, | |
1150 | GFP_KERNEL, cpu_to_node(cpu)); | |
1151 | if (i->jumpstack[cpu] == NULL) | |
1152 | /* | |
1153 | * Freeing will be done later on by the callers. The | |
1154 | * chain is: xt_replace_table -> __do_replace -> | |
1155 | * do_replace -> xt_free_table_info. | |
1156 | */ | |
1157 | return -ENOMEM; | |
1158 | } | |
1159 | ||
1160 | return 0; | |
1161 | } | |
942e4a2b | 1162 | |
/**
 * xt_replace_table - atomically swap a table's rule blob
 * @table:        table whose ruleset is being replaced
 * @num_counters: counter count userspace believes the old blob has
 * @newinfo:      fully built replacement blob
 * @error:        out-parameter for the errno on failure
 *
 * Returns the old private blob (caller copies counters out of it and
 * frees it) or NULL with *@error set.  Statement order below is load
 * bearing: the smp_wmb()/pointer-store pair publishes the new blob to
 * lockless readers in the packet path.
 */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	/* Record the (old) entry count of the replaced ruleset for audit. */
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
1225 | ||
/* Register a new table for @net, built from the template @input_table.
 * @bootstrap is a dummy private blob (simplifies xt_replace_table());
 * @newinfo is the real initial ruleset.  Returns the registered (and
 * duplicated) table or ERR_PTR(). */
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	/* On failure xt_replace_table() stored the errno in ret. */
	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
1274 | ||
/* Unlink @table from its netns list and free the xt_table wrapper.
 * Returns the private rule blob; the caller owns it and must free it
 * (typically via xt_free_table_info()). */
void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
1288 | ||
#ifdef CONFIG_PROC_FS
/* seq_file private data for the per-netns /proc table-name listing. */
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};

static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	/* Held across the whole dump; released in ->stop. */
	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	/* Skip entries with an empty name. */
	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		/* family was stashed as the proc entry's data pointer */
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

/* Advance the traversal one element; crosses from the NFPROTO_UNSPEC
 * list to the family-specific list, swapping mutexes as it goes. */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]  = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* UNSPEC list exhausted: switch lock and list. */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

/* Restart the traversal and step forward to position *pos. */
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

/* Drop whichever mutex the traversal currently holds, if any. */
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		/* head sentinel: nothing to print for this position */
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

/* Suffixes appended to the family prefix for the /proc file names. */
#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"

#endif /* CONFIG_PROC_FS */
1553 | ||
2b95efe7 | 1554 | /** |
b9e69e12 | 1555 | * xt_hook_ops_alloc - set up hooks for a new table |
2b95efe7 JE |
1556 | * @table: table with metadata needed to set up hooks |
1557 | * @fn: Hook function | |
1558 | * | |
b9e69e12 FW |
1559 | * This function will create the nf_hook_ops that the x_table needs |
1560 | * to hand to xt_hook_link_net(). | |
2b95efe7 | 1561 | */ |
b9e69e12 FW |
1562 | struct nf_hook_ops * |
1563 | xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn) | |
2b95efe7 JE |
1564 | { |
1565 | unsigned int hook_mask = table->valid_hooks; | |
1566 | uint8_t i, num_hooks = hweight32(hook_mask); | |
1567 | uint8_t hooknum; | |
1568 | struct nf_hook_ops *ops; | |
2b95efe7 | 1569 | |
a6d0bae1 XL |
1570 | if (!num_hooks) |
1571 | return ERR_PTR(-EINVAL); | |
1572 | ||
1ecc281e | 1573 | ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL); |
2b95efe7 JE |
1574 | if (ops == NULL) |
1575 | return ERR_PTR(-ENOMEM); | |
1576 | ||
1577 | for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; | |
1578 | hook_mask >>= 1, ++hooknum) { | |
1579 | if (!(hook_mask & 1)) | |
1580 | continue; | |
1581 | ops[i].hook = fn; | |
2b95efe7 JE |
1582 | ops[i].pf = table->af; |
1583 | ops[i].hooknum = hooknum; | |
1584 | ops[i].priority = table->priority; | |
1585 | ++i; | |
1586 | } | |
1587 | ||
2b95efe7 JE |
1588 | return ops; |
1589 | } | |
b9e69e12 | 1590 | EXPORT_SYMBOL_GPL(xt_hook_ops_alloc); |
2b95efe7 | 1591 | |
/* Create the per-family /proc listing files (tables, matches, targets)
 * for @net.  Entries are owned by the netns root user where mappable.
 * Returns 0 on success, -EINVAL for an out-of-range family, -1 if any
 * proc entry could not be created (earlier entries are rolled back). */
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	/* e.g. "ip_tables_names" / "ip6_tables_names" / "arp_tables_names" */
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
	/* Roll back in reverse creation order. */
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
1654 | ||
76108cea | 1655 | void xt_proto_fini(struct net *net, u_int8_t af) |
2e4e6a17 HW |
1656 | { |
1657 | #ifdef CONFIG_PROC_FS | |
1658 | char buf[XT_FUNCTION_MAXNAMELEN]; | |
1659 | ||
ce18afe5 | 1660 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1661 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
ece31ffd | 1662 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1663 | |
ce18afe5 | 1664 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1665 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
ece31ffd | 1666 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1667 | |
ce18afe5 | 1668 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1669 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
ece31ffd | 1670 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1671 | #endif /*CONFIG_PROC_FS*/ |
1672 | } | |
1673 | EXPORT_SYMBOL_GPL(xt_proto_fini); | |
1674 | ||
f28e15ba FW |
1675 | /** |
1676 | * xt_percpu_counter_alloc - allocate x_tables rule counter | |
1677 | * | |
ae0ac0ed | 1678 | * @state: pointer to xt_percpu allocation state |
f28e15ba FW |
1679 | * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct |
1680 | * | |
1681 | * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then | |
1682 | * contain the address of the real (percpu) counter. | |
1683 | * | |
1684 | * Rule evaluation needs to use xt_get_this_cpu_counter() helper | |
1685 | * to fetch the real percpu counter. | |
1686 | * | |
ae0ac0ed FW |
1687 | * To speed up allocation and improve data locality, a 4kb block is |
1688 | * allocated. | |
1689 | * | |
1690 | * xt_percpu_counter_alloc_state contains the base address of the | |
1691 | * allocated page and the current sub-offset. | |
1692 | * | |
f28e15ba FW |
1693 | * returns false on error. |
1694 | */ | |
ae0ac0ed FW |
1695 | bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, |
1696 | struct xt_counters *counter) | |
f28e15ba | 1697 | { |
ae0ac0ed | 1698 | BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2)); |
f28e15ba FW |
1699 | |
1700 | if (nr_cpu_ids <= 1) | |
1701 | return true; | |
1702 | ||
ae0ac0ed FW |
1703 | if (!state->mem) { |
1704 | state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE, | |
1705 | XT_PCPU_BLOCK_SIZE); | |
1706 | if (!state->mem) | |
1707 | return false; | |
1708 | } | |
1709 | counter->pcnt = (__force unsigned long)(state->mem + state->off); | |
1710 | state->off += sizeof(*counter); | |
1711 | if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) { | |
1712 | state->mem = NULL; | |
1713 | state->off = 0; | |
1714 | } | |
f28e15ba FW |
1715 | return true; |
1716 | } | |
1717 | EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc); | |
1718 | ||
/* Release the percpu block backing a rule counter.
 *
 * Blocks are allocated XT_PCPU_BLOCK_SIZE-aligned, so only the counter
 * placed at the very start of a block has a block-aligned pcnt address;
 * that one owns the allocation and frees it for all counters sharing
 * the block.  On UP (nr_cpu_ids <= 1) nothing was allocated.
 */
void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
1727 | ||
8d870052 AD |
1728 | static int __net_init xt_net_init(struct net *net) |
1729 | { | |
1730 | int i; | |
1731 | ||
7e9c6eeb | 1732 | for (i = 0; i < NFPROTO_NUMPROTO; i++) |
8d870052 AD |
1733 | INIT_LIST_HEAD(&net->xt.tables[i]); |
1734 | return 0; | |
1735 | } | |
1736 | ||
/* No .exit needed: tables unregister themselves before a netns dies. */
static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};
2e4e6a17 HW |
1740 | |
1741 | static int __init xt_init(void) | |
1742 | { | |
942e4a2b SH |
1743 | unsigned int i; |
1744 | int rv; | |
1745 | ||
1746 | for_each_possible_cpu(i) { | |
7f5c6d4f | 1747 | seqcount_init(&per_cpu(xt_recseq, i)); |
942e4a2b | 1748 | } |
2e4e6a17 | 1749 | |
7e9c6eeb | 1750 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); |
2e4e6a17 HW |
1751 | if (!xt) |
1752 | return -ENOMEM; | |
1753 | ||
7e9c6eeb | 1754 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { |
9e19bb6d | 1755 | mutex_init(&xt[i].mutex); |
2722971c DM |
1756 | #ifdef CONFIG_COMPAT |
1757 | mutex_init(&xt[i].compat_mutex); | |
255d0dc3 | 1758 | xt[i].compat_tab = NULL; |
2722971c | 1759 | #endif |
2e4e6a17 HW |
1760 | INIT_LIST_HEAD(&xt[i].target); |
1761 | INIT_LIST_HEAD(&xt[i].match); | |
2e4e6a17 | 1762 | } |
8d870052 AD |
1763 | rv = register_pernet_subsys(&xt_net_ops); |
1764 | if (rv < 0) | |
1765 | kfree(xt); | |
1766 | return rv; | |
2e4e6a17 HW |
1767 | } |
1768 | ||
/* Module exit: drop the pernet subsystem first so no namespace can still
 * reach the xt array, then free it.
 */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}
1774 | ||
/* Register the module entry/exit points with the kernel module loader. */
module_init(xt_init);
module_exit(xt_fini);
1777 |