]>
Commit | Line | Data |
---|---|---|
2e4e6a17 HW |
1 | /* |
2 | * x_tables core - Backend for {ip,ip6,arp}_tables | |
3 | * | |
4 | * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> | |
f229f6ce | 5 | * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net> |
2e4e6a17 HW |
6 | * |
7 | * Based on existing ip_tables code which is | |
8 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | |
9 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or modify | |
12 | * it under the terms of the GNU General Public License version 2 as | |
13 | * published by the Free Software Foundation. | |
14 | * | |
15 | */ | |
be91fd5e | 16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2e4e6a17 | 17 | #include <linux/kernel.h> |
3a9a231d | 18 | #include <linux/module.h> |
2e4e6a17 HW |
19 | #include <linux/socket.h> |
20 | #include <linux/net.h> | |
21 | #include <linux/proc_fs.h> | |
22 | #include <linux/seq_file.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/vmalloc.h> | |
9e19bb6d | 25 | #include <linux/mutex.h> |
d7fe0f24 | 26 | #include <linux/mm.h> |
5a0e3ad6 | 27 | #include <linux/slab.h> |
fbabf31e | 28 | #include <linux/audit.h> |
f13f2aee | 29 | #include <linux/user_namespace.h> |
457c4cbc | 30 | #include <net/net_namespace.h> |
2e4e6a17 HW |
31 | |
32 | #include <linux/netfilter/x_tables.h> | |
33 | #include <linux/netfilter_arp.h> | |
e3eaa991 JE |
34 | #include <linux/netfilter_ipv4/ip_tables.h> |
35 | #include <linux/netfilter_ipv6/ip6_tables.h> | |
36 | #include <linux/netfilter_arp/arp_tables.h> | |
9e19bb6d | 37 | |
2e4e6a17 HW |
38 | MODULE_LICENSE("GPL"); |
39 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |
043ef46c | 40 | MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); |
2e4e6a17 HW |
41 | |
42 | #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) | |
ae0ac0ed | 43 | #define XT_PCPU_BLOCK_SIZE 4096 |
2e4e6a17 | 44 | |
/* One entry of the kernel-offset -> cumulative-size-delta table used to
 * translate rule offsets between the native and 32-bit compat layouts. */
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};
49 | ||
/* Per-protocol-family registry: the lists of registered match and target
 * extensions, plus (under CONFIG_COMPAT) the offset-translation state. */
struct xt_af {
	struct mutex mutex; /* protects the match and target lists */
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

/* One xt_af per NFPROTO_*; allocated at module init. */
static struct xt_af *xt;
63 | ||
/* Module-name prefixes per family, used to autoload "<prefix>t_<name>"
 * extension modules via request_module(). */
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4] = "ip",
	[NFPROTO_ARP] = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6] = "ip6",
};
71 | ||
2e4e6a17 | 72 | /* Registration hooks for targets. */ |
7926dbfa | 73 | int xt_register_target(struct xt_target *target) |
2e4e6a17 | 74 | { |
76108cea | 75 | u_int8_t af = target->family; |
2e4e6a17 | 76 | |
7926dbfa | 77 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 78 | list_add(&target->list, &xt[af].target); |
9e19bb6d | 79 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 80 | return 0; |
2e4e6a17 HW |
81 | } |
82 | EXPORT_SYMBOL(xt_register_target); | |
83 | ||
84 | void | |
a45049c5 | 85 | xt_unregister_target(struct xt_target *target) |
2e4e6a17 | 86 | { |
76108cea | 87 | u_int8_t af = target->family; |
a45049c5 | 88 | |
9e19bb6d | 89 | mutex_lock(&xt[af].mutex); |
df0933dc | 90 | list_del(&target->list); |
9e19bb6d | 91 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
92 | } |
93 | EXPORT_SYMBOL(xt_unregister_target); | |
94 | ||
52d9c42e PM |
95 | int |
96 | xt_register_targets(struct xt_target *target, unsigned int n) | |
97 | { | |
98 | unsigned int i; | |
99 | int err = 0; | |
100 | ||
101 | for (i = 0; i < n; i++) { | |
102 | err = xt_register_target(&target[i]); | |
103 | if (err) | |
104 | goto err; | |
105 | } | |
106 | return err; | |
107 | ||
108 | err: | |
109 | if (i > 0) | |
110 | xt_unregister_targets(target, i); | |
111 | return err; | |
112 | } | |
113 | EXPORT_SYMBOL(xt_register_targets); | |
114 | ||
115 | void | |
116 | xt_unregister_targets(struct xt_target *target, unsigned int n) | |
117 | { | |
f68c5301 CG |
118 | while (n-- > 0) |
119 | xt_unregister_target(&target[n]); | |
52d9c42e PM |
120 | } |
121 | EXPORT_SYMBOL(xt_unregister_targets); | |
122 | ||
7926dbfa | 123 | int xt_register_match(struct xt_match *match) |
2e4e6a17 | 124 | { |
76108cea | 125 | u_int8_t af = match->family; |
2e4e6a17 | 126 | |
7926dbfa | 127 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 128 | list_add(&match->list, &xt[af].match); |
9e19bb6d | 129 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 130 | return 0; |
2e4e6a17 HW |
131 | } |
132 | EXPORT_SYMBOL(xt_register_match); | |
133 | ||
134 | void | |
a45049c5 | 135 | xt_unregister_match(struct xt_match *match) |
2e4e6a17 | 136 | { |
76108cea | 137 | u_int8_t af = match->family; |
a45049c5 | 138 | |
9e19bb6d | 139 | mutex_lock(&xt[af].mutex); |
df0933dc | 140 | list_del(&match->list); |
9e19bb6d | 141 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
142 | } |
143 | EXPORT_SYMBOL(xt_unregister_match); | |
144 | ||
52d9c42e PM |
145 | int |
146 | xt_register_matches(struct xt_match *match, unsigned int n) | |
147 | { | |
148 | unsigned int i; | |
149 | int err = 0; | |
150 | ||
151 | for (i = 0; i < n; i++) { | |
152 | err = xt_register_match(&match[i]); | |
153 | if (err) | |
154 | goto err; | |
155 | } | |
156 | return err; | |
157 | ||
158 | err: | |
159 | if (i > 0) | |
160 | xt_unregister_matches(match, i); | |
161 | return err; | |
162 | } | |
163 | EXPORT_SYMBOL(xt_register_matches); | |
164 | ||
165 | void | |
166 | xt_unregister_matches(struct xt_match *match, unsigned int n) | |
167 | { | |
f68c5301 CG |
168 | while (n-- > 0) |
169 | xt_unregister_match(&match[n]); | |
52d9c42e PM |
170 | } |
171 | EXPORT_SYMBOL(xt_unregister_matches); | |
172 | ||
2e4e6a17 HW |
173 | |
174 | /* | |
175 | * These are weird, but module loading must not be done with mutex | |
176 | * held (since they will register), and we have to have a single | |
adb00ae2 | 177 | * function to use. |
2e4e6a17 HW |
178 | */ |
179 | ||
/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				/* pin the owning module so the match cannot
				 * vanish while the caller holds the pointer */
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
207 | ||
fd0ec0e6 JE |
208 | struct xt_match * |
209 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) | |
210 | { | |
211 | struct xt_match *match; | |
212 | ||
adb00ae2 SH |
213 | match = xt_find_match(nfproto, name, revision); |
214 | if (IS_ERR(match)) { | |
215 | request_module("%st_%s", xt_prefix[nfproto], name); | |
216 | match = xt_find_match(nfproto, name, revision); | |
217 | } | |
218 | ||
219 | return match; | |
fd0ec0e6 JE |
220 | } |
221 | EXPORT_SYMBOL_GPL(xt_request_find_match); | |
222 | ||
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				/* pin the owning module so the target cannot
				 * vanish while the caller holds the pointer */
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);
250 | ||
76108cea | 251 | struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
252 | { |
253 | struct xt_target *target; | |
254 | ||
adb00ae2 SH |
255 | target = xt_find_target(af, name, revision); |
256 | if (IS_ERR(target)) { | |
257 | request_module("%st_%s", xt_prefix[af], name); | |
258 | target = xt_find_target(af, name, revision); | |
259 | } | |
260 | ||
261 | return target; | |
2e4e6a17 HW |
262 | } |
263 | EXPORT_SYMBOL_GPL(xt_request_find_target); | |
264 | ||
f32815d2 WB |
265 | |
266 | static int xt_obj_to_user(u16 __user *psize, u16 size, | |
267 | void __user *pname, const char *name, | |
268 | u8 __user *prev, u8 rev) | |
269 | { | |
270 | if (put_user(size, psize)) | |
271 | return -EFAULT; | |
272 | if (copy_to_user(pname, name, strlen(name) + 1)) | |
273 | return -EFAULT; | |
274 | if (put_user(rev, prev)) | |
275 | return -EFAULT; | |
276 | ||
277 | return 0; | |
278 | } | |
279 | ||
/* Copy kernel object K's header (size/name/revision) to user object U.
 * C_SIZE, when non-zero, overrides the recorded size (compat paths). */
#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
		       U->u.user.name, K->u.kernel.TYPE->name,		\
		       &U->u.user.revision, K->u.kernel.TYPE->revision)
284 | ||
285 | int xt_data_to_user(void __user *dst, const void *src, | |
286 | int usersize, int size) | |
287 | { | |
288 | usersize = usersize ? : size; | |
289 | if (copy_to_user(dst, src, usersize)) | |
290 | return -EFAULT; | |
291 | if (usersize != size && clear_user(dst + usersize, size - usersize)) | |
292 | return -EFAULT; | |
293 | ||
294 | return 0; | |
295 | } | |
296 | EXPORT_SYMBOL_GPL(xt_data_to_user); | |
297 | ||
/* Copy kernel object K's payload to user object U, exposing only the
 * extension's declared usersize; C_SIZE overrides the total payload size
 * when non-zero (compat paths). */
#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE ? : K->u.kernel.TYPE->TYPE##size)
302 | ||
/* Copy a match's header and payload to userspace.
 * NOTE: the '||' collapses the helpers' -EFAULT into 1, so callers must
 * only test for non-zero, not for a specific errno. */
int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{
	return XT_OBJ_TO_USER(u, m, match, 0) ||
	       XT_DATA_TO_USER(u, m, match, 0);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);
310 | ||
/* Copy a target's header and payload to userspace.
 * NOTE: the '||' collapses the helpers' -EFAULT into 1, so callers must
 * only test for non-zero, not for a specific errno. */
int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{
	return XT_OBJ_TO_USER(u, t, target, 0) ||
	       XT_DATA_TO_USER(u, t, target, 0);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);
318 | ||
76108cea | 319 | static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 320 | { |
5452e425 | 321 | const struct xt_match *m; |
2e4e6a17 HW |
322 | int have_rev = 0; |
323 | ||
324 | list_for_each_entry(m, &xt[af].match, list) { | |
325 | if (strcmp(m->name, name) == 0) { | |
326 | if (m->revision > *bestp) | |
327 | *bestp = m->revision; | |
328 | if (m->revision == revision) | |
329 | have_rev = 1; | |
330 | } | |
331 | } | |
656caff2 PM |
332 | |
333 | if (af != NFPROTO_UNSPEC && !have_rev) | |
334 | return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
335 | ||
2e4e6a17 HW |
336 | return have_rev; |
337 | } | |
338 | ||
76108cea | 339 | static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 340 | { |
5452e425 | 341 | const struct xt_target *t; |
2e4e6a17 HW |
342 | int have_rev = 0; |
343 | ||
344 | list_for_each_entry(t, &xt[af].target, list) { | |
345 | if (strcmp(t->name, name) == 0) { | |
346 | if (t->revision > *bestp) | |
347 | *bestp = t->revision; | |
348 | if (t->revision == revision) | |
349 | have_rev = 1; | |
350 | } | |
351 | } | |
656caff2 PM |
352 | |
353 | if (af != NFPROTO_UNSPEC && !have_rev) | |
354 | return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
355 | ||
2e4e6a17 HW |
356 | return have_rev; |
357 | } | |
358 | ||
359 | /* Returns true or false (if no such extension at all) */ | |
76108cea | 360 | int xt_find_revision(u8 af, const char *name, u8 revision, int target, |
2e4e6a17 HW |
361 | int *err) |
362 | { | |
363 | int have_rev, best = -1; | |
364 | ||
7926dbfa | 365 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
366 | if (target == 1) |
367 | have_rev = target_revfn(af, name, revision, &best); | |
368 | else | |
369 | have_rev = match_revfn(af, name, revision, &best); | |
9e19bb6d | 370 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
371 | |
372 | /* Nothing at all? Return 0 to try loading module. */ | |
373 | if (best == -1) { | |
374 | *err = -ENOENT; | |
375 | return 0; | |
376 | } | |
377 | ||
378 | *err = best; | |
379 | if (!have_rev) | |
380 | *err = -EPROTONOSUPPORT; | |
381 | return 1; | |
382 | } | |
383 | EXPORT_SYMBOL_GPL(xt_find_revision); | |
384 | ||
5b76c494 JE |
385 | static char * |
386 | textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) | |
45185364 | 387 | { |
5b76c494 | 388 | static const char *const inetbr_names[] = { |
45185364 JE |
389 | "PREROUTING", "INPUT", "FORWARD", |
390 | "OUTPUT", "POSTROUTING", "BROUTING", | |
391 | }; | |
5b76c494 JE |
392 | static const char *const arp_names[] = { |
393 | "INPUT", "FORWARD", "OUTPUT", | |
394 | }; | |
395 | const char *const *names; | |
396 | unsigned int i, max; | |
45185364 JE |
397 | char *p = buf; |
398 | bool np = false; | |
399 | int res; | |
400 | ||
5b76c494 JE |
401 | names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names; |
402 | max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) : | |
403 | ARRAY_SIZE(inetbr_names); | |
45185364 | 404 | *p = '\0'; |
5b76c494 | 405 | for (i = 0; i < max; ++i) { |
45185364 JE |
406 | if (!(mask & (1 << i))) |
407 | continue; | |
408 | res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); | |
409 | if (res > 0) { | |
410 | size -= res; | |
411 | p += res; | |
412 | } | |
413 | np = true; | |
414 | } | |
415 | ||
416 | return buf; | |
417 | } | |
418 | ||
/**
 * xt_check_match - centralized sanity checks for a match at ruleset load
 * @par:       check parameters (family, table, hook mask, match, ...)
 * @size:      userspace-declared size of the match payload
 * @proto:     protocol the rule restricts to (0 = any)
 * @inv_proto: true if the rule's protocol test is inverted
 *
 * Validates payload size, table name, hook usage and protocol restriction
 * against the xt_match's declared constraints, then runs the extension's
 * own ->checkentry() hook. Return: 0 on success, negative errno otherwise.
 */
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	/* match restricted to one table? reject use from any other */
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	/* every hook the rule is wired to must be allowed by the match */
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
473 | ||
13631bfc FW |
/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rules target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		/* each match header must start properly aligned */
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		/* remaining room must hold at least a match header */
		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		/* userspace-declared size must cover the header itself... */
		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		/* ...and must not run past the target */
		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
514 | ||
2722971c | 515 | #ifdef CONFIG_COMPAT |
255d0dc3 | 516 | int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) |
b386d9f5 | 517 | { |
255d0dc3 | 518 | struct xt_af *xp = &xt[af]; |
b386d9f5 | 519 | |
255d0dc3 ED |
520 | if (!xp->compat_tab) { |
521 | if (!xp->number) | |
522 | return -EINVAL; | |
523 | xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number); | |
524 | if (!xp->compat_tab) | |
525 | return -ENOMEM; | |
526 | xp->cur = 0; | |
527 | } | |
b386d9f5 | 528 | |
255d0dc3 ED |
529 | if (xp->cur >= xp->number) |
530 | return -EINVAL; | |
b386d9f5 | 531 | |
255d0dc3 ED |
532 | if (xp->cur) |
533 | delta += xp->compat_tab[xp->cur - 1].delta; | |
534 | xp->compat_tab[xp->cur].offset = offset; | |
535 | xp->compat_tab[xp->cur].delta = delta; | |
536 | xp->cur++; | |
b386d9f5 PM |
537 | return 0; |
538 | } | |
539 | EXPORT_SYMBOL_GPL(xt_compat_add_offset); | |
540 | ||
76108cea | 541 | void xt_compat_flush_offsets(u_int8_t af) |
b386d9f5 | 542 | { |
255d0dc3 ED |
543 | if (xt[af].compat_tab) { |
544 | vfree(xt[af].compat_tab); | |
545 | xt[af].compat_tab = NULL; | |
546 | xt[af].number = 0; | |
5a6351ee | 547 | xt[af].cur = 0; |
b386d9f5 PM |
548 | } |
549 | } | |
550 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | |
551 | ||
/* Return the cumulative size delta accumulated by all rules located
 * strictly before @offset in the kernel blob, i.e. the amount a jump to
 * @offset must be adjusted by when translating to/from the compat layout.
 * Binary search over the offset-sorted compat_tab built by
 * xt_compat_add_offset(). */
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			/* exact hit: deltas are cumulative, so the entry
			 * before @offset carries the total for everything
			 * in front of it */
			return mid ? tmp[mid - 1].delta : 0;
	}
	/* no exact hit: 'left' is the first entry past @offset */
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
569 | ||
255d0dc3 ED |
570 | void xt_compat_init_offsets(u_int8_t af, unsigned int number) |
571 | { | |
572 | xt[af].number = number; | |
573 | xt[af].cur = 0; | |
574 | } | |
575 | EXPORT_SYMBOL(xt_compat_init_offsets); | |
576 | ||
5452e425 | 577 | int xt_compat_match_offset(const struct xt_match *match) |
2722971c | 578 | { |
9fa492cd PM |
579 | u_int16_t csize = match->compatsize ? : match->matchsize; |
580 | return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); | |
581 | } | |
582 | EXPORT_SYMBOL_GPL(xt_compat_match_offset); | |
583 | ||
0188346f FW |
/* Translate one compat (32-bit layout) match at *@m into native layout at
 * *@dstptr, advancing *@dstptr and growing *@size by the layout delta.
 * Note @m aliases the compat source; the destination is *@dstptr. */
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	/* zero alignment padding so no uninitialized bytes remain */
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	/* snapshot the name before module_put(): dropping the reference may
	 * allow the module (and match->name) to go away */
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
613 | ||
739674fb JE |
/* Copy one native-layout match out to userspace in the 32-bit compat
 * layout, advancing *@dstptr and shrinking *@size by the layout delta.
 * Return: 0 on success, -EFAULT on a faulting copy. */
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	/* advertise the compat (smaller) size in the copied header */
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		/* extension provides its own payload translation */
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
fc1221b3 | 638 | |
7ed2abdd FW |
/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};
644 | ||
/**
 * xt_compat_check_entry_offsets - CONFIG_COMPAT twin of
 * xt_check_entry_offsets(): validates a 32-bit-layout arp/ip/ip6t_entry.
 *
 * @base:          pointer to the compat entry
 * @elems:         pointer to the entry's first match (flexible array)
 * @target_offset: alleged start of the target within the entry
 * @next_offset:   alleged total size of the entry
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	/* the target must not start inside the fixed entry header */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* standard targets carry exactly one verdict word, nothing more */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
9fa492cd | 680 | #endif /* CONFIG_COMPAT */ |
2722971c | 681 | |
7d35812c FW |
/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	/* the declared target size must at least cover its own header */
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* standard targets carry exactly one verdict word, nothing more */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
756 | ||
f4dc7771 FW |
757 | /** |
758 | * xt_alloc_entry_offsets - allocate array to store rule head offsets | |
759 | * | |
760 | * @size: number of entries | |
761 | * | |
762 | * Return: NULL or kmalloc'd or vmalloc'd array | |
763 | */ | |
764 | unsigned int *xt_alloc_entry_offsets(unsigned int size) | |
765 | { | |
766 | unsigned int *off; | |
767 | ||
768 | off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN); | |
769 | ||
770 | if (off) | |
771 | return off; | |
772 | ||
773 | if (size < (SIZE_MAX / sizeof(unsigned int))) | |
774 | off = vmalloc(size * sizeof(unsigned int)); | |
775 | ||
776 | return off; | |
777 | } | |
778 | EXPORT_SYMBOL(xt_alloc_entry_offsets); | |
779 | ||
/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offset
 *
 * Binary search over the sorted @offsets array.
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	unsigned int lo = 0, hi = size;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (offsets[mid] == target)
			return true;
		if (offsets[mid] < target)
			lo = mid + 1;
		else
			hi = mid;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
806 | ||
/**
 * xt_check_target - centralized sanity checks for a target at ruleset load
 * @par:       check parameters (family, table, hook mask, target, ...)
 * @size:      userspace-declared size of the target payload
 * @proto:     protocol the rule restricts to (0 = any)
 * @inv_proto: true if the rule's protocol test is inverted
 *
 * Validates payload size, table name, hook usage and protocol restriction
 * against the xt_target's declared constraints, then runs the extension's
 * own ->checkentry() hook. Return: 0 on success, negative errno otherwise.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	/* target restricted to one table? reject use from any other */
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	/* every hook the rule is wired to must be allowed by the target */
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
856 | ||
d7591f0c FW |
857 | /** |
858 | * xt_copy_counters_from_user - copy counters and metadata from userspace | |
859 | * | |
860 | * @user: src pointer to userspace memory | |
861 | * @len: alleged size of userspace memory | |
862 | * @info: where to store the xt_counters_info metadata | |
863 | * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel | |
864 | * | |
865 | * Copies counter meta data from @user and stores it in @info. | |
866 | * | |
867 | * vmallocs memory to hold the counters, then copies the counter data | |
868 | * from @user to the new memory and returns a pointer to it. | |
869 | * | |
870 | * If @compat is true, @info gets converted automatically to the 64bit | |
871 | * representation. | |
872 | * | |
873 | * The metadata associated with the counters is stored in @info. | |
874 | * | |
875 | * Return: returns pointer that caller has to test via IS_ERR(). | |
876 | * If IS_ERR is false, caller has to vfree the pointer. | |
877 | */ | |
878 | void *xt_copy_counters_from_user(const void __user *user, unsigned int len, | |
879 | struct xt_counters_info *info, bool compat) | |
880 | { | |
881 | void *mem; | |
882 | u64 size; | |
883 | ||
884 | #ifdef CONFIG_COMPAT | |
885 | if (compat) { | |
886 | /* structures only differ in size due to alignment */ | |
887 | struct compat_xt_counters_info compat_tmp; | |
888 | ||
889 | if (len <= sizeof(compat_tmp)) | |
890 | return ERR_PTR(-EINVAL); | |
891 | ||
892 | len -= sizeof(compat_tmp); | |
893 | if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) | |
894 | return ERR_PTR(-EFAULT); | |
895 | ||
896 | strlcpy(info->name, compat_tmp.name, sizeof(info->name)); | |
897 | info->num_counters = compat_tmp.num_counters; | |
898 | user += sizeof(compat_tmp); | |
899 | } else | |
900 | #endif | |
901 | { | |
902 | if (len <= sizeof(*info)) | |
903 | return ERR_PTR(-EINVAL); | |
904 | ||
905 | len -= sizeof(*info); | |
906 | if (copy_from_user(info, user, sizeof(*info)) != 0) | |
907 | return ERR_PTR(-EFAULT); | |
908 | ||
909 | info->name[sizeof(info->name) - 1] = '\0'; | |
910 | user += sizeof(*info); | |
911 | } | |
912 | ||
913 | size = sizeof(struct xt_counters); | |
914 | size *= info->num_counters; | |
915 | ||
916 | if (size != (u64)len) | |
917 | return ERR_PTR(-EINVAL); | |
918 | ||
919 | mem = vmalloc(len); | |
920 | if (!mem) | |
921 | return ERR_PTR(-ENOMEM); | |
922 | ||
923 | if (copy_from_user(mem, user, len) == 0) | |
924 | return mem; | |
925 | ||
926 | vfree(mem); | |
927 | return ERR_PTR(-EFAULT); | |
928 | } | |
929 | EXPORT_SYMBOL_GPL(xt_copy_counters_from_user); | |
930 | ||
2722971c | 931 | #ifdef CONFIG_COMPAT |
5452e425 | 932 | int xt_compat_target_offset(const struct xt_target *target) |
2722971c | 933 | { |
9fa492cd PM |
934 | u_int16_t csize = target->compatsize ? : target->targetsize; |
935 | return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); | |
936 | } | |
937 | EXPORT_SYMBOL_GPL(xt_compat_target_offset); | |
938 | ||
939 | void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, | |
b0a6363c | 940 | unsigned int *size) |
9fa492cd | 941 | { |
5452e425 | 942 | const struct xt_target *target = t->u.kernel.target; |
9fa492cd PM |
943 | struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; |
944 | int pad, off = xt_compat_target_offset(target); | |
945 | u_int16_t tsize = ct->u.user.target_size; | |
09d96860 | 946 | char name[sizeof(t->u.user.name)]; |
9fa492cd PM |
947 | |
948 | t = *dstptr; | |
949 | memcpy(t, ct, sizeof(*ct)); | |
950 | if (target->compat_from_user) | |
951 | target->compat_from_user(t->data, ct->data); | |
952 | else | |
953 | memcpy(t->data, ct->data, tsize - sizeof(*ct)); | |
954 | pad = XT_ALIGN(target->targetsize) - target->targetsize; | |
955 | if (pad > 0) | |
956 | memset(t->data + target->targetsize, 0, pad); | |
957 | ||
958 | tsize += off; | |
959 | t->u.user.target_size = tsize; | |
09d96860 FW |
960 | strlcpy(name, target->name, sizeof(name)); |
961 | module_put(target->me); | |
962 | strncpy(t->u.user.name, name, sizeof(t->u.user.name)); | |
9fa492cd PM |
963 | |
964 | *size += off; | |
965 | *dstptr += tsize; | |
966 | } | |
967 | EXPORT_SYMBOL_GPL(xt_compat_target_from_user); | |
968 | ||
739674fb JE |
969 | int xt_compat_target_to_user(const struct xt_entry_target *t, |
970 | void __user **dstptr, unsigned int *size) | |
9fa492cd | 971 | { |
5452e425 | 972 | const struct xt_target *target = t->u.kernel.target; |
9fa492cd PM |
973 | struct compat_xt_entry_target __user *ct = *dstptr; |
974 | int off = xt_compat_target_offset(target); | |
975 | u_int16_t tsize = t->u.user.target_size - off; | |
976 | ||
4915f7bb | 977 | if (XT_OBJ_TO_USER(ct, t, target, tsize)) |
601e68e1 | 978 | return -EFAULT; |
9fa492cd PM |
979 | |
980 | if (target->compat_to_user) { | |
981 | if (target->compat_to_user((void __user *)ct->data, t->data)) | |
982 | return -EFAULT; | |
983 | } else { | |
4915f7bb | 984 | if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) |
9fa492cd | 985 | return -EFAULT; |
2722971c | 986 | } |
9fa492cd PM |
987 | |
988 | *size -= off; | |
989 | *dstptr += tsize; | |
990 | return 0; | |
2722971c | 991 | } |
9fa492cd | 992 | EXPORT_SYMBOL_GPL(xt_compat_target_to_user); |
2722971c DM |
993 | #endif |
994 | ||
2e4e6a17 HW |
995 | struct xt_table_info *xt_alloc_table_info(unsigned int size) |
996 | { | |
711bdde6 ED |
997 | struct xt_table_info *info = NULL; |
998 | size_t sz = sizeof(*info) + size; | |
2e4e6a17 | 999 | |
d157bd76 FW |
1000 | if (sz < sizeof(*info)) |
1001 | return NULL; | |
1002 | ||
2e4e6a17 | 1003 | /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ |
4481374c | 1004 | if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) |
2e4e6a17 HW |
1005 | return NULL; |
1006 | ||
711bdde6 ED |
1007 | if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) |
1008 | info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); | |
1009 | if (!info) { | |
5bad8734 MRL |
1010 | info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | |
1011 | __GFP_NORETRY | __GFP_HIGHMEM, | |
1012 | PAGE_KERNEL); | |
711bdde6 ED |
1013 | if (!info) |
1014 | return NULL; | |
2e4e6a17 | 1015 | } |
711bdde6 ED |
1016 | memset(info, 0, sizeof(*info)); |
1017 | info->size = size; | |
1018 | return info; | |
2e4e6a17 HW |
1019 | } |
1020 | EXPORT_SYMBOL(xt_alloc_table_info); | |
1021 | ||
1022 | void xt_free_table_info(struct xt_table_info *info) | |
1023 | { | |
1024 | int cpu; | |
1025 | ||
f3c5c1bf | 1026 | if (info->jumpstack != NULL) { |
f6b50824 ED |
1027 | for_each_possible_cpu(cpu) |
1028 | kvfree(info->jumpstack[cpu]); | |
1029 | kvfree(info->jumpstack); | |
f3c5c1bf JE |
1030 | } |
1031 | ||
711bdde6 | 1032 | kvfree(info); |
2e4e6a17 HW |
1033 | } |
1034 | EXPORT_SYMBOL(xt_free_table_info); | |
1035 | ||
eb1a6bdc | 1036 | /* Find table by name, grabs mutex & ref. Returns NULL on error. */ |
76108cea JE |
1037 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, |
1038 | const char *name) | |
2e4e6a17 | 1039 | { |
b9e69e12 | 1040 | struct xt_table *t, *found = NULL; |
2e4e6a17 | 1041 | |
7926dbfa | 1042 | mutex_lock(&xt[af].mutex); |
8d870052 | 1043 | list_for_each_entry(t, &net->xt.tables[af], list) |
2e4e6a17 HW |
1044 | if (strcmp(t->name, name) == 0 && try_module_get(t->me)) |
1045 | return t; | |
b9e69e12 FW |
1046 | |
1047 | if (net == &init_net) | |
1048 | goto out; | |
1049 | ||
1050 | /* Table doesn't exist in this netns, re-try init */ | |
1051 | list_for_each_entry(t, &init_net.xt.tables[af], list) { | |
1052 | if (strcmp(t->name, name)) | |
1053 | continue; | |
1054 | if (!try_module_get(t->me)) | |
1055 | return NULL; | |
1056 | ||
1057 | mutex_unlock(&xt[af].mutex); | |
1058 | if (t->table_init(net) != 0) { | |
1059 | module_put(t->me); | |
1060 | return NULL; | |
1061 | } | |
1062 | ||
1063 | found = t; | |
1064 | ||
1065 | mutex_lock(&xt[af].mutex); | |
1066 | break; | |
1067 | } | |
1068 | ||
1069 | if (!found) | |
1070 | goto out; | |
1071 | ||
1072 | /* and once again: */ | |
1073 | list_for_each_entry(t, &net->xt.tables[af], list) | |
1074 | if (strcmp(t->name, name) == 0) | |
1075 | return t; | |
1076 | ||
1077 | module_put(found->me); | |
1078 | out: | |
9e19bb6d | 1079 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
1080 | return NULL; |
1081 | } | |
1082 | EXPORT_SYMBOL_GPL(xt_find_table_lock); | |
1083 | ||
1084 | void xt_table_unlock(struct xt_table *table) | |
1085 | { | |
9e19bb6d | 1086 | mutex_unlock(&xt[table->af].mutex); |
2e4e6a17 HW |
1087 | } |
1088 | EXPORT_SYMBOL_GPL(xt_table_unlock); | |
1089 | ||
2722971c | 1090 | #ifdef CONFIG_COMPAT |
76108cea | 1091 | void xt_compat_lock(u_int8_t af) |
2722971c DM |
1092 | { |
1093 | mutex_lock(&xt[af].compat_mutex); | |
1094 | } | |
1095 | EXPORT_SYMBOL_GPL(xt_compat_lock); | |
1096 | ||
76108cea | 1097 | void xt_compat_unlock(u_int8_t af) |
2722971c DM |
1098 | { |
1099 | mutex_unlock(&xt[af].compat_mutex); | |
1100 | } | |
1101 | EXPORT_SYMBOL_GPL(xt_compat_unlock); | |
1102 | #endif | |
2e4e6a17 | 1103 | |
7f5c6d4f ED |
1104 | DEFINE_PER_CPU(seqcount_t, xt_recseq); |
1105 | EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq); | |
942e4a2b | 1106 | |
dcebd315 FW |
1107 | struct static_key xt_tee_enabled __read_mostly; |
1108 | EXPORT_SYMBOL_GPL(xt_tee_enabled); | |
1109 | ||
f3c5c1bf JE |
1110 | static int xt_jumpstack_alloc(struct xt_table_info *i) |
1111 | { | |
1112 | unsigned int size; | |
1113 | int cpu; | |
1114 | ||
f3c5c1bf JE |
1115 | size = sizeof(void **) * nr_cpu_ids; |
1116 | if (size > PAGE_SIZE) | |
3dbd4439 | 1117 | i->jumpstack = vzalloc(size); |
f3c5c1bf | 1118 | else |
3dbd4439 | 1119 | i->jumpstack = kzalloc(size, GFP_KERNEL); |
f3c5c1bf JE |
1120 | if (i->jumpstack == NULL) |
1121 | return -ENOMEM; | |
f3c5c1bf | 1122 | |
98d1bd80 FW |
1123 | /* ruleset without jumps -- no stack needed */ |
1124 | if (i->stacksize == 0) | |
1125 | return 0; | |
1126 | ||
7814b6ec FW |
1127 | /* Jumpstack needs to be able to record two full callchains, one |
1128 | * from the first rule set traversal, plus one table reentrancy | |
1129 | * via -j TEE without clobbering the callchain that brought us to | |
1130 | * TEE target. | |
1131 | * | |
1132 | * This is done by allocating two jumpstacks per cpu, on reentry | |
1133 | * the upper half of the stack is used. | |
1134 | * | |
1135 | * see the jumpstack setup in ipt_do_table() for more details. | |
1136 | */ | |
1137 | size = sizeof(void *) * i->stacksize * 2u; | |
f3c5c1bf JE |
1138 | for_each_possible_cpu(cpu) { |
1139 | if (size > PAGE_SIZE) | |
1140 | i->jumpstack[cpu] = vmalloc_node(size, | |
1141 | cpu_to_node(cpu)); | |
1142 | else | |
1143 | i->jumpstack[cpu] = kmalloc_node(size, | |
1144 | GFP_KERNEL, cpu_to_node(cpu)); | |
1145 | if (i->jumpstack[cpu] == NULL) | |
1146 | /* | |
1147 | * Freeing will be done later on by the callers. The | |
1148 | * chain is: xt_replace_table -> __do_replace -> | |
1149 | * do_replace -> xt_free_table_info. | |
1150 | */ | |
1151 | return -ENOMEM; | |
1152 | } | |
1153 | ||
1154 | return 0; | |
1155 | } | |
942e4a2b | 1156 | |
2e4e6a17 HW |
1157 | struct xt_table_info * |
1158 | xt_replace_table(struct xt_table *table, | |
1159 | unsigned int num_counters, | |
1160 | struct xt_table_info *newinfo, | |
1161 | int *error) | |
1162 | { | |
942e4a2b | 1163 | struct xt_table_info *private; |
f3c5c1bf | 1164 | int ret; |
2e4e6a17 | 1165 | |
d97a9e47 JE |
1166 | ret = xt_jumpstack_alloc(newinfo); |
1167 | if (ret < 0) { | |
1168 | *error = ret; | |
1169 | return NULL; | |
1170 | } | |
1171 | ||
2e4e6a17 | 1172 | /* Do the substitution. */ |
942e4a2b | 1173 | local_bh_disable(); |
2e4e6a17 | 1174 | private = table->private; |
942e4a2b | 1175 | |
2e4e6a17 HW |
1176 | /* Check inside lock: is the old number correct? */ |
1177 | if (num_counters != private->number) { | |
be91fd5e | 1178 | pr_debug("num_counters != table->private->number (%u/%u)\n", |
2e4e6a17 | 1179 | num_counters, private->number); |
942e4a2b | 1180 | local_bh_enable(); |
2e4e6a17 HW |
1181 | *error = -EAGAIN; |
1182 | return NULL; | |
1183 | } | |
2e4e6a17 | 1184 | |
942e4a2b | 1185 | newinfo->initial_entries = private->initial_entries; |
b416c144 WD |
1186 | /* |
1187 | * Ensure contents of newinfo are visible before assigning to | |
1188 | * private. | |
1189 | */ | |
1190 | smp_wmb(); | |
1191 | table->private = newinfo; | |
942e4a2b SH |
1192 | |
1193 | /* | |
1194 | * Even though table entries have now been swapped, other CPU's | |
1195 | * may still be using the old entries. This is okay, because | |
1196 | * resynchronization happens because of the locking done | |
1197 | * during the get_counters() routine. | |
1198 | */ | |
1199 | local_bh_enable(); | |
1200 | ||
fbabf31e TG |
1201 | #ifdef CONFIG_AUDIT |
1202 | if (audit_enabled) { | |
1203 | struct audit_buffer *ab; | |
1204 | ||
1205 | ab = audit_log_start(current->audit_context, GFP_KERNEL, | |
1206 | AUDIT_NETFILTER_CFG); | |
1207 | if (ab) { | |
1208 | audit_log_format(ab, "table=%s family=%u entries=%u", | |
1209 | table->name, table->af, | |
1210 | private->number); | |
1211 | audit_log_end(ab); | |
1212 | } | |
1213 | } | |
1214 | #endif | |
1215 | ||
942e4a2b | 1216 | return private; |
2e4e6a17 HW |
1217 | } |
1218 | EXPORT_SYMBOL_GPL(xt_replace_table); | |
1219 | ||
35aad0ff JE |
1220 | struct xt_table *xt_register_table(struct net *net, |
1221 | const struct xt_table *input_table, | |
a98da11d AD |
1222 | struct xt_table_info *bootstrap, |
1223 | struct xt_table_info *newinfo) | |
2e4e6a17 HW |
1224 | { |
1225 | int ret; | |
1226 | struct xt_table_info *private; | |
35aad0ff | 1227 | struct xt_table *t, *table; |
2e4e6a17 | 1228 | |
44d34e72 | 1229 | /* Don't add one object to multiple lists. */ |
35aad0ff | 1230 | table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); |
44d34e72 AD |
1231 | if (!table) { |
1232 | ret = -ENOMEM; | |
1233 | goto out; | |
1234 | } | |
1235 | ||
7926dbfa | 1236 | mutex_lock(&xt[table->af].mutex); |
2e4e6a17 | 1237 | /* Don't autoload: we'd eat our tail... */ |
8d870052 | 1238 | list_for_each_entry(t, &net->xt.tables[table->af], list) { |
df0933dc PM |
1239 | if (strcmp(t->name, table->name) == 0) { |
1240 | ret = -EEXIST; | |
1241 | goto unlock; | |
1242 | } | |
2e4e6a17 HW |
1243 | } |
1244 | ||
1245 | /* Simplifies replace_table code. */ | |
1246 | table->private = bootstrap; | |
78454473 | 1247 | |
2e4e6a17 HW |
1248 | if (!xt_replace_table(table, 0, newinfo, &ret)) |
1249 | goto unlock; | |
1250 | ||
1251 | private = table->private; | |
be91fd5e | 1252 | pr_debug("table->private->number = %u\n", private->number); |
2e4e6a17 HW |
1253 | |
1254 | /* save number of initial entries */ | |
1255 | private->initial_entries = private->number; | |
1256 | ||
8d870052 | 1257 | list_add(&table->list, &net->xt.tables[table->af]); |
a98da11d AD |
1258 | mutex_unlock(&xt[table->af].mutex); |
1259 | return table; | |
2e4e6a17 | 1260 | |
7926dbfa | 1261 | unlock: |
9e19bb6d | 1262 | mutex_unlock(&xt[table->af].mutex); |
44d34e72 | 1263 | kfree(table); |
a98da11d AD |
1264 | out: |
1265 | return ERR_PTR(ret); | |
2e4e6a17 HW |
1266 | } |
1267 | EXPORT_SYMBOL_GPL(xt_register_table); | |
1268 | ||
1269 | void *xt_unregister_table(struct xt_table *table) | |
1270 | { | |
1271 | struct xt_table_info *private; | |
1272 | ||
9e19bb6d | 1273 | mutex_lock(&xt[table->af].mutex); |
2e4e6a17 | 1274 | private = table->private; |
df0933dc | 1275 | list_del(&table->list); |
9e19bb6d | 1276 | mutex_unlock(&xt[table->af].mutex); |
44d34e72 | 1277 | kfree(table); |
2e4e6a17 HW |
1278 | |
1279 | return private; | |
1280 | } | |
1281 | EXPORT_SYMBOL_GPL(xt_unregister_table); | |
1282 | ||
1283 | #ifdef CONFIG_PROC_FS | |
715cf35a AD |
1284 | struct xt_names_priv { |
1285 | struct seq_net_private p; | |
76108cea | 1286 | u_int8_t af; |
715cf35a | 1287 | }; |
025d93d1 | 1288 | static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos) |
2e4e6a17 | 1289 | { |
715cf35a | 1290 | struct xt_names_priv *priv = seq->private; |
1218854a | 1291 | struct net *net = seq_file_net(seq); |
76108cea | 1292 | u_int8_t af = priv->af; |
2e4e6a17 | 1293 | |
025d93d1 | 1294 | mutex_lock(&xt[af].mutex); |
715cf35a | 1295 | return seq_list_start(&net->xt.tables[af], *pos); |
025d93d1 | 1296 | } |
2e4e6a17 | 1297 | |
025d93d1 AD |
1298 | static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
1299 | { | |
715cf35a | 1300 | struct xt_names_priv *priv = seq->private; |
1218854a | 1301 | struct net *net = seq_file_net(seq); |
76108cea | 1302 | u_int8_t af = priv->af; |
2e4e6a17 | 1303 | |
715cf35a | 1304 | return seq_list_next(v, &net->xt.tables[af], pos); |
2e4e6a17 HW |
1305 | } |
1306 | ||
025d93d1 | 1307 | static void xt_table_seq_stop(struct seq_file *seq, void *v) |
2e4e6a17 | 1308 | { |
715cf35a | 1309 | struct xt_names_priv *priv = seq->private; |
76108cea | 1310 | u_int8_t af = priv->af; |
2e4e6a17 | 1311 | |
025d93d1 AD |
1312 | mutex_unlock(&xt[af].mutex); |
1313 | } | |
2e4e6a17 | 1314 | |
025d93d1 AD |
1315 | static int xt_table_seq_show(struct seq_file *seq, void *v) |
1316 | { | |
1317 | struct xt_table *table = list_entry(v, struct xt_table, list); | |
2e4e6a17 | 1318 | |
861fb107 | 1319 | if (*table->name) |
e71456ae | 1320 | seq_printf(seq, "%s\n", table->name); |
861fb107 | 1321 | return 0; |
025d93d1 | 1322 | } |
601e68e1 | 1323 | |
025d93d1 AD |
1324 | static const struct seq_operations xt_table_seq_ops = { |
1325 | .start = xt_table_seq_start, | |
1326 | .next = xt_table_seq_next, | |
1327 | .stop = xt_table_seq_stop, | |
1328 | .show = xt_table_seq_show, | |
1329 | }; | |
1330 | ||
1331 | static int xt_table_open(struct inode *inode, struct file *file) | |
1332 | { | |
1333 | int ret; | |
715cf35a | 1334 | struct xt_names_priv *priv; |
025d93d1 | 1335 | |
715cf35a AD |
1336 | ret = seq_open_net(inode, file, &xt_table_seq_ops, |
1337 | sizeof(struct xt_names_priv)); | |
025d93d1 | 1338 | if (!ret) { |
715cf35a | 1339 | priv = ((struct seq_file *)file->private_data)->private; |
d9dda78b | 1340 | priv->af = (unsigned long)PDE_DATA(inode); |
025d93d1 AD |
1341 | } |
1342 | return ret; | |
2e4e6a17 HW |
1343 | } |
1344 | ||
025d93d1 AD |
1345 | static const struct file_operations xt_table_ops = { |
1346 | .owner = THIS_MODULE, | |
1347 | .open = xt_table_open, | |
1348 | .read = seq_read, | |
1349 | .llseek = seq_lseek, | |
0e93bb94 | 1350 | .release = seq_release_net, |
025d93d1 AD |
1351 | }; |
1352 | ||
eb132205 JE |
1353 | /* |
1354 | * Traverse state for ip{,6}_{tables,matches} for helping crossing | |
1355 | * the multi-AF mutexes. | |
1356 | */ | |
1357 | struct nf_mttg_trav { | |
1358 | struct list_head *head, *curr; | |
1359 | uint8_t class, nfproto; | |
1360 | }; | |
1361 | ||
1362 | enum { | |
1363 | MTTG_TRAV_INIT, | |
1364 | MTTG_TRAV_NFP_UNSPEC, | |
1365 | MTTG_TRAV_NFP_SPEC, | |
1366 | MTTG_TRAV_DONE, | |
1367 | }; | |
1368 | ||
1369 | static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, | |
1370 | bool is_target) | |
2e4e6a17 | 1371 | { |
eb132205 JE |
1372 | static const uint8_t next_class[] = { |
1373 | [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC, | |
1374 | [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE, | |
1375 | }; | |
1376 | struct nf_mttg_trav *trav = seq->private; | |
1377 | ||
1378 | switch (trav->class) { | |
1379 | case MTTG_TRAV_INIT: | |
1380 | trav->class = MTTG_TRAV_NFP_UNSPEC; | |
1381 | mutex_lock(&xt[NFPROTO_UNSPEC].mutex); | |
1382 | trav->head = trav->curr = is_target ? | |
1383 | &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; | |
1384 | break; | |
1385 | case MTTG_TRAV_NFP_UNSPEC: | |
1386 | trav->curr = trav->curr->next; | |
1387 | if (trav->curr != trav->head) | |
1388 | break; | |
1389 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); | |
1390 | mutex_lock(&xt[trav->nfproto].mutex); | |
1391 | trav->head = trav->curr = is_target ? | |
1392 | &xt[trav->nfproto].target : &xt[trav->nfproto].match; | |
1393 | trav->class = next_class[trav->class]; | |
1394 | break; | |
1395 | case MTTG_TRAV_NFP_SPEC: | |
1396 | trav->curr = trav->curr->next; | |
1397 | if (trav->curr != trav->head) | |
1398 | break; | |
1399 | /* fallthru, _stop will unlock */ | |
1400 | default: | |
1401 | return NULL; | |
1402 | } | |
2e4e6a17 | 1403 | |
eb132205 JE |
1404 | if (ppos != NULL) |
1405 | ++*ppos; | |
1406 | return trav; | |
025d93d1 | 1407 | } |
601e68e1 | 1408 | |
eb132205 JE |
1409 | static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, |
1410 | bool is_target) | |
025d93d1 | 1411 | { |
eb132205 JE |
1412 | struct nf_mttg_trav *trav = seq->private; |
1413 | unsigned int j; | |
2e4e6a17 | 1414 | |
eb132205 JE |
1415 | trav->class = MTTG_TRAV_INIT; |
1416 | for (j = 0; j < *pos; ++j) | |
1417 | if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) | |
1418 | return NULL; | |
1419 | return trav; | |
2e4e6a17 HW |
1420 | } |
1421 | ||
eb132205 | 1422 | static void xt_mttg_seq_stop(struct seq_file *seq, void *v) |
2e4e6a17 | 1423 | { |
eb132205 JE |
1424 | struct nf_mttg_trav *trav = seq->private; |
1425 | ||
1426 | switch (trav->class) { | |
1427 | case MTTG_TRAV_NFP_UNSPEC: | |
1428 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); | |
1429 | break; | |
1430 | case MTTG_TRAV_NFP_SPEC: | |
1431 | mutex_unlock(&xt[trav->nfproto].mutex); | |
1432 | break; | |
1433 | } | |
1434 | } | |
2e4e6a17 | 1435 | |
eb132205 JE |
1436 | static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) |
1437 | { | |
1438 | return xt_mttg_seq_start(seq, pos, false); | |
2e4e6a17 HW |
1439 | } |
1440 | ||
eb132205 | 1441 | static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos) |
2e4e6a17 | 1442 | { |
eb132205 JE |
1443 | return xt_mttg_seq_next(seq, v, ppos, false); |
1444 | } | |
2e4e6a17 | 1445 | |
eb132205 JE |
1446 | static int xt_match_seq_show(struct seq_file *seq, void *v) |
1447 | { | |
1448 | const struct nf_mttg_trav *trav = seq->private; | |
1449 | const struct xt_match *match; | |
1450 | ||
1451 | switch (trav->class) { | |
1452 | case MTTG_TRAV_NFP_UNSPEC: | |
1453 | case MTTG_TRAV_NFP_SPEC: | |
1454 | if (trav->curr == trav->head) | |
1455 | return 0; | |
1456 | match = list_entry(trav->curr, struct xt_match, list); | |
861fb107 JP |
1457 | if (*match->name) |
1458 | seq_printf(seq, "%s\n", match->name); | |
eb132205 JE |
1459 | } |
1460 | return 0; | |
2e4e6a17 HW |
1461 | } |
1462 | ||
025d93d1 AD |
1463 | static const struct seq_operations xt_match_seq_ops = { |
1464 | .start = xt_match_seq_start, | |
1465 | .next = xt_match_seq_next, | |
eb132205 | 1466 | .stop = xt_mttg_seq_stop, |
025d93d1 | 1467 | .show = xt_match_seq_show, |
2e4e6a17 HW |
1468 | }; |
1469 | ||
025d93d1 | 1470 | static int xt_match_open(struct inode *inode, struct file *file) |
2e4e6a17 | 1471 | { |
eb132205 | 1472 | struct nf_mttg_trav *trav; |
772476df RJ |
1473 | trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav)); |
1474 | if (!trav) | |
eb132205 | 1475 | return -ENOMEM; |
2e4e6a17 | 1476 | |
d9dda78b | 1477 | trav->nfproto = (unsigned long)PDE_DATA(inode); |
eb132205 | 1478 | return 0; |
025d93d1 AD |
1479 | } |
1480 | ||
1481 | static const struct file_operations xt_match_ops = { | |
1482 | .owner = THIS_MODULE, | |
1483 | .open = xt_match_open, | |
1484 | .read = seq_read, | |
1485 | .llseek = seq_lseek, | |
eb132205 | 1486 | .release = seq_release_private, |
025d93d1 | 1487 | }; |
2e4e6a17 | 1488 | |
025d93d1 AD |
1489 | static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) |
1490 | { | |
eb132205 | 1491 | return xt_mttg_seq_start(seq, pos, true); |
025d93d1 AD |
1492 | } |
1493 | ||
eb132205 | 1494 | static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos) |
025d93d1 | 1495 | { |
eb132205 | 1496 | return xt_mttg_seq_next(seq, v, ppos, true); |
025d93d1 AD |
1497 | } |
1498 | ||
1499 | static int xt_target_seq_show(struct seq_file *seq, void *v) | |
1500 | { | |
eb132205 JE |
1501 | const struct nf_mttg_trav *trav = seq->private; |
1502 | const struct xt_target *target; | |
1503 | ||
1504 | switch (trav->class) { | |
1505 | case MTTG_TRAV_NFP_UNSPEC: | |
1506 | case MTTG_TRAV_NFP_SPEC: | |
1507 | if (trav->curr == trav->head) | |
1508 | return 0; | |
1509 | target = list_entry(trav->curr, struct xt_target, list); | |
861fb107 JP |
1510 | if (*target->name) |
1511 | seq_printf(seq, "%s\n", target->name); | |
eb132205 JE |
1512 | } |
1513 | return 0; | |
025d93d1 AD |
1514 | } |
1515 | ||
1516 | static const struct seq_operations xt_target_seq_ops = { | |
1517 | .start = xt_target_seq_start, | |
1518 | .next = xt_target_seq_next, | |
eb132205 | 1519 | .stop = xt_mttg_seq_stop, |
025d93d1 AD |
1520 | .show = xt_target_seq_show, |
1521 | }; | |
1522 | ||
1523 | static int xt_target_open(struct inode *inode, struct file *file) | |
1524 | { | |
eb132205 | 1525 | struct nf_mttg_trav *trav; |
772476df RJ |
1526 | trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav)); |
1527 | if (!trav) | |
eb132205 | 1528 | return -ENOMEM; |
025d93d1 | 1529 | |
d9dda78b | 1530 | trav->nfproto = (unsigned long)PDE_DATA(inode); |
eb132205 | 1531 | return 0; |
2e4e6a17 HW |
1532 | } |
1533 | ||
025d93d1 | 1534 | static const struct file_operations xt_target_ops = { |
2e4e6a17 | 1535 | .owner = THIS_MODULE, |
025d93d1 | 1536 | .open = xt_target_open, |
2e4e6a17 HW |
1537 | .read = seq_read, |
1538 | .llseek = seq_lseek, | |
eb132205 | 1539 | .release = seq_release_private, |
2e4e6a17 HW |
1540 | }; |
1541 | ||
1542 | #define FORMAT_TABLES "_tables_names" | |
1543 | #define FORMAT_MATCHES "_tables_matches" | |
1544 | #define FORMAT_TARGETS "_tables_targets" | |
1545 | ||
1546 | #endif /* CONFIG_PROC_FS */ | |
1547 | ||
2b95efe7 | 1548 | /** |
b9e69e12 | 1549 | * xt_hook_ops_alloc - set up hooks for a new table |
2b95efe7 JE |
1550 | * @table: table with metadata needed to set up hooks |
1551 | * @fn: Hook function | |
1552 | * | |
b9e69e12 FW |
1553 | * This function will create the nf_hook_ops that the x_table needs |
1554 | * to hand to xt_hook_link_net(). | |
2b95efe7 | 1555 | */ |
b9e69e12 FW |
1556 | struct nf_hook_ops * |
1557 | xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn) | |
2b95efe7 JE |
1558 | { |
1559 | unsigned int hook_mask = table->valid_hooks; | |
1560 | uint8_t i, num_hooks = hweight32(hook_mask); | |
1561 | uint8_t hooknum; | |
1562 | struct nf_hook_ops *ops; | |
2b95efe7 | 1563 | |
a6d0bae1 XL |
1564 | if (!num_hooks) |
1565 | return ERR_PTR(-EINVAL); | |
1566 | ||
1ecc281e | 1567 | ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL); |
2b95efe7 JE |
1568 | if (ops == NULL) |
1569 | return ERR_PTR(-ENOMEM); | |
1570 | ||
1571 | for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; | |
1572 | hook_mask >>= 1, ++hooknum) { | |
1573 | if (!(hook_mask & 1)) | |
1574 | continue; | |
1575 | ops[i].hook = fn; | |
2b95efe7 JE |
1576 | ops[i].pf = table->af; |
1577 | ops[i].hooknum = hooknum; | |
1578 | ops[i].priority = table->priority; | |
1579 | ++i; | |
1580 | } | |
1581 | ||
2b95efe7 JE |
1582 | return ops; |
1583 | } | |
b9e69e12 | 1584 | EXPORT_SYMBOL_GPL(xt_hook_ops_alloc); |
2b95efe7 | 1585 | |
76108cea | 1586 | int xt_proto_init(struct net *net, u_int8_t af) |
2e4e6a17 HW |
1587 | { |
1588 | #ifdef CONFIG_PROC_FS | |
1589 | char buf[XT_FUNCTION_MAXNAMELEN]; | |
1590 | struct proc_dir_entry *proc; | |
f13f2aee PW |
1591 | kuid_t root_uid; |
1592 | kgid_t root_gid; | |
2e4e6a17 HW |
1593 | #endif |
1594 | ||
7e9c6eeb | 1595 | if (af >= ARRAY_SIZE(xt_prefix)) |
2e4e6a17 HW |
1596 | return -EINVAL; |
1597 | ||
1598 | ||
1599 | #ifdef CONFIG_PROC_FS | |
f13f2aee PW |
1600 | root_uid = make_kuid(net->user_ns, 0); |
1601 | root_gid = make_kgid(net->user_ns, 0); | |
1602 | ||
ce18afe5 | 1603 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1604 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
8b169240 DL |
1605 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops, |
1606 | (void *)(unsigned long)af); | |
2e4e6a17 HW |
1607 | if (!proc) |
1608 | goto out; | |
f13f2aee PW |
1609 | if (uid_valid(root_uid) && gid_valid(root_gid)) |
1610 | proc_set_user(proc, root_uid, root_gid); | |
2e4e6a17 | 1611 | |
ce18afe5 | 1612 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1613 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
8b169240 DL |
1614 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops, |
1615 | (void *)(unsigned long)af); | |
2e4e6a17 HW |
1616 | if (!proc) |
1617 | goto out_remove_tables; | |
f13f2aee PW |
1618 | if (uid_valid(root_uid) && gid_valid(root_gid)) |
1619 | proc_set_user(proc, root_uid, root_gid); | |
2e4e6a17 | 1620 | |
ce18afe5 | 1621 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1622 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
8b169240 DL |
1623 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops, |
1624 | (void *)(unsigned long)af); | |
2e4e6a17 HW |
1625 | if (!proc) |
1626 | goto out_remove_matches; | |
f13f2aee PW |
1627 | if (uid_valid(root_uid) && gid_valid(root_gid)) |
1628 | proc_set_user(proc, root_uid, root_gid); | |
2e4e6a17 HW |
1629 | #endif |
1630 | ||
1631 | return 0; | |
1632 | ||
1633 | #ifdef CONFIG_PROC_FS | |
1634 | out_remove_matches: | |
ce18afe5 | 1635 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1636 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
ece31ffd | 1637 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1638 | |
1639 | out_remove_tables: | |
ce18afe5 | 1640 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1641 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
ece31ffd | 1642 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1643 | out: |
1644 | return -1; | |
1645 | #endif | |
1646 | } | |
1647 | EXPORT_SYMBOL_GPL(xt_proto_init); | |
1648 | ||
76108cea | 1649 | void xt_proto_fini(struct net *net, u_int8_t af) |
2e4e6a17 HW |
1650 | { |
1651 | #ifdef CONFIG_PROC_FS | |
1652 | char buf[XT_FUNCTION_MAXNAMELEN]; | |
1653 | ||
ce18afe5 | 1654 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1655 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
ece31ffd | 1656 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1657 | |
ce18afe5 | 1658 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1659 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
ece31ffd | 1660 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1661 | |
ce18afe5 | 1662 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1663 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
ece31ffd | 1664 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1665 | #endif /*CONFIG_PROC_FS*/ |
1666 | } | |
1667 | EXPORT_SYMBOL_GPL(xt_proto_fini); | |
1668 | ||
f28e15ba FW |
1669 | /** |
1670 | * xt_percpu_counter_alloc - allocate x_tables rule counter | |
1671 | * | |
ae0ac0ed | 1672 | * @state: pointer to xt_percpu allocation state |
f28e15ba FW |
1673 | * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct |
1674 | * | |
1675 | * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then | |
1676 | * contain the address of the real (percpu) counter. | |
1677 | * | |
1678 | * Rule evaluation needs to use xt_get_this_cpu_counter() helper | |
1679 | * to fetch the real percpu counter. | |
1680 | * | |
ae0ac0ed FW |
1681 | * To speed up allocation and improve data locality, a 4kb block is |
1682 | * allocated. | |
1683 | * | |
1684 | * xt_percpu_counter_alloc_state contains the base address of the | |
1685 | * allocated page and the current sub-offset. | |
1686 | * | |
f28e15ba FW |
1687 | * returns false on error. |
1688 | */ | |
ae0ac0ed FW |
1689 | bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, |
1690 | struct xt_counters *counter) | |
f28e15ba | 1691 | { |
ae0ac0ed | 1692 | BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2)); |
f28e15ba FW |
1693 | |
1694 | if (nr_cpu_ids <= 1) | |
1695 | return true; | |
1696 | ||
ae0ac0ed FW |
1697 | if (!state->mem) { |
1698 | state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE, | |
1699 | XT_PCPU_BLOCK_SIZE); | |
1700 | if (!state->mem) | |
1701 | return false; | |
1702 | } | |
1703 | counter->pcnt = (__force unsigned long)(state->mem + state->off); | |
1704 | state->off += sizeof(*counter); | |
1705 | if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) { | |
1706 | state->mem = NULL; | |
1707 | state->off = 0; | |
1708 | } | |
f28e15ba FW |
1709 | return true; |
1710 | } | |
1711 | EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc); | |
1712 | ||
4d31eef5 FW |
1713 | void xt_percpu_counter_free(struct xt_counters *counters) |
1714 | { | |
1715 | unsigned long pcnt = counters->pcnt; | |
1716 | ||
ae0ac0ed | 1717 | if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0) |
4d31eef5 FW |
1718 | free_percpu((void __percpu *)pcnt); |
1719 | } | |
1720 | EXPORT_SYMBOL_GPL(xt_percpu_counter_free); | |
1721 | ||
8d870052 AD |
1722 | static int __net_init xt_net_init(struct net *net) |
1723 | { | |
1724 | int i; | |
1725 | ||
7e9c6eeb | 1726 | for (i = 0; i < NFPROTO_NUMPROTO; i++) |
8d870052 AD |
1727 | INIT_LIST_HEAD(&net->xt.tables[i]); |
1728 | return 0; | |
1729 | } | |
1730 | ||
1731 | static struct pernet_operations xt_net_ops = { | |
1732 | .init = xt_net_init, | |
1733 | }; | |
2e4e6a17 HW |
1734 | |
1735 | static int __init xt_init(void) | |
1736 | { | |
942e4a2b SH |
1737 | unsigned int i; |
1738 | int rv; | |
1739 | ||
1740 | for_each_possible_cpu(i) { | |
7f5c6d4f | 1741 | seqcount_init(&per_cpu(xt_recseq, i)); |
942e4a2b | 1742 | } |
2e4e6a17 | 1743 | |
7e9c6eeb | 1744 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); |
2e4e6a17 HW |
1745 | if (!xt) |
1746 | return -ENOMEM; | |
1747 | ||
7e9c6eeb | 1748 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { |
9e19bb6d | 1749 | mutex_init(&xt[i].mutex); |
2722971c DM |
1750 | #ifdef CONFIG_COMPAT |
1751 | mutex_init(&xt[i].compat_mutex); | |
255d0dc3 | 1752 | xt[i].compat_tab = NULL; |
2722971c | 1753 | #endif |
2e4e6a17 HW |
1754 | INIT_LIST_HEAD(&xt[i].target); |
1755 | INIT_LIST_HEAD(&xt[i].match); | |
2e4e6a17 | 1756 | } |
8d870052 AD |
1757 | rv = register_pernet_subsys(&xt_net_ops); |
1758 | if (rv < 0) | |
1759 | kfree(xt); | |
1760 | return rv; | |
2e4e6a17 HW |
1761 | } |
1762 | ||
/* Module unload: reverse of xt_init() -- drop the pernet hook first,
 * then free the per-family registration array.
 */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);
1771 |