/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
be91fd5e | 16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2e4e6a17 | 17 | #include <linux/kernel.h> |
3a9a231d | 18 | #include <linux/module.h> |
2e4e6a17 HW |
19 | #include <linux/socket.h> |
20 | #include <linux/net.h> | |
21 | #include <linux/proc_fs.h> | |
22 | #include <linux/seq_file.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/vmalloc.h> | |
9e19bb6d | 25 | #include <linux/mutex.h> |
d7fe0f24 | 26 | #include <linux/mm.h> |
5a0e3ad6 | 27 | #include <linux/slab.h> |
fbabf31e | 28 | #include <linux/audit.h> |
f13f2aee | 29 | #include <linux/user_namespace.h> |
457c4cbc | 30 | #include <net/net_namespace.h> |
2e4e6a17 HW |
31 | |
32 | #include <linux/netfilter/x_tables.h> | |
33 | #include <linux/netfilter_arp.h> | |
e3eaa991 JE |
34 | #include <linux/netfilter_ipv4/ip_tables.h> |
35 | #include <linux/netfilter_ipv6/ip6_tables.h> | |
36 | #include <linux/netfilter_arp/arp_tables.h> | |
9e19bb6d | 37 | |
2e4e6a17 HW |
38 | MODULE_LICENSE("GPL"); |
39 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |
043ef46c | 40 | MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); |
2e4e6a17 HW |
41 | |
42 | #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) | |
ae0ac0ed | 43 | #define XT_PCPU_BLOCK_SIZE 4096 |
2e4e6a17 | 44 | |
/* Mapping of one rule's kernel offset to its cumulative 32-bit
 * userland size delta; used by the compat translation layer.
 */
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};
49 | ||
2e4e6a17 | 50 | struct xt_af { |
9e19bb6d | 51 | struct mutex mutex; |
2e4e6a17 HW |
52 | struct list_head match; |
53 | struct list_head target; | |
b386d9f5 | 54 | #ifdef CONFIG_COMPAT |
2722971c | 55 | struct mutex compat_mutex; |
255d0dc3 ED |
56 | struct compat_delta *compat_tab; |
57 | unsigned int number; /* number of slots in compat_tab[] */ | |
58 | unsigned int cur; /* number of used slots in compat_tab[] */ | |
b386d9f5 | 59 | #endif |
2e4e6a17 HW |
60 | }; |
61 | ||
62 | static struct xt_af *xt; | |
63 | ||
7e9c6eeb JE |
64 | static const char *const xt_prefix[NFPROTO_NUMPROTO] = { |
65 | [NFPROTO_UNSPEC] = "x", | |
66 | [NFPROTO_IPV4] = "ip", | |
67 | [NFPROTO_ARP] = "arp", | |
68 | [NFPROTO_BRIDGE] = "eb", | |
69 | [NFPROTO_IPV6] = "ip6", | |
37f9f733 PM |
70 | }; |
71 | ||
2e4e6a17 | 72 | /* Registration hooks for targets. */ |
7926dbfa | 73 | int xt_register_target(struct xt_target *target) |
2e4e6a17 | 74 | { |
76108cea | 75 | u_int8_t af = target->family; |
2e4e6a17 | 76 | |
7926dbfa | 77 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 78 | list_add(&target->list, &xt[af].target); |
9e19bb6d | 79 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 80 | return 0; |
2e4e6a17 HW |
81 | } |
82 | EXPORT_SYMBOL(xt_register_target); | |
83 | ||
84 | void | |
a45049c5 | 85 | xt_unregister_target(struct xt_target *target) |
2e4e6a17 | 86 | { |
76108cea | 87 | u_int8_t af = target->family; |
a45049c5 | 88 | |
9e19bb6d | 89 | mutex_lock(&xt[af].mutex); |
df0933dc | 90 | list_del(&target->list); |
9e19bb6d | 91 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
92 | } |
93 | EXPORT_SYMBOL(xt_unregister_target); | |
94 | ||
52d9c42e PM |
95 | int |
96 | xt_register_targets(struct xt_target *target, unsigned int n) | |
97 | { | |
98 | unsigned int i; | |
99 | int err = 0; | |
100 | ||
101 | for (i = 0; i < n; i++) { | |
102 | err = xt_register_target(&target[i]); | |
103 | if (err) | |
104 | goto err; | |
105 | } | |
106 | return err; | |
107 | ||
108 | err: | |
109 | if (i > 0) | |
110 | xt_unregister_targets(target, i); | |
111 | return err; | |
112 | } | |
113 | EXPORT_SYMBOL(xt_register_targets); | |
114 | ||
115 | void | |
116 | xt_unregister_targets(struct xt_target *target, unsigned int n) | |
117 | { | |
f68c5301 CG |
118 | while (n-- > 0) |
119 | xt_unregister_target(&target[n]); | |
52d9c42e PM |
120 | } |
121 | EXPORT_SYMBOL(xt_unregister_targets); | |
122 | ||
7926dbfa | 123 | int xt_register_match(struct xt_match *match) |
2e4e6a17 | 124 | { |
76108cea | 125 | u_int8_t af = match->family; |
2e4e6a17 | 126 | |
7926dbfa | 127 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 128 | list_add(&match->list, &xt[af].match); |
9e19bb6d | 129 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 130 | return 0; |
2e4e6a17 HW |
131 | } |
132 | EXPORT_SYMBOL(xt_register_match); | |
133 | ||
134 | void | |
a45049c5 | 135 | xt_unregister_match(struct xt_match *match) |
2e4e6a17 | 136 | { |
76108cea | 137 | u_int8_t af = match->family; |
a45049c5 | 138 | |
9e19bb6d | 139 | mutex_lock(&xt[af].mutex); |
df0933dc | 140 | list_del(&match->list); |
9e19bb6d | 141 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
142 | } |
143 | EXPORT_SYMBOL(xt_unregister_match); | |
144 | ||
52d9c42e PM |
145 | int |
146 | xt_register_matches(struct xt_match *match, unsigned int n) | |
147 | { | |
148 | unsigned int i; | |
149 | int err = 0; | |
150 | ||
151 | for (i = 0; i < n; i++) { | |
152 | err = xt_register_match(&match[i]); | |
153 | if (err) | |
154 | goto err; | |
155 | } | |
156 | return err; | |
157 | ||
158 | err: | |
159 | if (i > 0) | |
160 | xt_unregister_matches(match, i); | |
161 | return err; | |
162 | } | |
163 | EXPORT_SYMBOL(xt_register_matches); | |
164 | ||
165 | void | |
166 | xt_unregister_matches(struct xt_match *match, unsigned int n) | |
167 | { | |
f68c5301 CG |
168 | while (n-- > 0) |
169 | xt_unregister_match(&match[n]); | |
52d9c42e PM |
170 | } |
171 | EXPORT_SYMBOL(xt_unregister_matches); | |
172 | ||

/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

180 | /* Find match, grabs ref. Returns ERR_PTR() on error. */ | |
76108cea | 181 | struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
182 | { |
183 | struct xt_match *m; | |
42046e2e | 184 | int err = -ENOENT; |
2e4e6a17 | 185 | |
7926dbfa | 186 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
187 | list_for_each_entry(m, &xt[af].match, list) { |
188 | if (strcmp(m->name, name) == 0) { | |
189 | if (m->revision == revision) { | |
190 | if (try_module_get(m->me)) { | |
9e19bb6d | 191 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
192 | return m; |
193 | } | |
194 | } else | |
195 | err = -EPROTOTYPE; /* Found something. */ | |
196 | } | |
197 | } | |
9e19bb6d | 198 | mutex_unlock(&xt[af].mutex); |
55b69e91 JE |
199 | |
200 | if (af != NFPROTO_UNSPEC) | |
201 | /* Try searching again in the family-independent list */ | |
202 | return xt_find_match(NFPROTO_UNSPEC, name, revision); | |
203 | ||
2e4e6a17 HW |
204 | return ERR_PTR(err); |
205 | } | |
206 | EXPORT_SYMBOL(xt_find_match); | |
207 | ||
fd0ec0e6 JE |
208 | struct xt_match * |
209 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) | |
210 | { | |
211 | struct xt_match *match; | |
212 | ||
adb00ae2 SH |
213 | match = xt_find_match(nfproto, name, revision); |
214 | if (IS_ERR(match)) { | |
215 | request_module("%st_%s", xt_prefix[nfproto], name); | |
216 | match = xt_find_match(nfproto, name, revision); | |
217 | } | |
218 | ||
219 | return match; | |
fd0ec0e6 JE |
220 | } |
221 | EXPORT_SYMBOL_GPL(xt_request_find_match); | |
222 | ||
2e4e6a17 | 223 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ |
76108cea | 224 | struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
225 | { |
226 | struct xt_target *t; | |
42046e2e | 227 | int err = -ENOENT; |
2e4e6a17 | 228 | |
7926dbfa | 229 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
230 | list_for_each_entry(t, &xt[af].target, list) { |
231 | if (strcmp(t->name, name) == 0) { | |
232 | if (t->revision == revision) { | |
233 | if (try_module_get(t->me)) { | |
9e19bb6d | 234 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
235 | return t; |
236 | } | |
237 | } else | |
238 | err = -EPROTOTYPE; /* Found something. */ | |
239 | } | |
240 | } | |
9e19bb6d | 241 | mutex_unlock(&xt[af].mutex); |
55b69e91 JE |
242 | |
243 | if (af != NFPROTO_UNSPEC) | |
244 | /* Try searching again in the family-independent list */ | |
245 | return xt_find_target(NFPROTO_UNSPEC, name, revision); | |
246 | ||
2e4e6a17 HW |
247 | return ERR_PTR(err); |
248 | } | |
249 | EXPORT_SYMBOL(xt_find_target); | |
250 | ||
76108cea | 251 | struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
252 | { |
253 | struct xt_target *target; | |
254 | ||
adb00ae2 SH |
255 | target = xt_find_target(af, name, revision); |
256 | if (IS_ERR(target)) { | |
257 | request_module("%st_%s", xt_prefix[af], name); | |
258 | target = xt_find_target(af, name, revision); | |
259 | } | |
260 | ||
261 | return target; | |
2e4e6a17 HW |
262 | } |
263 | EXPORT_SYMBOL_GPL(xt_request_find_target); | |
264 | ||
f32815d2 WB |
265 | |
266 | static int xt_obj_to_user(u16 __user *psize, u16 size, | |
267 | void __user *pname, const char *name, | |
268 | u8 __user *prev, u8 rev) | |
269 | { | |
270 | if (put_user(size, psize)) | |
271 | return -EFAULT; | |
272 | if (copy_to_user(pname, name, strlen(name) + 1)) | |
273 | return -EFAULT; | |
274 | if (put_user(rev, prev)) | |
275 | return -EFAULT; | |
276 | ||
277 | return 0; | |
278 | } | |
279 | ||
280 | #define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \ | |
281 | xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \ | |
282 | U->u.user.name, K->u.kernel.TYPE->name, \ | |
283 | &U->u.user.revision, K->u.kernel.TYPE->revision) | |
284 | ||
285 | int xt_data_to_user(void __user *dst, const void *src, | |
324318f0 | 286 | int usersize, int size, int aligned_size) |
f32815d2 WB |
287 | { |
288 | usersize = usersize ? : size; | |
289 | if (copy_to_user(dst, src, usersize)) | |
290 | return -EFAULT; | |
324318f0 WB |
291 | if (usersize != aligned_size && |
292 | clear_user(dst + usersize, aligned_size - usersize)) | |
f32815d2 WB |
293 | return -EFAULT; |
294 | ||
295 | return 0; | |
296 | } | |
297 | EXPORT_SYMBOL_GPL(xt_data_to_user); | |
298 | ||
/* Helper: copy a native match/target payload to userspace, padding to
 * XT_ALIGN of the extension's declared size.
 */
#define XT_DATA_TO_USER(U, K, TYPE)					\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			K->u.kernel.TYPE->TYPE##size,			\
			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
f32815d2 WB |
304 | |
305 | int xt_match_to_user(const struct xt_entry_match *m, | |
306 | struct xt_entry_match __user *u) | |
307 | { | |
308 | return XT_OBJ_TO_USER(u, m, match, 0) || | |
751a9c76 | 309 | XT_DATA_TO_USER(u, m, match); |
f32815d2 WB |
310 | } |
311 | EXPORT_SYMBOL_GPL(xt_match_to_user); | |
312 | ||
313 | int xt_target_to_user(const struct xt_entry_target *t, | |
314 | struct xt_entry_target __user *u) | |
315 | { | |
316 | return XT_OBJ_TO_USER(u, t, target, 0) || | |
751a9c76 | 317 | XT_DATA_TO_USER(u, t, target); |
f32815d2 WB |
318 | } |
319 | EXPORT_SYMBOL_GPL(xt_target_to_user); | |
320 | ||
76108cea | 321 | static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 322 | { |
5452e425 | 323 | const struct xt_match *m; |
2e4e6a17 HW |
324 | int have_rev = 0; |
325 | ||
326 | list_for_each_entry(m, &xt[af].match, list) { | |
327 | if (strcmp(m->name, name) == 0) { | |
328 | if (m->revision > *bestp) | |
329 | *bestp = m->revision; | |
330 | if (m->revision == revision) | |
331 | have_rev = 1; | |
332 | } | |
333 | } | |
656caff2 PM |
334 | |
335 | if (af != NFPROTO_UNSPEC && !have_rev) | |
336 | return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
337 | ||
2e4e6a17 HW |
338 | return have_rev; |
339 | } | |
340 | ||
76108cea | 341 | static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 342 | { |
5452e425 | 343 | const struct xt_target *t; |
2e4e6a17 HW |
344 | int have_rev = 0; |
345 | ||
346 | list_for_each_entry(t, &xt[af].target, list) { | |
347 | if (strcmp(t->name, name) == 0) { | |
348 | if (t->revision > *bestp) | |
349 | *bestp = t->revision; | |
350 | if (t->revision == revision) | |
351 | have_rev = 1; | |
352 | } | |
353 | } | |
656caff2 PM |
354 | |
355 | if (af != NFPROTO_UNSPEC && !have_rev) | |
356 | return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
357 | ||
2e4e6a17 HW |
358 | return have_rev; |
359 | } | |
360 | ||
361 | /* Returns true or false (if no such extension at all) */ | |
76108cea | 362 | int xt_find_revision(u8 af, const char *name, u8 revision, int target, |
2e4e6a17 HW |
363 | int *err) |
364 | { | |
365 | int have_rev, best = -1; | |
366 | ||
7926dbfa | 367 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
368 | if (target == 1) |
369 | have_rev = target_revfn(af, name, revision, &best); | |
370 | else | |
371 | have_rev = match_revfn(af, name, revision, &best); | |
9e19bb6d | 372 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
373 | |
374 | /* Nothing at all? Return 0 to try loading module. */ | |
375 | if (best == -1) { | |
376 | *err = -ENOENT; | |
377 | return 0; | |
378 | } | |
379 | ||
380 | *err = best; | |
381 | if (!have_rev) | |
382 | *err = -EPROTONOSUPPORT; | |
383 | return 1; | |
384 | } | |
385 | EXPORT_SYMBOL_GPL(xt_find_revision); | |
386 | ||
5b76c494 JE |
387 | static char * |
388 | textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) | |
45185364 | 389 | { |
5b76c494 | 390 | static const char *const inetbr_names[] = { |
45185364 JE |
391 | "PREROUTING", "INPUT", "FORWARD", |
392 | "OUTPUT", "POSTROUTING", "BROUTING", | |
393 | }; | |
5b76c494 JE |
394 | static const char *const arp_names[] = { |
395 | "INPUT", "FORWARD", "OUTPUT", | |
396 | }; | |
397 | const char *const *names; | |
398 | unsigned int i, max; | |
45185364 JE |
399 | char *p = buf; |
400 | bool np = false; | |
401 | int res; | |
402 | ||
5b76c494 JE |
403 | names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names; |
404 | max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) : | |
405 | ARRAY_SIZE(inetbr_names); | |
45185364 | 406 | *p = '\0'; |
5b76c494 | 407 | for (i = 0; i < max; ++i) { |
45185364 JE |
408 | if (!(mask & (1 << i))) |
409 | continue; | |
410 | res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); | |
411 | if (res > 0) { | |
412 | size -= res; | |
413 | p += res; | |
414 | } | |
415 | np = true; | |
416 | } | |
417 | ||
418 | return buf; | |
419 | } | |
420 | ||
916a917d | 421 | int xt_check_match(struct xt_mtchk_param *par, |
9b4fce7a | 422 | unsigned int size, u_int8_t proto, bool inv_proto) |
37f9f733 | 423 | { |
bd414ee6 JE |
424 | int ret; |
425 | ||
9b4fce7a JE |
426 | if (XT_ALIGN(par->match->matchsize) != size && |
427 | par->match->matchsize != -1) { | |
043ef46c JE |
428 | /* |
429 | * ebt_among is exempt from centralized matchsize checking | |
430 | * because it uses a dynamic-size data set. | |
431 | */ | |
b402405d JE |
432 | pr_err("%s_tables: %s.%u match: invalid size " |
433 | "%u (kernel) != (user) %u\n", | |
916a917d | 434 | xt_prefix[par->family], par->match->name, |
b402405d | 435 | par->match->revision, |
9b4fce7a | 436 | XT_ALIGN(par->match->matchsize), size); |
37f9f733 PM |
437 | return -EINVAL; |
438 | } | |
9b4fce7a JE |
439 | if (par->match->table != NULL && |
440 | strcmp(par->match->table, par->table) != 0) { | |
3dd5d7e3 | 441 | pr_err("%s_tables: %s match: only valid in %s table, not %s\n", |
916a917d | 442 | xt_prefix[par->family], par->match->name, |
9b4fce7a | 443 | par->match->table, par->table); |
37f9f733 PM |
444 | return -EINVAL; |
445 | } | |
9b4fce7a | 446 | if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { |
45185364 JE |
447 | char used[64], allow[64]; |
448 | ||
3dd5d7e3 | 449 | pr_err("%s_tables: %s match: used from hooks %s, but only " |
45185364 | 450 | "valid from %s\n", |
916a917d | 451 | xt_prefix[par->family], par->match->name, |
5b76c494 JE |
452 | textify_hooks(used, sizeof(used), par->hook_mask, |
453 | par->family), | |
454 | textify_hooks(allow, sizeof(allow), par->match->hooks, | |
455 | par->family)); | |
37f9f733 PM |
456 | return -EINVAL; |
457 | } | |
9b4fce7a | 458 | if (par->match->proto && (par->match->proto != proto || inv_proto)) { |
3dd5d7e3 | 459 | pr_err("%s_tables: %s match: only valid for protocol %u\n", |
916a917d JE |
460 | xt_prefix[par->family], par->match->name, |
461 | par->match->proto); | |
37f9f733 PM |
462 | return -EINVAL; |
463 | } | |
bd414ee6 JE |
464 | if (par->match->checkentry != NULL) { |
465 | ret = par->match->checkentry(par); | |
466 | if (ret < 0) | |
467 | return ret; | |
468 | else if (ret > 0) | |
469 | /* Flag up potential errors. */ | |
470 | return -EIO; | |
471 | } | |
37f9f733 PM |
472 | return 0; |
473 | } | |
474 | EXPORT_SYMBOL_GPL(xt_check_match); | |
475 | ||
13631bfc FW |
476 | /** xt_check_entry_match - check that matches end before start of target |
477 | * | |
478 | * @match: beginning of xt_entry_match | |
479 | * @target: beginning of this rules target (alleged end of matches) | |
480 | * @alignment: alignment requirement of match structures | |
481 | * | |
482 | * Validates that all matches add up to the beginning of the target, | |
483 | * and that each match covers at least the base structure size. | |
484 | * | |
485 | * Return: 0 on success, negative errno on failure. | |
486 | */ | |
487 | static int xt_check_entry_match(const char *match, const char *target, | |
488 | const size_t alignment) | |
489 | { | |
490 | const struct xt_entry_match *pos; | |
491 | int length = target - match; | |
492 | ||
493 | if (length == 0) /* no matches */ | |
494 | return 0; | |
495 | ||
496 | pos = (struct xt_entry_match *)match; | |
497 | do { | |
498 | if ((unsigned long)pos % alignment) | |
499 | return -EINVAL; | |
500 | ||
501 | if (length < (int)sizeof(struct xt_entry_match)) | |
502 | return -EINVAL; | |
503 | ||
504 | if (pos->u.match_size < sizeof(struct xt_entry_match)) | |
505 | return -EINVAL; | |
506 | ||
507 | if (pos->u.match_size > length) | |
508 | return -EINVAL; | |
509 | ||
510 | length -= pos->u.match_size; | |
511 | pos = ((void *)((char *)(pos) + (pos)->u.match_size)); | |
512 | } while (length > 0); | |
513 | ||
514 | return 0; | |
515 | } | |
516 | ||
2722971c | 517 | #ifdef CONFIG_COMPAT |
255d0dc3 | 518 | int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) |
b386d9f5 | 519 | { |
255d0dc3 | 520 | struct xt_af *xp = &xt[af]; |
b386d9f5 | 521 | |
255d0dc3 ED |
522 | if (!xp->compat_tab) { |
523 | if (!xp->number) | |
524 | return -EINVAL; | |
525 | xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number); | |
526 | if (!xp->compat_tab) | |
527 | return -ENOMEM; | |
528 | xp->cur = 0; | |
529 | } | |
b386d9f5 | 530 | |
255d0dc3 ED |
531 | if (xp->cur >= xp->number) |
532 | return -EINVAL; | |
b386d9f5 | 533 | |
255d0dc3 ED |
534 | if (xp->cur) |
535 | delta += xp->compat_tab[xp->cur - 1].delta; | |
536 | xp->compat_tab[xp->cur].offset = offset; | |
537 | xp->compat_tab[xp->cur].delta = delta; | |
538 | xp->cur++; | |
b386d9f5 PM |
539 | return 0; |
540 | } | |
541 | EXPORT_SYMBOL_GPL(xt_compat_add_offset); | |
542 | ||
76108cea | 543 | void xt_compat_flush_offsets(u_int8_t af) |
b386d9f5 | 544 | { |
255d0dc3 ED |
545 | if (xt[af].compat_tab) { |
546 | vfree(xt[af].compat_tab); | |
547 | xt[af].compat_tab = NULL; | |
548 | xt[af].number = 0; | |
5a6351ee | 549 | xt[af].cur = 0; |
b386d9f5 PM |
550 | } |
551 | } | |
552 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | |
553 | ||
3e5e524f | 554 | int xt_compat_calc_jump(u_int8_t af, unsigned int offset) |
b386d9f5 | 555 | { |
255d0dc3 ED |
556 | struct compat_delta *tmp = xt[af].compat_tab; |
557 | int mid, left = 0, right = xt[af].cur - 1; | |
558 | ||
559 | while (left <= right) { | |
560 | mid = (left + right) >> 1; | |
561 | if (offset > tmp[mid].offset) | |
562 | left = mid + 1; | |
563 | else if (offset < tmp[mid].offset) | |
564 | right = mid - 1; | |
565 | else | |
566 | return mid ? tmp[mid - 1].delta : 0; | |
567 | } | |
5a6351ee | 568 | return left ? tmp[left - 1].delta : 0; |
b386d9f5 PM |
569 | } |
570 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); | |
571 | ||
255d0dc3 ED |
572 | void xt_compat_init_offsets(u_int8_t af, unsigned int number) |
573 | { | |
574 | xt[af].number = number; | |
575 | xt[af].cur = 0; | |
576 | } | |
577 | EXPORT_SYMBOL(xt_compat_init_offsets); | |
578 | ||
5452e425 | 579 | int xt_compat_match_offset(const struct xt_match *match) |
2722971c | 580 | { |
9fa492cd PM |
581 | u_int16_t csize = match->compatsize ? : match->matchsize; |
582 | return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); | |
583 | } | |
584 | EXPORT_SYMBOL_GPL(xt_compat_match_offset); | |
585 | ||
0188346f FW |
586 | void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, |
587 | unsigned int *size) | |
9fa492cd | 588 | { |
5452e425 | 589 | const struct xt_match *match = m->u.kernel.match; |
9fa492cd PM |
590 | struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; |
591 | int pad, off = xt_compat_match_offset(match); | |
592 | u_int16_t msize = cm->u.user.match_size; | |
09d96860 | 593 | char name[sizeof(m->u.user.name)]; |
9fa492cd PM |
594 | |
595 | m = *dstptr; | |
596 | memcpy(m, cm, sizeof(*cm)); | |
597 | if (match->compat_from_user) | |
598 | match->compat_from_user(m->data, cm->data); | |
599 | else | |
600 | memcpy(m->data, cm->data, msize - sizeof(*cm)); | |
601 | pad = XT_ALIGN(match->matchsize) - match->matchsize; | |
602 | if (pad > 0) | |
603 | memset(m->data + match->matchsize, 0, pad); | |
604 | ||
605 | msize += off; | |
606 | m->u.user.match_size = msize; | |
09d96860 FW |
607 | strlcpy(name, match->name, sizeof(name)); |
608 | module_put(match->me); | |
609 | strncpy(m->u.user.name, name, sizeof(m->u.user.name)); | |
9fa492cd PM |
610 | |
611 | *size += off; | |
612 | *dstptr += msize; | |
613 | } | |
614 | EXPORT_SYMBOL_GPL(xt_compat_match_from_user); | |
615 | ||
/* Helper: copy a match/target payload to a compat user buffer of
 * explicit size C_SIZE, padded to COMPAT_XT_ALIGN.
 */
#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE,						\
			COMPAT_XT_ALIGN(C_SIZE))
739674fb JE |
622 | int xt_compat_match_to_user(const struct xt_entry_match *m, |
623 | void __user **dstptr, unsigned int *size) | |
9fa492cd | 624 | { |
5452e425 | 625 | const struct xt_match *match = m->u.kernel.match; |
9fa492cd PM |
626 | struct compat_xt_entry_match __user *cm = *dstptr; |
627 | int off = xt_compat_match_offset(match); | |
628 | u_int16_t msize = m->u.user.match_size - off; | |
629 | ||
4915f7bb | 630 | if (XT_OBJ_TO_USER(cm, m, match, msize)) |
601e68e1 | 631 | return -EFAULT; |
9fa492cd PM |
632 | |
633 | if (match->compat_to_user) { | |
634 | if (match->compat_to_user((void __user *)cm->data, m->data)) | |
635 | return -EFAULT; | |
636 | } else { | |
751a9c76 | 637 | if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm))) |
9fa492cd | 638 | return -EFAULT; |
2722971c | 639 | } |
9fa492cd PM |
640 | |
641 | *size -= off; | |
642 | *dstptr += msize; | |
643 | return 0; | |
2722971c | 644 | } |
9fa492cd | 645 | EXPORT_SYMBOL_GPL(xt_compat_match_to_user); |
fc1221b3 | 646 | |
7ed2abdd FW |
647 | /* non-compat version may have padding after verdict */ |
648 | struct compat_xt_standard_target { | |
649 | struct compat_xt_entry_target t; | |
650 | compat_uint_t verdict; | |
651 | }; | |
652 | ||
ce683e5f | 653 | int xt_compat_check_entry_offsets(const void *base, const char *elems, |
fc1221b3 FW |
654 | unsigned int target_offset, |
655 | unsigned int next_offset) | |
656 | { | |
ce683e5f | 657 | long size_of_base_struct = elems - (const char *)base; |
fc1221b3 FW |
658 | const struct compat_xt_entry_target *t; |
659 | const char *e = base; | |
660 | ||
ce683e5f FW |
661 | if (target_offset < size_of_base_struct) |
662 | return -EINVAL; | |
663 | ||
fc1221b3 FW |
664 | if (target_offset + sizeof(*t) > next_offset) |
665 | return -EINVAL; | |
666 | ||
667 | t = (void *)(e + target_offset); | |
668 | if (t->u.target_size < sizeof(*t)) | |
669 | return -EINVAL; | |
670 | ||
671 | if (target_offset + t->u.target_size > next_offset) | |
672 | return -EINVAL; | |
673 | ||
7ed2abdd | 674 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && |
7b7eba0f | 675 | COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset) |
7ed2abdd FW |
676 | return -EINVAL; |
677 | ||
550116d2 | 678 | /* compat_xt_entry match has less strict alignment requirements, |
13631bfc FW |
679 | * otherwise they are identical. In case of padding differences |
680 | * we need to add compat version of xt_check_entry_match. | |
681 | */ | |
682 | BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match)); | |
683 | ||
684 | return xt_check_entry_match(elems, base + target_offset, | |
685 | __alignof__(struct compat_xt_entry_match)); | |
fc1221b3 FW |
686 | } |
687 | EXPORT_SYMBOL(xt_compat_check_entry_offsets); | |
9fa492cd | 688 | #endif /* CONFIG_COMPAT */ |
2722971c | 689 | |
7d35812c FW |
690 | /** |
691 | * xt_check_entry_offsets - validate arp/ip/ip6t_entry | |
692 | * | |
693 | * @base: pointer to arp/ip/ip6t_entry | |
ce683e5f | 694 | * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems |
7d35812c FW |
695 | * @target_offset: the arp/ip/ip6_t->target_offset |
696 | * @next_offset: the arp/ip/ip6_t->next_offset | |
697 | * | |
13631bfc FW |
698 | * validates that target_offset and next_offset are sane and that all |
699 | * match sizes (if any) align with the target offset. | |
7d35812c | 700 | * |
ce683e5f | 701 | * This function does not validate the targets or matches themselves, it |
13631bfc FW |
702 | * only tests that all the offsets and sizes are correct, that all |
703 | * match structures are aligned, and that the last structure ends where | |
704 | * the target structure begins. | |
705 | * | |
706 | * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. | |
ce683e5f | 707 | * |
7d35812c FW |
708 | * The arp/ip/ip6t_entry structure @base must have passed following tests: |
709 | * - it must point to a valid memory location | |
710 | * - base to base + next_offset must be accessible, i.e. not exceed allocated | |
711 | * length. | |
712 | * | |
13631bfc FW |
713 | * A well-formed entry looks like this: |
714 | * | |
715 | * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry | |
716 | * e->elems[]-----' | | | |
717 | * matchsize | | | |
718 | * matchsize | | | |
719 | * | | | |
720 | * target_offset---------------------------------' | | |
721 | * next_offset---------------------------------------------------' | |
722 | * | |
723 | * elems[]: flexible array member at end of ip(6)/arpt_entry struct. | |
724 | * This is where matches (if any) and the target reside. | |
725 | * target_offset: beginning of target. | |
726 | * next_offset: start of the next rule; also: size of this rule. | |
727 | * Since targets have a minimum size, target_offset + minlen <= next_offset. | |
728 | * | |
729 | * Every match stores its size, sum of sizes must not exceed target_offset. | |
730 | * | |
7d35812c FW |
731 | * Return: 0 on success, negative errno on failure. |
732 | */ | |
733 | int xt_check_entry_offsets(const void *base, | |
ce683e5f | 734 | const char *elems, |
7d35812c FW |
735 | unsigned int target_offset, |
736 | unsigned int next_offset) | |
737 | { | |
ce683e5f | 738 | long size_of_base_struct = elems - (const char *)base; |
7d35812c FW |
739 | const struct xt_entry_target *t; |
740 | const char *e = base; | |
741 | ||
ce683e5f FW |
742 | /* target start is within the ip/ip6/arpt_entry struct */ |
743 | if (target_offset < size_of_base_struct) | |
744 | return -EINVAL; | |
745 | ||
7d35812c FW |
746 | if (target_offset + sizeof(*t) > next_offset) |
747 | return -EINVAL; | |
748 | ||
749 | t = (void *)(e + target_offset); | |
a08e4e19 FW |
750 | if (t->u.target_size < sizeof(*t)) |
751 | return -EINVAL; | |
752 | ||
7d35812c FW |
753 | if (target_offset + t->u.target_size > next_offset) |
754 | return -EINVAL; | |
755 | ||
7ed2abdd | 756 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && |
7b7eba0f | 757 | XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset) |
7ed2abdd FW |
758 | return -EINVAL; |
759 | ||
13631bfc FW |
760 | return xt_check_entry_match(elems, base + target_offset, |
761 | __alignof__(struct xt_entry_match)); | |
7d35812c FW |
762 | } |
763 | EXPORT_SYMBOL(xt_check_entry_offsets); | |
764 | ||
f4dc7771 FW |
765 | /** |
766 | * xt_alloc_entry_offsets - allocate array to store rule head offsets | |
767 | * | |
768 | * @size: number of entries | |
769 | * | |
770 | * Return: NULL or kmalloc'd or vmalloc'd array | |
771 | */ | |
772 | unsigned int *xt_alloc_entry_offsets(unsigned int size) | |
773 | { | |
752ade68 | 774 | return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO); |
f4dc7771 | 775 | |
f4dc7771 FW |
776 | } |
777 | EXPORT_SYMBOL(xt_alloc_entry_offsets); | |
778 | ||
/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offset
 *
 * Binary search; @offsets must be sorted ascending.
 * Return: true iff @target is one of the recorded offsets.
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
805 | ||
/*
 * xt_check_target - run sanity checks on a target before use
 * @par:       target check parameters (family, table, hook mask, target)
 * @size:      size of the target payload as supplied by userspace
 * @proto:     protocol the rule matches on
 * @inv_proto: true if the rule's protocol match is inverted
 *
 * Validates payload size, table restriction, hook restriction and protocol
 * restriction, then runs the target's own checkentry() callback.
 * Returns 0 on success or a negative errno.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	/* Userspace and kernel must agree on the (aligned) payload size. */
	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	/* Some targets declare themselves valid only in one table. */
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	/* Reject use from any hook the target did not declare as usable. */
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	/* Protocol-restricted targets refuse other protocols and inversion. */
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	/* Finally let the target validate its own private data. */
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
855 | ||
/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		/* must have room for the header plus at least one counter */
		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		/* strlcpy guarantees NUL termination of the copied name */
		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		/* name came straight from userspace; force NUL termination */
		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	/* u64 arithmetic so num_counters * sizeof() cannot wrap; the
	 * remaining length must match the claimed counter count exactly.
	 */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
929 | ||
#ifdef CONFIG_COMPAT
/* Size delta between the native and the 32bit-compat target layout. */
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

/* Translate a compat (32bit) target blob at @t into native layout at
 * *dstptr, advancing *dstptr and growing *size by the layout delta. */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	/* zero the alignment padding so no uninitialized kernel memory can
	 * later be copied back to userspace */
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	/* save the name to a local buffer before module_put(): the target
	 * (and its name string) must not be dereferenced afterwards */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

/* Convert a native target blob back to compat layout for a 32bit reader. */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
993 | ||
/* Allocate a struct xt_table_info with @size bytes of rule space appended.
 * Returns NULL on size_t overflow, implausibly large requests, or OOM. */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	/* overflow check: sz wrapped past sizeof(*info) */
	if (sz < sizeof(*info))
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	/* kvmalloc tries kmalloc first, falls back to vmalloc */
	info = kvmalloc(sz, GFP_KERNEL);
	if (!info)
		return NULL;

	/* only the header needs zeroing; the caller fills the rule area */
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
1015 | ||
1016 | void xt_free_table_info(struct xt_table_info *info) | |
1017 | { | |
1018 | int cpu; | |
1019 | ||
f3c5c1bf | 1020 | if (info->jumpstack != NULL) { |
f6b50824 ED |
1021 | for_each_possible_cpu(cpu) |
1022 | kvfree(info->jumpstack[cpu]); | |
1023 | kvfree(info->jumpstack); | |
f3c5c1bf JE |
1024 | } |
1025 | ||
711bdde6 | 1026 | kvfree(info); |
2e4e6a17 HW |
1027 | } |
1028 | EXPORT_SYMBOL(xt_free_table_info); | |
1029 | ||
/* Find table by name, grabs mutex & ref. Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	/* fast path: the table is already registered in this netns;
	 * return with the per-family mutex still held */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, re-try init */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me)) {
			mutex_unlock(&xt[af].mutex);
			return NULL;
		}

		/* drop the mutex: table_init() registers the per-netns
		 * table and takes this mutex itself */
		mutex_unlock(&xt[af].mutex);
		if (t->table_init(net) != 0) {
			module_put(t->me);
			return NULL;
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again: table_init() above should have added it here;
	 * the module ref taken on 'found' is carried by the returned table */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;

	module_put(found->me);
out:
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
1079 | ||
1080 | void xt_table_unlock(struct xt_table *table) | |
1081 | { | |
9e19bb6d | 1082 | mutex_unlock(&xt[table->af].mutex); |
2e4e6a17 HW |
1083 | } |
1084 | EXPORT_SYMBOL_GPL(xt_table_unlock); | |
1085 | ||
#ifdef CONFIG_COMPAT
/* Serialize 32bit-compat get/setsockopt handling for one address family. */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
2e4e6a17 | 1099 | |
/* Per-cpu sequence counter; xt_replace_table() documents that rule-traversal
 * readers are resynchronized against it (see get_counters() comment there). */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

/* NOTE(review): presumably enabled by the TEE target to activate the
 * double-size jumpstack reentrancy scheme described in
 * xt_jumpstack_alloc() -- confirm against the TEE target module. */
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
1105 | ||
f3c5c1bf JE |
1106 | static int xt_jumpstack_alloc(struct xt_table_info *i) |
1107 | { | |
1108 | unsigned int size; | |
1109 | int cpu; | |
1110 | ||
f3c5c1bf JE |
1111 | size = sizeof(void **) * nr_cpu_ids; |
1112 | if (size > PAGE_SIZE) | |
752ade68 | 1113 | i->jumpstack = kvzalloc(size, GFP_KERNEL); |
f3c5c1bf | 1114 | else |
3dbd4439 | 1115 | i->jumpstack = kzalloc(size, GFP_KERNEL); |
f3c5c1bf JE |
1116 | if (i->jumpstack == NULL) |
1117 | return -ENOMEM; | |
f3c5c1bf | 1118 | |
98d1bd80 FW |
1119 | /* ruleset without jumps -- no stack needed */ |
1120 | if (i->stacksize == 0) | |
1121 | return 0; | |
1122 | ||
7814b6ec FW |
1123 | /* Jumpstack needs to be able to record two full callchains, one |
1124 | * from the first rule set traversal, plus one table reentrancy | |
1125 | * via -j TEE without clobbering the callchain that brought us to | |
1126 | * TEE target. | |
1127 | * | |
1128 | * This is done by allocating two jumpstacks per cpu, on reentry | |
1129 | * the upper half of the stack is used. | |
1130 | * | |
1131 | * see the jumpstack setup in ipt_do_table() for more details. | |
1132 | */ | |
1133 | size = sizeof(void *) * i->stacksize * 2u; | |
f3c5c1bf | 1134 | for_each_possible_cpu(cpu) { |
752ade68 MH |
1135 | i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL, |
1136 | cpu_to_node(cpu)); | |
f3c5c1bf JE |
1137 | if (i->jumpstack[cpu] == NULL) |
1138 | /* | |
1139 | * Freeing will be done later on by the callers. The | |
1140 | * chain is: xt_replace_table -> __do_replace -> | |
1141 | * do_replace -> xt_free_table_info. | |
1142 | */ | |
1143 | return -ENOMEM; | |
1144 | } | |
1145 | ||
1146 | return 0; | |
1147 | } | |
942e4a2b | 1148 | |
2e4e6a17 HW |
1149 | struct xt_table_info * |
1150 | xt_replace_table(struct xt_table *table, | |
1151 | unsigned int num_counters, | |
1152 | struct xt_table_info *newinfo, | |
1153 | int *error) | |
1154 | { | |
942e4a2b | 1155 | struct xt_table_info *private; |
f3c5c1bf | 1156 | int ret; |
2e4e6a17 | 1157 | |
d97a9e47 JE |
1158 | ret = xt_jumpstack_alloc(newinfo); |
1159 | if (ret < 0) { | |
1160 | *error = ret; | |
1161 | return NULL; | |
1162 | } | |
1163 | ||
2e4e6a17 | 1164 | /* Do the substitution. */ |
942e4a2b | 1165 | local_bh_disable(); |
2e4e6a17 | 1166 | private = table->private; |
942e4a2b | 1167 | |
2e4e6a17 HW |
1168 | /* Check inside lock: is the old number correct? */ |
1169 | if (num_counters != private->number) { | |
be91fd5e | 1170 | pr_debug("num_counters != table->private->number (%u/%u)\n", |
2e4e6a17 | 1171 | num_counters, private->number); |
942e4a2b | 1172 | local_bh_enable(); |
2e4e6a17 HW |
1173 | *error = -EAGAIN; |
1174 | return NULL; | |
1175 | } | |
2e4e6a17 | 1176 | |
942e4a2b | 1177 | newinfo->initial_entries = private->initial_entries; |
b416c144 WD |
1178 | /* |
1179 | * Ensure contents of newinfo are visible before assigning to | |
1180 | * private. | |
1181 | */ | |
1182 | smp_wmb(); | |
1183 | table->private = newinfo; | |
942e4a2b SH |
1184 | |
1185 | /* | |
1186 | * Even though table entries have now been swapped, other CPU's | |
1187 | * may still be using the old entries. This is okay, because | |
1188 | * resynchronization happens because of the locking done | |
1189 | * during the get_counters() routine. | |
1190 | */ | |
1191 | local_bh_enable(); | |
1192 | ||
fbabf31e TG |
1193 | #ifdef CONFIG_AUDIT |
1194 | if (audit_enabled) { | |
46b20c38 GT |
1195 | audit_log(current->audit_context, GFP_KERNEL, |
1196 | AUDIT_NETFILTER_CFG, | |
1197 | "table=%s family=%u entries=%u", | |
1198 | table->name, table->af, private->number); | |
fbabf31e TG |
1199 | } |
1200 | #endif | |
1201 | ||
942e4a2b | 1202 | return private; |
2e4e6a17 HW |
1203 | } |
1204 | EXPORT_SYMBOL_GPL(xt_replace_table); | |
1205 | ||
/* Register a copy of @input_table in @net. @bootstrap is a dummy private
 * so xt_replace_table() can install @newinfo through the common path.
 * Returns the registered (duplicated) table, or ERR_PTR() on failure. */
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	/* on failure xt_replace_table() stored the errno in 'ret' */
	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
1254 | ||
1255 | void *xt_unregister_table(struct xt_table *table) | |
1256 | { | |
1257 | struct xt_table_info *private; | |
1258 | ||
9e19bb6d | 1259 | mutex_lock(&xt[table->af].mutex); |
2e4e6a17 | 1260 | private = table->private; |
df0933dc | 1261 | list_del(&table->list); |
9e19bb6d | 1262 | mutex_unlock(&xt[table->af].mutex); |
44d34e72 | 1263 | kfree(table); |
2e4e6a17 HW |
1264 | |
1265 | return private; | |
1266 | } | |
1267 | EXPORT_SYMBOL_GPL(xt_unregister_table); | |
1268 | ||
1269 | #ifdef CONFIG_PROC_FS | |
/* seq_file state for the /proc/net/<prefix>_tables_names listing */
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;	/* address family whose table list we walk */
};

/* start: takes the per-family mutex; released again in ..._stop() */
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

/* one table name per line; empty names are suppressed */
static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		/* the af was stashed as the proc entry's data cookie */
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
1338 | ||
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

/* Advance one element: the NFPROTO_UNSPEC list is walked first (under its
 * mutex), then the lock is handed over to the family-specific list.
 * @is_target selects the target list over the match list. */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* UNSPEC list exhausted: swap mutex and list */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

/* Restart the traversal and fast-forward to position *pos. */
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}
1407 | ||
/* Release whichever mutex the traversal state machine currently holds. */
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}
2e4e6a17 | 1421 | |
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

/* Print one match name per line; list-head sentinels and entries with an
 * empty name produce no output. Other trav->class values fall through the
 * switch and print nothing. */
static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}
1448 | ||
static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;

	/* allocates trav as the seq_file's private data */
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	/* address family stashed as the proc entry's data cookie */
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
2e4e6a17 | 1474 | |
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

/* Print one target name per line; list-head sentinels and entries with an
 * empty name produce no output. Mirrors xt_match_seq_show(). */
static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};
1508 | ||
static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;

	/* allocates trav as the seq_file's private data */
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	/* address family stashed as the proc entry's data cookie */
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1527 | ||
1528 | #define FORMAT_TABLES "_tables_names" | |
1529 | #define FORMAT_MATCHES "_tables_matches" | |
1530 | #define FORMAT_TARGETS "_tables_targets" | |
1531 | ||
1532 | #endif /* CONFIG_PROC_FS */ | |
1533 | ||
/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table: table with metadata needed to set up hooks
 * @fn: Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 *
 * Returns ERR_PTR(-EINVAL) if the table declares no valid hooks and
 * ERR_PTR(-ENOMEM) on allocation failure; the caller frees the array.
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	/* one ops entry per set bit in valid_hooks, in ascending hook order */
	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
2b95efe7 | 1571 | |
76108cea | 1572 | int xt_proto_init(struct net *net, u_int8_t af) |
2e4e6a17 HW |
1573 | { |
1574 | #ifdef CONFIG_PROC_FS | |
1575 | char buf[XT_FUNCTION_MAXNAMELEN]; | |
1576 | struct proc_dir_entry *proc; | |
f13f2aee PW |
1577 | kuid_t root_uid; |
1578 | kgid_t root_gid; | |
2e4e6a17 HW |
1579 | #endif |
1580 | ||
7e9c6eeb | 1581 | if (af >= ARRAY_SIZE(xt_prefix)) |
2e4e6a17 HW |
1582 | return -EINVAL; |
1583 | ||
1584 | ||
1585 | #ifdef CONFIG_PROC_FS | |
f13f2aee PW |
1586 | root_uid = make_kuid(net->user_ns, 0); |
1587 | root_gid = make_kgid(net->user_ns, 0); | |
1588 | ||
ce18afe5 | 1589 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1590 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
8b169240 DL |
1591 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops, |
1592 | (void *)(unsigned long)af); | |
2e4e6a17 HW |
1593 | if (!proc) |
1594 | goto out; | |
f13f2aee PW |
1595 | if (uid_valid(root_uid) && gid_valid(root_gid)) |
1596 | proc_set_user(proc, root_uid, root_gid); | |
2e4e6a17 | 1597 | |
ce18afe5 | 1598 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1599 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
8b169240 DL |
1600 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops, |
1601 | (void *)(unsigned long)af); | |
2e4e6a17 HW |
1602 | if (!proc) |
1603 | goto out_remove_tables; | |
f13f2aee PW |
1604 | if (uid_valid(root_uid) && gid_valid(root_gid)) |
1605 | proc_set_user(proc, root_uid, root_gid); | |
2e4e6a17 | 1606 | |
ce18afe5 | 1607 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1608 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
8b169240 DL |
1609 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops, |
1610 | (void *)(unsigned long)af); | |
2e4e6a17 HW |
1611 | if (!proc) |
1612 | goto out_remove_matches; | |
f13f2aee PW |
1613 | if (uid_valid(root_uid) && gid_valid(root_gid)) |
1614 | proc_set_user(proc, root_uid, root_gid); | |
2e4e6a17 HW |
1615 | #endif |
1616 | ||
1617 | return 0; | |
1618 | ||
1619 | #ifdef CONFIG_PROC_FS | |
1620 | out_remove_matches: | |
ce18afe5 | 1621 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1622 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
ece31ffd | 1623 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1624 | |
1625 | out_remove_tables: | |
ce18afe5 | 1626 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1627 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
ece31ffd | 1628 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1629 | out: |
1630 | return -1; | |
1631 | #endif | |
1632 | } | |
1633 | EXPORT_SYMBOL_GPL(xt_proto_init); | |
1634 | ||
/* Tear down the three per-family proc entries created by xt_proto_init(). */
void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
1654 | ||
/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	/* a block must fit at least two counters, or the exhaustion
	 * check below could never trigger before overrunning it */
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	/* UP: counters are used in place, no percpu indirection */
	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		/* aligned to the block size so xt_percpu_counter_free()
		 * can recognize a block's first counter by its address */
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	/* block exhausted: force a fresh allocation on the next call */
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1698 | ||
4d31eef5 FW |
1699 | void xt_percpu_counter_free(struct xt_counters *counters) |
1700 | { | |
1701 | unsigned long pcnt = counters->pcnt; | |
1702 | ||
ae0ac0ed | 1703 | if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0) |
4d31eef5 FW |
1704 | free_percpu((void __percpu *)pcnt); |
1705 | } | |
1706 | EXPORT_SYMBOL_GPL(xt_percpu_counter_free); | |
1707 | ||
8d870052 AD |
1708 | static int __net_init xt_net_init(struct net *net) |
1709 | { | |
1710 | int i; | |
1711 | ||
7e9c6eeb | 1712 | for (i = 0; i < NFPROTO_NUMPROTO; i++) |
8d870052 AD |
1713 | INIT_LIST_HEAD(&net->xt.tables[i]); |
1714 | return 0; | |
1715 | } | |
1716 | ||
/* Pernet hooks: only .init is needed; no .exit hook is registered here,
 * so per-netns cleanup is presumably handled by table unregistration --
 * NOTE(review): confirm against the table teardown paths.
 */
static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};
2e4e6a17 HW |
1720 | |
1721 | static int __init xt_init(void) | |
1722 | { | |
942e4a2b SH |
1723 | unsigned int i; |
1724 | int rv; | |
1725 | ||
1726 | for_each_possible_cpu(i) { | |
7f5c6d4f | 1727 | seqcount_init(&per_cpu(xt_recseq, i)); |
942e4a2b | 1728 | } |
2e4e6a17 | 1729 | |
7e9c6eeb | 1730 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); |
2e4e6a17 HW |
1731 | if (!xt) |
1732 | return -ENOMEM; | |
1733 | ||
7e9c6eeb | 1734 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { |
9e19bb6d | 1735 | mutex_init(&xt[i].mutex); |
2722971c DM |
1736 | #ifdef CONFIG_COMPAT |
1737 | mutex_init(&xt[i].compat_mutex); | |
255d0dc3 | 1738 | xt[i].compat_tab = NULL; |
2722971c | 1739 | #endif |
2e4e6a17 HW |
1740 | INIT_LIST_HEAD(&xt[i].target); |
1741 | INIT_LIST_HEAD(&xt[i].match); | |
2e4e6a17 | 1742 | } |
8d870052 AD |
1743 | rv = register_pernet_subsys(&xt_net_ops); |
1744 | if (rv < 0) | |
1745 | kfree(xt); | |
1746 | return rv; | |
2e4e6a17 HW |
1747 | } |
1748 | ||
/* Module exit: undo xt_init() in reverse order -- drop the pernet ops
 * first, then free the per-family state array.
 */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);
1757 |