// SPDX-License-Identifier: GPL-2.0-only
/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define XT_PCPU_BLOCK_SIZE 4096
#define XT_MAX_TABLE_SIZE	(512 * 1024 * 1024)

struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);
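
/*
 * Usage sketch (illustrative, not part of this file): an extension module
 * typically defines an array of matches or targets and registers the whole
 * array from its module init, relying on the partial-failure unwind inside
 * xt_register_matches()/xt_register_targets() above. The "foo" names are
 * hypothetical.
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{ .name = "foo", .revision = 0, .family = NFPROTO_IPV4, ... },
 *		{ .name = "foo", .revision = 0, .family = NFPROTO_IPV6, ... },
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 */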

/*
 * These look odd, but module loading must not be done while the
 * per-family mutex is held (the freshly loaded extension will take it
 * itself when it registers), and we want a single lookup function to use.
 */

/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
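
/*
 * Illustrative caller pattern (hypothetical "foo" match, not from this
 * file): xt_request_find_match() returns an ERR_PTR() and takes a module
 * reference on success, so the caller must IS_ERR()-check the result and
 * drop the reference with module_put() once the match is no longer used.
 *
 *	struct xt_match *match;
 *
 *	match = xt_request_find_match(NFPROTO_IPV4, "foo", 0);
 *	if (IS_ERR(match))
 *		return PTR_ERR(match);
 *	...
 *	module_put(match->me);
 */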

/* Find target, grabs ref. Returns ERR_PTR() on error. */
static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);


static int xt_obj_to_user(u16 __user *psize, u16 size,
			  void __user *pname, const char *name,
			  u8 __user *prev, u8 rev)
{
	if (put_user(size, psize))
		return -EFAULT;
	if (copy_to_user(pname, name, strlen(name) + 1))
		return -EFAULT;
	if (put_user(rev, prev))
		return -EFAULT;

	return 0;
}

#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
		       U->u.user.name, K->u.kernel.TYPE->name,		\
		       &U->u.user.revision, K->u.kernel.TYPE->revision)

int xt_data_to_user(void __user *dst, const void *src,
		    int usersize, int size, int aligned_size)
{
	usersize = usersize ? : size;
	if (copy_to_user(dst, src, usersize))
		return -EFAULT;
	if (usersize != aligned_size &&
	    clear_user(dst + usersize, aligned_size - usersize))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(xt_data_to_user);

#define XT_DATA_TO_USER(U, K, TYPE)					\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			K->u.kernel.TYPE->TYPE##size,			\
			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))

int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{
	return XT_OBJ_TO_USER(u, m, match, 0) ||
	       XT_DATA_TO_USER(u, m, match);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);

int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{
	return XT_OBJ_TO_USER(u, t, target, 0) ||
	       XT_DATA_TO_USER(u, t, target);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);

	/* Nothing at all? Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
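
/*
 * Worked example (hypothetical revisions): if revisions 0 and 2 of a match
 * are registered and userspace asks for revision 1, match_revfn() sets
 * best = 2 but have_rev = 0, so xt_find_revision() returns 1 with
 * *err = -EPROTONOSUPPORT. Asking for revision 2 returns 1 with *err = 2
 * (the best available revision). An entirely unknown name returns 0 so
 * the caller can try request_module() and retry.
 */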

static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
					   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * Some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose; it checks that the name is NUL-terminated and isn't a 'special'
 * name, like "..".
 *
 * Returns a negative number on error or 0 if the name is usable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;

	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	if (strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0 ||
	    strchr(name, '/'))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);
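
/*
 * Illustrative use (hypothetical extension, not from this file): a match
 * that creates /proc entries from a user-supplied name would validate it
 * in its checkentry hook before calling into procfs:
 *
 *	static int foo_mt_check(const struct xt_mtchk_param *par)
 *	{
 *		struct foo_mtinfo *info = par->matchinfo;
 *		int ret;
 *
 *		ret = xt_check_proc_name(info->name, sizeof(info->name));
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */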

int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u16 proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->match->name,
				   par->match->revision,
				   XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
				    xt_prefix[par->family], par->match->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->match->hooks,
						  par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

/**
 * xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rule's target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}

/**
 * xt_check_table_hooks - check hook entry points are sane
 *
 * @info: xt_table_info to check
 * @valid_hooks: hook entry points that we can enter from
 *
 * Validates that the hook entry points and underflows are set up.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
{
	const char *err = "unsorted underflow";
	unsigned int i, max_uflow, max_entry;
	bool check_hooks = false;

	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));

	max_entry = 0;
	max_uflow = 0;

	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
		if (!(valid_hooks & (1 << i)))
			continue;

		if (info->hook_entry[i] == 0xFFFFFFFF)
			return -EINVAL;
		if (info->underflow[i] == 0xFFFFFFFF)
			return -EINVAL;

		if (check_hooks) {
			if (max_uflow > info->underflow[i])
				goto error;

			if (max_uflow == info->underflow[i]) {
				err = "duplicate underflow";
				goto error;
			}
			if (max_entry > info->hook_entry[i]) {
				err = "unsorted entry";
				goto error;
			}
			if (max_entry == info->hook_entry[i]) {
				err = "duplicate entry";
				goto error;
			}
		}
		max_entry = info->hook_entry[i];
		max_uflow = info->underflow[i];
		check_hooks = true;
	}

	return 0;
error:
	pr_err_ratelimited("%s at hook %d\n", err, i);
	return -EINVAL;
}
EXPORT_SYMBOL(xt_check_table_hooks);

static bool verdict_ok(int verdict)
{
	if (verdict > 0)
		return true;

	if (verdict < 0) {
		int v = -verdict - 1;

		if (verdict == XT_RETURN)
			return true;

		switch (v) {
		case NF_ACCEPT: return true;
		case NF_DROP: return true;
		case NF_QUEUE: return true;
		default:
			break;
		}

		return false;
	}

	return false;
}
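
/*
 * Verdict encoding, worked through (values per the uapi headers): a
 * positive verdict is a jump offset into the rule blob; standard verdicts
 * are stored as -NF_<verdict> - 1. So NF_DROP (0) is stored as -1,
 * NF_ACCEPT (1) as -2, NF_QUEUE (3) as -4, and XT_RETURN is defined as
 * -NF_REPEAT - 1 == -5. verdict_ok() above accepts exactly these plus any
 * positive offset; a stored verdict of 0 is always invalid.
 */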

static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
			const char *msg, unsigned int msglen)
{
	return usersize == kernsize && strnlen(msg, msglen) < msglen;
}

#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (WARN_ON(!xp->compat_tab))
		return -ENOMEM;

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
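
/*
 * Worked example of the delta bookkeeping (hypothetical numbers): suppose
 * three compat rules start at kernel offsets 0, 100 and 250, and the
 * 64-bit representation of each rule is 8 bytes larger than the 32-bit
 * one. xt_compat_add_offset() then stores cumulative deltas
 * {0:8, 100:16, 250:24}. A jump targeting kernel offset 250 calls
 * xt_compat_calc_jump(af, 250), which returns the delta accumulated
 * *before* that rule (16), so the translated jump lands exactly on the
 * rule's new start.
 */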

int xt_compat_init_offsets(u8 af, unsigned int number)
{
	size_t mem;

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
		return -EINVAL;

	if (WARN_ON(xt[af].compat_tab))
		return -EINVAL;

	mem = sizeof(struct compat_delta) * number;
	if (mem > XT_MAX_TABLE_SIZE)
		return -ENOMEM;

	xt[af].compat_tab = vmalloc(mem);
	if (!xt[af].compat_tab)
		return -ENOMEM;

	xt[af].number = number;
	xt[af].cur = 0;

	return 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE,						\
			COMPAT_XT_ALIGN(C_SIZE))

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);

/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

struct compat_xt_error_target {
	struct compat_xt_entry_target t;
	char errorname[XT_FUNCTION_MAXNAMELEN];
};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct compat_xt_standard_target *st = (const void *)t;

		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct compat_xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add a compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */

/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * Validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for the CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed the following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed the
 *   allocated length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size; the sum of sizes must not exceed
 * target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct xt_standard_target *st = (const void *)t;

		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
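
/*
 * Worked example (IPv4, hypothetical sizes; sizeof(struct ipt_entry) is
 * commonly 112 bytes on 64-bit builds): a rule with a single match whose
 * XT_ALIGN'ed match_size is 48 bytes would have
 *
 *	elems         = base + sizeof(struct ipt_entry)
 *	target_offset = 112 + 48 = 160
 *	next_offset   = 160 + XT_ALIGN(target size)
 *
 * xt_check_entry_offsets() accepts the blob only if the match sizes chain
 * exactly up to target_offset and the target fits before next_offset.
 */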

/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or zeroed kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
		return NULL;

	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offsets
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
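
/*
 * Illustrative use (mirrors the translate_table loops in the
 * {ip,ip6,arp}_tables code, abbreviated): rule start offsets are recorded
 * in traversal order, so the array is already sorted ascending and the
 * binary search above applies. Each jump verdict is then validated
 * without re-walking the ruleset:
 *
 *	unsigned int *offsets = xt_alloc_entry_offsets(newinfo->number);
 *	...
 *	offsets[i++] = (void *)e - base;
 *	...
 *	if (!xt_find_jump_offset(offsets, verdict, newinfo->number))
 *		return -EINVAL;
 */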

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u16 proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->target->name,
				   par->target->revision,
				   XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
				    xt_prefix[par->family], par->target->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->target->hooks,
						  par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

/**
 * xt_copy_counters - copy counters and metadata from a sockptr_t
 *
 * @arg: src sockptr
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 *
 * Copies counter metadata from @arg and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @arg to the new memory and returns a pointer to it.
 *
 * If called from a compat syscall, @info gets converted automatically to the
 * 64bit representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters(sockptr_t arg, unsigned int len,
		       struct xt_counters_info *info)
{
	size_t offset;
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		offset = sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		offset = sizeof(*info);
	}
	info->name[sizeof(info->name) - 1] = '\0';

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters);
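
/*
 * Size arithmetic, worked through: for the native SO_SET_ADD_COUNTERS
 * path, userspace must pass exactly
 *
 *	len == sizeof(struct xt_counters_info)
 *	       + num_counters * sizeof(struct xt_counters)
 *
 * e.g. with 4 counters (struct xt_counters is two u64s, 16 bytes), len
 * must be sizeof(struct xt_counters_info) + 64; any other length makes
 * xt_copy_counters() return ERR_PTR(-EINVAL) before touching the blob.
 */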

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
		return NULL;

	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
	if (!info)
		return NULL;

	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
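
/*
 * Allocation sketch (how the table replace paths pair these helpers,
 * abbreviated): only the struct header is zeroed by xt_alloc_table_info();
 * the rule blob that follows is copied in from userspace by the caller,
 * and xt_free_table_info() releases both the blob and any jumpstacks.
 *
 *	struct xt_table_info *newinfo;
 *
 *	newinfo = xt_alloc_table_info(tmp.size);
 *	if (!newinfo)
 *		return -ENOMEM;
 *	if (copy_from_sockptr(newinfo->entries, ..., tmp.size) != 0) {
 *		xt_free_table_info(newinfo);
 *		return -EFAULT;
 *	}
 */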

/* Find table by name, grabs mutex & ref. Returns ERR_PTR on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, re-try init */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		int err;

		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me))
			goto out;
		mutex_unlock(&xt[af].mutex);
		err = t->table_init(net);
		if (err < 0) {
			module_put(t->me);
			return ERR_PTR(err);
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again: */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;

	module_put(found->me);
out:
	mutex_unlock(&xt[af].mutex);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
					    const char *name)
{
	struct xt_table *t = xt_find_table_lock(net, af, name);

#ifdef CONFIG_MODULES
	if (IS_ERR(t)) {
		int err = request_module("%stable_%s", xt_prefix[af], name);
		if (err < 0)
			return ERR_PTR(err);
		t = xt_find_table_lock(net, af, name);
	}
#endif

	return t;
}
EXPORT_SYMBOL_GPL(xt_request_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* The jumpstack needs to be able to record two full callchains: one
	 * from the first ruleset traversal, plus one table reentrancy
	 * via -j TEE, without clobbering the callchain that brought us to
	 * the TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu; on reentry
	 * the upper half of the stack is used.
	 *
	 * See the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
						  cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

struct xt_counters *xt_counters_alloc(unsigned int counters)
{
	struct xt_counters *mem;

	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
		return NULL;

	counters *= sizeof(*mem);
	if (counters > XT_MAX_TABLE_SIZE)
		return NULL;

	return vzalloc(counters);
}
EXPORT_SYMBOL(xt_counters_alloc);

struct xt_table_info
*xt_table_get_private_protected(const struct xt_table *table)
{
	return rcu_dereference_protected(table->private,
					 mutex_is_locked(&xt[table->af].mutex));
}
EXPORT_SYMBOL(xt_table_get_private_protected);

struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	private = xt_table_get_private_protected(table);

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;

	rcu_assign_pointer(table->private, newinfo);
	synchronize_rcu();

	audit_log_nfcfg(table->name, table->af, private->number,
			!private->number ? AUDIT_XT_OP_REGISTER :
					   AUDIT_XT_OP_REPLACE,
			GFP_KERNEL);
	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	rcu_assign_pointer(table->private, bootstrap);

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = xt_table_get_private_protected(table);
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = xt_table_get_private_protected(table);
	RCU_INIT_POINTER(table->private, NULL);
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	audit_log_nfcfg(table->name, table->af, private->number,
			AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

/*
 * Traversal state for ip{,6}_{tables,matches}, used to cross the
 * per-family mutexes while walking both the NFPROTO_UNSPEC and the
 * family-specific lists.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	if (ppos != NULL)
		++(*ppos);

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[nfproto].target : &xt[nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		fallthrough;
	default:
		return NULL;
	}
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
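
/*
 * Usage sketch (mirrors how table modules such as iptable_filter wire this
 * up, abbreviated): allocate the ops once with the table's traversal
 * function, then register them per net namespace; the hook count is the
 * popcount of the valid_hooks mask.
 *
 *	static struct nf_hook_ops *filter_ops __read_mostly;
 *
 *	filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
 *	if (IS_ERR(filter_ops))
 *		return PTR_ERR(filter_ops);
 *	...
 *	err = nf_register_net_hooks(net, filter_ops,
 *				    hweight32(packet_filter.valid_hooks));
 */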

int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
				    sizeof(struct seq_net_private),
				    (void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
				       &xt_match_seq_ops, sizeof(struct nf_mttg_trav),
				       (void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
				       &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
				       (void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use the xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated. Freeing any counter may free an entire block, so all
 * counters allocated using the same state must be freed at the same
 * time.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * Returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
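
/*
 * Usage sketch (this mirrors the find_check_entry()/cleanup_entry()
 * pairing in the ip(6)tables translate code, abbreviated): one
 * xt_percpu_counter_alloc_state is shared across all rules of a table so
 * counters pack into the same 4kb percpu blocks, and every counter is
 * released again on teardown.
 *
 *	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
 *
 *	for each entry e:
 *		if (!xt_percpu_counter_alloc(&alloc_state, &e->counters))
 *			return -ENOMEM;
 *	...
 *	xt_percpu_counter_free(&e->counters);
 */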

void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static void __net_exit xt_net_exit(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
	.exit = xt_net_exit,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);