/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>

MODULE_LICENSE("GPL");

#ifdef CONFIG_PROC_FS
int
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
            const struct nf_conntrack_l3proto *l3proto,
            const struct nf_conntrack_l4proto *l4proto)
{
        return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple);
}
EXPORT_SYMBOL_GPL(print_tuple);

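/*
 * /proc/net/nf_conntrack walks the per-netns conntrack hash table under
 * rcu_read_lock().  st->bucket remembers which hash chain the walk is in;
 * the nulls marker at the end of each chain encodes the bucket number, so
 * the iterator can tell whether it really finished the current bucket or
 * followed an entry that was moved to another chain and must re-read the
 * bucket head.
 */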
struct ct_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
        struct hlist_nulls_node *n;

        for (st->bucket = 0;
             st->bucket < net->ct.htable_size;
             st->bucket++) {
                n = rcu_dereference(net->ct.hash[st->bucket].first);
                if (!is_a_nulls(n))
                        return n;
        }
        return NULL;
}

static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
                                            struct hlist_nulls_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
                head = rcu_dereference(net->ct.hash[st->bucket].first);
        }
        return head;
}

static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_nulls_node *head = ct_get_first(seq);

        if (head)
                while (pos && (head = ct_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_get_idx(seq, *pos);
}

static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_get_next(s, v);
}

static void ct_seq_stop(struct seq_file *s, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

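/*
 * Emit one /proc/net/nf_conntrack line:
 *   l3proto l3num l4proto l4num timeout [l4 state] <original tuple>
 *   [counters] [UNREPLIED] <reply tuple> [counters] [ASSURED]
 *   [mark=] [secmark=] [zone=] use=
 * A reference is taken on the conntrack so it cannot be freed while it is
 * being printed; entries whose refcount already dropped to zero are skipped.
 */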
/* always returns 0: on overflow the seq_file core notices the full buffer
 * itself and retries with a larger one, so no error is reported from here */
static int ct_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_tuple_hash *hash = v;
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;
        int ret = 0;

        NF_CT_ASSERT(ct);
        if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                return 0;

        /* we only want to print DIR_ORIGINAL */
        if (NF_CT_DIRECTION(hash))
                goto release;

        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        NF_CT_ASSERT(l4proto);

        ret = -ENOSPC;
        if (seq_printf(s, "%-8s %u %-8s %u %ld ",
                       l3proto->name, nf_ct_l3num(ct),
                       l4proto->name, nf_ct_protonum(ct),
                       timer_pending(&ct->timeout)
                       ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
                goto release;

        if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
                goto release;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                        l3proto, l4proto))
                goto release;

        if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
                goto release;

        if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
                if (seq_printf(s, "[UNREPLIED] "))
                        goto release;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                        l3proto, l4proto))
                goto release;

        if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
                goto release;

        if (test_bit(IPS_ASSURED_BIT, &ct->status))
                if (seq_printf(s, "[ASSURED] "))
                        goto release;

#if defined(CONFIG_NF_CONNTRACK_MARK)
        if (seq_printf(s, "mark=%u ", ct->mark))
                goto release;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
        if (seq_printf(s, "secmark=%u ", ct->secmark))
                goto release;
#endif

#ifdef CONFIG_NF_CONNTRACK_ZONES
        if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
                goto release;
#endif

        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                goto release;

        ret = 0;
release:
        nf_ct_put(ct);
        return 0;
}

static const struct seq_operations ct_seq_ops = {
        .start = ct_seq_start,
        .next = ct_seq_next,
        .stop = ct_seq_stop,
        .show = ct_seq_show
};

static int ct_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &ct_seq_ops,
                            sizeof(struct ct_iter_state));
}

static const struct file_operations ct_file_ops = {
        .owner = THIS_MODULE,
        .open = ct_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};

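/*
 * /proc/net/stat/nf_conntrack: one row of per-cpu counters for every
 * possible CPU, preceded by a header row.  *pos is kept as (cpu + 1) so
 * that position 0 can serve as SEQ_START_TOKEN for the header.
 */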
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu + 1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }

        return NULL;
}

static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu + 1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }

        return NULL;
}

static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
        struct net *net = seq_file_net(seq);
        unsigned int nr_conntracks = atomic_read(&net->ct.count);
        const struct ip_conntrack_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
                return 0;
        }

        seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
                        "%08x %08x %08x %08x %08x %08x %08x %08x \n",
                   nr_conntracks,
                   st->searched,
                   st->found,
                   st->new,
                   st->invalid,
                   st->ignore,
                   st->delete,
                   st->delete_list,
                   st->insert,
                   st->insert_failed,
                   st->drop,
                   st->early_drop,
                   st->error,

                   st->expect_new,
                   st->expect_create,
                   st->expect_delete
                );
        return 0;
}

static const struct seq_operations ct_cpu_seq_ops = {
        .start = ct_cpu_seq_start,
        .next = ct_cpu_seq_next,
        .stop = ct_cpu_seq_stop,
        .show = ct_cpu_seq_show,
};

static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &ct_cpu_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations ct_cpu_seq_fops = {
        .owner = THIS_MODULE,
        .open = ct_cpu_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};

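/*
 * Per-netns registration of the two proc entries:
 * /proc/net/nf_conntrack (the connection dump) and
 * /proc/net/stat/nf_conntrack (the per-cpu statistics).
 */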
static int nf_conntrack_standalone_init_proc(struct net *net)
{
        struct proc_dir_entry *pde;

        pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops);
        if (!pde)
                goto out_nf_conntrack;

        pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat,
                          &ct_cpu_seq_fops);
        if (!pde)
                goto out_stat_nf_conntrack;
        return 0;

out_stat_nf_conntrack:
        proc_net_remove(net, "nf_conntrack");
out_nf_conntrack:
        return -ENOMEM;
}

static void nf_conntrack_standalone_fini_proc(struct net *net)
{
        remove_proc_entry("nf_conntrack", net->proc_net_stat);
        proc_net_remove(net, "nf_conntrack");
}
#else
static int nf_conntrack_standalone_init_proc(struct net *net)
{
        return 0;
}

static void nf_conntrack_standalone_fini_proc(struct net *net)
{
}
#endif /* CONFIG_PROC_FS */

/* Sysctl support */

#ifdef CONFIG_SYSCTL
/* Log invalid packets of a given protocol */
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;

static struct ctl_table_header *nf_ct_netfilter_header;

static ctl_table nf_ct_sysctl_table[] = {
        {
                .procname = "nf_conntrack_max",
                .data = &nf_conntrack_max,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec,
        },
        {
                .procname = "nf_conntrack_count",
                .data = &init_net.ct.count,
                .maxlen = sizeof(int),
                .mode = 0444,
                .proc_handler = proc_dointvec,
        },
        {
                .procname = "nf_conntrack_buckets",
                .data = &init_net.ct.htable_size,
                .maxlen = sizeof(unsigned int),
                .mode = 0444,
                .proc_handler = proc_dointvec,
        },
        {
                .procname = "nf_conntrack_checksum",
                .data = &init_net.ct.sysctl_checksum,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec,
        },
        {
                .procname = "nf_conntrack_log_invalid",
                .data = &init_net.ct.sysctl_log_invalid,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &log_invalid_proto_min,
                .extra2 = &log_invalid_proto_max,
        },
        {
                .procname = "nf_conntrack_expect_max",
                .data = &nf_ct_expect_max,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec,
        },
        { }
};

#define NET_NF_CONNTRACK_MAX 2089

static ctl_table nf_ct_netfilter_table[] = {
        {
                .procname = "nf_conntrack_max",
                .data = &nf_conntrack_max,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec,
        },
        { }
};

static struct ctl_path nf_ct_path[] = {
        { .procname = "net", },
        { }
};

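/*
 * nf_ct_sysctl_table above is only a template: for each namespace it is
 * duplicated with kmemdup() and the .data pointers of the per-netns
 * entries (count, buckets, checksum, log_invalid) are redirected from
 * init_net to that namespace's own fields before registration.
 */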
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
        struct ctl_table *table;

        if (net_eq(net, &init_net)) {
                nf_ct_netfilter_header =
                       register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
                if (!nf_ct_netfilter_header)
                        goto out;
        }

        table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
                        GFP_KERNEL);
        if (!table)
                goto out_kmemdup;

        table[1].data = &net->ct.count;
        table[2].data = &net->ct.htable_size;
        table[3].data = &net->ct.sysctl_checksum;
        table[4].data = &net->ct.sysctl_log_invalid;

        net->ct.sysctl_header = register_net_sysctl_table(net,
                        nf_net_netfilter_sysctl_path, table);
        if (!net->ct.sysctl_header)
                goto out_unregister_netfilter;

        return 0;

out_unregister_netfilter:
        kfree(table);
out_kmemdup:
        if (net_eq(net, &init_net))
                unregister_sysctl_table(nf_ct_netfilter_header);
out:
        printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
        return -ENOMEM;
}

static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
        struct ctl_table *table;

        if (net_eq(net, &init_net))
                unregister_sysctl_table(nf_ct_netfilter_header);
        table = net->ct.sysctl_header->ctl_table_arg;
        unregister_net_sysctl_table(net->ct.sysctl_header);
        kfree(table);
}
#else
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
        return 0;
}

static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

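/*
 * Pernet init/exit: bring the conntrack core up for the namespace first,
 * then the proc files, then sysctl (with checksum validation enabled and
 * invalid-packet logging disabled by default); teardown runs in reverse
 * order.
 */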
static int nf_conntrack_net_init(struct net *net)
{
        int ret;

        ret = nf_conntrack_init(net);
        if (ret < 0)
                goto out_init;
        ret = nf_conntrack_standalone_init_proc(net);
        if (ret < 0)
                goto out_proc;
        net->ct.sysctl_checksum = 1;
        net->ct.sysctl_log_invalid = 0;
        ret = nf_conntrack_standalone_init_sysctl(net);
        if (ret < 0)
                goto out_sysctl;
        return 0;

out_sysctl:
        nf_conntrack_standalone_fini_proc(net);
out_proc:
        nf_conntrack_cleanup(net);
out_init:
        return ret;
}

static void nf_conntrack_net_exit(struct net *net)
{
        nf_conntrack_standalone_fini_sysctl(net);
        nf_conntrack_standalone_fini_proc(net);
        nf_conntrack_cleanup(net);
}

static struct pernet_operations nf_conntrack_net_ops = {
        .init = nf_conntrack_net_init,
        .exit = nf_conntrack_net_exit,
};

static int __init nf_conntrack_standalone_init(void)
{
        return register_pernet_subsys(&nf_conntrack_net_ops);
}

static void __exit nf_conntrack_standalone_fini(void)
{
        unregister_pernet_subsys(&nf_conntrack_net_ops);
}

module_init(nf_conntrack_standalone_init);
module_exit(nf_conntrack_standalone_fini);

/* Some modules need us, but don't depend directly on any symbol.
   They should call this. */
void need_conntrack(void)
{
}
EXPORT_SYMBOL_GPL(need_conntrack);