net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>

struct ct_iter_state {
        unsigned int bucket;
};

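/*
 * Hash table iteration for /proc/net/ip_conntrack: walk the global
 * conntrack hash bucket by bucket under rcu_read_lock(), remembering
 * the current bucket in ct_iter_state so the seq_file core can resume.
 */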
static struct hlist_node *ct_get_first(struct seq_file *seq)
{
        struct ct_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0;
             st->bucket < nf_conntrack_htable_size;
             st->bucket++) {
                n = rcu_dereference(init_net.ct.hash[st->bucket].first);
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_get_next(struct seq_file *seq,
                                      struct hlist_node *head)
{
        struct ct_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (head == NULL) {
                if (++st->bucket >= nf_conntrack_htable_size)
                        return NULL;
                head = rcu_dereference(init_net.ct.hash[st->bucket].first);
        }
        return head;
}

static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_get_first(seq);

        if (head)
                while (pos && (head = ct_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_get_idx(seq, *pos);
}

static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_get_next(s, v);
}

static void ct_seq_stop(struct seq_file *s, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

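/*
 * Emit one conntrack entry in the legacy ip_conntrack format:
 * protocol name and number, seconds until timeout, the ORIGINAL and
 * REPLY tuples with optional accounting, the [UNREPLIED]/[ASSURED]
 * flags, mark/secmark when configured, and the reference count.
 * A failed seq_printf() means the output buffer is full, so give up
 * on this entry and return -ENOSPC.
 */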
static int ct_seq_show(struct seq_file *s, void *v)
{
        const struct nf_conntrack_tuple_hash *hash = v;
        const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;

        NF_CT_ASSERT(ct);

        /* we only want to print DIR_ORIGINAL */
        if (NF_CT_DIRECTION(hash))
                return 0;
        if (nf_ct_l3num(ct) != AF_INET)
                return 0;

        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        NF_CT_ASSERT(l4proto);

        if (seq_printf(s, "%-8s %u %ld ",
                       l4proto->name, nf_ct_protonum(ct),
                       timer_pending(&ct->timeout)
                       ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
                return -ENOSPC;

        if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
                return -ENOSPC;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                        l3proto, l4proto))
                return -ENOSPC;

        if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
                return -ENOSPC;

        if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
                if (seq_printf(s, "[UNREPLIED] "))
                        return -ENOSPC;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                        l3proto, l4proto))
                return -ENOSPC;

        if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
                return -ENOSPC;

        if (test_bit(IPS_ASSURED_BIT, &ct->status))
                if (seq_printf(s, "[ASSURED] "))
                        return -ENOSPC;

#ifdef CONFIG_NF_CONNTRACK_MARK
        if (seq_printf(s, "mark=%u ", ct->mark))
                return -ENOSPC;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
        if (seq_printf(s, "secmark=%u ", ct->secmark))
                return -ENOSPC;
#endif

        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                return -ENOSPC;

        return 0;
}

static const struct seq_operations ct_seq_ops = {
        .start = ct_seq_start,
        .next  = ct_seq_next,
        .stop  = ct_seq_stop,
        .show  = ct_seq_show
};

static int ct_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &ct_seq_ops,
                                sizeof(struct ct_iter_state));
}

static const struct file_operations ct_file_ops = {
        .owner   = THIS_MODULE,
        .open    = ct_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

/* expects */
struct ct_expect_iter_state {
        unsigned int bucket;
};

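/*
 * Expectation table iteration for /proc/net/ip_conntrack_expect:
 * same bucket-walking scheme as above, but over the per-netns
 * expectation hash (nf_ct_expect_hsize buckets) of init_net.
 */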
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = &init_net;
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = &init_net;
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

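/*
 * Emit one IPv4 expectation: remaining timeout (or "-" if no timer
 * is armed), the expected protocol number and the expected tuple.
 */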
static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *n = v;

        exp = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (exp->tuple.src.l3num != AF_INET)
                return 0;

        if (exp->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&exp->timeout)
                           ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");

        seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);

        print_tuple(s, &exp->tuple,
                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
                                         exp->tuple.dst.protonum));
        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &exp_seq_ops,
                                sizeof(struct ct_expect_iter_state));
}

static const struct file_operations ip_exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

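/*
 * /proc/net/stat/ip_conntrack: one line of per-cpu statistics for
 * each possible CPU, preceded by a header line (SEQ_START_TOKEN).
 */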
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(nf_conntrack_stat, cpu);
        }

        return NULL;
}

static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int cpu;

        for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(nf_conntrack_stat, cpu);
        }

        return NULL;
}

static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
        unsigned int nr_conntracks = atomic_read(&init_net.ct.count);
        const struct ip_conntrack_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
                return 0;
        }

        seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
                        "%08x %08x %08x %08x %08x %08x %08x %08x \n",
                   nr_conntracks,
                   st->searched,
                   st->found,
                   st->new,
                   st->invalid,
                   st->ignore,
                   st->delete,
                   st->delete_list,
                   st->insert,
                   st->insert_failed,
                   st->drop,
                   st->early_drop,
                   st->error,

                   st->expect_new,
                   st->expect_create,
                   st->expect_delete
                );
        return 0;
}

static const struct seq_operations ct_cpu_seq_ops = {
        .start = ct_cpu_seq_start,
        .next  = ct_cpu_seq_next,
        .stop  = ct_cpu_seq_stop,
        .show  = ct_cpu_seq_show,
};

static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &ct_cpu_seq_ops);
}

static const struct file_operations ct_cpu_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = ct_cpu_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

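/*
 * Register the compat files: /proc/net/ip_conntrack,
 * /proc/net/ip_conntrack_expect and /proc/net/stat/ip_conntrack.
 * All of them operate on init_net only.
 */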
int __init nf_conntrack_ipv4_compat_init(void)
{
        struct proc_dir_entry *proc, *proc_exp, *proc_stat;

        proc = proc_net_fops_create(&init_net, "ip_conntrack", 0440, &ct_file_ops);
        if (!proc)
                goto err1;

        proc_exp = proc_net_fops_create(&init_net, "ip_conntrack_expect", 0440,
                                        &ip_exp_file_ops);
        if (!proc_exp)
                goto err2;

        proc_stat = proc_create("ip_conntrack", S_IRUGO,
                                init_net.proc_net_stat, &ct_cpu_seq_fops);
        if (!proc_stat)
                goto err3;
        return 0;

err3:
        proc_net_remove(&init_net, "ip_conntrack_expect");
err2:
        proc_net_remove(&init_net, "ip_conntrack");
err1:
        return -ENOMEM;
}

void __exit nf_conntrack_ipv4_compat_fini(void)
{
        remove_proc_entry("ip_conntrack", init_net.proc_net_stat);
        proc_net_remove(&init_net, "ip_conntrack_expect");
        proc_net_remove(&init_net, "ip_conntrack");
}