/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/types.h>
12 #include <linux/proc_fs.h>
13 #include <linux/seq_file.h>
14 #include <linux/percpu.h>
15 #include <linux/security.h>
16 #include <net/net_namespace.h>
18 #include <linux/netfilter.h>
19 #include <net/netfilter/nf_conntrack_core.h>
20 #include <net/netfilter/nf_conntrack_l3proto.h>
21 #include <net/netfilter/nf_conntrack_l4proto.h>
22 #include <net/netfilter/nf_conntrack_expect.h>
23 #include <net/netfilter/nf_conntrack_acct.h>
24 #include <linux/rculist_nulls.h>
25 #include <linux/export.h>
27 struct ct_iter_state
{
28 struct seq_net_private p
;
29 struct hlist_nulls_head
*hash
;
30 unsigned int htable_size
;
34 static struct hlist_nulls_node
*ct_get_first(struct seq_file
*seq
)
36 struct ct_iter_state
*st
= seq
->private;
37 struct hlist_nulls_node
*n
;
40 st
->bucket
< st
->htable_size
;
43 hlist_nulls_first_rcu(&st
->hash
[st
->bucket
]));
50 static struct hlist_nulls_node
*ct_get_next(struct seq_file
*seq
,
51 struct hlist_nulls_node
*head
)
53 struct ct_iter_state
*st
= seq
->private;
55 head
= rcu_dereference(hlist_nulls_next_rcu(head
));
56 while (is_a_nulls(head
)) {
57 if (likely(get_nulls_value(head
) == st
->bucket
)) {
58 if (++st
->bucket
>= st
->htable_size
)
61 head
= rcu_dereference(
62 hlist_nulls_first_rcu(&st
->hash
[st
->bucket
]));
67 static struct hlist_nulls_node
*ct_get_idx(struct seq_file
*seq
, loff_t pos
)
69 struct hlist_nulls_node
*head
= ct_get_first(seq
);
72 while (pos
&& (head
= ct_get_next(seq
, head
)))
74 return pos
? NULL
: head
;
77 static void *ct_seq_start(struct seq_file
*seq
, loff_t
*pos
)
80 struct ct_iter_state
*st
= seq
->private;
84 nf_conntrack_get_ht(&st
->hash
, &st
->htable_size
);
85 return ct_get_idx(seq
, *pos
);
88 static void *ct_seq_next(struct seq_file
*s
, void *v
, loff_t
*pos
)
91 return ct_get_next(s
, v
);
94 static void ct_seq_stop(struct seq_file
*s
, void *v
)
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Emit "secctx=<context> " for @ct's secmark; silently skips the field
 * if the LSM cannot translate the secid.
 */
static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
	int ret;
	u32 len;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return;

	seq_printf(s, "secctx=%s ", secctx);

	security_release_secctx(secctx, len);
}
#else
/* No SECMARK support: emit nothing. */
static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
}
#endif
121 static bool ct_seq_should_skip(const struct nf_conn
*ct
,
122 const struct net
*net
,
123 const struct nf_conntrack_tuple_hash
*hash
)
125 /* we only want to print DIR_ORIGINAL */
126 if (NF_CT_DIRECTION(hash
))
129 if (nf_ct_l3num(ct
) != AF_INET
)
132 if (!net_eq(nf_ct_net(ct
), net
))
138 static int ct_seq_show(struct seq_file
*s
, void *v
)
140 struct nf_conntrack_tuple_hash
*hash
= v
;
141 struct nf_conn
*ct
= nf_ct_tuplehash_to_ctrack(hash
);
142 const struct nf_conntrack_l3proto
*l3proto
;
143 const struct nf_conntrack_l4proto
*l4proto
;
147 if (ct_seq_should_skip(ct
, seq_file_net(s
), hash
))
150 if (unlikely(!atomic_inc_not_zero(&ct
->ct_general
.use
)))
153 /* check if we raced w. object reuse */
154 if (!nf_ct_is_confirmed(ct
) ||
155 ct_seq_should_skip(ct
, seq_file_net(s
), hash
))
158 l3proto
= __nf_ct_l3proto_find(nf_ct_l3num(ct
));
159 NF_CT_ASSERT(l3proto
);
160 l4proto
= __nf_ct_l4proto_find(nf_ct_l3num(ct
), nf_ct_protonum(ct
));
161 NF_CT_ASSERT(l4proto
);
164 seq_printf(s
, "%-8s %u %ld ",
165 l4proto
->name
, nf_ct_protonum(ct
),
166 timer_pending(&ct
->timeout
)
167 ? (long)(ct
->timeout
.expires
- jiffies
)/HZ
: 0);
169 if (l4proto
->print_conntrack
)
170 l4proto
->print_conntrack(s
, ct
);
172 if (seq_has_overflowed(s
))
175 print_tuple(s
, &ct
->tuplehash
[IP_CT_DIR_ORIGINAL
].tuple
,
178 if (seq_has_overflowed(s
))
181 if (seq_print_acct(s
, ct
, IP_CT_DIR_ORIGINAL
))
184 if (!(test_bit(IPS_SEEN_REPLY_BIT
, &ct
->status
)))
185 seq_printf(s
, "[UNREPLIED] ");
187 print_tuple(s
, &ct
->tuplehash
[IP_CT_DIR_REPLY
].tuple
,
190 if (seq_has_overflowed(s
))
193 if (seq_print_acct(s
, ct
, IP_CT_DIR_REPLY
))
196 if (test_bit(IPS_ASSURED_BIT
, &ct
->status
))
197 seq_printf(s
, "[ASSURED] ");
199 #ifdef CONFIG_NF_CONNTRACK_MARK
200 seq_printf(s
, "mark=%u ", ct
->mark
);
203 ct_show_secctx(s
, ct
);
205 seq_printf(s
, "use=%u\n", atomic_read(&ct
->ct_general
.use
));
207 if (seq_has_overflowed(s
))
216 static const struct seq_operations ct_seq_ops
= {
217 .start
= ct_seq_start
,
223 static int ct_open(struct inode
*inode
, struct file
*file
)
225 return seq_open_net(inode
, file
, &ct_seq_ops
,
226 sizeof(struct ct_iter_state
));
229 static const struct file_operations ct_file_ops
= {
230 .owner
= THIS_MODULE
,
234 .release
= seq_release_net
,
238 struct ct_expect_iter_state
{
239 struct seq_net_private p
;
243 static struct hlist_node
*ct_expect_get_first(struct seq_file
*seq
)
245 struct ct_expect_iter_state
*st
= seq
->private;
246 struct hlist_node
*n
;
248 for (st
->bucket
= 0; st
->bucket
< nf_ct_expect_hsize
; st
->bucket
++) {
250 hlist_first_rcu(&nf_ct_expect_hash
[st
->bucket
]));
257 static struct hlist_node
*ct_expect_get_next(struct seq_file
*seq
,
258 struct hlist_node
*head
)
260 struct ct_expect_iter_state
*st
= seq
->private;
262 head
= rcu_dereference(hlist_next_rcu(head
));
263 while (head
== NULL
) {
264 if (++st
->bucket
>= nf_ct_expect_hsize
)
266 head
= rcu_dereference(
267 hlist_first_rcu(&nf_ct_expect_hash
[st
->bucket
]));
272 static struct hlist_node
*ct_expect_get_idx(struct seq_file
*seq
, loff_t pos
)
274 struct hlist_node
*head
= ct_expect_get_first(seq
);
277 while (pos
&& (head
= ct_expect_get_next(seq
, head
)))
279 return pos
? NULL
: head
;
282 static void *exp_seq_start(struct seq_file
*seq
, loff_t
*pos
)
286 return ct_expect_get_idx(seq
, *pos
);
289 static void *exp_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
292 return ct_expect_get_next(seq
, v
);
295 static void exp_seq_stop(struct seq_file
*seq
, void *v
)
301 static int exp_seq_show(struct seq_file
*s
, void *v
)
303 struct nf_conntrack_expect
*exp
;
304 const struct hlist_node
*n
= v
;
306 exp
= hlist_entry(n
, struct nf_conntrack_expect
, hnode
);
308 if (!net_eq(nf_ct_net(exp
->master
), seq_file_net(s
)))
311 if (exp
->tuple
.src
.l3num
!= AF_INET
)
314 if (exp
->timeout
.function
)
315 seq_printf(s
, "%ld ", timer_pending(&exp
->timeout
)
316 ? (long)(exp
->timeout
.expires
- jiffies
)/HZ
: 0);
320 seq_printf(s
, "proto=%u ", exp
->tuple
.dst
.protonum
);
322 print_tuple(s
, &exp
->tuple
,
323 __nf_ct_l3proto_find(exp
->tuple
.src
.l3num
),
324 __nf_ct_l4proto_find(exp
->tuple
.src
.l3num
,
325 exp
->tuple
.dst
.protonum
));
331 static const struct seq_operations exp_seq_ops
= {
332 .start
= exp_seq_start
,
333 .next
= exp_seq_next
,
334 .stop
= exp_seq_stop
,
338 static int exp_open(struct inode
*inode
, struct file
*file
)
340 return seq_open_net(inode
, file
, &exp_seq_ops
,
341 sizeof(struct ct_expect_iter_state
));
344 static const struct file_operations ip_exp_file_ops
= {
345 .owner
= THIS_MODULE
,
349 .release
= seq_release_net
,
352 static void *ct_cpu_seq_start(struct seq_file
*seq
, loff_t
*pos
)
354 struct net
*net
= seq_file_net(seq
);
358 return SEQ_START_TOKEN
;
360 for (cpu
= *pos
-1; cpu
< nr_cpu_ids
; ++cpu
) {
361 if (!cpu_possible(cpu
))
364 return per_cpu_ptr(net
->ct
.stat
, cpu
);
370 static void *ct_cpu_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
372 struct net
*net
= seq_file_net(seq
);
375 for (cpu
= *pos
; cpu
< nr_cpu_ids
; ++cpu
) {
376 if (!cpu_possible(cpu
))
379 return per_cpu_ptr(net
->ct
.stat
, cpu
);
/* seq_file ->stop: nothing to release, no lock is held for stats. */
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
389 static int ct_cpu_seq_show(struct seq_file
*seq
, void *v
)
391 struct net
*net
= seq_file_net(seq
);
392 unsigned int nr_conntracks
= atomic_read(&net
->ct
.count
);
393 const struct ip_conntrack_stat
*st
= v
;
395 if (v
== SEQ_START_TOKEN
) {
396 seq_printf(seq
, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
400 seq_printf(seq
, "%08x %08x %08x %08x %08x %08x %08x %08x "
401 "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
424 static const struct seq_operations ct_cpu_seq_ops
= {
425 .start
= ct_cpu_seq_start
,
426 .next
= ct_cpu_seq_next
,
427 .stop
= ct_cpu_seq_stop
,
428 .show
= ct_cpu_seq_show
,
431 static int ct_cpu_seq_open(struct inode
*inode
, struct file
*file
)
433 return seq_open_net(inode
, file
, &ct_cpu_seq_ops
,
434 sizeof(struct seq_net_private
));
437 static const struct file_operations ct_cpu_seq_fops
= {
438 .owner
= THIS_MODULE
,
439 .open
= ct_cpu_seq_open
,
442 .release
= seq_release_net
,
445 static int __net_init
ip_conntrack_net_init(struct net
*net
)
447 struct proc_dir_entry
*proc
, *proc_exp
, *proc_stat
;
449 proc
= proc_create("ip_conntrack", 0440, net
->proc_net
, &ct_file_ops
);
453 proc_exp
= proc_create("ip_conntrack_expect", 0440, net
->proc_net
,
458 proc_stat
= proc_create("ip_conntrack", S_IRUGO
,
459 net
->proc_net_stat
, &ct_cpu_seq_fops
);
465 remove_proc_entry("ip_conntrack_expect", net
->proc_net
);
467 remove_proc_entry("ip_conntrack", net
->proc_net
);
472 static void __net_exit
ip_conntrack_net_exit(struct net
*net
)
474 remove_proc_entry("ip_conntrack", net
->proc_net_stat
);
475 remove_proc_entry("ip_conntrack_expect", net
->proc_net
);
476 remove_proc_entry("ip_conntrack", net
->proc_net
);
479 static struct pernet_operations ip_conntrack_net_ops
= {
480 .init
= ip_conntrack_net_init
,
481 .exit
= ip_conntrack_net_exit
,
484 int __init
nf_conntrack_ipv4_compat_init(void)
486 return register_pernet_subsys(&ip_conntrack_net_ops
);
489 void __exit
nf_conntrack_ipv4_compat_fini(void)
491 unregister_pernet_subsys(&ip_conntrack_net_ops
);