]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - net/netfilter/nf_conntrack_expect.c
net: add moduleparam.h for users of module_param/MODULE_PARM_DESC
[mirror_ubuntu-hirsute-kernel.git] / net / netfilter / nf_conntrack_expect.c
CommitLineData
77ab9cff
MJ
1/* Expectation handling for nf_conntrack. */
2
3/* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/types.h>
13#include <linux/netfilter.h>
14#include <linux/skbuff.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/stddef.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/percpu.h>
21#include <linux/kernel.h>
a71c0855 22#include <linux/jhash.h>
d9b93842 23#include <linux/moduleparam.h>
457c4cbc 24#include <net/net_namespace.h>
77ab9cff
MJ
25
26#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_core.h>
28#include <net/netfilter/nf_conntrack_expect.h>
29#include <net/netfilter/nf_conntrack_helper.h>
30#include <net/netfilter/nf_conntrack_tuple.h>
5d0aa2cc 31#include <net/netfilter/nf_conntrack_zones.h>
77ab9cff 32
a71c0855
PM
33unsigned int nf_ct_expect_hsize __read_mostly;
34EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
35
f264a7df 36unsigned int nf_ct_expect_max __read_mostly;
a71c0855 37
e9c1b084 38static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
77ab9cff 39
bc01befd
PNA
40static HLIST_HEAD(nf_ct_userspace_expect_list);
41
77ab9cff 42/* nf_conntrack_expect helper functions */
ebbf41df
PNA
/* Unlink @exp from the per-net expectation hash and its master's list,
 * emit an IPEXP_DESTROY event and drop the hash table's reference.
 * Caller must hold nf_conntrack_lock and must have already stopped the
 * expectation timer (asserted below).
 */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 pid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	/* RCU removal: concurrent lockless readers may still see the entry
	 * until a grace period elapses; freeing goes through call_rcu(). */
	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	/* Userspace-created expectations are not accounted against the
	 * master helper's per-class quota. */
	if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
		master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
	nf_ct_expect_put(exp);	/* drop the hash-insert reference */

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
77ab9cff 64
/* Timer callback: the expectation expired without being matched.
 * Unlink it under nf_conntrack_lock and drop the timer's reference.
 */
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);	/* reference held on behalf of the timer */
}
74
a71c0855
PM
/* Hash an expectation by its destination tuple (dst address, l3/l4 proto,
 * dst port/id) into [0, nf_ct_expect_hsize).  Shares nf_conntrack_hash_rnd
 * with the conntrack hash to make the bucket choice unpredictable.
 */
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Lazily seed the shared random key on first use. */
	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	/* Multiply-shift maps the 32-bit hash uniformly onto the table size
	 * without a modulo. */
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
88
/* Lockless lookup of an expectation matching @tuple in @zone.
 * Caller must be in an RCU read-side critical section; no reference is
 * taken on the returned entry (see nf_ct_expect_find_get for that).
 * Returns NULL when no expectation matches.
 */
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	struct hlist_node *n;
	unsigned int h;

	/* Fast path: most nets carry no expectations at all. */
	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
77ab9cff
MJ
109
/* Just find a expectation corresponding to a tuple.
 * Like __nf_ct_expect_find() but takes a reference on the result; the
 * caller must balance with nf_ct_expect_put().  Returns NULL if no match
 * was found or the entry was already dying (refcount hit zero).
 */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	/* atomic_inc_not_zero() refuses entries whose last reference is
	 * already gone — they are only kept visible by RCU. */
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
77ab9cff
MJ
126
/* If an expectation for this connection is found, it gets delete from
 * global list then returned.
 * Caller holds nf_conntrack_lock.  A returned expectation carries a
 * reference for the caller: PERMANENT ones get an extra ref and stay in
 * the table, non-permanent ones are unlinked (the timer's ref transfers
 * to the caller).  Returns NULL on no match or unusable match.
 */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	struct hlist_node *n;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
		/* INACTIVE entries exist (e.g. from userspace) but must not
		 * match live traffic yet. */
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		/* Keep it in the table; hand out an additional reference. */
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		/* We won the race against expiry: unlink and hand the
		 * timer's reference to the caller. */
		nf_ct_unlink_expect(exp);
		return exp;
	}

	/* Timer already fired — the expectation is being torn down. */
	return NULL;
}
170
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;

	/* Optimization: most connection never expect any others. */
	if (!help)
		return;

	/* _safe variant: nf_ct_unlink_expect() removes entries from the
	 * very list we are walking. */
	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
		/* Only act if we beat the expiry timer; otherwise the timer
		 * callback is already tearing this entry down. */
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
77ab9cff
MJ
190
191/* Would two expected things clash? */
192static inline int expect_clash(const struct nf_conntrack_expect *a,
193 const struct nf_conntrack_expect *b)
194{
195 /* Part covered by intersection of masks must be unequal,
196 otherwise they clash */
d4156e8c 197 struct nf_conntrack_tuple_mask intersect_mask;
77ab9cff
MJ
198 int count;
199
77ab9cff 200 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
77ab9cff
MJ
201
202 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
203 intersect_mask.src.u3.all[count] =
204 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
205 }
206
77ab9cff
MJ
207 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
208}
209
210static inline int expect_matches(const struct nf_conntrack_expect *a,
211 const struct nf_conntrack_expect *b)
212{
f64f9e71
JP
213 return a->master == b->master && a->class == b->class &&
214 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
5d0aa2cc
PM
215 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
216 nf_ct_zone(a->master) == nf_ct_zone(b->master);
77ab9cff
MJ
217}
218
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	/* Only unlink if we beat the expiry timer; if del_timer() fails the
	 * timer callback owns the teardown. */
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);	/* drop the timer's reference */
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
77ab9cff
MJ
230
231/* We don't increase the master conntrack refcount for non-fulfilled
232 * conntracks. During the conntrack destruction, the expectations are
233 * always killed before the conntrack itself */
6823645d 234struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
77ab9cff
MJ
235{
236 struct nf_conntrack_expect *new;
237
6823645d 238 new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
77ab9cff
MJ
239 if (!new)
240 return NULL;
241
242 new->master = me;
243 atomic_set(&new->use, 1);
244 return new;
245}
6823645d 246EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
77ab9cff 247
/* Fill in an expectation allocated by nf_ct_expect_alloc().
 * @class selects the helper's expect policy slot; @family is AF_INET or
 * AF_INET6 (address length 4 vs 16 bytes); @saddr/@src may be NULL to
 * wildcard the source address/port.  @daddr and @dst are mandatory.
 */
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;	/* meaningful byte length of an address for @family */

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		/* Exact-match mask over the significant address bytes. */
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		/* Wildcard source address: zero tuple and zero mask. */
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		/* Wildcard source port/id. */
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
d6a9b650 300
7d0742da
PM
/* RCU callback: actually free an expectation after the grace period, so
 * lockless readers from __nf_ct_expect_find() can never touch freed memory.
 */
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}
308
/* Drop one reference on @exp; the last put defers the actual free to RCU
 * (see nf_ct_expect_free_rcu).
 */
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
77ab9cff 315
/* Insert a fully initialised expectation into the per-net hash and the
 * master's (or userspace) list, and arm its expiry timer.
 * Caller holds nf_conntrack_lock (required for the expect policy
 * dereference below).
 */
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);
	const struct nf_conntrack_expect_policy *p;
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references : one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	if (master_help) {
		hlist_add_head(&exp->lnode, &master_help->expectations);
		master_help->expecting[exp->class]++;
	} else if (exp->flags & NF_CT_EXPECT_USERSPACE)
		/* No helper: track userspace expectations on a global list
		 * so they can be flushed on module removal. */
		hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	if (master_help) {
		/* Expiry comes from the helper's per-class policy; safe to
		 * dereference under nf_conntrack_lock. */
		p = &rcu_dereference_protected(
			master_help->helper,
			lockdep_is_held(&nf_conntrack_lock)
			)->expect_policy[exp->class];
		exp->timeout.expires = jiffies + p->timeout * HZ;
	}
	/* NOTE(review): without a helper, exp->timeout.expires is presumed
	 * to have been set by the (userspace) creator before insertion. */
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
}
348
349/* Race with expectations being used means we could have none to find; OK. */
6002f266
PM
350static void evict_oldest_expect(struct nf_conn *master,
351 struct nf_conntrack_expect *new)
77ab9cff 352{
b560580a 353 struct nf_conn_help *master_help = nfct_help(master);
6002f266 354 struct nf_conntrack_expect *exp, *last = NULL;
b560580a 355 struct hlist_node *n;
77ab9cff 356
6002f266
PM
357 hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
358 if (exp->class == new->class)
359 last = exp;
360 }
b560580a 361
6002f266
PM
362 if (last && del_timer(&last->timeout)) {
363 nf_ct_unlink_expect(last);
364 nf_ct_expect_put(last);
77ab9cff
MJ
365 }
366}
367
/* Re-arm the expiry timer of an existing expectation @i using its helper's
 * policy timeout.  Returns 0 if the timer had already fired (entry is
 * dying — caller must not reuse it), 1 on success.
 * Caller holds nf_conntrack_lock.
 */
static inline int refresh_timer(struct nf_conntrack_expect *i)
{
	struct nf_conn_help *master_help = nfct_help(i->master);
	const struct nf_conntrack_expect_policy *p;

	if (!del_timer(&i->timeout))
		return 0;

	p = &rcu_dereference_protected(
		master_help->helper,
		lockdep_is_held(&nf_conntrack_lock)
		)->expect_policy[i->class];
	i->timeout.expires = jiffies + p->timeout * HZ;
	add_timer(&i->timeout);
	return 1;
}
384
19abb7b0 385static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
77ab9cff 386{
6002f266 387 const struct nf_conntrack_expect_policy *p;
77ab9cff
MJ
388 struct nf_conntrack_expect *i;
389 struct nf_conn *master = expect->master;
390 struct nf_conn_help *master_help = nfct_help(master);
9b03f38d 391 struct net *net = nf_ct_exp_net(expect);
a71c0855
PM
392 struct hlist_node *n;
393 unsigned int h;
83731671 394 int ret = 1;
77ab9cff 395
bc01befd
PNA
396 /* Don't allow expectations created from kernel-space with no helper */
397 if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
398 (!master_help || (master_help && !master_help->helper))) {
3c158f7f
PM
399 ret = -ESHUTDOWN;
400 goto out;
401 }
a71c0855 402 h = nf_ct_expect_dst_hash(&expect->tuple);
9b03f38d 403 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
77ab9cff
MJ
404 if (expect_matches(i, expect)) {
405 /* Refresh timer: if it's dying, ignore.. */
406 if (refresh_timer(i)) {
407 ret = 0;
408 goto out;
409 }
410 } else if (expect_clash(i, expect)) {
411 ret = -EBUSY;
412 goto out;
413 }
414 }
415 /* Will be over limit? */
bc01befd 416 if (master_help) {
c5d277d2
ED
417 p = &rcu_dereference_protected(
418 master_help->helper,
419 lockdep_is_held(&nf_conntrack_lock)
420 )->expect_policy[expect->class];
bc01befd
PNA
421 if (p->max_expected &&
422 master_help->expecting[expect->class] >= p->max_expected) {
423 evict_oldest_expect(master, expect);
424 if (master_help->expecting[expect->class]
425 >= p->max_expected) {
426 ret = -EMFILE;
427 goto out;
428 }
6002f266
PM
429 }
430 }
77ab9cff 431
9b03f38d 432 if (net->ct.expect_count >= nf_ct_expect_max) {
f264a7df
PM
433 if (net_ratelimit())
434 printk(KERN_WARNING
3d89e9cf 435 "nf_conntrack: expectation table full\n");
f264a7df 436 ret = -EMFILE;
f264a7df 437 }
19abb7b0
PNA
438out:
439 return ret;
440}
441
83731671
PNA
/* Check and insert @expect, then report an IPEXP_NEW event (outside the
 * lock).  Returns 0 on success — including when an identical expectation
 * merely had its timer refreshed — or a negative errno from
 * __nf_ct_expect_check().
 */
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 pid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	/* <= 0: either an error, or 0 = existing entry refreshed; in both
	 * cases there is nothing to insert and no NEW event to send. */
	if (ret <= 0)
		goto out;

	ret = 0;
	nf_ct_expect_insert(expect);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
462
bc01befd
PNA
/* Flush every expectation created from userspace (those without a helper,
 * tracked on nf_ct_userspace_expect_list).
 */
void nf_ct_remove_userspace_expectations(void)
{
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;

	/* _safe: nf_ct_unlink_expect() removes entries from this list. */
	hlist_for_each_entry_safe(exp, n, next,
				  &nf_ct_userspace_expect_list, lnode) {
		/* Skip entries whose expiry timer already fired. */
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);
477
77ab9cff 478#ifdef CONFIG_PROC_FS
/* Iterator state for the /proc/net/nf_conntrack_expect seq_file. */
struct ct_expect_iter_state {
	struct seq_net_private p;	/* must be first: net-aware seq_file */
	unsigned int bucket;		/* current expect_hash bucket */
};
483
/* Return the first expectation node in the hash, scanning buckets from 0,
 * or NULL if the table is empty.  Runs under rcu_read_lock (see
 * exp_seq_start).
 */
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}
77ab9cff 497
5d08ad44
PM
/* Advance to the node after @head, moving on to later buckets as needed.
 * Returns NULL when the end of the table is reached.
 */
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}
512
5d08ad44 513static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
77ab9cff 514{
5d08ad44 515 struct hlist_node *head = ct_expect_get_first(seq);
77ab9cff 516
5d08ad44
PM
517 if (head)
518 while (pos && (head = ct_expect_get_next(seq, head)))
519 pos--;
520 return pos ? NULL : head;
521}
77ab9cff 522
/* seq_file start: enter the RCU read side for the whole iteration and
 * position at *pos.
 */
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}
77ab9cff 529
5d08ad44
PM
/* seq_file next: advance one node and bump the position counter. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}
535
/* seq_file stop: leave the RCU read side taken in exp_seq_start(). */
static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
541
/* Render one expectation as a line of /proc/net/nf_conntrack_expect:
 * remaining timeout, l3/l4 proto, tuple, flags, and helper[/policy] name.
 * NOTE: the output format is userspace ABI — do not change it.
 */
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";	/* "," once a first flag has been printed */

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	/* Seconds until expiry, "-" if no timer function is set. */
	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	/* Old seq API: seq_putc() returns an int used as our result. */
	return seq_putc(s, '\n');
}
585
/* Iterator operations for /proc/net/nf_conntrack_expect. */
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};
592
/* open() for the proc file: net-namespace-aware seq_file with our
 * per-open iterator state.
 */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}
598
/* File operations for /proc/net/nf_conntrack_expect. */
static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
606#endif /* CONFIG_PROC_FS */
e9c1b084 607
/* Create /proc/net/nf_conntrack_expect (mode 0440) for @net.
 * No-op returning 0 when CONFIG_PROC_FS is disabled.
 */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc;

	proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */
	return 0;
}
619
/* Remove /proc/net/nf_conntrack_expect for @net (no-op without procfs). */
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}
626
13ccdfc2 627module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
a71c0855 628
/* Per-netns initialisation of the expectation subsystem: size and allocate
 * the hash table, create the (global) slab cache and the proc entry.
 * Globals (hash size, max, cache) are only set up for the initial netns.
 * Returns 0 or a negative errno, unwinding partial setup on failure.
 */
int nf_conntrack_expect_init(struct net *net)
{
	int err = -ENOMEM;

	if (net_eq(net, &init_net)) {
		/* Default the expectation hash to 1/256 of the conntrack
		 * hash, minimum one bucket; cap total entries at 4x that. */
		if (!nf_ct_expect_hsize) {
			nf_ct_expect_hsize = net->ct.htable_size / 256;
			if (!nf_ct_expect_hsize)
				nf_ct_expect_hsize = 1;
		}
		nf_ct_expect_max = nf_ct_expect_hsize * 4;
	}

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	/* The slab cache is shared by all namespaces: create it only once. */
	if (net_eq(net, &init_net)) {
		nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL);
		if (!nf_ct_expect_cachep)
			goto err2;
	}

	err = exp_proc_init(net);
	if (err < 0)
		goto err3;

	return 0;

err3:
	if (net_eq(net, &init_net))
		kmem_cache_destroy(nf_ct_expect_cachep);
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}
669
/* Per-netns teardown: remove the proc entry, destroy the shared slab cache
 * (init_net only, after waiting out pending RCU frees) and free the hash.
 */
void nf_conntrack_expect_fini(struct net *net)
{
	exp_proc_remove(net);
	if (net_eq(net, &init_net)) {
		rcu_barrier(); /* Wait for call_rcu() before destroy */
		kmem_cache_destroy(nf_ct_expect_cachep);
	}
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}