/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

static unsigned int nf_ct_expect_hash_rnd __read_mostly;
unsigned int nf_ct_expect_max __read_mostly;
static int nf_ct_expect_hash_rnd_initted __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

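/*
 * nf_ct_expect_hsize is the number of buckets in the expectation hash
 * table, tunable via the "expect_hashsize" module parameter below.
 * nf_ct_expect_max caps the total number of pending expectations; it is
 * derived from the hash size in nf_conntrack_expect_init().
 */
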
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}

static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
		get_random_bytes(&nf_ct_expect_hash_rnd, 4);
		nf_ct_expect_hash_rnd_initted = 1;
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

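/*
 * The hash covers only the destination side of the tuple: jhash2 runs
 * over the destination address, keyed with the L4 protocol, L3 family
 * and destination port, all XORed with a boot-time random seed.  The
 * final multiply-and-shift maps the 32-bit hash uniformly onto the
 * nf_ct_expect_hsize buckets without a modulo.
 */
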
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	struct hlist_node *n;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

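/*
 * __nf_ct_expect_find() walks the hash chain under RCU and takes no
 * reference; callers must hold rcu_read_lock() and must not use the
 * result after dropping it.  nf_ct_expect_find_get() below is the
 * reference-taking wrapper for everyone else.
 */
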
/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list, then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	struct hlist_node *n;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}

	return NULL;
}

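/*
 * On success the caller owns a reference: for PERMANENT expectations an
 * extra one is taken and the entry stays in the table for further
 * matches; otherwise the expectation is unlinked first (if its timer
 * could be stopped) and the table's reference is handed to the caller.
 */
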
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class
	       && nf_ct_tuple_equal(&a->tuple, &b->tuple)
	       && nf_ct_tuple_mask_equal(&a->mask, &b->mask);
}

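/*
 * expect_matches() means "the same expectation registered twice" (same
 * master, class, tuple and mask) and leads to a timer refresh in
 * nf_ct_expect_related(); expect_clash() means two different
 * expectations could both match one incoming packet, which is rejected
 * with -EBUSY.
 */
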
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	INIT_RCU_HEAD(&new->rcu);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

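/*
 * A NULL saddr or src leaves the corresponding mask all-zero, so the
 * expectation matches any source address or port; non-NULL values are
 * matched exactly (mask all-ones).  This is how helpers express
 * "related connection from anywhere to this address/port".
 */
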
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);
	const struct nf_conntrack_expect_policy *p;
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	atomic_inc(&exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	p = &master_help->helper->expect_policy[exp->class];
	exp->timeout.expires = jiffies + p->timeout * HZ;
	add_timer(&exp->timeout);

	atomic_inc(&exp->use);
	NF_CT_STAT_INC(expect_create);
}

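/*
 * Insertion takes two references on top of the allocation one: one for
 * the hash table and per-master list (dropped by nf_ct_unlink_expect())
 * and one for the pending timeout timer; whoever stops the timer with
 * del_timer() inherits that second reference.
 */
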
/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;
	struct hlist_node *n;

	hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

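/*
 * New expectations are added with hlist_add_head(), so the per-master
 * list runs newest to oldest; the last entry of the matching class is
 * therefore the oldest one and gets evicted.
 */
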
static inline int refresh_timer(struct nf_conntrack_expect *i)
{
	struct nf_conn_help *master_help = nfct_help(i->master);
	const struct nf_conntrack_expect_policy *p;

	if (!del_timer(&i->timeout))
		return 0;

	p = &master_help->helper->expect_policy[i->class];
	i->timeout.expires = jiffies + p->timeout * HZ;
	add_timer(&i->timeout);
	return 1;
}

int nf_ct_expect_related(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *n;
	unsigned int h;
	int ret;

	NF_CT_ASSERT(master_help);

	spin_lock_bh(&nf_conntrack_lock);
	if (!master_help->helper) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* Refresh timer: if it's dying, ignore.. */
			if (refresh_timer(i)) {
				ret = 0;
				goto out;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	p = &master_help->helper->expect_policy[expect->class];
	if (p->max_expected &&
	    master_help->expecting[expect->class] >= p->max_expected) {
		evict_oldest_expect(master, expect);
		if (master_help->expecting[expect->class] >= p->max_expected) {
			ret = -EMFILE;
			goto out;
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "nf_conntrack: expectation table full\n");
		ret = -EMFILE;
		goto out;
	}

	nf_ct_expect_insert(expect);
	nf_ct_expect_event(IPEXP_NEW, expect);
	ret = 0;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related);

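/*
 * Typical helper usage, as a rough sketch (the "port" variable below is
 * hypothetical; real helpers such as the FTP helper parse it out of the
 * packet payload):
 *
 *	struct nf_conntrack_expect *exp;
 *	__be16 port;	(related port learned from the protocol exchange)
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
 *			  nf_ct_l3num(ct),
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 */
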
#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(head->next);
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

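/*
 * The iterators above hold rcu_read_lock() from ->start to ->stop, so
 * the table may change while it is being read; the listing is a
 * best-effort snapshot rather than an atomic dump.
 */
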
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE)
		seq_printf(s, "%sINACTIVE", delim);

	return seq_putc(s, '\n');
}

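/*
 * One line per expectation in /proc/net/nf_conntrack_expect: the
 * remaining timeout in seconds (or "-" for timerless entries),
 * "l3proto = <N> proto=<N>", the expected tuple, and optionally the
 * PERMANENT and/or INACTIVE flags.
 */
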
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner = THIS_MODULE,
	.open = exp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc;

	proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);

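/*
 * Per-namespace initialization: the expectation hash table and counter
 * live in net->ct, so each namespace gets its own table.  The hash size
 * defaults to nf_conntrack_htable_size / 256 (at least one bucket), and
 * nf_ct_expect_max to four times the hash size.
 */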
int nf_conntrack_expect_init(struct net *net)
{
	int err = -ENOMEM;

	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
						    &net->ct.expect_vmalloc);
	if (net->ct.expect_hash == NULL)
		goto err1;

	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL);
	if (!nf_ct_expect_cachep)
		goto err2;

	err = exp_proc_init(net);
	if (err < 0)
		goto err3;

	return 0;

err3:
	kmem_cache_destroy(nf_ct_expect_cachep);
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
			     nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
	exp_proc_remove(net);
	kmem_cache_destroy(nf_ct_expect_cachep);
	nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
			     nf_ct_expect_hsize);
}