/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;

/* nf_conntrack_expect helper functions */
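/*
 * Unlink @exp from the global hash table and from its master's helper
 * list, fire an IPEXP_DESTROY event and drop the reference the hash
 * table held.  Callers must hold nf_conntrack_expect_lock and must
 * already have stopped the timer (hence the WARN_ON below).
 */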
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 portid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        WARN_ON(!master_help);
        WARN_ON(timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del_rcu(&exp->lnode);
        master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

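/*
 * Timer callback: the expectation expired without being used.  Unlink
 * it under the expect lock, then drop the reference the timer held.
 */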
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_put(exp);
}

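/*
 * Hash an expectation by the destination part of its tuple.  The
 * random seed is initialized lazily on first use and mixed with the
 * per-netns hash so that bucket distribution differs between network
 * namespaces.
 */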
static unsigned int nf_ct_expect_dst_hash(const struct net *n,
                                          const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash, seed;

        get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

        seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ seed);

        return reciprocal_scale(hash, nf_ct_expect_hsize);
}

static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_expect *i,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
               net_eq(net, nf_ct_net(i->master)) &&
               nf_ct_zone_equal_any(i->master, zone);
}

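/*
 * Remove an expectation if we win the race to stop its timer.
 * Returns true if this caller deactivated the pending timer and
 * therefore unlinked the expectation and dropped the timer's
 * reference; false if someone else got there first.
 */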
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
                    const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
                if (nf_ct_exp_equal(tuple, i, zone, net))
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
                      const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !refcount_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from
 * the global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
                       const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_exp_equal(tuple, i, zone, net)) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If the master is not in the hash table yet (ie. the packet
           hasn't left this machine yet), how can the other end know
           about the expected connection?  Hence these are not the
           droids you are looking for (if the master ct never got
           confirmed, we'd hold a reference to it and weird things
           would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        /* Avoid a race with other CPUs that are about to invoke
         * ->destroy() or nf_ct_delete() on exp->master, via timeout
         * or early_drop().
         *
         * If atomic_inc_not_zero() fails, we know that the ct is
         * being destroyed.  If it succeeds, we can be sure the ct
         * cannot disappear underneath us.
         */
        if (unlikely(nf_ct_is_dying(exp->master) ||
                     !atomic_inc_not_zero(&exp->master->ct_general.use)))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                refcount_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }
        /* Undo exp->master refcnt increase, if del_timer() failed */
        nf_ct_put(exp->master);

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                nf_ct_remove_expect(exp);
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
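/*
 * Two expectations clash when the tuple fields covered by the
 * intersection of their masks compare equal, since a single incoming
 * packet could then match both.  Illustrative example: a mask of
 * htons(0xFFFF) on the source port intersected with a wildcard mask
 * of 0 yields 0, so the source port is ignored and only the remaining
 * masked fields decide the clash.
 */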
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* The parts covered by the intersection of the masks must
           differ, otherwise the two expectations clash. */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_remove_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations.  During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        refcount_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

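/*
 * Fill in the tuple and mask of an expectation.  A NULL @saddr or
 * @src wildcards that field (mask cleared); otherwise the field must
 * match exactly (mask set to all ones).  Unused address bytes are
 * zeroed so nf_ct_tuple_equal() can compare the unions bytewise.
 */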
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
        memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
        memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (refcount_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

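/*
 * Link a checked expectation into the global hash table and its
 * master's helper list and arm its timeout.  Two references are
 * taken: one for the hash table insert and one for the timer.  When
 * the master has a helper, the timeout comes from the helper's expect
 * policy for this class.
 */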
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
        unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

        /* two references : one for hash insert, one for the timer */
        refcount_add(2, &exp->use);

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                exp->timeout.expires = jiffies +
                        helper->expect_policy[exp->class].timeout * HZ;
        }
        add_timer(&exp->timeout);

        hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;

        hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
        net->ct.expect_count++;

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;

        hlist_for_each_entry(exp, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last)
                nf_ct_remove_expect(last);
}

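/*
 * Validate a new expectation before insertion: an identical existing
 * entry is removed so the new one replaces it, a clashing entry
 * rejects the new one with -EBUSY, and both the per-helper per-class
 * limit (evicting the oldest entry of the class first) and the global
 * nf_ct_expect_max limit are enforced with -EMFILE.  Called with
 * nf_conntrack_expect_lock held.
 */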
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *next;
        unsigned int h;
        int ret = 0;

        if (!master_help) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(net, &expect->tuple);
        hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (nf_ct_remove_expect(i))
                                break;
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class] >=
                            p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                net_warn_ratelimited("nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_expect_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret < 0)
                goto out;

        nf_ct_expect_insert(expect);

        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return 0;
out:
        spin_unlock_bh(&nf_conntrack_expect_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

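/*
 * Walk the whole expectation table and remove every entry for which
 * @iter returns true.  The del_timer() test makes the removal
 * race-free: only the caller that deactivates the pending timer
 * unlinks the entry and drops the timer's reference.
 */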
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
                                  void *data)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *next;
        unsigned int i;

        spin_lock_bh(&nf_conntrack_expect_lock);

        for (i = 0; i < nf_ct_expect_hsize; i++) {
                hlist_for_each_entry_safe(exp, next,
                                          &nf_ct_expect_hash[i],
                                          hnode) {
                        if (iter(exp, data) && del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect(exp);
                                nf_ct_expect_put(exp);
                        }
                }
        }

        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy);

void nf_ct_expect_iterate_net(struct net *net,
                              bool (*iter)(struct nf_conntrack_expect *e, void *data),
                              void *data,
                              u32 portid, int report)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *next;
        unsigned int i;

        spin_lock_bh(&nf_conntrack_expect_lock);

        for (i = 0; i < nf_ct_expect_hsize; i++) {
                hlist_for_each_entry_safe(exp, next,
                                          &nf_ct_expect_hash[i],
                                          hnode) {

                        if (!net_eq(nf_ct_exp_net(exp), net))
                                continue;

                        if (iter(exp, data) && del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect_report(exp, portid, report);
                                nf_ct_expect_put(exp);
                        }
                }
        }

        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

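/*
 * seq_file iteration for /proc/net/nf_conntrack_expect: walk the hash
 * buckets under rcu_read_lock().  Only the current bucket index needs
 * to be tracked; the position within a bucket is carried by the hlist
 * node itself.
 */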
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

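/*
 * Emit one line per expectation: remaining timeout in seconds ("-"
 * when no timer was set up), l3proto/proto numbers, the expected
 * tuple, any flags (PERMANENT, INACTIVE, USERSPACE) and, when a
 * helper is attached, its name and expect policy name.  A line might
 * look like (illustrative values only):
 *
 *   296 l3proto = 2 proto=6 src=10.0.0.1 dst=10.0.0.2 sport=0 dport=34567 ftp
 */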
static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_puts(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_puts(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name[0])
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;
        kuid_t root_uid;
        kgid_t root_gid;

        proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
                           &exp_file_ops);
        if (!proc)
                return -ENOMEM;

        root_uid = make_kuid(net->user_ns, 0);
        root_gid = make_kgid(net->user_ns, 0);
        if (uid_valid(root_uid) && gid_valid(root_gid))
                proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

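/*
 * The expectation hash size is read-only at runtime (mode 0400).  It
 * can only be set at load time, e.g. as a modprobe option or, for a
 * built-in conntrack, on the kernel command line (assuming the usual
 * nf_conntrack module name: nf_conntrack.expect_hashsize=2048).
 */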
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
        net->ct.expect_count = 0;
        return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
        exp_proc_remove(net);
}

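/*
 * Default sizing: the expectation table gets 1/256th of the buckets
 * of the main conntrack hash table (at least one), and the global cap
 * nf_ct_expect_max defaults to four times that bucket count.
 */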
int nf_conntrack_expect_init(void)
{
        if (!nf_ct_expect_hsize) {
                nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
                if (!nf_ct_expect_hsize)
                        nf_ct_expect_hsize = 1;
        }
        nf_ct_expect_max = nf_ct_expect_hsize * 4;
        nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                                sizeof(struct nf_conntrack_expect),
                                                0, 0, NULL);
        if (!nf_ct_expect_cachep)
                return -ENOMEM;

        nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (!nf_ct_expect_hash) {
                kmem_cache_destroy(nf_ct_expect_cachep);
                return -ENOMEM;
        }

        return 0;
}

void nf_conntrack_expect_fini(void)
{
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
        nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}