/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	WARN_ON(!master_help);
	WARN_ON(timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del_rcu(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(struct timer_list *t)
{
	struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);
}

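/* Expectations are hashed on the destination part of the tuple.  The seed
 * mixes a boot-time random value with the per-netns hash salt, so bucket
 * placement is not predictable across namespaces or reboots.
 */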
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash, seed;

	get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

	seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ seed);

	return reciprocal_scale(hash, nf_ct_expect_hsize);
}

static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_expect *i,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
	       net_eq(net, nf_ct_net(i->master)) &&
	       nf_ct_zone_equal_any(i->master, zone);
}

bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);

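/* Lockless lookup; the caller must hold rcu_read_lock() and must not rely on
 * the expectation outliving the RCU read side -- nf_ct_expect_find_get()
 * below is the reference-taking variant.
 */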
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
		    const struct nf_conntrack_zone *zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
		if (nf_ct_exp_equal(tuple, i, zone, net))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
		      const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !refcount_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
		       const struct nf_conntrack_zone *zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_exp_equal(tuple, i, zone, net)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (i.e. the packet hasn't
	   left this machine yet), the other end cannot know about the
	   expectation.  Hence these are not the droids you are looking for
	   (if the master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid a race with other CPUs that, for the exp->master ct, are
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * The atomic_inc_not_zero() check tells us: if it fails, we
	 * know that the ct is being destroyed.  If it succeeds, we
	 * can be sure the ct cannot disappear underneath us.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

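	/* A permanent expectation stays in the table and may match again, so
	 * only an extra reference is taken.  A one-shot expectation is
	 * unlinked here; the reference previously held by its timer is
	 * handed over to the caller.
	 */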
	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		refcount_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		nf_ct_remove_expect(exp);
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* The parts covered by the intersection of the masks must be
	   unequal, otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

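/* In contrast to expect_clash(), this is an exact identity test: same master,
 * class, tuple, mask, netns and zone.  __nf_ct_expect_check() uses it to
 * replace an existing, identical expectation rather than reject the new one.
 */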
static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_remove_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	refcount_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

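/* Fill in the expectation tuple and mask.  A conntrack helper typically uses
 * this roughly as follows (illustrative sketch only; the addresses and the
 * port variable are made-up examples, not taken from this file):
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 */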
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (refcount_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

	/* two references: one for hash insert, one for the timer */
	refcount_add(2, &exp->use);

	timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
	net->ct.expect_count++;

	NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last)
		nf_ct_remove_expect(last);
}

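/* Validate a new expectation under nf_conntrack_expect_lock: the master must
 * still have helper state attached, an identical existing expectation is
 * replaced, a clashing one rejects the new entry with -EBUSY, the helper's
 * per-class limit is enforced (evicting the oldest expectation of that class
 * first), and finally the global nf_ct_expect_max limit is checked.
 */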
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 0;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(net, &expect->tuple);
	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (nf_ct_remove_expect(i))
				break;
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will we be over the limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}
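/* Insert a new expectation after validation and report it as an IPEXP_NEW
 * event.  (The plain nf_ct_expect_related() wrapper in the header ends up
 * here with portid and report set to 0.)
 */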
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret < 0)
		goto out;

	nf_ct_expect_insert(expect);

	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return 0;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

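/* Walk the whole expectation table and remove every expectation for which
 * iter() returns true.  nf_ct_expect_iterate_net() below does the same but
 * only for entries belonging to the given netns, passing portid/report
 * through to the destroy notification.
 */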
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
				  void *data)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *next;
	unsigned int i;

	spin_lock_bh(&nf_conntrack_expect_lock);

	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_safe(exp, next,
					  &nf_ct_expect_hash[i],
					  hnode) {
			if (iter(exp, data) && del_timer(&exp->timeout)) {
				nf_ct_unlink_expect(exp);
				nf_ct_expect_put(exp);
			}
		}
	}

	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy);

void nf_ct_expect_iterate_net(struct net *net,
			      bool (*iter)(struct nf_conntrack_expect *e, void *data),
			      void *data,
			      u32 portid, int report)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *next;
	unsigned int i;

	spin_lock_bh(&nf_conntrack_expect_lock);

	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_safe(exp, next,
					  &nf_ct_expect_hash[i],
					  hnode) {

			if (!net_eq(nf_ct_exp_net(exp), net))
				continue;

			if (iter(exp, data) && del_timer(&exp->timeout)) {
				nf_ct_unlink_expect_report(exp, portid, report);
				nf_ct_expect_put(exp);
			}
		}
	}

	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

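/* One /proc/net/nf_conntrack_expect line per expectation: remaining timeout
 * in seconds, l3proto/proto numbers, the expected tuple, any flags and the
 * helper name.  Roughly (illustrative values only, format depends on the
 * l3/l4 protocol printers):
 *
 *	296 l3proto = 2 proto=6 src=10.0.0.1 dst=10.0.0.2 sport=0 dport=40721 ftp
 */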
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_puts(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_puts(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	seq_putc(s, '\n');

	return 0;
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner = THIS_MODULE,
	.open = exp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;

	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

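/* Read-only (0400) module parameter; the expectation hash size can only be
 * set at load/boot time, e.g. via nf_conntrack.expect_hashsize= on the kernel
 * command line (assuming the usual module-parameter naming).
 */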
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	net->ct.expect_count = 0;
	return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
}

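/* If no size was given, the expectation hash is sized relative to the main
 * conntrack table (nf_conntrack_htable_size / 256, with a minimum of one
 * bucket), and the table may hold at most four entries per bucket on average
 * (nf_ct_expect_max = hsize * 4).
 */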
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
						sizeof(struct nf_conntrack_expect),
						0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;

	nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (!nf_ct_expect_hash) {
		kmem_cache_destroy(nf_ct_expect_cachep);
		return -ENOMEM;
	}

	return 0;
}

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
	nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}