/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

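/* The timeout handler below runs without nf_conntrack_lock held, so it
 * takes the lock itself before unlinking.  nf_ct_unlink_expect_report()
 * above, by contrast, relies on the caller holding nf_conntrack_lock
 * (its list manipulation is otherwise unsynchronized) and asserts that
 * the timeout timer is no longer pending.
 */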
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}

static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

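/* Note on nf_ct_expect_dst_hash(): the final multiply-and-shift maps the
 * 32-bit jhash value uniformly onto [0, nf_ct_expect_hsize) without a
 * modulo, i.e. (hash * hsize) / 2^32 — the same reciprocal-scaling trick
 * the main conntrack hash table uses.
 */
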
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

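/* Note: __nf_ct_expect_find() must be called under rcu_read_lock() and
 * takes no reference on the returned expectation, so the entry is only
 * guaranteed to live for the duration of the RCU read-side critical
 * section.  Use nf_ct_expect_find_get() below to obtain an entry with
 * its refcount raised; the atomic_inc_not_zero() there skips entries
 * already on their way to destruction.
 */
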
/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (ie. the packet
	   hasn't left this machine yet), how can the other end know
	   about the expected connection?  Hence these are not the droids
	   you are looking for (if the master ct never got confirmed,
	   we'd hold a reference to it and weird things would happen to
	   future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid a race with another CPU that, for the exp->master ct, is
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * The atomic_inc_not_zero() check tells us: if it fails, the ct
	 * is already being destroyed; if it succeeds, the ct cannot
	 * disappear underneath us.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}

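/* Note: on success, nf_ct_find_expectation() returns with an extra
 * reference on both the expectation (freshly taken in the PERMANENT
 * case, otherwise inherited from the timer it just deleted) and on
 * exp->master; the caller is responsible for dropping both.
 */
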
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

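/* Note: two expectations clash when one and the same incoming packet
 * could satisfy both, i.e. their tuples agree on every field that both
 * masks cover.  For example, an expectation that wildcards the source
 * port clashes with one that pins it, provided the addresses and
 * protocol that both specify are identical.
 */
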
static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

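/* Typical helper usage of the API above (illustrative sketch only, not
 * code from this file): a helper such as FTP allocates an expectation,
 * describes the expected data connection, registers it, and then drops
 * its own reference.  saddr/daddr/port below stand in for whatever the
 * helper parsed out of the packet payload:
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
 *			  nf_ct_l3num(ct), &saddr, &daddr,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related_report(exp, 0, 0) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 */
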
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

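/* Insert @exp into the per-net hash table and its master's expectation
 * list, then arm the timeout timer.  The caller must hold
 * nf_conntrack_lock, as documented by the lockdep_is_held() annotation
 * below.
 */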
static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references : one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

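/* Validate @expect under nf_conntrack_lock: replace an identical pending
 * expectation, reject clashing ones, and enforce the per-helper and
 * global limits.  Returns 1 if the expectation may be inserted, or a
 * negative errno.
 */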
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

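/* Register @expect and, on success, report an IPEXP_NEW event to
 * listeners.  Returns 0 on success or a negative errno.
 */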
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	return seq_putc(s, '\n');
}

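/* An entry in /proc/net/nf_conntrack_expect rendered by the above looks
 * roughly like this (illustrative values, exact tuple format depends on
 * the l3/l4 protocol print handlers):
 *
 *	296 l3proto = 2 proto=6 src=192.168.0.1 dst=192.168.0.2 sport=0 dport=40000 ftp
 *
 * i.e. seconds left until the timeout, L3/L4 protocol numbers, the
 * expected tuple, any flag names, and the helper (plus expect-policy)
 * name.
 */
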
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

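/* The expectation hash size is exposed as a read-only (0400) module
 * parameter; it can only be set at load time, e.g. via
 * nf_conntrack.expect_hashsize=... on the kernel command line.
 */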
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	int err = -ENOMEM;

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	err = exp_proc_init(net);
	if (err < 0)
		goto err2;

	return 0;
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}

int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
						sizeof(struct nf_conntrack_expect),
						0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;
	return 0;
}

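/* With the sizing above, a conntrack table of, say, 16384 buckets gives
 * nf_ct_expect_hsize = 16384 / 256 = 64 and a global cap of
 * nf_ct_expect_max = 64 * 4 = 256 pending expectations.
 */
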
void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}