/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 portid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(master_help);
        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_put(exp);
}

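/* Bucket an expectation by the destination part of its tuple: jhash2()
 * over dst.u3 is mixed with the l3/l4 protocol numbers and the
 * destination port (dst.u.all), then scaled down to nf_ct_expect_hsize
 * buckets with reciprocal_scale().
 */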
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_conntrack_hash_rnd))
                init_nf_conntrack_hash_rnd();

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);

        return reciprocal_scale(hash, nf_ct_expect_hsize);
}

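/* Look up an expectation matching @tuple in @zone without taking a
 * reference; callers are expected to hold rcu_read_lock() for the
 * lifetime of the returned pointer (see nf_ct_expect_find_get() below
 * for the reference-taking variant).
 */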
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
                    const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone_equal_any(i->master, zone))
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
                      const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
                       const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone_equal_any(i->master, zone)) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If master is not in hash table yet (ie. packet hasn't left
           this machine yet), how can other end know about expected?
           Hence these are not the droids you are looking for (if
           master ct never got confirmed, we'd hold a reference to it
           and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        /* Avoid a race with another CPU that, for the exp->master ct,
         * is about to invoke ->destroy(), or nf_ct_delete() via the
         * timeout or early_drop().
         *
         * If atomic_inc_not_zero() fails, we know the ct is already
         * being destroyed. If it succeeds, the ct cannot disappear
         * underneath us.
         */
        if (unlikely(nf_ct_is_dying(exp->master) ||
                     !atomic_inc_not_zero(&exp->master->ct_general.use)))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }
        /* Undo exp->master refcnt increase, if del_timer() failed */
        nf_ct_put(exp->master);

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

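/* Unlike expect_clash(), this is an exact comparison: same master,
 * class, tuple, mask and zone. __nf_ct_expect_check() uses it to
 * detect re-registration of an existing expectation.
 */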
static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_expect_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

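/* Fill in an expectation's tuple and mask from its components. A NULL
 * @saddr or @src acts as a wildcard: the corresponding mask bits are
 * zeroed so any source address/port matches. Addresses shorter than
 * the union (IPv4) are zero-padded so nf_ct_tuple_equal() can compare
 * the full union.
 */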
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
        memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
        memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

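/* Drop a reference; the final put frees the expectation via RCU so
 * that lockless readers in __nf_ct_expect_find() never see the memory
 * reused underneath them.
 */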
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

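/* Link a checked expectation into the per-helper list and the global
 * hash, and arm its timeout. Called with nf_conntrack_expect_lock
 * held (see the lockdep assertion on master_help->helper below).
 */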
static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        /* two references: one for hash insert, one for the timer */
        atomic_add(2, &exp->use);

        hlist_add_head(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                exp->timeout.expires = jiffies +
                        helper->expect_policy[exp->class].timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
        return 0;
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;

        hlist_for_each_entry(exp, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

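/* Validate @expect against the current table: remove an identical
 * expectation if one exists (the new one replaces it), reject clashing
 * ones, and enforce the per-helper and global limits. Returns 1 if the
 * caller may go on to insert, or a negative error (-ESHUTDOWN, -EBUSY
 * or -EMFILE).
 */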
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *next;
        unsigned int h;
        int ret = 1;

        if (!master_help) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
                                nf_ct_expect_put(i);
                                break;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                            >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                net_warn_ratelimited("nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_expect_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;

        ret = nf_ct_expect_insert(expect);
        if (ret < 0)
                goto out;
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_expect_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

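/* /proc/net/nf_conntrack_expect: a seq_file walk over the expectation
 * hash table under rcu_read_lock(), printing one expectation per line.
 */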
#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name)
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner = THIS_MODULE,
        .open = exp_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;

        proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
                           &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

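/* The hash size is exposed as a read-only (0400) module parameter:
 * it can only be chosen at boot/module load time.
 */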
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
        int err = -ENOMEM;

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;

        err = exp_proc_init(net);
        if (err < 0)
                goto err2;

        return 0;
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
        exp_proc_remove(net);
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}

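/* Global (not per-netns) init: derive a default hash size from
 * nf_conntrack_htable_size, cap the table at four entries per bucket
 * on average, and create the expectation slab cache.
 */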
int nf_conntrack_expect_init(void)
{
        if (!nf_ct_expect_hsize) {
                nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
                if (!nf_ct_expect_hsize)
                        nf_ct_expect_hsize = 1;
        }
        nf_ct_expect_max = nf_ct_expect_hsize * 4;
        nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                                sizeof(struct nf_conntrack_expect),
                                                0, 0, NULL);
        if (!nf_ct_expect_cachep)
                return -ENOMEM;
        return 0;
}

void nf_conntrack_expect_fini(void)
{
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
}