netfilter: expect: fix crash when putting uninited expectation

/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;

/* nf_conntrack_expect helper functions */
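/*
 * Unlink @exp from the global expectation hash and from its master's
 * expectation list and report the destroy event.  The caller must hold
 * nf_conntrack_expect_lock, and the timeout timer must already have been
 * stopped; the reference owned by the hash table is dropped here.
 */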
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 portid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(master_help);
        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del_rcu(&exp->lnode);
        master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_put(exp);
}

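/*
 * Map the destination part of @tuple to a bucket index.  The jhash seed
 * mixes a lazily initialized random value with the per-netns hash salt,
 * and reciprocal_scale() folds the result into [0, nf_ct_expect_hsize).
 */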
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash, seed;

        get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

        seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ seed);

        return reciprocal_scale(hash, nf_ct_expect_hsize);
}

static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_expect *i,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
               net_eq(net, nf_ct_net(i->master)) &&
               nf_ct_zone_equal_any(i->master, zone);
}

bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);

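/*
 * Core lookup: returns a matching expectation without taking a reference.
 * Callers must be in an RCU read-side critical section (or hold
 * nf_conntrack_expect_lock); use nf_ct_expect_find_get() to obtain a
 * reference-counted result.
 */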
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
                    const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
                if (nf_ct_exp_equal(tuple, i, zone, net))
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
                      const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !refcount_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list and is then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
                       const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_exp_equal(tuple, i, zone, net)) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If the master is not in the hash table yet (i.e. the packet has
           not left this machine yet), how can the other end know about the
           expectation?  Hence these are not the droids you are looking for
           (if the master ct never got confirmed, we'd hold a reference to
           it and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        /* Avoid a race with other CPUs that, for the exp->master ct, are
         * about to invoke ->destroy(), or nf_ct_delete() via timeout
         * or early_drop().
         *
         * The atomic_inc_not_zero() check tells us: if it fails, we
         * know that the ct is being destroyed.  If it succeeds, we
         * can be sure the ct cannot disappear underneath us.
         */
        if (unlikely(nf_ct_is_dying(exp->master) ||
                     !atomic_inc_not_zero(&exp->master->ct_general.use)))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                refcount_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }
        /* Undo exp->master refcnt increase, if del_timer() failed */
        nf_ct_put(exp->master);

        return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                nf_ct_remove_expect(exp);
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_remove_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        refcount_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

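/*
 * Fill in the tuple and mask of @exp for the given class, family and
 * protocol.  A NULL @saddr or @src acts as a wildcard: the corresponding
 * part of the mask is left all-zero, so any source address or port will
 * match.  The destination address and port are always required.
 */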
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
        memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
        memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
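/*
 * Illustrative sketch only (not taken from a real helper): a conntrack
 * helper that has parsed a destination port "port" (__be16) out of its
 * control channel typically sets up an expectation roughly like this:
 *
 *	struct nf_conntrack_expect *exp;
 *	int ret = NF_ACCEPT;
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related_report(exp, 0, 0) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 *	return ret;
 *
 * The NULL source port leaves the source port wildcarded, and the final
 * nf_ct_expect_put() drops the allocation reference once the table holds
 * its own references.
 */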

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (refcount_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

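/*
 * Insert @exp into the global hash and its master's expectation list and
 * arm the timeout timer.  Called with nf_conntrack_expect_lock held; two
 * references are taken, one for the hash table and one for the timer.
 */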
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
        unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

        /* two references: one for hash insert, one for the timer */
        refcount_add(2, &exp->use);

        hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;

        hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                exp->timeout.expires = jiffies +
                        helper->expect_policy[exp->class].timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;

        hlist_for_each_entry(exp, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last)
                nf_ct_remove_expect(last);
}

static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *next;
        unsigned int h;
        int ret = 0;

        if (!master_help) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(net, &expect->tuple);
        hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (nf_ct_remove_expect(i))
                                break;
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                            >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                net_warn_ratelimited("nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

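/*
 * Register @expect in the expectation table and report an IPEXP_NEW
 * event.  Returns 0 on success, -ESHUTDOWN if the master conntrack has
 * no helper extension, -EBUSY if a clashing expectation already exists,
 * and -EMFILE if the per-helper or global expectation limits are hit.
 */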
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_expect_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret < 0)
                goto out;

        nf_ct_expect_insert(expect);

        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return 0;
out:
        spin_unlock_bh(&nf_conntrack_expect_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

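/*
 * /proc/net/nf_conntrack_expect: a seq_file that walks the expectation
 * hash under rcu_read_lock() and prints one expectation per line
 * (remaining timeout, protocol numbers, tuple, flags and helper name).
 */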
#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_puts(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_puts(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name[0])
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;
        kuid_t root_uid;
        kgid_t root_gid;

        proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
                           &exp_file_ops);
        if (!proc)
                return -ENOMEM;

        root_uid = make_kuid(net->user_ns, 0);
        root_gid = make_kgid(net->user_ns, 0);
        if (uid_valid(root_uid) && gid_valid(root_gid))
                proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

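/*
 * Setup: expect_hashsize can be set as a module parameter; if it is left
 * at zero, nf_conntrack_expect_init() derives it from
 * nf_conntrack_htable_size / 256 (minimum 1) and caps the table at
 * nf_ct_expect_max = 4 * nf_ct_expect_hsize entries.
 */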
int nf_conntrack_expect_pernet_init(struct net *net)
{
        net->ct.expect_count = 0;
        return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
        exp_proc_remove(net);
}

int nf_conntrack_expect_init(void)
{
        if (!nf_ct_expect_hsize) {
                nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
                if (!nf_ct_expect_hsize)
                        nf_ct_expect_hsize = 1;
        }
        nf_ct_expect_max = nf_ct_expect_hsize * 4;
        nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                sizeof(struct nf_conntrack_expect),
                                0, 0, NULL);
        if (!nf_ct_expect_cachep)
                return -ENOMEM;

        nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (!nf_ct_expect_hash) {
                kmem_cache_destroy(nf_ct_expect_cachep);
                return -ENOMEM;
        }

        return 0;
}

void nf_conntrack_expect_fini(void)
{
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
        nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}