]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - net/netfilter/nf_conntrack_expect.c
Merge branch 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason...
[mirror_ubuntu-zesty-kernel.git] / net / netfilter / nf_conntrack_expect.c
CommitLineData
77ab9cff
MJ
1/* Expectation handling for nf_conntrack. */
2
3/* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
f229f6ce 6 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
77ab9cff
MJ
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/types.h>
14#include <linux/netfilter.h>
15#include <linux/skbuff.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18#include <linux/stddef.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/percpu.h>
22#include <linux/kernel.h>
a71c0855 23#include <linux/jhash.h>
d9b93842 24#include <linux/moduleparam.h>
bc3b2d7f 25#include <linux/export.h>
457c4cbc 26#include <net/net_namespace.h>
a9a083c3 27#include <net/netns/hash.h>
77ab9cff
MJ
28
29#include <net/netfilter/nf_conntrack.h>
30#include <net/netfilter/nf_conntrack_core.h>
31#include <net/netfilter/nf_conntrack_expect.h>
32#include <net/netfilter/nf_conntrack_helper.h>
33#include <net/netfilter/nf_conntrack_tuple.h>
5d0aa2cc 34#include <net/netfilter/nf_conntrack_zones.h>
77ab9cff 35
/* Number of buckets in the expectation hash table; settable at load time
 * via the expect_hashsize module parameter, otherwise derived from the
 * conntrack table size in nf_conntrack_expect_init(). */
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

/* Global expectation hash table shared by all network namespaces;
 * entries from different netns are told apart in nf_ct_exp_equal(). */
struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

/* Upper bound on the number of expectations per network namespace. */
unsigned int nf_ct_expect_max __read_mostly;

/* Slab cache backing struct nf_conntrack_expect allocations. */
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
/* Hash seed, lazily initialized on first use via get_random_once(). */
static unsigned int nf_ct_expect_hashrnd __read_mostly;
77ab9cff
MJ
46
/* nf_conntrack_expect helper functions */

/* Unlink @exp from the global hash table and from its master conntrack's
 * expectation list, emit an IPEXP_DESTROY event and drop the hash table's
 * reference.  Caller must hold nf_conntrack_expect_lock and must already
 * have stopped the expiry timer (asserted below).
 */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	/* RCU removal: lockless readers may still see the entry until a
	 * grace period elapses; freeing happens via nf_ct_expect_put(). */
	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	/* Remove from the master conntrack's per-helper list. */
	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);	/* drop the hash-insert reference */

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
77ab9cff 69
/* Expiry timer callback: the expectation was never fulfilled, so unlink
 * it under the expectation lock and drop the timer's reference. */
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);	/* drop the timer reference */
}
79
/* Hash the destination part of @tuple into a bucket index for the global
 * expectation table.  The seed mixes the lazily-initialized random value
 * with a per-netns hash so bucket distribution differs per namespace. */
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash, seed;

	get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

	seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

	/* Fold protocol numbers and the destination port into the jhash
	 * initval; the destination address is the hashed key itself. */
	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ seed);

	/* Map the 32-bit hash onto [0, nf_ct_expect_hsize). */
	return reciprocal_scale(hash, nf_ct_expect_hsize);
}
94
03d7dc5c
FW
95static bool
96nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
97 const struct nf_conntrack_expect *i,
98 const struct nf_conntrack_zone *zone,
99 const struct net *net)
100{
101 return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
102 net_eq(net, nf_ct_net(i->master)) &&
103 nf_ct_zone_equal_any(i->master, zone);
104}
105
/* Lockless lookup of an expectation matching @tuple in @zone/@net.
 * Caller must hold rcu_read_lock(); the returned entry carries no extra
 * reference and is only guaranteed valid within the RCU read section
 * (use nf_ct_expect_find_get() for a referenced lookup).
 */
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
		    const struct nf_conntrack_zone *zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	/* Fast path: this netns has no expectations at all. */
	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
		if (nf_ct_exp_equal(tuple, i, zone, net))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
77ab9cff
MJ
125
/* Just find an expectation corresponding to a tuple, taking a reference.
 * Returns NULL if no matching entry exists or the entry found is already
 * on its way to being freed. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
		      const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	/* atomic_inc_not_zero() fails iff the refcount already dropped to
	 * zero, i.e. the entry is being destroyed concurrently. */
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
77ab9cff
MJ
143
/* If an expectation for this connection is found, it is unlinked from the
 * global list and returned; the caller receives references on both the
 * expectation and its master conntrack. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
		       const struct nf_conntrack_zone *zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
		/* Skip entries marked inactive (e.g. not yet usable). */
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_exp_equal(tuple, i, zone, net)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid race with other CPUs, that for exp->master ct, is
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * The atomic_inc_not_zero() check tells:  If that fails, we
	 * know that the ct is being destroyed.  If it succeeds, we
	 * can be sure the ct cannot disappear underneath.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		/* Permanent expectations stay in the table: take a fresh
		 * reference for the caller. */
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		/* We beat the timeout handler: unlink, and the reference
		 * previously held by the timer now belongs to the caller. */
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}
200
/* Delete all expectations whose master is this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		/* Only unlink if we stop the timer first; otherwise the
		 * timeout handler is already tearing this entry down. */
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
77ab9cff
MJ
222
223/* Would two expected things clash? */
224static inline int expect_clash(const struct nf_conntrack_expect *a,
225 const struct nf_conntrack_expect *b)
226{
227 /* Part covered by intersection of masks must be unequal,
228 otherwise they clash */
d4156e8c 229 struct nf_conntrack_tuple_mask intersect_mask;
77ab9cff
MJ
230 int count;
231
77ab9cff 232 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
77ab9cff
MJ
233
234 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
235 intersect_mask.src.u3.all[count] =
236 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
237 }
238
4b31814d 239 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
03d7dc5c 240 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
deedb590 241 nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
77ab9cff
MJ
242}
243
244static inline int expect_matches(const struct nf_conntrack_expect *a,
245 const struct nf_conntrack_expect *b)
246{
f64f9e71 247 return a->master == b->master && a->class == b->class &&
308ac914
DB
248 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
249 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
03d7dc5c 250 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
deedb590 251 nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
77ab9cff
MJ
252}
253
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	/* Only unlink if the timer had not fired yet; on success the
	 * timer's reference is dropped here on its behalf. */
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
77ab9cff
MJ
265
266/* We don't increase the master conntrack refcount for non-fulfilled
267 * conntracks. During the conntrack destruction, the expectations are
268 * always killed before the conntrack itself */
6823645d 269struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
77ab9cff
MJ
270{
271 struct nf_conntrack_expect *new;
272
6823645d 273 new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
77ab9cff
MJ
274 if (!new)
275 return NULL;
276
277 new->master = me;
278 atomic_set(&new->use, 1);
279 return new;
280}
6823645d 281EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
77ab9cff 282
/* Fill in the tuple and mask of @exp for the given family and protocol.
 *
 * @class: expectation class, an index into the helper's expect_policy.
 * @saddr, @src: optional source address/port; when NULL the field is
 *               wildcarded (mask zeroed), otherwise it must match
 *               exactly (mask all-ones).
 * @daddr, @dst: mandatory destination address and port.
 */
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	/* Address length in bytes: 4 for IPv4, 16 otherwise (IPv6). */
	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		/* No source address given: wildcard it. */
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		/* No source port given: wildcard it. */
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
d6a9b650 340
7d0742da
PM
341static void nf_ct_expect_free_rcu(struct rcu_head *head)
342{
343 struct nf_conntrack_expect *exp;
344
345 exp = container_of(head, struct nf_conntrack_expect, rcu);
346 kmem_cache_free(nf_ct_expect_cachep, exp);
347}
348
/* Drop one reference to @exp; the last reference frees it after an RCU
 * grace period (readers may still be traversing the hash chain). */
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
77ab9cff 355
/* Link @exp into the global hash and its master's helper list and start
 * the expiry timer.  Caller holds nf_conntrack_expect_lock (see the
 * lockdep annotation below).  Always returns 0. */
static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

	/* two references : one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		/* The helper's per-class policy dictates the lifetime. */
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	/* NOTE(review): when no helper is attached, exp->timeout.expires is
	 * not set before add_timer() — appears to rely on callers always
	 * having a helper or setting a timeout; confirm. */
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}
385
/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	/* Insertions happen at the list head (hlist_add_head in
	 * nf_ct_expect_insert), so the last entry of this class found
	 * while walking the list is the oldest one. */
	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	/* Evict only if we can stop its timer first. */
	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}
403
/* Validate @expect before insertion: drop an identical pending
 * expectation so the new one can replace it, reject clashing ones, and
 * enforce the helper's per-class limit and the global table limit.
 * Returns 1 when insertion may proceed, <= 0 on error.
 * Caller holds nf_conntrack_expect_lock. */
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	/* No helper extension on the master: refuse. */
	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(net, &expect->tuple);
	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* Identical expectation already queued: remove it
			 * (if its timer can still be stopped) so the new
			 * one takes its place. */
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			/* Try to make room by evicting the oldest entry of
			 * this class; fail if we are still at the limit. */
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}
456
/* Check and insert @expect, then notify listeners of the new
 * expectation (IPEXP_NEW) outside the lock.  Returns 0 on success or a
 * negative errno from __nf_ct_expect_check(). */
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect);
	/* check returns 1 to proceed, <= 0 on error/no-op */
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
478
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Iterator state for /proc/net/nf_conntrack_expect. */
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;	/* current hash bucket index */
};

/* Return the first entry of the first non-empty bucket, or NULL when
 * the whole table is empty.  Runs under rcu_read_lock(). */
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}
77ab9cff 497
/* Advance to the entry after @head, moving on to subsequent buckets when
 * the current chain is exhausted.  Runs under rcu_read_lock(). */
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
	}
	return head;
}
511
5d08ad44 512static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
77ab9cff 513{
5d08ad44 514 struct hlist_node *head = ct_expect_get_first(seq);
77ab9cff 515
5d08ad44
PM
516 if (head)
517 while (pos && (head = ct_expect_get_next(seq, head)))
518 pos--;
519 return pos ? NULL : head;
520}
77ab9cff 521
/* seq_file callbacks: the table is traversed under rcu_read_lock(),
 * taken in start() and released in stop(). */
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
540
/* Format one expectation: remaining timeout, protocol numbers, tuple,
 * flag names and the attached helper/policy name. */
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	/* Remaining lifetime in seconds, "-" when no timer function set. */
	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	/* Comma-separated flag names; delim becomes "," once a flag has
	 * been printed. */
	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	seq_putc(s, '\n');

	return 0;
}
586
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

/* open() handler: allocate the per-net iterator state. */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
e9c1b084 608
/* Create /proc/net/nf_conntrack_expect (mode 0440), owned by the root
 * user/group of the netns when those ids map into the namespace.
 * Compiles to a successful no-op without CONFIG_NF_CONNTRACK_PROCFS. */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;

	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}
628
/* Remove the proc entry created by exp_proc_init(). */
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}
635
/* Hash table size; read-only after load (perm 0400). */
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
a71c0855 637
/* Per-netns setup: reset the expectation count and create the proc
 * entry.  Returns 0 or -ENOMEM from exp_proc_init(). */
int nf_conntrack_expect_pernet_init(struct net *net)
{
	net->ct.expect_count = 0;
	return exp_proc_init(net);
}
643
/* Per-netns teardown: remove the proc entry. */
void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
}
83b4dbe1
G
648
/* Module init: size the hash table (default: conntrack table size / 256,
 * minimum 1 bucket), derive the global expectation limit (4 * buckets)
 * and allocate the slab cache and the hash table. */
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
				sizeof(struct nf_conntrack_expect),
				0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;

	nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (!nf_ct_expect_hash) {
		/* Undo the cache allocation on failure. */
		kmem_cache_destroy(nf_ct_expect_cachep);
		return -ENOMEM;
	}

	return 0;
}
671
/* Module exit: wait for in-flight RCU frees, then release the slab
 * cache and the hash table. */
void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
	nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}