]>
Commit | Line | Data |
---|---|---|
1 | /* Expectation handling for nf_conntrack. */ | |
2 | ||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | |
4 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | |
5 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | |
6 | * (c) 2005-2012 Patrick McHardy <kaber@trash.net> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/types.h> | |
14 | #include <linux/netfilter.h> | |
15 | #include <linux/skbuff.h> | |
16 | #include <linux/proc_fs.h> | |
17 | #include <linux/seq_file.h> | |
18 | #include <linux/stddef.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/err.h> | |
21 | #include <linux/percpu.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/jhash.h> | |
24 | #include <linux/moduleparam.h> | |
25 | #include <linux/export.h> | |
26 | #include <net/net_namespace.h> | |
27 | ||
28 | #include <net/netfilter/nf_conntrack.h> | |
29 | #include <net/netfilter/nf_conntrack_core.h> | |
30 | #include <net/netfilter/nf_conntrack_expect.h> | |
31 | #include <net/netfilter/nf_conntrack_helper.h> | |
32 | #include <net/netfilter/nf_conntrack_tuple.h> | |
33 | #include <net/netfilter/nf_conntrack_zones.h> | |
34 | ||
/* Number of buckets in the expectation hash table; chosen at init time. */
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

/* Upper bound on the number of expectations per net namespace. */
unsigned int nf_ct_expect_max __read_mostly;

/* Slab cache for struct nf_conntrack_expect allocations. */
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
/* nf_conntrack_expect helper functions */

/* Remove @exp from the global hash and from its master's private list.
 * Caller must hold nf_conntrack_lock and must already have stopped the
 * expectation's timer (hence the !timer_pending assertion).  Drops the
 * hash-table reference and emits an IPEXP_DESTROY event to @portid.
 */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	/* Unpublish from the per-netns hash; RCU readers may still see the
	 * entry until a grace period elapses. */
	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	/* Unlink from the master conntrack's expectation list. */
	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);	/* drop the hash-table reference */

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
64 | ||
/* Timer callback: the expectation expired without being matched.
 * Unlink it under nf_conntrack_lock, then drop the timer's reference.
 */
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);	/* drop the timer's reference */
}
74 | ||
75 | static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple) | |
76 | { | |
77 | unsigned int hash; | |
78 | ||
79 | if (unlikely(!nf_conntrack_hash_rnd)) { | |
80 | init_nf_conntrack_hash_rnd(); | |
81 | } | |
82 | ||
83 | hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all), | |
84 | (((tuple->dst.protonum ^ tuple->src.l3num) << 16) | | |
85 | (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd); | |
86 | return ((u64)hash * nf_ct_expect_hsize) >> 32; | |
87 | } | |
88 | ||
/* Lockless lookup of an expectation matching @tuple in @zone.
 * Caller must hold rcu_read_lock(); no reference is taken on the
 * result, so the returned pointer is only valid inside the RCU read
 * side.
 */
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
108 | ||
/* Find an expectation corresponding to a tuple and take a reference
 * on it; returns NULL if none is found (or it is being freed).
 */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	/* The entry may be concurrently torn down; only hand it out if we
	 * managed to grab a reference before the count hit zero. */
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
125 | ||
/* If an expectation for this connection is found, it is deleted from
 * the global list and returned (with a reference held on it and on its
 * master conntrack).  Called with nf_conntrack_lock held.
 */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid race with other CPUs, that for exp->master ct, is
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * The atomic_inc_not_zero() check tells:  If that fails, we
	 * know that the ct is being destroyed.  If it succeeds, we
	 * can be sure the ct cannot disappear underneath.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		/* Permanent expectations stay in place; just take a ref. */
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		/* We now own the timer's reference; unlink and hand it out. */
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}
182 | ||
/* Delete all expectations for this conntrack.
 * Caller context: runs during conntrack teardown with nf_conntrack_lock
 * held by the callers in nf_conntrack_core.
 */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connection never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		/* Only unlink if we beat the timer to it; otherwise the
		 * timeout handler owns the reference and does the unlink. */
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
202 | ||
203 | /* Would two expected things clash? */ | |
204 | static inline int expect_clash(const struct nf_conntrack_expect *a, | |
205 | const struct nf_conntrack_expect *b) | |
206 | { | |
207 | /* Part covered by intersection of masks must be unequal, | |
208 | otherwise they clash */ | |
209 | struct nf_conntrack_tuple_mask intersect_mask; | |
210 | int count; | |
211 | ||
212 | intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all; | |
213 | ||
214 | for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ | |
215 | intersect_mask.src.u3.all[count] = | |
216 | a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; | |
217 | } | |
218 | ||
219 | return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); | |
220 | } | |
221 | ||
222 | static inline int expect_matches(const struct nf_conntrack_expect *a, | |
223 | const struct nf_conntrack_expect *b) | |
224 | { | |
225 | return a->master == b->master && a->class == b->class && | |
226 | nf_ct_tuple_equal(&a->tuple, &b->tuple) && | |
227 | nf_ct_tuple_mask_equal(&a->mask, &b->mask) && | |
228 | nf_ct_zone(a->master) == nf_ct_zone(b->master); | |
229 | } | |
230 | ||
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	/* del_timer() succeeding means nobody else is tearing it down. */
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
242 | ||
/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	/* GFP_ATOMIC: may be called from packet processing (softirq). */
	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);	/* caller's reference */
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
259 | ||
260 | void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class, | |
261 | u_int8_t family, | |
262 | const union nf_inet_addr *saddr, | |
263 | const union nf_inet_addr *daddr, | |
264 | u_int8_t proto, const __be16 *src, const __be16 *dst) | |
265 | { | |
266 | int len; | |
267 | ||
268 | if (family == AF_INET) | |
269 | len = 4; | |
270 | else | |
271 | len = 16; | |
272 | ||
273 | exp->flags = 0; | |
274 | exp->class = class; | |
275 | exp->expectfn = NULL; | |
276 | exp->helper = NULL; | |
277 | exp->tuple.src.l3num = family; | |
278 | exp->tuple.dst.protonum = proto; | |
279 | ||
280 | if (saddr) { | |
281 | memcpy(&exp->tuple.src.u3, saddr, len); | |
282 | if (sizeof(exp->tuple.src.u3) > len) | |
283 | /* address needs to be cleared for nf_ct_tuple_equal */ | |
284 | memset((void *)&exp->tuple.src.u3 + len, 0x00, | |
285 | sizeof(exp->tuple.src.u3) - len); | |
286 | memset(&exp->mask.src.u3, 0xFF, len); | |
287 | if (sizeof(exp->mask.src.u3) > len) | |
288 | memset((void *)&exp->mask.src.u3 + len, 0x00, | |
289 | sizeof(exp->mask.src.u3) - len); | |
290 | } else { | |
291 | memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3)); | |
292 | memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3)); | |
293 | } | |
294 | ||
295 | if (src) { | |
296 | exp->tuple.src.u.all = *src; | |
297 | exp->mask.src.u.all = htons(0xFFFF); | |
298 | } else { | |
299 | exp->tuple.src.u.all = 0; | |
300 | exp->mask.src.u.all = 0; | |
301 | } | |
302 | ||
303 | memcpy(&exp->tuple.dst.u3, daddr, len); | |
304 | if (sizeof(exp->tuple.dst.u3) > len) | |
305 | /* address needs to be cleared for nf_ct_tuple_equal */ | |
306 | memset((void *)&exp->tuple.dst.u3 + len, 0x00, | |
307 | sizeof(exp->tuple.dst.u3) - len); | |
308 | ||
309 | exp->tuple.dst.u.all = *dst; | |
310 | ||
311 | #ifdef CONFIG_NF_NAT_NEEDED | |
312 | memset(&exp->saved_addr, 0, sizeof(exp->saved_addr)); | |
313 | memset(&exp->saved_proto, 0, sizeof(exp->saved_proto)); | |
314 | #endif | |
315 | } | |
316 | EXPORT_SYMBOL_GPL(nf_ct_expect_init); | |
317 | ||
318 | static void nf_ct_expect_free_rcu(struct rcu_head *head) | |
319 | { | |
320 | struct nf_conntrack_expect *exp; | |
321 | ||
322 | exp = container_of(head, struct nf_conntrack_expect, rcu); | |
323 | kmem_cache_free(nf_ct_expect_cachep, exp); | |
324 | } | |
325 | ||
/* Drop a reference on @exp; free via RCU once the last one is gone. */
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
332 | ||
333 | static int nf_ct_expect_insert(struct nf_conntrack_expect *exp) | |
334 | { | |
335 | struct nf_conn_help *master_help = nfct_help(exp->master); | |
336 | struct nf_conntrack_helper *helper; | |
337 | struct net *net = nf_ct_exp_net(exp); | |
338 | unsigned int h = nf_ct_expect_dst_hash(&exp->tuple); | |
339 | ||
340 | /* two references : one for hash insert, one for the timer */ | |
341 | atomic_add(2, &exp->use); | |
342 | ||
343 | hlist_add_head(&exp->lnode, &master_help->expectations); | |
344 | master_help->expecting[exp->class]++; | |
345 | ||
346 | hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]); | |
347 | net->ct.expect_count++; | |
348 | ||
349 | setup_timer(&exp->timeout, nf_ct_expectation_timed_out, | |
350 | (unsigned long)exp); | |
351 | helper = rcu_dereference_protected(master_help->helper, | |
352 | lockdep_is_held(&nf_conntrack_lock)); | |
353 | if (helper) { | |
354 | exp->timeout.expires = jiffies + | |
355 | helper->expect_policy[exp->class].timeout * HZ; | |
356 | } | |
357 | add_timer(&exp->timeout); | |
358 | ||
359 | NF_CT_STAT_INC(net, expect_create); | |
360 | return 0; | |
361 | } | |
362 | ||
/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	/* New expectations are added at the list head, so the last entry
	 * of the matching class is the oldest one. */
	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	/* Only evict if we beat the timer; otherwise it is already dying. */
	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}
380 | ||
/* Validate @expect prior to insertion.  Called with nf_conntrack_lock
 * held.  An identical pending expectation is removed so the new one can
 * replace it; a clashing (overlapping) one rejects the new expectation.
 * Per-helper and global limits are enforced.
 * Returns 1 when the expectation may be inserted, a negative errno
 * otherwise.
 */
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* Identical expectation already queued: delete it so
			 * the new one can take its place. */
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			/* Try to make room by evicting the oldest of this
			 * class before giving up. */
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}
433 | ||
434 | int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, | |
435 | u32 portid, int report) | |
436 | { | |
437 | int ret; | |
438 | ||
439 | spin_lock_bh(&nf_conntrack_lock); | |
440 | ret = __nf_ct_expect_check(expect); | |
441 | if (ret <= 0) | |
442 | goto out; | |
443 | ||
444 | ret = nf_ct_expect_insert(expect); | |
445 | if (ret < 0) | |
446 | goto out; | |
447 | spin_unlock_bh(&nf_conntrack_lock); | |
448 | nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report); | |
449 | return ret; | |
450 | out: | |
451 | spin_unlock_bh(&nf_conntrack_lock); | |
452 | return ret; | |
453 | } | |
454 | EXPORT_SYMBOL_GPL(nf_ct_expect_related_report); | |
455 | ||
456 | #ifdef CONFIG_NF_CONNTRACK_PROCFS | |
/* Iterator state for the /proc/net/nf_conntrack_expect seq_file. */
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;	/* hash bucket currently being walked */
};
461 | ||
/* Return the first node of the expectation hash (scanning buckets from
 * 0), or NULL if the table is empty.  Must run under rcu_read_lock().
 */
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}
475 | ||
/* Advance to the node after @head, moving on to the next non-empty
 * bucket when the current chain ends; NULL at end of table.
 * Must run under rcu_read_lock().
 */
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}
490 | ||
491 | static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos) | |
492 | { | |
493 | struct hlist_node *head = ct_expect_get_first(seq); | |
494 | ||
495 | if (head) | |
496 | while (pos && (head = ct_expect_get_next(seq, head))) | |
497 | pos--; | |
498 | return pos ? NULL : head; | |
499 | } | |
500 | ||
/* seq_file ->start: enter the RCU read side and seek to *pos. */
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}
507 | ||
/* seq_file ->next: advance the iterator to the next expectation. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}
513 | ||
/* seq_file ->stop: leave the RCU read side entered by exp_seq_start(). */
static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
519 | ||
520 | static int exp_seq_show(struct seq_file *s, void *v) | |
521 | { | |
522 | struct nf_conntrack_expect *expect; | |
523 | struct nf_conntrack_helper *helper; | |
524 | struct hlist_node *n = v; | |
525 | char *delim = ""; | |
526 | ||
527 | expect = hlist_entry(n, struct nf_conntrack_expect, hnode); | |
528 | ||
529 | if (expect->timeout.function) | |
530 | seq_printf(s, "%ld ", timer_pending(&expect->timeout) | |
531 | ? (long)(expect->timeout.expires - jiffies)/HZ : 0); | |
532 | else | |
533 | seq_printf(s, "- "); | |
534 | seq_printf(s, "l3proto = %u proto=%u ", | |
535 | expect->tuple.src.l3num, | |
536 | expect->tuple.dst.protonum); | |
537 | print_tuple(s, &expect->tuple, | |
538 | __nf_ct_l3proto_find(expect->tuple.src.l3num), | |
539 | __nf_ct_l4proto_find(expect->tuple.src.l3num, | |
540 | expect->tuple.dst.protonum)); | |
541 | ||
542 | if (expect->flags & NF_CT_EXPECT_PERMANENT) { | |
543 | seq_printf(s, "PERMANENT"); | |
544 | delim = ","; | |
545 | } | |
546 | if (expect->flags & NF_CT_EXPECT_INACTIVE) { | |
547 | seq_printf(s, "%sINACTIVE", delim); | |
548 | delim = ","; | |
549 | } | |
550 | if (expect->flags & NF_CT_EXPECT_USERSPACE) | |
551 | seq_printf(s, "%sUSERSPACE", delim); | |
552 | ||
553 | helper = rcu_dereference(nfct_help(expect->master)->helper); | |
554 | if (helper) { | |
555 | seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); | |
556 | if (helper->expect_policy[expect->class].name) | |
557 | seq_printf(s, "/%s", | |
558 | helper->expect_policy[expect->class].name); | |
559 | } | |
560 | ||
561 | return seq_putc(s, '\n'); | |
562 | } | |
563 | ||
/* seq_file operations for /proc/net/nf_conntrack_expect. */
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};
570 | ||
/* open() handler: set up a per-net seq_file iteration. */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}
576 | ||
/* file_operations backing the procfs expectation listing. */
static const struct file_operations exp_file_ops = {
	.owner = THIS_MODULE,
	.open = exp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
584 | #endif /* CONFIG_NF_CONNTRACK_PROCFS */ | |
585 | ||
/* Create /proc/net/nf_conntrack_expect for @net (no-op without procfs).
 * Returns 0 on success or -ENOMEM.
 */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}
598 | ||
/* Remove the proc entry created by exp_proc_init() (no-op without procfs). */
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}
605 | ||
606 | module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); | |
607 | ||
608 | int nf_conntrack_expect_pernet_init(struct net *net) | |
609 | { | |
610 | int err = -ENOMEM; | |
611 | ||
612 | net->ct.expect_count = 0; | |
613 | net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0); | |
614 | if (net->ct.expect_hash == NULL) | |
615 | goto err1; | |
616 | ||
617 | err = exp_proc_init(net); | |
618 | if (err < 0) | |
619 | goto err2; | |
620 | ||
621 | return 0; | |
622 | err2: | |
623 | nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); | |
624 | err1: | |
625 | return err; | |
626 | } | |
627 | ||
/* Per-netns teardown: remove the proc entry and free the hash table. */
void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
633 | ||
634 | int nf_conntrack_expect_init(void) | |
635 | { | |
636 | if (!nf_ct_expect_hsize) { | |
637 | nf_ct_expect_hsize = nf_conntrack_htable_size / 256; | |
638 | if (!nf_ct_expect_hsize) | |
639 | nf_ct_expect_hsize = 1; | |
640 | } | |
641 | nf_ct_expect_max = nf_ct_expect_hsize * 4; | |
642 | nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect", | |
643 | sizeof(struct nf_conntrack_expect), | |
644 | 0, 0, NULL); | |
645 | if (!nf_ct_expect_cachep) | |
646 | return -ENOMEM; | |
647 | return 0; | |
648 | } | |
649 | ||
/* Module-wide teardown: flush pending RCU frees, then destroy the cache. */
void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}