net/netfilter/nf_conntrack_expect.c
1 /* Expectation handling for nf_conntrack. */
2
3 /* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/types.h>
13 #include <linux/netfilter.h>
14 #include <linux/skbuff.h>
15 #include <linux/proc_fs.h>
16 #include <linux/seq_file.h>
17 #include <linux/stddef.h>
18 #include <linux/slab.h>
19 #include <linux/err.h>
20 #include <linux/percpu.h>
21 #include <linux/kernel.h>
22 #include <linux/jhash.h>
23 #include <net/net_namespace.h>
24
25 #include <net/netfilter/nf_conntrack.h>
26 #include <net/netfilter/nf_conntrack_core.h>
27 #include <net/netfilter/nf_conntrack_expect.h>
28 #include <net/netfilter/nf_conntrack_helper.h>
29 #include <net/netfilter/nf_conntrack_tuple.h>
30
31 struct hlist_head *nf_ct_expect_hash __read_mostly;
32 EXPORT_SYMBOL_GPL(nf_ct_expect_hash);
33
34 unsigned int nf_ct_expect_hsize __read_mostly;
35 EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
36
37 static unsigned int nf_ct_expect_hash_rnd __read_mostly;
38 static unsigned int nf_ct_expect_count;
39 unsigned int nf_ct_expect_max __read_mostly;
40 static int nf_ct_expect_hash_rnd_initted __read_mostly;
41 static int nf_ct_expect_vmalloc;
42
43 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
44
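/* Locking: additions to and removals from the expectation hash and the
 * per-conntrack expectation lists below run under nf_conntrack_lock taken
 * for writing; lookups either take it for reading or are expected to be
 * called with it already held. */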
45 /* nf_conntrack_expect helper functions */
46 void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
47 {
48 struct nf_conn_help *master_help = nfct_help(exp->master);
49
50 NF_CT_ASSERT(master_help);
51 NF_CT_ASSERT(!timer_pending(&exp->timeout));
52
53 hlist_del(&exp->hnode);
54 nf_ct_expect_count--;
55
56 hlist_del(&exp->lnode);
57 master_help->expecting--;
58 nf_ct_expect_put(exp);
59
60 NF_CT_STAT_INC(expect_delete);
61 }
62 EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);
63
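/* Timer callback: the expectation expired without being matched.  Unlink it
 * under the lock, then drop the reference the timer was holding. */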
64 static void nf_ct_expectation_timed_out(unsigned long ul_expect)
65 {
66 struct nf_conntrack_expect *exp = (void *)ul_expect;
67
68 write_lock_bh(&nf_conntrack_lock);
69 nf_ct_unlink_expect(exp);
70 write_unlock_bh(&nf_conntrack_lock);
71 nf_ct_expect_put(exp);
72 }
73
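/* Hash an expectation by its destination tuple: jhash2 over the destination
 * address, mixed with the l3/l4 protocol numbers and the destination port,
 * reduced modulo nf_ct_expect_hsize.  The random seed is set up lazily on
 * first use. */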
74 static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
75 {
76 if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
77 get_random_bytes(&nf_ct_expect_hash_rnd, 4);
78 nf_ct_expect_hash_rnd_initted = 1;
79 }
80
81 return jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
82 (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
83 (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
84 nf_ct_expect_hsize;
85 }
86
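/* Lookup without taking a reference; the caller is expected to hold
 * nf_conntrack_lock (see nf_ct_expect_find_get() for the locked variant). */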
87 struct nf_conntrack_expect *
88 __nf_ct_expect_find(const struct nf_conntrack_tuple *tuple)
89 {
90 struct nf_conntrack_expect *i;
91 struct hlist_node *n;
92 unsigned int h;
93
94 if (!nf_ct_expect_count)
95 return NULL;
96
97 h = nf_ct_expect_dst_hash(tuple);
98 hlist_for_each_entry(i, n, &nf_ct_expect_hash[h], hnode) {
99 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
100 return i;
101 }
102 return NULL;
103 }
104 EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
105
106 /* Just find an expectation corresponding to a tuple. */
107 struct nf_conntrack_expect *
108 nf_ct_expect_find_get(const struct nf_conntrack_tuple *tuple)
109 {
110 struct nf_conntrack_expect *i;
111
112 read_lock_bh(&nf_conntrack_lock);
113 i = __nf_ct_expect_find(tuple);
114 if (i)
115 atomic_inc(&i->use);
116 read_unlock_bh(&nf_conntrack_lock);
117
118 return i;
119 }
120 EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
121
122 /* If an expectation for this connection is found, it is unlinked from the
123  * global list and returned. */
124 struct nf_conntrack_expect *
125 nf_ct_find_expectation(const struct nf_conntrack_tuple *tuple)
126 {
127 struct nf_conntrack_expect *exp;
128
129 exp = __nf_ct_expect_find(tuple);
130 if (!exp)
131 return NULL;
132
133 /* If the master is not in the hash table yet (i.e. the packet hasn't
134 left this machine yet), how could the other end know about the
135 expectation?  Hence these are not the droids you are looking for (if
136 the master ct never got confirmed, we'd hold a reference to it
137 and weird things would happen to future packets). */
138 if (!nf_ct_is_confirmed(exp->master))
139 return NULL;
140
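/* Permanent expectations stay in the table and just gain an extra
   reference; normal ones are unlinked, and the reference their timer
   held is handed over to the caller. */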
141 if (exp->flags & NF_CT_EXPECT_PERMANENT) {
142 atomic_inc(&exp->use);
143 return exp;
144 } else if (del_timer(&exp->timeout)) {
145 nf_ct_unlink_expect(exp);
146 return exp;
147 }
148
149 return NULL;
150 }
151
152 /* delete all expectations for this conntrack */
153 void nf_ct_remove_expectations(struct nf_conn *ct)
154 {
155 struct nf_conn_help *help = nfct_help(ct);
156 struct nf_conntrack_expect *exp;
157 struct hlist_node *n, *next;
158
159 /* Optimization: most connections never expect any others. */
160 if (!help || help->expecting == 0)
161 return;
162
163 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
164 if (del_timer(&exp->timeout)) {
165 nf_ct_unlink_expect(exp);
166 nf_ct_expect_put(exp);
167 }
168 }
169 }
170 EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
171
172 /* Would two expected things clash? */
173 static inline int expect_clash(const struct nf_conntrack_expect *a,
174 const struct nf_conntrack_expect *b)
175 {
176 /* The parts covered by the intersection of the two masks must differ,
177 otherwise the expectations clash */
178 struct nf_conntrack_tuple_mask intersect_mask;
179 int count;
180
181 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
182
183 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
184 intersect_mask.src.u3.all[count] =
185 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
186 }
187
188 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
189 }
190
191 static inline int expect_matches(const struct nf_conntrack_expect *a,
192 const struct nf_conntrack_expect *b)
193 {
194 return a->master == b->master
195 && nf_ct_tuple_equal(&a->tuple, &b->tuple)
196 && nf_ct_tuple_mask_equal(&a->mask, &b->mask);
197 }
198
199 /* Generally a bad idea to call this: could have matched already. */
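/* nf_ct_unexpect_related() cancels an expectation a helper registered
 * earlier; if its timer has already fired or the expectation has already
 * been matched, this is a no-op. */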
200 void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
201 {
202 write_lock_bh(&nf_conntrack_lock);
203 if (del_timer(&exp->timeout)) {
204 nf_ct_unlink_expect(exp);
205 nf_ct_expect_put(exp);
206 }
207 write_unlock_bh(&nf_conntrack_lock);
208 }
209 EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
210
211 /* We don't take a reference on the master conntrack for unfulfilled
212  * expectations: during conntrack destruction, the expectations are
213  * always killed before the conntrack itself. */
214 struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
215 {
216 struct nf_conntrack_expect *new;
217
218 new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
219 if (!new)
220 return NULL;
221
222 new->master = me;
223 atomic_set(&new->use, 1);
224 return new;
225 }
226 EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
227
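/* Fill in the expectation's tuple and mask.  "family" selects the address
 * length (4 bytes for AF_INET, 16 for AF_INET6); a NULL saddr or src
 * wildcards that source field (its mask is zeroed), while daddr and dst
 * are always matched exactly. */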
228 void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
229 union nf_conntrack_address *saddr,
230 union nf_conntrack_address *daddr,
231 u_int8_t proto, __be16 *src, __be16 *dst)
232 {
233 int len;
234
235 if (family == AF_INET)
236 len = 4;
237 else
238 len = 16;
239
240 exp->flags = 0;
241 exp->expectfn = NULL;
242 exp->helper = NULL;
243 exp->tuple.src.l3num = family;
244 exp->tuple.dst.protonum = proto;
245
246 if (saddr) {
247 memcpy(&exp->tuple.src.u3, saddr, len);
248 if (sizeof(exp->tuple.src.u3) > len)
249 /* address needs to be cleared for nf_ct_tuple_equal */
250 memset((void *)&exp->tuple.src.u3 + len, 0x00,
251 sizeof(exp->tuple.src.u3) - len);
252 memset(&exp->mask.src.u3, 0xFF, len);
253 if (sizeof(exp->mask.src.u3) > len)
254 memset((void *)&exp->mask.src.u3 + len, 0x00,
255 sizeof(exp->mask.src.u3) - len);
256 } else {
257 memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
258 memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
259 }
260
261 if (src) {
262 exp->tuple.src.u.all = *src;
263 exp->mask.src.u.all = htons(0xFFFF);
264 } else {
265 exp->tuple.src.u.all = 0;
266 exp->mask.src.u.all = 0;
267 }
268
269 memcpy(&exp->tuple.dst.u3, daddr, len);
270 if (sizeof(exp->tuple.dst.u3) > len)
271 /* address needs to be cleared for nf_ct_tuple_equal */
272 memset((void *)&exp->tuple.dst.u3 + len, 0x00,
273 sizeof(exp->tuple.dst.u3) - len);
274
275 exp->tuple.dst.u.all = *dst;
276 }
277 EXPORT_SYMBOL_GPL(nf_ct_expect_init);
278
279 void nf_ct_expect_put(struct nf_conntrack_expect *exp)
280 {
281 if (atomic_dec_and_test(&exp->use))
282 kmem_cache_free(nf_ct_expect_cachep, exp);
283 }
284 EXPORT_SYMBOL_GPL(nf_ct_expect_put);
285
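/* Called with nf_conntrack_lock held for writing.  Takes two references on
 * the expectation: one for its presence on the hash and helper lists, one
 * for the pending timeout timer. */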
286 static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
287 {
288 struct nf_conn_help *master_help = nfct_help(exp->master);
289 unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
290
291 atomic_inc(&exp->use);
292
293 hlist_add_head(&exp->lnode, &master_help->expectations);
294 master_help->expecting++;
295
296 hlist_add_head(&exp->hnode, &nf_ct_expect_hash[h]);
297 nf_ct_expect_count++;
298
299 setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
300 (unsigned long)exp);
301 exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
302 add_timer(&exp->timeout);
303
304 atomic_inc(&exp->use);
305 NF_CT_STAT_INC(expect_create);
306 }
307
308 /* Race with expectations being used means we could have none to find; OK. */
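/* New expectations are added at the head of the helper's list, so walking
 * to the end of the list leaves "exp" pointing at the oldest entry. */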
309 static void evict_oldest_expect(struct nf_conn *master)
310 {
311 struct nf_conn_help *master_help = nfct_help(master);
312 struct nf_conntrack_expect *exp = NULL;
313 struct hlist_node *n;
314
315 hlist_for_each_entry(exp, n, &master_help->expectations, lnode)
316 ; /* nothing */
317
318 if (exp && del_timer(&exp->timeout)) {
319 nf_ct_unlink_expect(exp);
320 nf_ct_expect_put(exp);
321 }
322 }
323
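/* Re-arm an expectation's timeout.  Returns 0 if the timer had already
 * fired (the expectation is on its way out), 1 if it was refreshed. */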
324 static inline int refresh_timer(struct nf_conntrack_expect *i)
325 {
326 struct nf_conn_help *master_help = nfct_help(i->master);
327
328 if (!del_timer(&i->timeout))
329 return 0;
330
331 i->timeout.expires = jiffies + master_help->helper->timeout * HZ;
332 add_timer(&i->timeout);
333 return 1;
334 }
335
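/* Register a new expectation for a master conntrack.  Returns 0 on success
 * (including the case where an identical expectation already existed and its
 * timer was simply refreshed), -ESHUTDOWN if the master has lost its helper,
 * -EBUSY on a clash with an existing expectation, and -EMFILE when the
 * global expectation table is full. */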
336 int nf_ct_expect_related(struct nf_conntrack_expect *expect)
337 {
338 struct nf_conntrack_expect *i;
339 struct nf_conn *master = expect->master;
340 struct nf_conn_help *master_help = nfct_help(master);
341 struct hlist_node *n;
342 unsigned int h;
343 int ret;
344
345 NF_CT_ASSERT(master_help);
346
347 write_lock_bh(&nf_conntrack_lock);
348 if (!master_help->helper) {
349 ret = -ESHUTDOWN;
350 goto out;
351 }
352 h = nf_ct_expect_dst_hash(&expect->tuple);
353 hlist_for_each_entry(i, n, &nf_ct_expect_hash[h], hnode) {
354 if (expect_matches(i, expect)) {
355 /* Refresh the timer: if it is already dying, ignore it. */
356 if (refresh_timer(i)) {
357 ret = 0;
358 goto out;
359 }
360 } else if (expect_clash(i, expect)) {
361 ret = -EBUSY;
362 goto out;
363 }
364 }
365 /* Would this take us over the helper's limit?  Evict the oldest if so. */
366 if (master_help->helper->max_expected &&
367 master_help->expecting >= master_help->helper->max_expected)
368 evict_oldest_expect(master);
369
370 if (nf_ct_expect_count >= nf_ct_expect_max) {
371 if (net_ratelimit())
372 printk(KERN_WARNING
373 "nf_conntrack: expectation table full");
374 ret = -EMFILE;
375 goto out;
376 }
377
378 nf_ct_expect_insert(expect);
379 nf_ct_expect_event(IPEXP_NEW, expect);
380 ret = 0;
381 out:
382 write_unlock_bh(&nf_conntrack_lock);
383 return ret;
384 }
385 EXPORT_SYMBOL_GPL(nf_ct_expect_related);
386
387 #ifdef CONFIG_PROC_FS
388 struct ct_expect_iter_state {
389 unsigned int bucket;
390 };
391
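/* The /proc/net/nf_conntrack_expect iterator walks the expectation hash one
 * bucket at a time; nf_conntrack_lock is taken in exp_seq_start() and
 * released in exp_seq_stop(). */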
392 static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
393 {
394 struct ct_expect_iter_state *st = seq->private;
395
396 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
397 if (!hlist_empty(&nf_ct_expect_hash[st->bucket]))
398 return nf_ct_expect_hash[st->bucket].first;
399 }
400 return NULL;
401 }
402
403 static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
404 struct hlist_node *head)
405 {
406 struct ct_expect_iter_state *st = seq->private;
407
408 head = head->next;
409 while (head == NULL) {
410 if (++st->bucket >= nf_ct_expect_hsize)
411 return NULL;
412 head = nf_ct_expect_hash[st->bucket].first;
413 }
414 return head;
415 }
416
417 static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
418 {
419 struct hlist_node *head = ct_expect_get_first(seq);
420
421 if (head)
422 while (pos && (head = ct_expect_get_next(seq, head)))
423 pos--;
424 return pos ? NULL : head;
425 }
426
427 static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
428 {
429 read_lock_bh(&nf_conntrack_lock);
430 return ct_expect_get_idx(seq, *pos);
431 }
432
433 static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
434 {
435 (*pos)++;
436 return ct_expect_get_next(seq, v);
437 }
438
439 static void exp_seq_stop(struct seq_file *seq, void *v)
440 {
441 read_unlock_bh(&nf_conntrack_lock);
442 }
443
444 static int exp_seq_show(struct seq_file *s, void *v)
445 {
446 struct nf_conntrack_expect *expect;
447 struct hlist_node *n = v;
448
449 expect = hlist_entry(n, struct nf_conntrack_expect, hnode);
450
451 if (expect->timeout.function)
452 seq_printf(s, "%ld ", timer_pending(&expect->timeout)
453 ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
454 else
455 seq_printf(s, "- ");
456 seq_printf(s, "l3proto = %u proto=%u ",
457 expect->tuple.src.l3num,
458 expect->tuple.dst.protonum);
459 print_tuple(s, &expect->tuple,
460 __nf_ct_l3proto_find(expect->tuple.src.l3num),
461 __nf_ct_l4proto_find(expect->tuple.src.l3num,
462 expect->tuple.dst.protonum));
463 return seq_putc(s, '\n');
464 }
465
466 static const struct seq_operations exp_seq_ops = {
467 .start = exp_seq_start,
468 .next = exp_seq_next,
469 .stop = exp_seq_stop,
470 .show = exp_seq_show
471 };
472
473 static int exp_open(struct inode *inode, struct file *file)
474 {
475 return seq_open_private(file, &exp_seq_ops,
476 sizeof(struct ct_expect_iter_state));
477 }
478
479 static const struct file_operations exp_file_ops = {
480 .owner = THIS_MODULE,
481 .open = exp_open,
482 .read = seq_read,
483 .llseek = seq_lseek,
484 .release = seq_release_private,
485 };
486 #endif /* CONFIG_PROC_FS */
487
488 static int __init exp_proc_init(void)
489 {
490 #ifdef CONFIG_PROC_FS
491 struct proc_dir_entry *proc;
492
493 proc = proc_net_fops_create(&init_net, "nf_conntrack_expect", 0440, &exp_file_ops);
494 if (!proc)
495 return -ENOMEM;
496 #endif /* CONFIG_PROC_FS */
497 return 0;
498 }
499
500 static void exp_proc_remove(void)
501 {
502 #ifdef CONFIG_PROC_FS
503 proc_net_remove(&init_net, "nf_conntrack_expect");
504 #endif /* CONFIG_PROC_FS */
505 }
506
507 module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
508
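/* By default the expectation hash gets one bucket per 256 conntrack hash
 * buckets (minimum 1), and nf_ct_expect_max caps the table at four
 * expectations per bucket on average. */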
509 int __init nf_conntrack_expect_init(void)
510 {
511 int err = -ENOMEM;
512
513 if (!nf_ct_expect_hsize) {
514 nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
515 if (!nf_ct_expect_hsize)
516 nf_ct_expect_hsize = 1;
517 }
518 nf_ct_expect_max = nf_ct_expect_hsize * 4;
519
520 nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
521 &nf_ct_expect_vmalloc);
522 if (nf_ct_expect_hash == NULL)
523 goto err1;
524
525 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
526 sizeof(struct nf_conntrack_expect),
527 0, 0, NULL);
528 if (!nf_ct_expect_cachep)
529 goto err2;
530
531 err = exp_proc_init();
532 if (err < 0)
533 goto err3;
534
535 return 0;
536
537 err3:
538 nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_vmalloc,
539 nf_ct_expect_hsize);
540 err2:
541 kmem_cache_destroy(nf_ct_expect_cachep);
542 err1:
543 return err;
544 }
545
546 void nf_conntrack_expect_fini(void)
547 {
548 exp_proc_remove();
549 kmem_cache_destroy(nf_ct_expect_cachep);
550 nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_vmalloc,
551 nf_ct_expect_hsize);
552 }