/* net/netfilter/nf_conntrack_expect.c */
/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>

LIST_HEAD(nf_conntrack_expect_list);
kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
static unsigned int nf_conntrack_expect_next_id;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);

        NF_CT_ASSERT(master_help);
        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        list_del(&exp->list);
        NF_CT_STAT_INC(expect_delete);
        master_help->expecting--;
        nf_conntrack_expect_put(exp);
}

static void expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        write_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        write_unlock_bh(&nf_conntrack_lock);
        nf_conntrack_expect_put(exp);
}

struct nf_conntrack_expect *
__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        list_for_each_entry(i, &nf_conntrack_expect_list, list) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
                        return i;
        }
        return NULL;
}

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_conntrack_expect_find_get(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        read_lock_bh(&nf_conntrack_lock);
        i = __nf_conntrack_expect_find(tuple);
        if (i)
                atomic_inc(&i->use);
        read_unlock_bh(&nf_conntrack_lock);

        return i;
}

/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
find_expectation(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        list_for_each_entry(i, &nf_conntrack_expect_list, list) {
                /* If master is not in hash table yet (ie. packet hasn't left
                   this machine yet), how can other end know about expected?
                   Hence these are not the droids you are looking for (if
                   master ct never got confirmed, we'd hold a reference to it
                   and weird things would happen to future packets). */
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
                    && nf_ct_is_confirmed(i->master)) {
                        if (i->flags & NF_CT_EXPECT_PERMANENT) {
                                atomic_inc(&i->use);
                                return i;
                        } else if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
                                return i;
                        }
                }
        }
        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conntrack_expect *i, *tmp;
        struct nf_conn_help *help = nfct_help(ct);

        /* Optimization: most connections never expect any others. */
        if (!help || help->expecting == 0)
                return;

        list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
                if (i->master == ct && del_timer(&i->timeout)) {
                        nf_ct_unlink_expect(i);
                        nf_conntrack_expect_put(i);
                }
        }
}

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple intersect_mask;
        int count;

        intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
        intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
        intersect_mask.dst.protonum = a->mask.dst.protonum
                                      & b->mask.dst.protonum;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.dst.u3.all[count] =
                        a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}
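
/*
 * Illustrative example (not part of the original file): two expectations
 * clash when their tuples agree on every field covered by *both* masks.
 * Suppose expectations A and B both wildcard the source port
 * (mask.src.u.all == 0) while fully masking the addresses, protocol and
 * destination port:
 *
 *   A expects dst port 2021, B expects dst port 2021  -> clash
 *   A expects dst port 2021, B expects dst port 2022  -> no clash
 *
 * In the first case the intersected mask hides only the source port, the
 * remaining fields compare equal and expect_clash() returns true; in the
 * second case the destination ports differ under the mask, so both
 * expectations can coexist.
 */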

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master
               && nf_ct_tuple_equal(&a->tuple, &b->tuple)
               && nf_ct_tuple_equal(&a->mask, &b->mask);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
{
        struct nf_conntrack_expect *i;

        write_lock_bh(&nf_conntrack_lock);
        /* choose the oldest expectation to evict */
        list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
                if (expect_matches(i, exp) && del_timer(&i->timeout)) {
                        nf_ct_unlink_expect(i);
                        write_unlock_bh(&nf_conntrack_lock);
                        nf_conntrack_expect_put(i);
                        return;
                }
        }
        write_unlock_bh(&nf_conntrack_lock);
}

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}

void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                kmem_cache_free(nf_conntrack_expect_cachep, exp);
}

static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);

        /* One reference is owned by the global expectation list... */
        atomic_inc(&exp->use);
        master_help->expecting++;
        list_add(&exp->list, &nf_conntrack_expect_list);

        init_timer(&exp->timeout);
        exp->timeout.data = (unsigned long)exp;
        exp->timeout.function = expectation_timed_out;
        exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
        add_timer(&exp->timeout);

        exp->id = ++nf_conntrack_expect_next_id;
        /* ...and a second one covers the pending timeout timer; both are
         * dropped again via nf_ct_unlink_expect()/nf_conntrack_expect_put(). */
        atomic_inc(&exp->use);
        NF_CT_STAT_INC(expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master)
{
        struct nf_conntrack_expect *i;

        list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
                if (i->master == master) {
                        if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
                                nf_conntrack_expect_put(i);
                        }
                        break;
                }
        }
}

static inline int refresh_timer(struct nf_conntrack_expect *i)
{
        struct nf_conn_help *master_help = nfct_help(i->master);

        if (!del_timer(&i->timeout))
                return 0;

        i->timeout.expires = jiffies + master_help->helper->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
}

int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
{
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        int ret;

        NF_CT_ASSERT(master_help);

        write_lock_bh(&nf_conntrack_lock);
        list_for_each_entry(i, &nf_conntrack_expect_list, list) {
                if (expect_matches(i, expect)) {
                        /* Refresh timer: if it's dying, ignore.. */
                        if (refresh_timer(i)) {
                                ret = 0;
                                goto out;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        if (master_help->helper->max_expected &&
            master_help->expecting >= master_help->helper->max_expected)
                evict_oldest_expect(master);

        nf_conntrack_expect_insert(expect);
        nf_conntrack_expect_event(IPEXP_NEW, expect);
        ret = 0;
out:
        write_unlock_bh(&nf_conntrack_lock);
        return ret;
}
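
/*
 * Minimal usage sketch (not part of the original file): how a connection
 * tracking helper of this era, e.g. the FTP helper, might register an
 * expectation for one related TCP connection.  The nf_conntrack_tuple
 * field names follow the layout already used above; the helper context
 * (ct, dir, port), the expectfn field and the hand-built mask are
 * illustrative assumptions, not a verbatim copy of any in-tree helper.
 */
static int example_expect_related(struct nf_conn *ct,
                                  enum ip_conntrack_dir dir,
                                  __be16 port)
{
        struct nf_conntrack_expect *exp;
        int ret;

        exp = nf_conntrack_expect_alloc(ct);
        if (exp == NULL)
                return -ENOMEM;

        /* Expect a TCP connection between the two endpoints of the master
         * connection, from any source port to "port". */
        exp->tuple.src.l3num = ct->tuplehash[!dir].tuple.src.l3num;
        exp->tuple.src.u3 = ct->tuplehash[!dir].tuple.src.u3;
        exp->tuple.src.u.tcp.port = 0;
        exp->tuple.dst.u3 = ct->tuplehash[!dir].tuple.dst.u3;
        exp->tuple.dst.u.tcp.port = port;
        exp->tuple.dst.protonum = IPPROTO_TCP;

        /* Mask: match addresses, protocol and destination port exactly,
         * wildcard the source port. */
        memset(&exp->mask, 0, sizeof(exp->mask));
        exp->mask.src.l3num = 0xFFFF;
        memset(&exp->mask.src.u3, 0xFF, sizeof(exp->mask.src.u3));
        memset(&exp->mask.dst.u3, 0xFF, sizeof(exp->mask.dst.u3));
        exp->mask.dst.u.tcp.port = htons(0xFFFF);
        exp->mask.dst.protonum = 0xFF;

        exp->expectfn = NULL;   /* no callback when the expectation matches */
        exp->flags = 0;

        ret = nf_conntrack_expect_related(exp);
        nf_conntrack_expect_put(exp);   /* drop the allocation reference */
        return ret;
}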

#ifdef CONFIG_PROC_FS
static void *exp_seq_start(struct seq_file *s, loff_t *pos)
{
        struct list_head *e = &nf_conntrack_expect_list;
        loff_t i;

        /* The seq_file API calls stop() even when start() fails, so we
         * must take the lock here: stop() is what unlocks it. */
        read_lock_bh(&nf_conntrack_lock);

        if (list_empty(e))
                return NULL;

        for (i = 0; i <= *pos; i++) {
                e = e->next;
                if (e == &nf_conntrack_expect_list)
                        return NULL;
        }
        return e;
}

static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct list_head *e = v;

        ++*pos;
        e = e->next;

        if (e == &nf_conntrack_expect_list)
                return NULL;

        return e;
}

static void exp_seq_stop(struct seq_file *s, void *v)
{
        read_unlock_bh(&nf_conntrack_lock);
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect = v;

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));
        return seq_putc(s, '\n');
}
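
/*
 * Illustrative output (not from the original file): with the IPv4/TCP
 * print_tuple() implementations of this era, one expectation line
 * produced by exp_seq_show() looks roughly like
 *
 *   299 l3proto = 2 proto=6 src=192.168.1.10 dst=192.168.1.20 sport=0 dport=52397
 *
 * i.e. remaining timeout in seconds, layer 3/4 protocol numbers, then
 * the expected tuple (a wildcarded source port prints as 0).  The exact
 * tuple text depends on the registered l3proto/l4proto callbacks.
 */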

static struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &exp_seq_ops);
}

struct file_operations exp_file_ops = {
        .owner = THIS_MODULE,
        .open = exp_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release
};
#endif /* CONFIG_PROC_FS */
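
/*
 * Usage sketch (not part of the original file): exp_file_ops is left
 * non-static so that the nf_conntrack standalone/proc setup code can
 * register it.  Assuming the pre-namespace proc_net_fops_create()
 * helper of this kernel generation, the hookup would look roughly like:
 */
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *example_register_expect_proc(void)
{
        /* Creates /proc/net/nf_conntrack_expect with mode 0440, backed
         * by the seq_file operations defined above. */
        return proc_net_fops_create("nf_conntrack_expect", 0440, &exp_file_ops);
}
#endif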