/* Connection state tracking for netfilter. This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>

#define NF_CONNTRACK_VERSION    "0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      enum nf_nat_manip_type manip,
                                      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        spin_unlock(&nf_conntrack_locks[h1]);
        if (h1 != h2)
                spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                                     unsigned int h2, unsigned int sequence)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
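        /* Always take the lower-indexed lock first: two CPUs locking the
         * same pair in opposite order would otherwise deadlock (ABBA).
         * spin_lock_nested() tells lockdep the second lock of the pair is
         * intentionally nested.
         */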
        if (h1 <= h2) {
                spin_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
        if (read_seqcount_retry(&net->ct.generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
        return false;
}

static void nf_conntrack_all_lock(void)
{
        int i;

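        /* Acquire every bucket lock in ascending index order, the same
         * order nf_conntrack_double_lock() uses, so the two can never
         * deadlock against each other.
         */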
        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_nested(&nf_conntrack_locks[i], i);
}

static void nf_conntrack_all_unlock(void)
{
        int i;

        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_unlock(&nf_conntrack_locks[i]);
}

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

unsigned int nf_conntrack_hash_rnd __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
{
        unsigned int n;

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
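        /* The destination port and protocol number don't fit into the u32
         * words hashed above, so fold them (with the zone and the random
         * seed) into the jhash2() initval; they still perturb the result.
         */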
        return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
}

static u32 __hash_bucket(u32 hash, unsigned int size)
{
        return reciprocal_scale(hash, size);
}

static u32 hash_bucket(u32 hash, const struct net *net)
{
        return __hash_bucket(hash, net->ct.htable_size);
}

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  u16 zone, unsigned int size)
{
        return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
}

static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
                                       const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, zone, net->ct.htable_size);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) dying list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->dying);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) unconfirmed list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* We overload the first tuple to link into the unconfirmed or dying list. */
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&pcpu->lock);
}

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
{
        struct nf_conn *tmpl;

        tmpl = kzalloc(sizeof(*tmpl), flags);
        if (tmpl == NULL)
                return NULL;

        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);

#ifdef CONFIG_NF_CONNTRACK_ZONES
        if (zone) {
                struct nf_conntrack_zone *nf_ct_zone;

                nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
                if (!nf_ct_zone)
                        goto out_free;
                nf_ct_zone->id = zone;
        }
#endif
        atomic_set(&tmpl->ct_general.use, 0);

        return tmpl;
#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
        kfree(tmpl);
        return NULL;
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

static void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);
        kfree(tmpl);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        if (unlikely(nf_ct_is_template(ct))) {
                nf_ct_tmpl_free(ct);
                return;
        }
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too.
         */
        nf_ct_remove_expectations(ct);

        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        NF_CT_STAT_INC(net, delete);
        local_bh_enable();

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        u16 zone = nf_ct_zone(ct);
        unsigned int sequence;

        nf_ct_helper_destroy(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                hash = hash_conntrack(net, zone,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net, zone,
                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        clean_from_lists(ct);
        nf_conntrack_double_unlock(hash, reply_hash);

        nf_ct_add_to_dying_list(ct);

        NF_CT_STAT_INC(net, delete_list);
        local_bh_enable();
}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
        struct nf_conn_tstamp *tstamp;

        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_get_real_ns();

        if (nf_ct_is_dying(ct))
                goto delete;

        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                      portid, report) < 0) {
                /* destroy event was not delivered */
                nf_ct_delete_from_lists(ct);
                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }

        nf_conntrack_ecache_work(nf_ct_net(ct));
        set_bit(IPS_DYING_BIT, &ct->status);
delete:
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static void death_by_timeout(unsigned long ul_conntrack)
{
        nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                u16 zone)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* A conntrack can be recreated with an equal tuple,
         * so we need to check that the conntrack is confirmed.
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone(ct) == zone &&
               nf_ct_is_confirmed(ct);
}

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int bucket = hash_bucket(hash, net);

        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
begin:
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
                if (nf_ct_key_equal(h, tuple, zone)) {
                        NF_CT_STAT_INC(net, found);
                        local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(net, searched);
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC(net, search_restart);
                goto begin;
        }
        local_bh_enable();

        return NULL;
}

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, u16 zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
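        /* The conntrack cache uses SLAB_DESTROY_BY_RCU, so an entry can be
         * freed and recycled while we walk the chain.  Take a reference
         * with atomic_inc_not_zero() and then re-verify the key; if it no
         * longer matches, drop the reference and retry the lookup.
         */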
begin:
        h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple)
{
        return __nf_conntrack_find_get(net, zone, tuple,
                                       hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
{
        struct net *net = nf_ct_net(ct);

        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                                 &net->ct.hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                                 &net->ct.hash[reply_hash]);
}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        u16 zone;
        unsigned int sequence;

        zone = nf_ct_zone(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                hash = hash_conntrack(net, zone,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net, zone,
                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;

        add_timer(&ct->timeout);
        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();
        return 0;

out:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conn_tstamp *tstamp;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
        u16 zone;
        unsigned int sequence;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in the other direction.  The actual packet
           which created the connection will be IP_CT_NEW, or IP_CT_RELATED
           for an expected connection. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        zone = nf_ct_zone(ct);
        local_bh_disable();

        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                /* reuse the hash saved before */
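                /* (the raw hash was stashed in the otherwise unused reply
                 * hnnode.pprev by __nf_conntrack_alloc(); only the bucket
                 * needs recomputing in case the table was resized)
                 */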
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = hash_bucket(hash, net);
                reply_hash = hash_conntrack(net, zone,
                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* We're not in hash table, and we refuse to set up related
         * connections for unconfirmed conns.  But packet copies and
         * REJECT will give spurious warnings here.
         */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
         * confirmed us.
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
        /* We have to check the DYING flag after unlink to prevent
         * a race against nf_ct_get_next_corpse() possibly called from
         * user context, else we insert an already 'dead' hash, blocking
         * further use of that particular connection -JM.
         */
        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        if (unlikely(nf_ct_is_dying(ct)))
                goto out;

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;

        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        ct->status |= IPS_CONFIRMED;

        /* set conntrack timestamp, if enabled. */
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp) {
                if (skb->tstamp.tv64 == 0)
                        __net_timestamp(skb);

                tstamp->start = ktime_to_ns(skb->tstamp);
        }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();

        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);

        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        nf_ct_add_to_dying_list(ct);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;
        u16 zone = nf_ct_zone(ignored_conntrack);
        unsigned int hash = hash_conntrack(net, zone, tuple);

        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (ct != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple) &&
                    nf_ct_zone(ct) == zone) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(net, searched);
        }
        rcu_read_unlock_bh();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int _hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_nulls_node *n;
        unsigned int i = 0, cnt = 0;
        int dropped = 0;
        unsigned int hash, sequence;
        spinlock_t *lockp;

        local_bh_disable();
restart:
        sequence = read_seqcount_begin(&net->ct.generation);
        hash = hash_bucket(_hash, net);
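        /* Scan at most NF_CT_EVICTION_RANGE entries, walking consecutive
         * buckets starting from the packet's own bucket, and evict the
         * first conntrack that is neither assured nor already dying.
         */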
        for (; i < net->ct.htable_size; i++) {
                lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
                spin_lock(lockp);
                if (read_seqcount_retry(&net->ct.generation, sequence)) {
                        spin_unlock(lockp);
                        goto restart;
                }
                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                               hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
                            !nf_ct_is_dying(tmp) &&
                            atomic_inc_not_zero(&tmp->ct_general.use)) {
                                ct = tmp;
                                break;
                        }
                        cnt++;
                }

                hash = (hash + 1) % net->ct.htable_size;
                spin_unlock(lockp);

                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;

        }
        local_bh_enable();

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                if (nf_ct_delete(ct, 0, 0)) {
                        dropped = 1;
                        NF_CT_STAT_INC_ATOMIC(net, early_drop);
                }
        }
        nf_ct_put(ct);
        return dropped;
}

void init_nf_conntrack_hash_rnd(void)
{
        unsigned int rand;

        /*
         * Why not initialize nf_conntrack_hash_rnd in an init() function?
         * Because there isn't enough entropy while the system is booting,
         * so we initialize it as late as possible.
         */
        do {
                get_random_bytes(&rand, sizeof(rand));
        } while (!rand);
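        /* cmpxchg() ensures only the first caller installs a seed;
         * concurrent callers lose the race and keep the winner's value.
         */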
        cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
}

static struct nf_conn *
__nf_conntrack_alloc(struct net *net, u16 zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
{
        struct nf_conn *ct;

        if (unlikely(!nf_conntrack_hash_rnd)) {
                init_nf_conntrack_hash_rnd();
                /* recompute the hash as nf_conntrack_hash_rnd is initialized */
                hash = hash_conntrack_raw(orig, zone);
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /*
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_DESTROY_BY_RCU.
         */
        ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                atomic_dec(&net->ct.count);
                return ERR_PTR(-ENOMEM);
        }
        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
        write_pnet(&ct->ct_net, net);
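        /* Zero only the region between __nfct_init_offset and the proto
         * union: the fields above it were initialized explicitly, and the
         * refcount/tuplehash must stay intact because, with
         * SLAB_DESTROY_BY_RCU, lockless readers may still be inspecting a
         * recycled object (same reason kmem_cache_zalloc() is avoided).
         */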
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));
#ifdef CONFIG_NF_CONNTRACK_ZONES
        if (zone) {
                struct nf_conntrack_zone *nf_ct_zone;

                nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
                if (!nf_ct_zone)
                        goto out_free;
                nf_ct_zone->id = zone;
        }
#endif
        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;

#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
        atomic_dec(&net->ct.count);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
        return ERR_PTR(-ENOMEM);
#endif
}

struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        /* A freed object has refcnt == 0, that's
         * the golden rule for SLAB_DESTROY_BY_RCU
         */
        NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);

        nf_ct_ext_destroy(ct);
        nf_ct_ext_free(ct);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
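        /* Order the free above before the decrement below:
         * nf_conntrack_cleanup_net_list() waits for net->ct.count to reach
         * zero and must not observe it before the object is really gone.
         */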
        smp_mb__before_atomic();
        atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);


/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff, u32 hash)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
        u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
        struct nf_conn_timeout *timeout_ext;
        unsigned int *timeouts;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
                return (struct nf_conntrack_tuple_hash *)ct;

        if (tmpl && nfct_synproxy(tmpl)) {
                nfct_seqadj_ext_add(ct);
                nfct_synproxy_ext_add(ct);
        }

        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
        if (timeout_ext)
                timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
        else
                timeouts = l4proto->get_timeouts(net);

        if (!l4proto->new(ct, skb, dataoff, timeouts)) {
                nf_conntrack_free(ct);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        if (timeout_ext)
                nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_labels_ext_add(ct);

        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
                             ecache ? ecache->expmask : 0,
                             GFP_ATOMIC);

        local_bh_disable();
        if (net->ct.expect_count) {
                spin_lock(&nf_conntrack_expect_lock);
                exp = nf_ct_find_expectation(net, zone, tuple);
                if (exp) {
                        pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                                 ct, exp);
                        /* Welcome, Mr. Bond.  We've been expecting you... */
                        __set_bit(IPS_EXPECTED_BIT, &ct->status);
                        /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
                        ct->master = exp->master;
                        if (exp->helper) {
                                help = nf_ct_helper_ext_add(ct, exp->helper,
                                                            GFP_ATOMIC);
                                if (help)
                                        rcu_assign_pointer(help->helper, exp->helper);
                        }

#ifdef CONFIG_NF_CONNTRACK_MARK
                        ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                        ct->secmark = exp->master->secmark;
#endif
                        NF_CT_STAT_INC(net, expect_new);
                }
                spin_unlock(&nf_conntrack_expect_lock);
        }
        if (!exp) {
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
                NF_CT_STAT_INC(net, new);
        }

        /* Now it is inserted into the unconfirmed list, bump refcount */
        nf_conntrack_get(&ct->ct_general);
        nf_ct_add_to_unconfirmed_list(ct);

        local_bh_enable();

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
        u32 hash;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        hash = hash_conntrack_raw(&tuple, zone);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
                                   skb, dataoff, hash);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED_REPLY;
                /* Please set reply bit if this packet OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
{
        struct nf_conn *ct, *tmpl = NULL;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int *timeouts;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        if (skb->nfct) {
                /* Previously seen (loopback or untracked)?  Ignore. */
                tmpl = (struct nf_conn *)skb->nfct;
                if (!nf_ct_is_template(tmpl)) {
                        NF_CT_STAT_INC_ATOMIC(net, ignore);
                        return NF_ACCEPT;
                }
                skb->nfct = NULL;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find(pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(net, error);
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                ret = -ret;
                goto out;
        }

        l4proto = __nf_ct_l4proto_find(pf, protonum);

        /* It may be a special packet, error, unclean...
         * the inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL) {
                ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
                                     pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(net, error);
                        NF_CT_STAT_INC_ATOMIC(net, invalid);
                        ret = -ret;
                        goto out;
                }
                /* ICMP[v6] protocol trackers may assign one conntrack. */
                if (skb->nfct)
                        goto out;
        }

        ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
                               l3proto, l4proto, &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(net, drop);
                ret = NF_DROP;
                goto out;
        }

        NF_CT_ASSERT(skb->nfct);

        /* Decide what timeout policy we want to apply to this flow. */
        timeouts = nf_ct_timeout_lookup(net, ct, l4proto);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(net, drop);
                ret = -ret;
                goto out;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
        if (tmpl) {
                /* Special case: we have to repeat this hook, assign the
                 * template again to this packet.  We assume that this packet
                 * has no conntrack assigned.  This is used by nf_ct_tcp. */
                if (ret == NF_REPEAT)
                        skb->nfct = (struct nf_conntrack *)tmpl;
                else
                        nf_ct_put(tmpl);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                          const struct nf_conntrack_tuple *orig)
{
        bool ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);

        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout.  Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ)
                        mod_timer_pending(&ct->timeout, newtime);
        }

acct:
        if (do_acct) {
                struct nf_conn_acct *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        struct nf_conn_counter *counter = acct->counter;

                        atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                        atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
                }
        }
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
                       const struct sk_buff *skb,
                       int do_acct)
{
        if (do_acct) {
                struct nf_conn_acct *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        struct nf_conn_counter *counter = acct->counter;

                        atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                        atomic64_add(skb->len - skb_network_offset(skb),
                                     &counter[CTINFO2DIR(ctinfo)].bytes);
                }
        }

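        /* If del_timer() returns true we deactivated a pending timer and
         * thus own the final reference drop: run the timeout handler
         * ourselves to tear the conntrack down immediately.
         */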
        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
        .len    = sizeof(struct nf_conntrack_zone),
        .align  = __alignof__(struct nf_conntrack_zone),
        .id     = NF_CT_EXT_ZONE,
};
#endif

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like.  This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
            nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
        return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;
        int cpu;
        spinlock_t *lockp;

        for (; *bucket < net->ct.htable_size; (*bucket)++) {
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
                spin_lock(lockp);
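                /* Re-check the bound under the lock: a concurrent resize
                 * may have shrunk the table since the loop condition ran.
                 */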
                if (*bucket < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                                if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                        continue;
                                ct = nf_ct_tuplehash_to_ctrack(h);
                                if (iter(ct, data))
                                        goto found;
                        }
                }
                spin_unlock(lockp);
                local_bh_enable();
        }

        for_each_possible_cpu(cpu) {
                struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

                spin_lock_bh(&pcpu->lock);
                hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                set_bit(IPS_DYING_BIT, &ct->status);
                }
                spin_unlock_bh(&pcpu->lock);
        }
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock(lockp);
        local_bh_enable();
        return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data, u32 portid, int report)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        nf_ct_delete(ct, portid, report);

                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
        return 1;
}

void nf_ct_free_hashtable(void *hash, unsigned int size)
{
        if (is_vmalloc_addr(hash))
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

static int untrack_refs(void)
{
        int cnt = 0, cpu;

        for_each_possible_cpu(cpu) {
                struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);

                cnt += atomic_read(&ct->ct_general.use) - 1;
        }
        return cnt;
}

void nf_conntrack_cleanup_start(void)
{
        RCU_INIT_POINTER(ip_ct_attach, NULL);
}

void nf_conntrack_cleanup_end(void)
{
        RCU_INIT_POINTER(nf_ct_destroy, NULL);
        while (untrack_refs() > 0)
                schedule();

#ifdef CONFIG_NF_CONNTRACK_ZONES
        nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
        nf_conntrack_proto_fini();
        nf_conntrack_seqadj_fini();
        nf_conntrack_labels_fini();
        nf_conntrack_helper_fini();
        nf_conntrack_timeout_fini();
        nf_conntrack_ecache_fini();
        nf_conntrack_tstamp_fini();
        nf_conntrack_acct_fini();
        nf_conntrack_expect_fini();
}

/*
 * Mishearing the voices in his head, our hero wonders how he's
 * supposed to kill the mall.
 */
void nf_conntrack_cleanup_net(struct net *net)
{
        LIST_HEAD(single);

        list_add(&net->exit_list, &single);
        nf_conntrack_cleanup_net_list(&single);
}

void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
        int busy;
        struct net *net;

        /*
         * This makes sure all current packets have passed through
         * netfilter framework.  Roll on, two-stage module
         * delete...
         */
        synchronize_net();
i_see_dead_people:
        busy = 0;
        list_for_each_entry(net, net_exit_list, exit_list) {
                nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
                if (atomic_read(&net->ct.count) != 0)
                        busy = 1;
        }
        if (busy) {
                schedule();
                goto i_see_dead_people;
        }

        list_for_each_entry(net, net_exit_list, exit_list) {
                nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
                nf_conntrack_proto_pernet_fini(net);
                nf_conntrack_helper_pernet_fini(net);
                nf_conntrack_ecache_pernet_fini(net);
                nf_conntrack_tstamp_pernet_fini(net);
                nf_conntrack_acct_pernet_fini(net);
                nf_conntrack_expect_pernet_fini(net);
                kmem_cache_destroy(net->ct.nf_conntrack_cachep);
                kfree(net->ct.slabname);
                free_percpu(net->ct.stat);
                free_percpu(net->ct.pcpu_lists);
        }
}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
        struct hlist_nulls_head *hash;
        unsigned int nr_slots, i;
        size_t sz;

        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
        if (!hash) {
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = vzalloc(sz);
        }

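        /* Encode each bucket's index in its chain's nulls terminator;
         * lockless lookups compare it against the expected bucket to detect
         * that an entry migrated chains mid-walk (see ____nf_conntrack_find()).
         */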
1552 | if (hash && nulls) | |
1553 | for (i = 0; i < nr_slots; i++) | |
1554 | INIT_HLIST_NULLS_HEAD(&hash[i], i); | |
1555 | ||
1556 | return hash; | |
1557 | } | |
1558 | EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); | |
1559 | ||
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, rc;
	unsigned int hashsize, old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (current->nsproxy->net_ns != &init_net)
		return -EOPNOTSUPP;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	rc = kstrtouint(val, 0, &hashsize);
	if (rc)
		return rc;
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, 1);
	if (!hash)
		return -ENOMEM;

	local_bh_disable();
	nf_conntrack_all_lock();
	write_seqcount_begin(&init_net.ct.generation);

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though, since that requires taking the locks.
	 */

	for (i = 0; i < init_net.ct.htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					      struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
						  hashsize);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = init_net.ct.htable_size;
	old_hash = init_net.ct.hash;

	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
	init_net.ct.hash = hash;

	write_seqcount_end(&init_net.ct.generation);
	nf_conntrack_all_unlock();
	local_bh_enable();

	nf_ct_free_hashtable(old_hash, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

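/*
 * Usage sketch: with mode 0600 the hash size is exposed under
 * /sys/module/nf_conntrack/parameters/hashsize, so an administrator
 * can grow the table at runtime, e.g.:
 *
 *	echo 131072 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * or set it once at load time via the hashsize= module parameter.
 * Note that the function above only permits this from the initial
 * network namespace and never lets the value drop to zero.
 */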
void nf_ct_untracked_status_or(unsigned long bits)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
}
EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);

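/*
 * Example caller (a sketch; the actual call site lives in the NAT
 * core rather than here): a subsystem that must never revisit the
 * untracked conntrack can OR its "done" bit into every per-cpu copy
 * once at init time, e.g.:
 *
 *	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
 */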
int nf_conntrack_init_start(void)
{
	int max_factor = 8;
	int i, ret, cpu;

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_conntrack_locks[i]);

	if (!nf_conntrack_htable_size) {
		/* Idea from tcp.c: use 1/16384 of memory.
		 * On i386: a 32MB machine has 512 buckets.
		 * >= 1GB machines have 16384 buckets.
		 * >= 4GB machines have 65536 buckets.
		 */
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
			nf_conntrack_htable_size = 65536;
		else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a maximum factor of four by default to get the same
		 * ceiling as with the old struct list_heads. When a table
		 * size is given explicitly, we keep the old factor of 8 so
		 * the maximum entry count is not reduced. */
		max_factor = 4;
	}
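	/*
	 * Worked example (assuming an i386 build with
	 * sizeof(struct hlist_head) == 4): a 32 MiB machine gets
	 * 32 MiB / 16384 / 4 = 512 buckets, matching the comment above,
	 * and with max_factor == 4 that allows up to 2048 tracked
	 * connections.  Machines above the 1 GiB and 4 GiB thresholds
	 * are simply clamped to 16384 and 65536 buckets respectively.
	 */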
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto err_expect;

	ret = nf_conntrack_acct_init();
	if (ret < 0)
		goto err_acct;

	ret = nf_conntrack_tstamp_init();
	if (ret < 0)
		goto err_tstamp;

	ret = nf_conntrack_ecache_init();
	if (ret < 0)
		goto err_ecache;

	ret = nf_conntrack_timeout_init();
	if (ret < 0)
		goto err_timeout;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

	ret = nf_conntrack_labels_init();
	if (ret < 0)
		goto err_labels;

	ret = nf_conntrack_seqadj_init();
	if (ret < 0)
		goto err_seqadj;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	ret = nf_ct_extend_register(&nf_ct_zone_extend);
	if (ret < 0)
		goto err_extend;
#endif
	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	/* Set up the fake conntrack: it is never deleted and sits in no
	 * hash table */
	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
		write_pnet(&ct->ct_net, &init_net);
		atomic_set(&ct->ct_general.use, 1);
	}
	/* - and make it look like a confirmed connection */
	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
	return 0;

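/*
 * Error unwinding follows the usual kernel idiom: the labels below run
 * in reverse order of the init calls above, so jumping to a given
 * label undoes exactly the steps that had already succeeded.
 */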
err_proto:
#ifdef CONFIG_NF_CONNTRACK_ZONES
	nf_ct_extend_unregister(&nf_ct_zone_extend);
err_extend:
#endif
	nf_conntrack_seqadj_fini();
err_seqadj:
	nf_conntrack_labels_fini();
err_labels:
	nf_conntrack_helper_fini();
err_helper:
	nf_conntrack_timeout_fini();
err_timeout:
	nf_conntrack_ecache_fini();
err_ecache:
	nf_conntrack_tstamp_fini();
err_tstamp:
	nf_conntrack_acct_fini();
err_acct:
	nf_conntrack_expect_fini();
err_expect:
	return ret;
}

void nf_conntrack_init_end(void)
{
	/* For use by the REJECT target */
	RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
	RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
}

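/*
 * These pointers are dereferenced under RCU from the generic netfilter
 * core (via nf_ct_attach() and nf_conntrack_destroy() respectively),
 * which lets code such as the REJECT target use conntrack facilities
 * without linking directly against this module.
 */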
/*
 * We need to use special "null" values, not used in the hash table
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)
#define TEMPLATE_NULLS_VAL	((1<<30)+2)

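/*
 * These values sit well above any possible bucket index (each bucket
 * is initialised with its own index as the nulls value, see
 * nf_ct_alloc_hashtable()), so an RCU reader whose walk ends on one of
 * these markers knows the entry migrated off a hash chain and can
 * restart the lookup.
 */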
int nf_conntrack_init_net(struct net *net)
{
	int ret = -ENOMEM;
	int cpu;

	atomic_set(&net->ct.count, 0);
	seqcount_init(&net->ct.generation);

	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
	if (!net->ct.pcpu_lists)
		goto err_stat;

	for_each_possible_cpu(cpu) {
		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

		spin_lock_init(&pcpu->lock);
		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
	}

	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat)
		goto err_pcpu_lists;

	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
	if (!net->ct.slabname)
		goto err_slabname;

	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
							sizeof(struct nf_conn), 0,
							SLAB_DESTROY_BY_RCU, NULL);
	if (!net->ct.nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_cache;
	}

	net->ct.htable_size = nf_conntrack_htable_size;
	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
	if (!net->ct.hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_pernet_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_pernet_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_tstamp_pernet_init(net);
	if (ret < 0)
		goto err_tstamp;
	ret = nf_conntrack_ecache_pernet_init(net);
	if (ret < 0)
		goto err_ecache;
	ret = nf_conntrack_helper_pernet_init(net);
	if (ret < 0)
		goto err_helper;
	ret = nf_conntrack_proto_pernet_init(net);
	if (ret < 0)
		goto err_proto;
	return 0;

err_proto:
	nf_conntrack_helper_pernet_fini(net);
err_helper:
	nf_conntrack_ecache_pernet_fini(net);
err_ecache:
	nf_conntrack_tstamp_pernet_fini(net);
err_tstamp:
	nf_conntrack_acct_pernet_fini(net);
err_acct:
	nf_conntrack_expect_pernet_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
	kfree(net->ct.slabname);
err_slabname:
	free_percpu(net->ct.stat);
err_pcpu_lists:
	free_percpu(net->ct.pcpu_lists);
err_stat:
	return ret;
}