1 | /* Connection state tracking for netfilter. This is separated from, | |
2 | but required by, the NAT layer; it can also be used by an iptables | |
3 | extension. */ | |
4 | ||
5 | /* (C) 1999-2001 Paul `Rusty' Russell | |
6 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | |
7 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | |
8 | * (C) 2005-2012 Patrick McHardy <kaber@trash.net> | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | */ | |
14 | ||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
16 | ||
17 | #include <linux/types.h> | |
18 | #include <linux/netfilter.h> | |
19 | #include <linux/module.h> | |
20 | #include <linux/sched.h> | |
21 | #include <linux/skbuff.h> | |
22 | #include <linux/proc_fs.h> | |
23 | #include <linux/vmalloc.h> | |
24 | #include <linux/stddef.h> | |
25 | #include <linux/slab.h> | |
26 | #include <linux/random.h> | |
27 | #include <linux/jhash.h> | |
28 | #include <linux/err.h> | |
29 | #include <linux/percpu.h> | |
30 | #include <linux/moduleparam.h> | |
31 | #include <linux/notifier.h> | |
32 | #include <linux/kernel.h> | |
33 | #include <linux/netdevice.h> | |
34 | #include <linux/socket.h> | |
35 | #include <linux/mm.h> | |
36 | #include <linux/nsproxy.h> | |
37 | #include <linux/rculist_nulls.h> | |
38 | ||
39 | #include <net/netfilter/nf_conntrack.h> | |
40 | #include <net/netfilter/nf_conntrack_l3proto.h> | |
41 | #include <net/netfilter/nf_conntrack_l4proto.h> | |
42 | #include <net/netfilter/nf_conntrack_expect.h> | |
43 | #include <net/netfilter/nf_conntrack_helper.h> | |
44 | #include <net/netfilter/nf_conntrack_seqadj.h> | |
45 | #include <net/netfilter/nf_conntrack_core.h> | |
46 | #include <net/netfilter/nf_conntrack_extend.h> | |
47 | #include <net/netfilter/nf_conntrack_acct.h> | |
48 | #include <net/netfilter/nf_conntrack_ecache.h> | |
49 | #include <net/netfilter/nf_conntrack_zones.h> | |
50 | #include <net/netfilter/nf_conntrack_timestamp.h> | |
51 | #include <net/netfilter/nf_conntrack_timeout.h> | |
52 | #include <net/netfilter/nf_conntrack_labels.h> | |
53 | #include <net/netfilter/nf_conntrack_synproxy.h> | |
54 | #include <net/netfilter/nf_nat.h> | |
55 | #include <net/netfilter/nf_nat_core.h> | |
56 | #include <net/netfilter/nf_nat_helper.h> | |
57 | ||
58 | #define NF_CONNTRACK_VERSION "0.5.0" | |
59 | ||
60 | int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, | |
61 | enum nf_nat_manip_type manip, | |
62 | const struct nlattr *attr) __read_mostly; | |
63 | EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); | |
64 | ||
65 | __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; | |
66 | EXPORT_SYMBOL_GPL(nf_conntrack_locks); | |
67 | ||
68 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); | |
69 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); | |
70 | ||
71 | static __read_mostly spinlock_t nf_conntrack_locks_all_lock; | |
72 | static __read_mostly seqcount_t nf_conntrack_generation; | |
73 | static __read_mostly bool nf_conntrack_locks_all; | |
74 | ||
75 | void nf_conntrack_lock(spinlock_t *lock) __acquires(lock) | |
76 | { | |
77 | spin_lock(lock); | |
78 | while (unlikely(nf_conntrack_locks_all)) { | |
79 | spin_unlock(lock); | |
80 | spin_unlock_wait(&nf_conntrack_locks_all_lock); | |
81 | spin_lock(lock); | |
82 | } | |
83 | } | |
84 | EXPORT_SYMBOL_GPL(nf_conntrack_lock); | |
85 | ||
86 | static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2) | |
87 | { | |
88 | h1 %= CONNTRACK_LOCKS; | |
89 | h2 %= CONNTRACK_LOCKS; | |
90 | spin_unlock(&nf_conntrack_locks[h1]); | |
91 | if (h1 != h2) | |
92 | spin_unlock(&nf_conntrack_locks[h2]); | |
93 | } | |
94 | ||
95 | /* return true if we need to recompute hashes (in case the hash table was resized) */ | |
96 | static bool nf_conntrack_double_lock(struct net *net, unsigned int h1, | |
97 | unsigned int h2, unsigned int sequence) | |
98 | { | |
99 | h1 %= CONNTRACK_LOCKS; | |
100 | h2 %= CONNTRACK_LOCKS; | |
101 | if (h1 <= h2) { | |
102 | nf_conntrack_lock(&nf_conntrack_locks[h1]); | |
103 | if (h1 != h2) | |
104 | spin_lock_nested(&nf_conntrack_locks[h2], | |
105 | SINGLE_DEPTH_NESTING); | |
106 | } else { | |
107 | nf_conntrack_lock(&nf_conntrack_locks[h2]); | |
108 | spin_lock_nested(&nf_conntrack_locks[h1], | |
109 | SINGLE_DEPTH_NESTING); | |
110 | } | |
111 | if (read_seqcount_retry(&nf_conntrack_generation, sequence)) { | |
112 | nf_conntrack_double_unlock(h1, h2); | |
113 | return true; | |
114 | } | |
115 | return false; | |
116 | } | |
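For reference, a caller pairs nf_conntrack_double_lock() with the generation seqcount in a retry loop, exactly as nf_ct_delete_from_lists() and nf_conntrack_hash_check_insert() do later in this file. A minimal sketch of the pattern (kernel context assumed, with `net` and `ct` in scope):

```c
/* Sketch only: recompute both bucket hashes until no resize raced with us. */
unsigned int hash, reply_hash, sequence;

local_bh_disable();
do {
	sequence = read_seqcount_begin(&nf_conntrack_generation);
	hash = hash_conntrack(net,
			      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	reply_hash = hash_conntrack(net,
				    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

/* ... both buckets are now locked against a stable table ... */

nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
```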
117 | ||
118 | static void nf_conntrack_all_lock(void) | |
119 | { | |
120 | int i; | |
121 | ||
122 | spin_lock(&nf_conntrack_locks_all_lock); | |
123 | nf_conntrack_locks_all = true; | |
124 | ||
125 | for (i = 0; i < CONNTRACK_LOCKS; i++) { | |
126 | spin_unlock_wait(&nf_conntrack_locks[i]); | |
127 | } | |
128 | } | |
129 | ||
130 | static void nf_conntrack_all_unlock(void) | |
131 | { | |
132 | nf_conntrack_locks_all = false; | |
133 | spin_unlock(&nf_conntrack_locks_all_lock); | |
134 | } | |
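The writer-side counterpart: code that must exclude every per-bucket lock holder at once, such as the hash resize in nf_conntrack_set_hashsize() near the end of this file, brackets its critical section like this (sketch, kernel context assumed):

```c
local_bh_disable();
nf_conntrack_all_lock();	/* makes nf_conntrack_lock() callers spin */
write_seqcount_begin(&nf_conntrack_generation);

/* ... exclusive work, e.g. rehash every entry into a new table ... */

write_seqcount_end(&nf_conntrack_generation);
nf_conntrack_all_unlock();
local_bh_enable();
```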
135 | ||
136 | unsigned int nf_conntrack_htable_size __read_mostly; | |
137 | EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); | |
138 | ||
139 | unsigned int nf_conntrack_max __read_mostly; | |
140 | EXPORT_SYMBOL_GPL(nf_conntrack_max); | |
141 | ||
142 | DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); | |
143 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); | |
144 | ||
145 | static unsigned int nf_conntrack_hash_rnd __read_mostly; | |
146 | ||
147 | static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple) | |
148 | { | |
149 | unsigned int n; | |
150 | ||
151 | get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd)); | |
152 | ||
153 | /* The direction must be ignored, so we hash everything up to the | |
154 | * destination ports (an offset which is a multiple of 4) and treat the last | |
155 | * three bytes manually. | |
156 | */ | |
157 | n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); | |
158 | return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^ | |
159 | (((__force __u16)tuple->dst.u.all << 16) | | |
160 | tuple->dst.protonum)); | |
161 | } | |
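The "last three bytes" are the 16-bit destination port plus the 8-bit protocol number; they are folded into the jhash2 seed rather than hashed as data. A standalone illustration of that fold, using hypothetical values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t dport = 80;	/* tuple->dst.u.all, e.g. TCP port 80 */
	uint8_t protonum = 6;	/* tuple->dst.protonum, IPPROTO_TCP */

	/* same expression as above: (dst.u.all << 16) | dst.protonum */
	uint32_t fold = ((uint32_t)dport << 16) | protonum;

	printf("seed fold = 0x%08x\n", fold);	/* prints 0x00500006 */
	return 0;
}
```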
162 | ||
163 | static u32 __hash_bucket(u32 hash, unsigned int size) | |
164 | { | |
165 | return reciprocal_scale(hash, size); | |
166 | } | |
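reciprocal_scale() maps a 32-bit hash onto [0, size) with a multiply and shift instead of a modulo; in <linux/kernel.h> it boils down to ((u64)hash * size) >> 32. A standalone re-statement, assuming that definition:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace re-statement of reciprocal_scale() from <linux/kernel.h>. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t size = 16384;	/* a typical htable_size */
	uint32_t hashes[] = { 0x0u, 0x12345678u, 0xffffffffu };

	for (int i = 0; i < 3; i++)
		printf("hash 0x%08x -> bucket %u\n",
		       hashes[i], reciprocal_scale(hashes[i], size));
	/* every result is < 16384, no expensive '%' needed */
	return 0;
}
```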
167 | ||
168 | static u32 hash_bucket(u32 hash, const struct net *net) | |
169 | { | |
170 | return __hash_bucket(hash, net->ct.htable_size); | |
171 | } | |
172 | ||
173 | static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, | |
174 | unsigned int size) | |
175 | { | |
176 | return __hash_bucket(hash_conntrack_raw(tuple), size); | |
177 | } | |
178 | ||
179 | static inline u_int32_t hash_conntrack(const struct net *net, | |
180 | const struct nf_conntrack_tuple *tuple) | |
181 | { | |
182 | return __hash_conntrack(tuple, net->ct.htable_size); | |
183 | } | |
184 | ||
185 | bool | |
186 | nf_ct_get_tuple(const struct sk_buff *skb, | |
187 | unsigned int nhoff, | |
188 | unsigned int dataoff, | |
189 | u_int16_t l3num, | |
190 | u_int8_t protonum, | |
191 | struct net *net, | |
192 | struct nf_conntrack_tuple *tuple, | |
193 | const struct nf_conntrack_l3proto *l3proto, | |
194 | const struct nf_conntrack_l4proto *l4proto) | |
195 | { | |
196 | memset(tuple, 0, sizeof(*tuple)); | |
197 | ||
198 | tuple->src.l3num = l3num; | |
199 | if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) | |
200 | return false; | |
201 | ||
202 | tuple->dst.protonum = protonum; | |
203 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; | |
204 | ||
205 | return l4proto->pkt_to_tuple(skb, dataoff, net, tuple); | |
206 | } | |
207 | EXPORT_SYMBOL_GPL(nf_ct_get_tuple); | |
208 | ||
209 | bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, | |
210 | u_int16_t l3num, | |
211 | struct net *net, struct nf_conntrack_tuple *tuple) | |
212 | { | |
213 | struct nf_conntrack_l3proto *l3proto; | |
214 | struct nf_conntrack_l4proto *l4proto; | |
215 | unsigned int protoff; | |
216 | u_int8_t protonum; | |
217 | int ret; | |
218 | ||
219 | rcu_read_lock(); | |
220 | ||
221 | l3proto = __nf_ct_l3proto_find(l3num); | |
222 | ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); | |
223 | if (ret != NF_ACCEPT) { | |
224 | rcu_read_unlock(); | |
225 | return false; | |
226 | } | |
227 | ||
228 | l4proto = __nf_ct_l4proto_find(l3num, protonum); | |
229 | ||
230 | ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple, | |
231 | l3proto, l4proto); | |
232 | ||
233 | rcu_read_unlock(); | |
234 | return ret; | |
235 | } | |
236 | EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); | |
237 | ||
238 | bool | |
239 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, | |
240 | const struct nf_conntrack_tuple *orig, | |
241 | const struct nf_conntrack_l3proto *l3proto, | |
242 | const struct nf_conntrack_l4proto *l4proto) | |
243 | { | |
244 | memset(inverse, 0, sizeof(*inverse)); | |
245 | ||
246 | inverse->src.l3num = orig->src.l3num; | |
247 | if (l3proto->invert_tuple(inverse, orig) == 0) | |
248 | return false; | |
249 | ||
250 | inverse->dst.dir = !orig->dst.dir; | |
251 | ||
252 | inverse->dst.protonum = orig->dst.protonum; | |
253 | return l4proto->invert_tuple(inverse, orig); | |
254 | } | |
255 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); | |
256 | ||
257 | static void | |
258 | clean_from_lists(struct nf_conn *ct) | |
259 | { | |
260 | pr_debug("clean_from_lists(%p)\n", ct); | |
261 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); | |
262 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); | |
263 | ||
264 | /* Destroy all pending expectations */ | |
265 | nf_ct_remove_expectations(ct); | |
266 | } | |
267 | ||
268 | /* must be called with local_bh_disable */ | |
269 | static void nf_ct_add_to_dying_list(struct nf_conn *ct) | |
270 | { | |
271 | struct ct_pcpu *pcpu; | |
272 | ||
273 | /* add this conntrack to the (per cpu) dying list */ | |
274 | ct->cpu = smp_processor_id(); | |
275 | pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); | |
276 | ||
277 | spin_lock(&pcpu->lock); | |
278 | hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | |
279 | &pcpu->dying); | |
280 | spin_unlock(&pcpu->lock); | |
281 | } | |
282 | ||
283 | /* must be called with local_bh_disable */ | |
284 | static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct) | |
285 | { | |
286 | struct ct_pcpu *pcpu; | |
287 | ||
288 | /* add this conntrack to the (per cpu) unconfirmed list */ | |
289 | ct->cpu = smp_processor_id(); | |
290 | pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); | |
291 | ||
292 | spin_lock(&pcpu->lock); | |
293 | hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | |
294 | &pcpu->unconfirmed); | |
295 | spin_unlock(&pcpu->lock); | |
296 | } | |
297 | ||
298 | /* must be called with local_bh_disable */ | |
299 | static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct) | |
300 | { | |
301 | struct ct_pcpu *pcpu; | |
302 | ||
303 | /* We overload the first tuple to link into the unconfirmed or dying list. */ | |
304 | pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); | |
305 | ||
306 | spin_lock(&pcpu->lock); | |
307 | BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); | |
308 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); | |
309 | spin_unlock(&pcpu->lock); | |
310 | } | |
311 | ||
312 | /* Released via destroy_conntrack() */ | |
313 | struct nf_conn *nf_ct_tmpl_alloc(struct net *net, | |
314 | const struct nf_conntrack_zone *zone, | |
315 | gfp_t flags) | |
316 | { | |
317 | struct nf_conn *tmpl; | |
318 | ||
319 | tmpl = kzalloc(sizeof(*tmpl), flags); | |
320 | if (tmpl == NULL) | |
321 | return NULL; | |
322 | ||
323 | tmpl->status = IPS_TEMPLATE; | |
324 | write_pnet(&tmpl->ct_net, net); | |
325 | ||
326 | if (nf_ct_zone_add(tmpl, flags, zone) < 0) | |
327 | goto out_free; | |
328 | ||
329 | atomic_set(&tmpl->ct_general.use, 0); | |
330 | ||
331 | return tmpl; | |
332 | out_free: | |
333 | kfree(tmpl); | |
334 | return NULL; | |
335 | } | |
336 | EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc); | |
337 | ||
338 | void nf_ct_tmpl_free(struct nf_conn *tmpl) | |
339 | { | |
340 | nf_ct_ext_destroy(tmpl); | |
341 | nf_ct_ext_free(tmpl); | |
342 | kfree(tmpl); | |
343 | } | |
344 | EXPORT_SYMBOL_GPL(nf_ct_tmpl_free); | |
345 | ||
346 | static void | |
347 | destroy_conntrack(struct nf_conntrack *nfct) | |
348 | { | |
349 | struct nf_conn *ct = (struct nf_conn *)nfct; | |
350 | struct net *net = nf_ct_net(ct); | |
351 | struct nf_conntrack_l4proto *l4proto; | |
352 | ||
353 | pr_debug("destroy_conntrack(%p)\n", ct); | |
354 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); | |
355 | NF_CT_ASSERT(!timer_pending(&ct->timeout)); | |
356 | ||
357 | if (unlikely(nf_ct_is_template(ct))) { | |
358 | nf_ct_tmpl_free(ct); | |
359 | return; | |
360 | } | |
361 | rcu_read_lock(); | |
362 | l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); | |
363 | if (l4proto && l4proto->destroy) | |
364 | l4proto->destroy(ct); | |
365 | ||
366 | rcu_read_unlock(); | |
367 | ||
368 | local_bh_disable(); | |
369 | /* Expectations will have been removed in clean_from_lists, | |
370 | * except that TFTP can create an expectation on the first packet, | |
371 | * before the connection is in the list, so we need to clean here, | |
372 | * too. | |
373 | */ | |
374 | nf_ct_remove_expectations(ct); | |
375 | ||
376 | nf_ct_del_from_dying_or_unconfirmed_list(ct); | |
377 | ||
378 | NF_CT_STAT_INC(net, delete); | |
379 | local_bh_enable(); | |
380 | ||
381 | if (ct->master) | |
382 | nf_ct_put(ct->master); | |
383 | ||
384 | pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); | |
385 | nf_conntrack_free(ct); | |
386 | } | |
387 | ||
388 | static void nf_ct_delete_from_lists(struct nf_conn *ct) | |
389 | { | |
390 | struct net *net = nf_ct_net(ct); | |
391 | unsigned int hash, reply_hash; | |
392 | unsigned int sequence; | |
393 | ||
394 | nf_ct_helper_destroy(ct); | |
395 | ||
396 | local_bh_disable(); | |
397 | do { | |
398 | sequence = read_seqcount_begin(&nf_conntrack_generation); | |
399 | hash = hash_conntrack(net, | |
400 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | |
401 | reply_hash = hash_conntrack(net, | |
402 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | |
403 | } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); | |
404 | ||
405 | clean_from_lists(ct); | |
406 | nf_conntrack_double_unlock(hash, reply_hash); | |
407 | ||
408 | nf_ct_add_to_dying_list(ct); | |
409 | ||
410 | NF_CT_STAT_INC(net, delete_list); | |
411 | local_bh_enable(); | |
412 | } | |
413 | ||
414 | bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) | |
415 | { | |
416 | struct nf_conn_tstamp *tstamp; | |
417 | ||
418 | tstamp = nf_conn_tstamp_find(ct); | |
419 | if (tstamp && tstamp->stop == 0) | |
420 | tstamp->stop = ktime_get_real_ns(); | |
421 | ||
422 | if (nf_ct_is_dying(ct)) | |
423 | goto delete; | |
424 | ||
425 | if (nf_conntrack_event_report(IPCT_DESTROY, ct, | |
426 | portid, report) < 0) { | |
427 | /* destroy event was not delivered */ | |
428 | nf_ct_delete_from_lists(ct); | |
429 | nf_conntrack_ecache_delayed_work(nf_ct_net(ct)); | |
430 | return false; | |
431 | } | |
432 | ||
433 | nf_conntrack_ecache_work(nf_ct_net(ct)); | |
434 | set_bit(IPS_DYING_BIT, &ct->status); | |
435 | delete: | |
436 | nf_ct_delete_from_lists(ct); | |
437 | nf_ct_put(ct); | |
438 | return true; | |
439 | } | |
440 | EXPORT_SYMBOL_GPL(nf_ct_delete); | |
441 | ||
442 | static void death_by_timeout(unsigned long ul_conntrack) | |
443 | { | |
444 | nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0); | |
445 | } | |
446 | ||
447 | static inline bool | |
448 | nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, | |
449 | const struct nf_conntrack_tuple *tuple, | |
450 | const struct nf_conntrack_zone *zone) | |
451 | { | |
452 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); | |
453 | ||
454 | /* A conntrack can be recreated with an equal tuple, | |
455 | * so we need to check that the conntrack is confirmed | |
456 | */ | |
457 | return nf_ct_tuple_equal(tuple, &h->tuple) && | |
458 | nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) && | |
459 | nf_ct_is_confirmed(ct); | |
460 | } | |
461 | ||
462 | /* | |
463 | * Warning: | |
464 | * - Caller must take a reference on returned object | |
465 | * and recheck nf_ct_tuple_equal(tuple, &h->tuple) | |
466 | */ | |
467 | static struct nf_conntrack_tuple_hash * | |
468 | ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone, | |
469 | const struct nf_conntrack_tuple *tuple, u32 hash) | |
470 | { | |
471 | struct nf_conntrack_tuple_hash *h; | |
472 | struct hlist_nulls_node *n; | |
473 | unsigned int bucket = hash_bucket(hash, net); | |
474 | ||
475 | /* Disable BHs the entire time since we normally need to disable them | |
476 | * at least once for the stats anyway. | |
477 | */ | |
478 | local_bh_disable(); | |
479 | begin: | |
480 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { | |
481 | if (nf_ct_key_equal(h, tuple, zone)) { | |
482 | NF_CT_STAT_INC(net, found); | |
483 | local_bh_enable(); | |
484 | return h; | |
485 | } | |
486 | NF_CT_STAT_INC(net, searched); | |
487 | } | |
488 | /* | |
489 | * if the nulls value we got at the end of this lookup is | |
490 | * not the expected one, we must restart lookup. | |
491 | * We probably met an item that was moved to another chain. | |
492 | */ | |
493 | if (get_nulls_value(n) != bucket) { | |
494 | NF_CT_STAT_INC(net, search_restart); | |
495 | goto begin; | |
496 | } | |
497 | local_bh_enable(); | |
498 | ||
499 | return NULL; | |
500 | } | |
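The "nulls value" checked above is the list terminator itself: an hlist_nulls chain ends in (value << 1) | 1 rather than NULL, where the value is the bucket number set up by INIT_HLIST_NULLS_HEAD(). A standalone model of the encoding, mirroring is_a_nulls()/get_nulls_value() from rculist_nulls.h:

```c
#include <stdint.h>
#include <stdio.h>

static uintptr_t make_nulls(uint32_t bucket) { return ((uintptr_t)bucket << 1) | 1; }
static int is_a_nulls(uintptr_t p)           { return p & 1; }
static uint32_t get_nulls_value(uintptr_t p) { return p >> 1; }

int main(void)
{
	uint32_t start_bucket = 42;
	uintptr_t end = make_nulls(7);	/* walk ended on bucket 7's terminator */

	if (is_a_nulls(end) && get_nulls_value(end) != start_bucket)
		printf("entry moved chains under us: restart lookup\n");
	return 0;
}
```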
501 | ||
502 | /* Find a connection corresponding to a tuple. */ | |
503 | static struct nf_conntrack_tuple_hash * | |
504 | __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone, | |
505 | const struct nf_conntrack_tuple *tuple, u32 hash) | |
506 | { | |
507 | struct nf_conntrack_tuple_hash *h; | |
508 | struct nf_conn *ct; | |
509 | ||
510 | rcu_read_lock(); | |
511 | begin: | |
512 | h = ____nf_conntrack_find(net, zone, tuple, hash); | |
513 | if (h) { | |
514 | ct = nf_ct_tuplehash_to_ctrack(h); | |
515 | if (unlikely(nf_ct_is_dying(ct) || | |
516 | !atomic_inc_not_zero(&ct->ct_general.use))) | |
517 | h = NULL; | |
518 | else { | |
519 | if (unlikely(!nf_ct_key_equal(h, tuple, zone))) { | |
520 | nf_ct_put(ct); | |
521 | goto begin; | |
522 | } | |
523 | } | |
524 | } | |
525 | rcu_read_unlock(); | |
526 | ||
527 | return h; | |
528 | } | |
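A note on the recheck above: with SLAB_DESTROY_BY_RCU the slab page is never handed back to the page allocator while RCU readers run, but an object can be freed and reused for a different connection between the lookup and the refcount bump. atomic_inc_not_zero() rejects objects already at zero, and the second nf_ct_key_equal() rejects objects recycled for another tuple, restarting the lookup in that case.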
529 | ||
530 | struct nf_conntrack_tuple_hash * | |
531 | nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone, | |
532 | const struct nf_conntrack_tuple *tuple) | |
533 | { | |
534 | return __nf_conntrack_find_get(net, zone, tuple, | |
535 | hash_conntrack_raw(tuple)); | |
536 | } | |
537 | EXPORT_SYMBOL_GPL(nf_conntrack_find_get); | |
538 | ||
539 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, | |
540 | unsigned int hash, | |
541 | unsigned int reply_hash) | |
542 | { | |
543 | struct net *net = nf_ct_net(ct); | |
544 | ||
545 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | |
546 | &net->ct.hash[hash]); | |
547 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, | |
548 | &net->ct.hash[reply_hash]); | |
549 | } | |
550 | ||
551 | int | |
552 | nf_conntrack_hash_check_insert(struct nf_conn *ct) | |
553 | { | |
554 | const struct nf_conntrack_zone *zone; | |
555 | struct net *net = nf_ct_net(ct); | |
556 | unsigned int hash, reply_hash; | |
557 | struct nf_conntrack_tuple_hash *h; | |
558 | struct hlist_nulls_node *n; | |
559 | unsigned int sequence; | |
560 | ||
561 | zone = nf_ct_zone(ct); | |
562 | ||
563 | local_bh_disable(); | |
564 | do { | |
565 | sequence = read_seqcount_begin(&nf_conntrack_generation); | |
566 | hash = hash_conntrack(net, | |
567 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | |
568 | reply_hash = hash_conntrack(net, | |
569 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | |
570 | } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); | |
571 | ||
572 | /* See if there's one in the list already, including reverse */ | |
573 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) | |
574 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | |
575 | &h->tuple) && | |
576 | nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, | |
577 | NF_CT_DIRECTION(h))) | |
578 | goto out; | |
579 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) | |
580 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, | |
581 | &h->tuple) && | |
582 | nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, | |
583 | NF_CT_DIRECTION(h))) | |
584 | goto out; | |
585 | ||
586 | add_timer(&ct->timeout); | |
587 | smp_wmb(); | |
588 | /* The caller holds a reference to this object */ | |
589 | atomic_set(&ct->ct_general.use, 2); | |
590 | __nf_conntrack_hash_insert(ct, hash, reply_hash); | |
591 | nf_conntrack_double_unlock(hash, reply_hash); | |
592 | NF_CT_STAT_INC(net, insert); | |
593 | local_bh_enable(); | |
594 | return 0; | |
595 | ||
596 | out: | |
597 | nf_conntrack_double_unlock(hash, reply_hash); | |
598 | NF_CT_STAT_INC(net, insert_failed); | |
599 | local_bh_enable(); | |
600 | return -EEXIST; | |
601 | } | |
602 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); | |
603 | ||
604 | /* Confirm a connection given skb; places it in hash table */ | |
605 | int | |
606 | __nf_conntrack_confirm(struct sk_buff *skb) | |
607 | { | |
608 | const struct nf_conntrack_zone *zone; | |
609 | unsigned int hash, reply_hash; | |
610 | struct nf_conntrack_tuple_hash *h; | |
611 | struct nf_conn *ct; | |
612 | struct nf_conn_help *help; | |
613 | struct nf_conn_tstamp *tstamp; | |
614 | struct hlist_nulls_node *n; | |
615 | enum ip_conntrack_info ctinfo; | |
616 | struct net *net; | |
617 | unsigned int sequence; | |
618 | ||
619 | ct = nf_ct_get(skb, &ctinfo); | |
620 | net = nf_ct_net(ct); | |
621 | ||
622 | /* ipt_REJECT uses nf_conntrack_attach to attach related | |
623 | ICMP/TCP RST packets in the other direction. The actual | |
624 | packet which created the connection will be IP_CT_NEW, or | |
625 | IP_CT_RELATED for an expected connection. */ | |
626 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) | |
627 | return NF_ACCEPT; | |
628 | ||
629 | zone = nf_ct_zone(ct); | |
630 | local_bh_disable(); | |
631 | ||
632 | do { | |
633 | sequence = read_seqcount_begin(&nf_conntrack_generation); | |
634 | /* reuse the hash saved before */ | |
635 | hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; | |
636 | hash = hash_bucket(hash, net); | |
637 | reply_hash = hash_conntrack(net, | |
638 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | |
639 | ||
640 | } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); | |
641 | ||
642 | /* We're not in hash table, and we refuse to set up related | |
643 | * connections for unconfirmed conns. But packet copies and | |
644 | * REJECT will give spurious warnings here. | |
645 | */ | |
646 | /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ | |
647 | ||
648 | /* No external references means no one else could have | |
649 | * confirmed us. | |
650 | */ | |
651 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); | |
652 | pr_debug("Confirming conntrack %p\n", ct); | |
653 | /* We have to check the DYING flag after unlink to prevent | |
654 | * a race against nf_ct_get_next_corpse() possibly called from | |
655 | * user context, else we insert an already 'dead' hash, blocking | |
656 | * further use of that particular connection -JM. | |
657 | */ | |
658 | nf_ct_del_from_dying_or_unconfirmed_list(ct); | |
659 | ||
660 | if (unlikely(nf_ct_is_dying(ct))) | |
661 | goto out; | |
662 | ||
663 | /* See if there's one in the list already, including reverse: | |
664 | NAT could have grabbed it without realizing, since we're | |
665 | not in the hash. If there is, we lost the race. */ | |
666 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) | |
667 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | |
668 | &h->tuple) && | |
669 | nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, | |
670 | NF_CT_DIRECTION(h))) | |
671 | goto out; | |
672 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) | |
673 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, | |
674 | &h->tuple) && | |
675 | nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, | |
676 | NF_CT_DIRECTION(h))) | |
677 | goto out; | |
678 | ||
679 | /* Timer relative to confirmation time, not original | |
680 | setting time, otherwise we'd get timer wrap in | |
681 | weird delay cases. */ | |
682 | ct->timeout.expires += jiffies; | |
683 | add_timer(&ct->timeout); | |
684 | atomic_inc(&ct->ct_general.use); | |
685 | ct->status |= IPS_CONFIRMED; | |
686 | ||
687 | /* set conntrack timestamp, if enabled. */ | |
688 | tstamp = nf_conn_tstamp_find(ct); | |
689 | if (tstamp) { | |
690 | if (skb->tstamp.tv64 == 0) | |
691 | __net_timestamp(skb); | |
692 | ||
693 | tstamp->start = ktime_to_ns(skb->tstamp); | |
694 | } | |
695 | /* Since the lookup is lockless, hash insertion must be done after | |
696 | * starting the timer and setting the CONFIRMED bit. The RCU barriers | |
697 | * guarantee that no other CPU can find the conntrack before the above | |
698 | * stores are visible. | |
699 | */ | |
700 | __nf_conntrack_hash_insert(ct, hash, reply_hash); | |
701 | nf_conntrack_double_unlock(hash, reply_hash); | |
702 | NF_CT_STAT_INC(net, insert); | |
703 | local_bh_enable(); | |
704 | ||
705 | help = nfct_help(ct); | |
706 | if (help && help->helper) | |
707 | nf_conntrack_event_cache(IPCT_HELPER, ct); | |
708 | ||
709 | nf_conntrack_event_cache(master_ct(ct) ? | |
710 | IPCT_RELATED : IPCT_NEW, ct); | |
711 | return NF_ACCEPT; | |
712 | ||
713 | out: | |
714 | nf_ct_add_to_dying_list(ct); | |
715 | nf_conntrack_double_unlock(hash, reply_hash); | |
716 | NF_CT_STAT_INC(net, insert_failed); | |
717 | local_bh_enable(); | |
718 | return NF_DROP; | |
719 | } | |
720 | EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); | |
721 | ||
722 | /* Returns true if a connection corresponds to the tuple (required | |
723 | for NAT). */ | |
724 | int | |
725 | nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | |
726 | const struct nf_conn *ignored_conntrack) | |
727 | { | |
728 | struct net *net = nf_ct_net(ignored_conntrack); | |
729 | const struct nf_conntrack_zone *zone; | |
730 | struct nf_conntrack_tuple_hash *h; | |
731 | struct hlist_nulls_node *n; | |
732 | struct nf_conn *ct; | |
733 | unsigned int hash; | |
734 | ||
735 | zone = nf_ct_zone(ignored_conntrack); | |
736 | hash = hash_conntrack(net, tuple); | |
737 | ||
738 | /* Disable BHs the entire time since we need to disable them at | |
739 | * least once for the stats anyway. | |
740 | */ | |
741 | rcu_read_lock_bh(); | |
742 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { | |
743 | ct = nf_ct_tuplehash_to_ctrack(h); | |
744 | if (ct != ignored_conntrack && | |
745 | nf_ct_tuple_equal(tuple, &h->tuple) && | |
746 | nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) { | |
747 | NF_CT_STAT_INC(net, found); | |
748 | rcu_read_unlock_bh(); | |
749 | return 1; | |
750 | } | |
751 | NF_CT_STAT_INC(net, searched); | |
752 | } | |
753 | rcu_read_unlock_bh(); | |
754 | ||
755 | return 0; | |
756 | } | |
757 | EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); | |
758 | ||
759 | #define NF_CT_EVICTION_RANGE 8 | |
760 | ||
761 | /* There's a small race here where we may free a just-assured | |
762 | connection. Too bad: we're in trouble anyway. */ | |
763 | static noinline int early_drop(struct net *net, unsigned int _hash) | |
764 | { | |
765 | /* Use oldest entry, which is roughly LRU */ | |
766 | struct nf_conntrack_tuple_hash *h; | |
767 | struct nf_conn *ct = NULL, *tmp; | |
768 | struct hlist_nulls_node *n; | |
769 | unsigned int i = 0, cnt = 0; | |
770 | int dropped = 0; | |
771 | unsigned int hash, sequence; | |
772 | spinlock_t *lockp; | |
773 | ||
774 | local_bh_disable(); | |
775 | restart: | |
776 | sequence = read_seqcount_begin(&nf_conntrack_generation); | |
777 | hash = hash_bucket(_hash, net); | |
778 | for (; i < net->ct.htable_size; i++) { | |
779 | lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; | |
780 | nf_conntrack_lock(lockp); | |
781 | if (read_seqcount_retry(&nf_conntrack_generation, sequence)) { | |
782 | spin_unlock(lockp); | |
783 | goto restart; | |
784 | } | |
785 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], | |
786 | hnnode) { | |
787 | tmp = nf_ct_tuplehash_to_ctrack(h); | |
788 | if (!test_bit(IPS_ASSURED_BIT, &tmp->status) && | |
789 | !nf_ct_is_dying(tmp) && | |
790 | atomic_inc_not_zero(&tmp->ct_general.use)) { | |
791 | ct = tmp; | |
792 | break; | |
793 | } | |
794 | cnt++; | |
795 | } | |
796 | ||
797 | hash = (hash + 1) % net->ct.htable_size; | |
798 | spin_unlock(lockp); | |
799 | ||
800 | if (ct || cnt >= NF_CT_EVICTION_RANGE) | |
801 | break; | |
802 | ||
803 | } | |
804 | local_bh_enable(); | |
805 | ||
806 | if (!ct) | |
807 | return dropped; | |
808 | ||
809 | if (del_timer(&ct->timeout)) { | |
810 | if (nf_ct_delete(ct, 0, 0)) { | |
811 | dropped = 1; | |
812 | NF_CT_STAT_INC_ATOMIC(net, early_drop); | |
813 | } | |
814 | } | |
815 | nf_ct_put(ct); | |
816 | return dropped; | |
817 | } | |
818 | ||
819 | static struct nf_conn * | |
820 | __nf_conntrack_alloc(struct net *net, | |
821 | const struct nf_conntrack_zone *zone, | |
822 | const struct nf_conntrack_tuple *orig, | |
823 | const struct nf_conntrack_tuple *repl, | |
824 | gfp_t gfp, u32 hash) | |
825 | { | |
826 | struct nf_conn *ct; | |
827 | ||
828 | /* We don't want any race condition at the early drop stage */ | |
829 | atomic_inc(&net->ct.count); | |
830 | ||
831 | if (nf_conntrack_max && | |
832 | unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { | |
833 | if (!early_drop(net, hash)) { | |
834 | atomic_dec(&net->ct.count); | |
835 | net_warn_ratelimited("nf_conntrack: table full, dropping packet\n"); | |
836 | return ERR_PTR(-ENOMEM); | |
837 | } | |
838 | } | |
839 | ||
840 | /* | |
841 | * Do not use kmem_cache_zalloc(), as this cache uses | |
842 | * SLAB_DESTROY_BY_RCU. | |
843 | */ | |
844 | ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); | |
845 | if (ct == NULL) | |
846 | goto out; | |
847 | ||
848 | spin_lock_init(&ct->lock); | |
849 | ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; | |
850 | ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; | |
851 | ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; | |
852 | /* save hash for reusing when confirming */ | |
853 | *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; | |
854 | ct->status = 0; | |
855 | /* Don't set timer yet: wait for confirmation */ | |
856 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); | |
857 | write_pnet(&ct->ct_net, net); | |
858 | memset(&ct->__nfct_init_offset[0], 0, | |
859 | offsetof(struct nf_conn, proto) - | |
860 | offsetof(struct nf_conn, __nfct_init_offset[0])); | |
861 | ||
862 | if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0) | |
863 | goto out_free; | |
864 | ||
865 | /* Because we use RCU lookups, we set ct_general.use to zero before | |
866 | * this is inserted in any list. | |
867 | */ | |
868 | atomic_set(&ct->ct_general.use, 0); | |
869 | return ct; | |
870 | out_free: | |
871 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); | |
872 | out: | |
873 | atomic_dec(&net->ct.count); | |
874 | return ERR_PTR(-ENOMEM); | |
875 | } | |
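The memset() in __nf_conntrack_alloc() above clears only the region between the zero-length __nfct_init_offset[] marker and the proto member, so fields placed before the marker (refcount, tuple hashes) survive when a SLAB_DESTROY_BY_RCU object is recycled. A standalone illustration with a hypothetical struct, using a GNU C zero-length array as the kernel does:

```c
#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Hypothetical struct mirroring the nf_conn layout trick. */
struct demo_conn {
	int use;		/* refcount, must survive */
	char __init_area[0];	/* GNU C zero-length array marker */
	int status;
	int mark;
	long proto;		/* first field that is NOT cleared */
};

int main(void)
{
	struct demo_conn c = { .use = 1, .status = 7, .mark = 9, .proto = 42 };

	memset(&c.__init_area[0], 0,
	       offsetof(struct demo_conn, proto) -
	       offsetof(struct demo_conn, __init_area));

	/* prints use=1 status=0 mark=0 proto=42 */
	printf("use=%d status=%d mark=%d proto=%ld\n",
	       c.use, c.status, c.mark, c.proto);
	return 0;
}
```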
876 | ||
877 | struct nf_conn *nf_conntrack_alloc(struct net *net, | |
878 | const struct nf_conntrack_zone *zone, | |
879 | const struct nf_conntrack_tuple *orig, | |
880 | const struct nf_conntrack_tuple *repl, | |
881 | gfp_t gfp) | |
882 | { | |
883 | return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0); | |
884 | } | |
885 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); | |
886 | ||
887 | void nf_conntrack_free(struct nf_conn *ct) | |
888 | { | |
889 | struct net *net = nf_ct_net(ct); | |
890 | ||
891 | /* A freed object has refcnt == 0, that's | |
892 | * the golden rule for SLAB_DESTROY_BY_RCU | |
893 | */ | |
894 | NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0); | |
895 | ||
896 | nf_ct_ext_destroy(ct); | |
897 | nf_ct_ext_free(ct); | |
898 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); | |
899 | smp_mb__before_atomic(); | |
900 | atomic_dec(&net->ct.count); | |
901 | } | |
902 | EXPORT_SYMBOL_GPL(nf_conntrack_free); | |
903 | ||
904 | ||
905 | /* Allocate a new conntrack: we return -ENOMEM if classification | |
906 | failed due to stress. Otherwise it really is unclassifiable. */ | |
907 | static struct nf_conntrack_tuple_hash * | |
908 | init_conntrack(struct net *net, struct nf_conn *tmpl, | |
909 | const struct nf_conntrack_tuple *tuple, | |
910 | struct nf_conntrack_l3proto *l3proto, | |
911 | struct nf_conntrack_l4proto *l4proto, | |
912 | struct sk_buff *skb, | |
913 | unsigned int dataoff, u32 hash) | |
914 | { | |
915 | struct nf_conn *ct; | |
916 | struct nf_conn_help *help; | |
917 | struct nf_conntrack_tuple repl_tuple; | |
918 | struct nf_conntrack_ecache *ecache; | |
919 | struct nf_conntrack_expect *exp = NULL; | |
920 | const struct nf_conntrack_zone *zone; | |
921 | struct nf_conn_timeout *timeout_ext; | |
922 | struct nf_conntrack_zone tmp; | |
923 | unsigned int *timeouts; | |
924 | ||
925 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { | |
926 | pr_debug("Can't invert tuple.\n"); | |
927 | return NULL; | |
928 | } | |
929 | ||
930 | zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); | |
931 | ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC, | |
932 | hash); | |
933 | if (IS_ERR(ct)) | |
934 | return (struct nf_conntrack_tuple_hash *)ct; | |
935 | ||
936 | if (tmpl && nfct_synproxy(tmpl)) { | |
937 | nfct_seqadj_ext_add(ct); | |
938 | nfct_synproxy_ext_add(ct); | |
939 | } | |
940 | ||
941 | timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL; | |
942 | if (timeout_ext) { | |
943 | timeouts = nf_ct_timeout_data(timeout_ext); | |
944 | if (unlikely(!timeouts)) | |
945 | timeouts = l4proto->get_timeouts(net); | |
946 | } else { | |
947 | timeouts = l4proto->get_timeouts(net); | |
948 | } | |
949 | ||
950 | if (!l4proto->new(ct, skb, dataoff, timeouts)) { | |
951 | nf_conntrack_free(ct); | |
952 | pr_debug("can't track with proto module\n"); | |
953 | return NULL; | |
954 | } | |
955 | ||
956 | if (timeout_ext) | |
957 | nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout), | |
958 | GFP_ATOMIC); | |
959 | ||
960 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); | |
961 | nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); | |
962 | nf_ct_labels_ext_add(ct); | |
963 | ||
964 | ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; | |
965 | nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, | |
966 | ecache ? ecache->expmask : 0, | |
967 | GFP_ATOMIC); | |
968 | ||
969 | local_bh_disable(); | |
970 | if (net->ct.expect_count) { | |
971 | spin_lock(&nf_conntrack_expect_lock); | |
972 | exp = nf_ct_find_expectation(net, zone, tuple); | |
973 | if (exp) { | |
974 | pr_debug("expectation arrives ct=%p exp=%p\n", | |
975 | ct, exp); | |
976 | /* Welcome, Mr. Bond. We've been expecting you... */ | |
977 | __set_bit(IPS_EXPECTED_BIT, &ct->status); | |
978 | /* exp->master safe, refcnt bumped in nf_ct_find_expectation */ | |
979 | ct->master = exp->master; | |
980 | if (exp->helper) { | |
981 | help = nf_ct_helper_ext_add(ct, exp->helper, | |
982 | GFP_ATOMIC); | |
983 | if (help) | |
984 | rcu_assign_pointer(help->helper, exp->helper); | |
985 | } | |
986 | ||
987 | #ifdef CONFIG_NF_CONNTRACK_MARK | |
988 | ct->mark = exp->master->mark; | |
989 | #endif | |
990 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | |
991 | ct->secmark = exp->master->secmark; | |
992 | #endif | |
993 | NF_CT_STAT_INC(net, expect_new); | |
994 | } | |
995 | spin_unlock(&nf_conntrack_expect_lock); | |
996 | } | |
997 | if (!exp) { | |
998 | __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); | |
999 | NF_CT_STAT_INC(net, new); | |
1000 | } | |
1001 | ||
1002 | /* Now it is inserted into the unconfirmed list, bump refcount */ | |
1003 | nf_conntrack_get(&ct->ct_general); | |
1004 | nf_ct_add_to_unconfirmed_list(ct); | |
1005 | ||
1006 | local_bh_enable(); | |
1007 | ||
1008 | if (exp) { | |
1009 | if (exp->expectfn) | |
1010 | exp->expectfn(ct, exp); | |
1011 | nf_ct_expect_put(exp); | |
1012 | } | |
1013 | ||
1014 | return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; | |
1015 | } | |
1016 | ||
1017 | /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ | |
1018 | static inline struct nf_conn * | |
1019 | resolve_normal_ct(struct net *net, struct nf_conn *tmpl, | |
1020 | struct sk_buff *skb, | |
1021 | unsigned int dataoff, | |
1022 | u_int16_t l3num, | |
1023 | u_int8_t protonum, | |
1024 | struct nf_conntrack_l3proto *l3proto, | |
1025 | struct nf_conntrack_l4proto *l4proto, | |
1026 | int *set_reply, | |
1027 | enum ip_conntrack_info *ctinfo) | |
1028 | { | |
1029 | const struct nf_conntrack_zone *zone; | |
1030 | struct nf_conntrack_tuple tuple; | |
1031 | struct nf_conntrack_tuple_hash *h; | |
1032 | struct nf_conntrack_zone tmp; | |
1033 | struct nf_conn *ct; | |
1034 | u32 hash; | |
1035 | ||
1036 | if (!nf_ct_get_tuple(skb, skb_network_offset(skb), | |
1037 | dataoff, l3num, protonum, net, &tuple, l3proto, | |
1038 | l4proto)) { | |
1039 | pr_debug("Can't get tuple\n"); | |
1040 | return NULL; | |
1041 | } | |
1042 | ||
1043 | /* look for tuple match */ | |
1044 | zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); | |
1045 | hash = hash_conntrack_raw(&tuple); | |
1046 | h = __nf_conntrack_find_get(net, zone, &tuple, hash); | |
1047 | if (!h) { | |
1048 | h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, | |
1049 | skb, dataoff, hash); | |
1050 | if (!h) | |
1051 | return NULL; | |
1052 | if (IS_ERR(h)) | |
1053 | return (void *)h; | |
1054 | } | |
1055 | ct = nf_ct_tuplehash_to_ctrack(h); | |
1056 | ||
1057 | /* It exists; we have (non-exclusive) reference. */ | |
1058 | if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { | |
1059 | *ctinfo = IP_CT_ESTABLISHED_REPLY; | |
1060 | /* Please set reply bit if this packet OK */ | |
1061 | *set_reply = 1; | |
1062 | } else { | |
1063 | /* Once we've had two way comms, always ESTABLISHED. */ | |
1064 | if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { | |
1065 | pr_debug("normal packet for %p\n", ct); | |
1066 | *ctinfo = IP_CT_ESTABLISHED; | |
1067 | } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { | |
1068 | pr_debug("related packet for %p\n", ct); | |
1069 | *ctinfo = IP_CT_RELATED; | |
1070 | } else { | |
1071 | pr_debug("new packet for %p\n", ct); | |
1072 | *ctinfo = IP_CT_NEW; | |
1073 | } | |
1074 | *set_reply = 0; | |
1075 | } | |
1076 | skb->nfct = &ct->ct_general; | |
1077 | skb->nfctinfo = *ctinfo; | |
1078 | return ct; | |
1079 | } | |
1080 | ||
1081 | unsigned int | |
1082 | nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, | |
1083 | struct sk_buff *skb) | |
1084 | { | |
1085 | struct nf_conn *ct, *tmpl = NULL; | |
1086 | enum ip_conntrack_info ctinfo; | |
1087 | struct nf_conntrack_l3proto *l3proto; | |
1088 | struct nf_conntrack_l4proto *l4proto; | |
1089 | unsigned int *timeouts; | |
1090 | unsigned int dataoff; | |
1091 | u_int8_t protonum; | |
1092 | int set_reply = 0; | |
1093 | int ret; | |
1094 | ||
1095 | if (skb->nfct) { | |
1096 | /* Previously seen (loopback or untracked)? Ignore. */ | |
1097 | tmpl = (struct nf_conn *)skb->nfct; | |
1098 | if (!nf_ct_is_template(tmpl)) { | |
1099 | NF_CT_STAT_INC_ATOMIC(net, ignore); | |
1100 | return NF_ACCEPT; | |
1101 | } | |
1102 | skb->nfct = NULL; | |
1103 | } | |
1104 | ||
1105 | /* rcu_read_lock()ed by nf_hook_slow */ | |
1106 | l3proto = __nf_ct_l3proto_find(pf); | |
1107 | ret = l3proto->get_l4proto(skb, skb_network_offset(skb), | |
1108 | &dataoff, &protonum); | |
1109 | if (ret <= 0) { | |
1110 | pr_debug("not prepared to track yet or error occurred\n"); | |
1111 | NF_CT_STAT_INC_ATOMIC(net, error); | |
1112 | NF_CT_STAT_INC_ATOMIC(net, invalid); | |
1113 | ret = -ret; | |
1114 | goto out; | |
1115 | } | |
1116 | ||
1117 | l4proto = __nf_ct_l4proto_find(pf, protonum); | |
1118 | ||
1119 | /* It may be a special packet: error, unclean... The | |
1120 | * inverse of the return code tells the netfilter | |
1121 | * core what to do with the packet. */ | |
1122 | if (l4proto->error != NULL) { | |
1123 | ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, | |
1124 | pf, hooknum); | |
1125 | if (ret <= 0) { | |
1126 | NF_CT_STAT_INC_ATOMIC(net, error); | |
1127 | NF_CT_STAT_INC_ATOMIC(net, invalid); | |
1128 | ret = -ret; | |
1129 | goto out; | |
1130 | } | |
1131 | /* ICMP[v6] protocol trackers may assign one conntrack. */ | |
1132 | if (skb->nfct) | |
1133 | goto out; | |
1134 | } | |
1135 | ||
1136 | ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, | |
1137 | l3proto, l4proto, &set_reply, &ctinfo); | |
1138 | if (!ct) { | |
1139 | /* Not valid part of a connection */ | |
1140 | NF_CT_STAT_INC_ATOMIC(net, invalid); | |
1141 | ret = NF_ACCEPT; | |
1142 | goto out; | |
1143 | } | |
1144 | ||
1145 | if (IS_ERR(ct)) { | |
1146 | /* Too stressed to deal. */ | |
1147 | NF_CT_STAT_INC_ATOMIC(net, drop); | |
1148 | ret = NF_DROP; | |
1149 | goto out; | |
1150 | } | |
1151 | ||
1152 | NF_CT_ASSERT(skb->nfct); | |
1153 | ||
1154 | /* Decide what timeout policy we want to apply to this flow. */ | |
1155 | timeouts = nf_ct_timeout_lookup(net, ct, l4proto); | |
1156 | ||
1157 | ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts); | |
1158 | if (ret <= 0) { | |
1159 | /* Invalid: inverse of the return code tells | |
1160 | * the netfilter core what to do */ | |
1161 | pr_debug("nf_conntrack_in: Can't track with proto module\n"); | |
1162 | nf_conntrack_put(skb->nfct); | |
1163 | skb->nfct = NULL; | |
1164 | NF_CT_STAT_INC_ATOMIC(net, invalid); | |
1165 | if (ret == -NF_DROP) | |
1166 | NF_CT_STAT_INC_ATOMIC(net, drop); | |
1167 | ret = -ret; | |
1168 | goto out; | |
1169 | } | |
1170 | ||
1171 | if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) | |
1172 | nf_conntrack_event_cache(IPCT_REPLY, ct); | |
1173 | out: | |
1174 | if (tmpl) { | |
1175 | /* Special case: we have to repeat this hook, assign the | |
1176 | * template again to this packet. We assume that this packet | |
1177 | * has no conntrack assigned. This is used by nf_ct_tcp. */ | |
1178 | if (ret == NF_REPEAT) | |
1179 | skb->nfct = (struct nf_conntrack *)tmpl; | |
1180 | else | |
1181 | nf_ct_put(tmpl); | |
1182 | } | |
1183 | ||
1184 | return ret; | |
1185 | } | |
1186 | EXPORT_SYMBOL_GPL(nf_conntrack_in); | |
1187 | ||
1188 | bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, | |
1189 | const struct nf_conntrack_tuple *orig) | |
1190 | { | |
1191 | bool ret; | |
1192 | ||
1193 | rcu_read_lock(); | |
1194 | ret = nf_ct_invert_tuple(inverse, orig, | |
1195 | __nf_ct_l3proto_find(orig->src.l3num), | |
1196 | __nf_ct_l4proto_find(orig->src.l3num, | |
1197 | orig->dst.protonum)); | |
1198 | rcu_read_unlock(); | |
1199 | return ret; | |
1200 | } | |
1201 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); | |
1202 | ||
1203 | /* Alter reply tuple (maybe alter helper). This is for NAT, and is | |
1204 | implicitly racy: see __nf_conntrack_confirm */ | |
1205 | void nf_conntrack_alter_reply(struct nf_conn *ct, | |
1206 | const struct nf_conntrack_tuple *newreply) | |
1207 | { | |
1208 | struct nf_conn_help *help = nfct_help(ct); | |
1209 | ||
1210 | /* Should be unconfirmed, so not in hash table yet */ | |
1211 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); | |
1212 | ||
1213 | pr_debug("Altering reply tuple of %p to ", ct); | |
1214 | nf_ct_dump_tuple(newreply); | |
1215 | ||
1216 | ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; | |
1217 | if (ct->master || (help && !hlist_empty(&help->expectations))) | |
1218 | return; | |
1219 | ||
1220 | rcu_read_lock(); | |
1221 | __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); | |
1222 | rcu_read_unlock(); | |
1223 | } | |
1224 | EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); | |
1225 | ||
1226 | /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ | |
1227 | void __nf_ct_refresh_acct(struct nf_conn *ct, | |
1228 | enum ip_conntrack_info ctinfo, | |
1229 | const struct sk_buff *skb, | |
1230 | unsigned long extra_jiffies, | |
1231 | int do_acct) | |
1232 | { | |
1233 | NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); | |
1234 | NF_CT_ASSERT(skb); | |
1235 | ||
1236 | /* Only update if this is not a fixed timeout */ | |
1237 | if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) | |
1238 | goto acct; | |
1239 | ||
1240 | /* If not in hash table, timer will not be active yet */ | |
1241 | if (!nf_ct_is_confirmed(ct)) { | |
1242 | ct->timeout.expires = extra_jiffies; | |
1243 | } else { | |
1244 | unsigned long newtime = jiffies + extra_jiffies; | |
1245 | ||
1246 | /* Only update the timeout if the new timeout is at least | |
1247 | HZ jiffies from the old timeout. Need del_timer for race | |
1248 | avoidance (may already be dying). */ | |
1249 | if (newtime - ct->timeout.expires >= HZ) | |
1250 | mod_timer_pending(&ct->timeout, newtime); | |
1251 | } | |
1252 | ||
1253 | acct: | |
1254 | if (do_acct) { | |
1255 | struct nf_conn_acct *acct; | |
1256 | ||
1257 | acct = nf_conn_acct_find(ct); | |
1258 | if (acct) { | |
1259 | struct nf_conn_counter *counter = acct->counter; | |
1260 | ||
1261 | atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); | |
1262 | atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes); | |
1263 | } | |
1264 | } | |
1265 | } | |
1266 | EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); | |
1267 | ||
1268 | bool __nf_ct_kill_acct(struct nf_conn *ct, | |
1269 | enum ip_conntrack_info ctinfo, | |
1270 | const struct sk_buff *skb, | |
1271 | int do_acct) | |
1272 | { | |
1273 | if (do_acct) { | |
1274 | struct nf_conn_acct *acct; | |
1275 | ||
1276 | acct = nf_conn_acct_find(ct); | |
1277 | if (acct) { | |
1278 | struct nf_conn_counter *counter = acct->counter; | |
1279 | ||
1280 | atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); | |
1281 | atomic64_add(skb->len - skb_network_offset(skb), | |
1282 | &counter[CTINFO2DIR(ctinfo)].bytes); | |
1283 | } | |
1284 | } | |
1285 | ||
1286 | if (del_timer(&ct->timeout)) { | |
1287 | ct->timeout.function((unsigned long)ct); | |
1288 | return true; | |
1289 | } | |
1290 | return false; | |
1291 | } | |
1292 | EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); | |
1293 | ||
1294 | #ifdef CONFIG_NF_CONNTRACK_ZONES | |
1295 | static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { | |
1296 | .len = sizeof(struct nf_conntrack_zone), | |
1297 | .align = __alignof__(struct nf_conntrack_zone), | |
1298 | .id = NF_CT_EXT_ZONE, | |
1299 | }; | |
1300 | #endif | |
1301 | ||
1302 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) | |
1303 | ||
1304 | #include <linux/netfilter/nfnetlink.h> | |
1305 | #include <linux/netfilter/nfnetlink_conntrack.h> | |
1306 | #include <linux/mutex.h> | |
1307 | ||
1308 | /* Generic functions for tcp/udp/sctp/dccp and the like. These need to be | |
1309 | * in nf_conntrack_core, since we don't want the protocols to autoload | |
1310 | * or depend on ctnetlink */ | |
1311 | int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, | |
1312 | const struct nf_conntrack_tuple *tuple) | |
1313 | { | |
1314 | if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) || | |
1315 | nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port)) | |
1316 | goto nla_put_failure; | |
1317 | return 0; | |
1318 | ||
1319 | nla_put_failure: | |
1320 | return -1; | |
1321 | } | |
1322 | EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr); | |
1323 | ||
1324 | const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = { | |
1325 | [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 }, | |
1326 | [CTA_PROTO_DST_PORT] = { .type = NLA_U16 }, | |
1327 | }; | |
1328 | EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy); | |
1329 | ||
1330 | int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], | |
1331 | struct nf_conntrack_tuple *t) | |
1332 | { | |
1333 | if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) | |
1334 | return -EINVAL; | |
1335 | ||
1336 | t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); | |
1337 | t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); | |
1338 | ||
1339 | return 0; | |
1340 | } | |
1341 | EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); | |
1342 | ||
1343 | int nf_ct_port_nlattr_tuple_size(void) | |
1344 | { | |
1345 | return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1); | |
1346 | } | |
1347 | EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size); | |
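For context, the core never calls these helpers directly; an L4 tracker wires them into its nf_conntrack_l4proto ops so that ctnetlink can serialize port-based tuples. An abridged sketch of that wiring (field names as used by trackers such as nf_conntrack_proto_tcp.c in the same tree):

```c
/* Abridged sketch -- only the ctnetlink-related fields are shown. */
struct nf_conntrack_l4proto my_l4proto __read_mostly = {
	/* ... .l3proto, .l4proto, .packet, .new, ... */
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
};
```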
1348 | #endif | |
1349 | ||
1350 | /* Used by ipt_REJECT and ip6t_REJECT. */ | |
1351 | static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) | |
1352 | { | |
1353 | struct nf_conn *ct; | |
1354 | enum ip_conntrack_info ctinfo; | |
1355 | ||
1356 | /* This ICMP is in reverse direction to the packet which caused it */ | |
1357 | ct = nf_ct_get(skb, &ctinfo); | |
1358 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) | |
1359 | ctinfo = IP_CT_RELATED_REPLY; | |
1360 | else | |
1361 | ctinfo = IP_CT_RELATED; | |
1362 | ||
1363 | /* Attach to new skbuff, and increment count */ | |
1364 | nskb->nfct = &ct->ct_general; | |
1365 | nskb->nfctinfo = ctinfo; | |
1366 | nf_conntrack_get(nskb->nfct); | |
1367 | } | |
1368 | ||
1369 | /* Bring out ya dead! */ | |
1370 | static struct nf_conn * | |
1371 | get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), | |
1372 | void *data, unsigned int *bucket) | |
1373 | { | |
1374 | struct nf_conntrack_tuple_hash *h; | |
1375 | struct nf_conn *ct; | |
1376 | struct hlist_nulls_node *n; | |
1377 | int cpu; | |
1378 | spinlock_t *lockp; | |
1379 | ||
1380 | for (; *bucket < net->ct.htable_size; (*bucket)++) { | |
1381 | lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; | |
1382 | local_bh_disable(); | |
1383 | nf_conntrack_lock(lockp); | |
1384 | if (*bucket < net->ct.htable_size) { | |
1385 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { | |
1386 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) | |
1387 | continue; | |
1388 | ct = nf_ct_tuplehash_to_ctrack(h); | |
1389 | if (iter(ct, data)) | |
1390 | goto found; | |
1391 | } | |
1392 | } | |
1393 | spin_unlock(lockp); | |
1394 | local_bh_enable(); | |
1395 | cond_resched(); | |
1396 | } | |
1397 | ||
1398 | for_each_possible_cpu(cpu) { | |
1399 | struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); | |
1400 | ||
1401 | spin_lock_bh(&pcpu->lock); | |
1402 | hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { | |
1403 | ct = nf_ct_tuplehash_to_ctrack(h); | |
1404 | if (iter(ct, data)) | |
1405 | set_bit(IPS_DYING_BIT, &ct->status); | |
1406 | } | |
1407 | spin_unlock_bh(&pcpu->lock); | |
1408 | cond_resched(); | |
1409 | } | |
1410 | return NULL; | |
1411 | found: | |
1412 | atomic_inc(&ct->ct_general.use); | |
1413 | spin_unlock(lockp); | |
1414 | local_bh_enable(); | |
1415 | return ct; | |
1416 | } | |
1417 | ||
1418 | void nf_ct_iterate_cleanup(struct net *net, | |
1419 | int (*iter)(struct nf_conn *i, void *data), | |
1420 | void *data, u32 portid, int report) | |
1421 | { | |
1422 | struct nf_conn *ct; | |
1423 | unsigned int bucket = 0; | |
1424 | ||
1425 | might_sleep(); | |
1426 | ||
1427 | while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { | |
1428 | /* Time to push up daisies... */ | |
1429 | if (del_timer(&ct->timeout)) | |
1430 | nf_ct_delete(ct, portid, report); | |
1431 | ||
1432 | /* ... else the timer will get him soon. */ | |
1433 | ||
1434 | nf_ct_put(ct); | |
1435 | cond_resched(); | |
1436 | } | |
1437 | } | |
1438 | EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); | |
1439 | ||
1440 | static int kill_all(struct nf_conn *i, void *data) | |
1441 | { | |
1442 | return 1; | |
1443 | } | |
1444 | ||
1445 | void nf_ct_free_hashtable(void *hash, unsigned int size) | |
1446 | { | |
1447 | if (is_vmalloc_addr(hash)) | |
1448 | vfree(hash); | |
1449 | else | |
1450 | free_pages((unsigned long)hash, | |
1451 | get_order(sizeof(struct hlist_head) * size)); | |
1452 | } | |
1453 | EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); | |
1454 | ||
1455 | static int untrack_refs(void) | |
1456 | { | |
1457 | int cnt = 0, cpu; | |
1458 | ||
1459 | for_each_possible_cpu(cpu) { | |
1460 | struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); | |
1461 | ||
1462 | cnt += atomic_read(&ct->ct_general.use) - 1; | |
1463 | } | |
1464 | return cnt; | |
1465 | } | |
1466 | ||
1467 | void nf_conntrack_cleanup_start(void) | |
1468 | { | |
1469 | RCU_INIT_POINTER(ip_ct_attach, NULL); | |
1470 | } | |
1471 | ||
1472 | void nf_conntrack_cleanup_end(void) | |
1473 | { | |
1474 | RCU_INIT_POINTER(nf_ct_destroy, NULL); | |
1475 | while (untrack_refs() > 0) | |
1476 | schedule(); | |
1477 | ||
1478 | #ifdef CONFIG_NF_CONNTRACK_ZONES | |
1479 | nf_ct_extend_unregister(&nf_ct_zone_extend); | |
1480 | #endif | |
1481 | nf_conntrack_proto_fini(); | |
1482 | nf_conntrack_seqadj_fini(); | |
1483 | nf_conntrack_labels_fini(); | |
1484 | nf_conntrack_helper_fini(); | |
1485 | nf_conntrack_timeout_fini(); | |
1486 | nf_conntrack_ecache_fini(); | |
1487 | nf_conntrack_tstamp_fini(); | |
1488 | nf_conntrack_acct_fini(); | |
1489 | nf_conntrack_expect_fini(); | |
1490 | } | |
1491 | ||
1492 | /* | |
1493 | * Mishearing the voices in his head, our hero wonders how he's | |
1494 | * supposed to kill the mall. | |
1495 | */ | |
1496 | void nf_conntrack_cleanup_net(struct net *net) | |
1497 | { | |
1498 | LIST_HEAD(single); | |
1499 | ||
1500 | list_add(&net->exit_list, &single); | |
1501 | nf_conntrack_cleanup_net_list(&single); | |
1502 | } | |
1503 | ||
1504 | void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list) | |
1505 | { | |
1506 | int busy; | |
1507 | struct net *net; | |
1508 | ||
1509 | /* | |
1510 | * This makes sure all current packets have passed through | |
1511 | * netfilter framework. Roll on, two-stage module | |
1512 | * delete... | |
1513 | */ | |
1514 | synchronize_net(); | |
1515 | i_see_dead_people: | |
1516 | busy = 0; | |
1517 | list_for_each_entry(net, net_exit_list, exit_list) { | |
1518 | nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0); | |
1519 | if (atomic_read(&net->ct.count) != 0) | |
1520 | busy = 1; | |
1521 | } | |
1522 | if (busy) { | |
1523 | schedule(); | |
1524 | goto i_see_dead_people; | |
1525 | } | |
1526 | ||
1527 | list_for_each_entry(net, net_exit_list, exit_list) { | |
1528 | nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); | |
1529 | nf_conntrack_proto_pernet_fini(net); | |
1530 | nf_conntrack_helper_pernet_fini(net); | |
1531 | nf_conntrack_ecache_pernet_fini(net); | |
1532 | nf_conntrack_tstamp_pernet_fini(net); | |
1533 | nf_conntrack_acct_pernet_fini(net); | |
1534 | nf_conntrack_expect_pernet_fini(net); | |
1535 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); | |
1536 | kfree(net->ct.slabname); | |
1537 | free_percpu(net->ct.stat); | |
1538 | free_percpu(net->ct.pcpu_lists); | |
1539 | } | |
1540 | } | |
1541 | ||
1542 | void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) | |
1543 | { | |
1544 | struct hlist_nulls_head *hash; | |
1545 | unsigned int nr_slots, i; | |
1546 | size_t sz; | |
1547 | ||
1548 | BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); | |
1549 | nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); | |
1550 | sz = nr_slots * sizeof(struct hlist_nulls_head); | |
1551 | hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, | |
1552 | get_order(sz)); | |
1553 | if (!hash) | |
1554 | hash = vzalloc(sz); | |
1555 | ||
1556 | if (hash && nulls) | |
1557 | for (i = 0; i < nr_slots; i++) | |
1558 | INIT_HLIST_NULLS_HEAD(&hash[i], i); | |
1559 | ||
1560 | return hash; | |
1561 | } | |
1562 | EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); | |
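nf_ct_alloc_hashtable() rounds the requested bucket count up to a whole number of pages' worth of slots, so none of the allocated memory is wasted. A standalone re-statement of that arithmetic, assuming 4 KiB pages and 8-byte list heads:

```c
#include <stdio.h>

/* roundup(x, y) as defined in <linux/kernel.h> */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned int head_size = 8;	/* sizeof(struct hlist_nulls_head), 64-bit */
	unsigned int requested = 1000;

	unsigned int per_page = page_size / head_size;		/* 512 */
	unsigned int nr_slots = roundup(requested, per_page);	/* 1024 */

	printf("%u requested -> %u slots (%u per page)\n",
	       requested, nr_slots, per_page);
	return 0;
}
```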

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, rc;
	unsigned int hashsize, old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (current->nsproxy->net_ns != &init_net)
		return -EOPNOTSUPP;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	rc = kstrtouint(val, 0, &hashsize);
	if (rc)
		return rc;
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, 1);
	if (!hash)
		return -ENOMEM;

	local_bh_disable();
	nf_conntrack_all_lock();
	write_seqcount_begin(&nf_conntrack_generation);

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash,
	 * though, since that requires taking the locks.
	 */

	for (i = 0; i < init_net.ct.htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					      struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, hashsize);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = init_net.ct.htable_size;
	old_hash = init_net.ct.hash;

	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
	init_net.ct.hash = hash;

	write_seqcount_end(&nf_conntrack_generation);
	nf_conntrack_all_unlock();
	local_bh_enable();

	nf_ct_free_hashtable(old_hash, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
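
/* Administrative usage of the parameter above (shell, not C; shown for
 * illustration).  Runtime resizing is restricted to the initial network
 * namespace, as enforced by the -EOPNOTSUPP check in the setter:
 *
 *	modprobe nf_conntrack hashsize=131072
 *	echo 131072 > /sys/module/nf_conntrack/parameters/hashsize
 */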

void nf_ct_untracked_status_or(unsigned long bits)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
}
EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
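
/* Consumers test the status bits rather than comparing against the
 * per-cpu template directly.  Sketch of the consumer side, assuming
 * the nf_ct_is_untracked() helper from this era of the tree:
 *
 *	if (nf_ct_is_untracked(ct))
 *		return;		// fake entry: skip accounting, NAT, etc.
 */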

int nf_conntrack_init_start(void)
{
	int max_factor = 8;
	int i, ret, cpu;

	seqcount_init(&nf_conntrack_generation);

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_conntrack_locks[i]);

	if (!nf_conntrack_htable_size) {
		/* Idea from tcp.c: use 1/16384 of memory.
		 * On i386: a 32MB machine has 512 buckets.
		 * >= 1GB machines have 16384 buckets.
		 * >= 4GB machines have 65536 buckets.
		 */
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
			nf_conntrack_htable_size = 65536;
		else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a maximum factor of four by default to get the same
		 * maximum as with the old struct list_heads.  When a table
		 * size is given we use the old value of 8 to avoid reducing
		 * the maximum number of entries. */
		max_factor = 4;
	}
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto err_expect;

	ret = nf_conntrack_acct_init();
	if (ret < 0)
		goto err_acct;

	ret = nf_conntrack_tstamp_init();
	if (ret < 0)
		goto err_tstamp;

	ret = nf_conntrack_ecache_init();
	if (ret < 0)
		goto err_ecache;

	ret = nf_conntrack_timeout_init();
	if (ret < 0)
		goto err_timeout;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

	ret = nf_conntrack_labels_init();
	if (ret < 0)
		goto err_labels;

	ret = nf_conntrack_seqadj_init();
	if (ret < 0)
		goto err_seqadj;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	ret = nf_ct_extend_register(&nf_ct_zone_extend);
	if (ret < 0)
		goto err_extend;
#endif
	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	/* Set up fake conntrack: to never be deleted, not in any hashes */
	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
		write_pnet(&ct->ct_net, &init_net);
		atomic_set(&ct->ct_general.use, 1);
	}
	/* - and make it look like a confirmed connection */
	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
	return 0;

err_proto:
#ifdef CONFIG_NF_CONNTRACK_ZONES
	nf_ct_extend_unregister(&nf_ct_zone_extend);
err_extend:
#endif
	nf_conntrack_seqadj_fini();
err_seqadj:
	nf_conntrack_labels_fini();
err_labels:
	nf_conntrack_helper_fini();
err_helper:
	nf_conntrack_timeout_fini();
err_timeout:
	nf_conntrack_ecache_fini();
err_ecache:
	nf_conntrack_tstamp_fini();
err_tstamp:
	nf_conntrack_acct_fini();
err_acct:
	nf_conntrack_expect_fini();
err_expect:
	return ret;
}
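
/* The unwind ladder above releases resources in exactly the reverse
 * order of acquisition, one label per successfully initialized
 * subsystem.  Generic shape of the idiom, with hypothetical
 * setup_a()/setup_b() helpers:
 *
 *	ret = setup_a();
 *	if (ret < 0)
 *		goto err_a;
 *	ret = setup_b();
 *	if (ret < 0)
 *		goto err_b;
 *	return 0;
 * err_b:
 *	teardown_a();	// undo only what already succeeded
 * err_a:
 *	return ret;
 */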

void nf_conntrack_init_end(void)
{
	/* For use by REJECT target */
	RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
	RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
}

/*
 * We need to use special "null" values, never used in the hash table,
 * to mark the ends of the unconfirmed, dying and template lists.
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)
#define TEMPLATE_NULLS_VAL	((1<<30)+2)
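
/* A "nulls" list ends in an odd pointer encoding a value instead of
 * plain NULL, so an RCU walker can tell which chain it finished on and
 * detect entries that migrated mid-traversal.  Sketch of the consumer
 * pattern, using the real get_nulls_value() helper (illustrative only):
 *
 *	struct hlist_nulls_node *n;
 *
 * restart:
 *	hlist_nulls_for_each_entry_rcu(h, n, &pcpu->dying, hnnode) {
 *		// ... inspect h ...
 *	}
 *	if (get_nulls_value(n) != DYING_NULLS_VAL)
 *		goto restart;	// walked onto another chain; retry
 */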

int nf_conntrack_init_net(struct net *net)
{
	int ret = -ENOMEM;
	int cpu;

	atomic_set(&net->ct.count, 0);

	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
	if (!net->ct.pcpu_lists)
		goto err_stat;

	for_each_possible_cpu(cpu) {
		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

		spin_lock_init(&pcpu->lock);
		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
	}

	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat)
		goto err_pcpu_lists;

	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
	if (!net->ct.slabname)
		goto err_slabname;

	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
							sizeof(struct nf_conn), 0,
							SLAB_DESTROY_BY_RCU, NULL);
	if (!net->ct.nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_cache;
	}

	net->ct.htable_size = nf_conntrack_htable_size;
	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
	if (!net->ct.hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_pernet_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_pernet_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_tstamp_pernet_init(net);
	if (ret < 0)
		goto err_tstamp;
	ret = nf_conntrack_ecache_pernet_init(net);
	if (ret < 0)
		goto err_ecache;
	ret = nf_conntrack_helper_pernet_init(net);
	if (ret < 0)
		goto err_helper;
	ret = nf_conntrack_proto_pernet_init(net);
	if (ret < 0)
		goto err_proto;
	return 0;

err_proto:
	nf_conntrack_helper_pernet_fini(net);
err_helper:
	nf_conntrack_ecache_pernet_fini(net);
err_ecache:
	nf_conntrack_tstamp_pernet_fini(net);
err_tstamp:
	nf_conntrack_acct_pernet_fini(net);
err_acct:
	nf_conntrack_expect_pernet_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
	kfree(net->ct.slabname);
err_slabname:
	free_percpu(net->ct.stat);
err_pcpu_lists:
	free_percpu(net->ct.pcpu_lists);
err_stat:
	return ret;
}
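
/* Wiring sketch: per-net init/exit functions like the ones above are
 * normally hooked up through pernet operations.  The callback names
 * below are hypothetical (the actual registration lives in the
 * conntrack standalone module, not in this file):
 *
 *	static struct pernet_operations nf_conntrack_net_ops = {
 *		.init		= my_pernet_init,   // calls nf_conntrack_init_net()
 *		.exit_batch	= my_pernet_exit,   // calls nf_conntrack_cleanup_net_list()
 *	};
 *
 *	register_pernet_subsys(&nf_conntrack_net_ops);
 */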