/* Connection state tracking for netfilter. This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
	struct delayed_work	dwork;
	u32			last_bucket;
	bool			exiting;
	bool			early_drop;
	long			next_gc_run;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
#define GC_MAX_BUCKETS_DIV	128u
/* upper bound of full table scan */
#define GC_MAX_SCAN_JIFFIES	(16u * HZ)
/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO	50u

static struct conntrack_gc_work conntrack_gc_work;

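/* Locking here is two-level: normal paths take one of the CONNTRACK_LOCKS
 * hashed spinlocks through nf_conntrack_lock() below, while operations that
 * must quiesce the whole table set nf_conntrack_locks_all under
 * nf_conntrack_locks_all_lock and then wait for every hashed lock to drain.
 * A rough usage sketch (the resize path below follows this pattern):
 *
 *	nf_conntrack_all_lock();
 *	... move entries between buckets ...
 *	nf_conntrack_all_unlock();
 */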
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
	spin_lock(lock);
	while (unlikely(nf_conntrack_locks_all)) {
		spin_unlock(lock);

		/*
		 * Order the 'nf_conntrack_locks_all' load vs. the
		 * spin_unlock_wait() loads below, to ensure
		 * that 'nf_conntrack_locks_all_lock' is indeed held:
		 */
		smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
		spin_unlock_wait(&nf_conntrack_locks_all_lock);
		spin_lock(lock);
	}
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	spin_unlock(&nf_conntrack_locks[h1]);
	if (h1 != h2)
		spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
				     unsigned int h2, unsigned int sequence)
{
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	if (h1 <= h2) {
		nf_conntrack_lock(&nf_conntrack_locks[h1]);
		if (h1 != h2)
			spin_lock_nested(&nf_conntrack_locks[h2],
					 SINGLE_DEPTH_NESTING);
	} else {
		nf_conntrack_lock(&nf_conntrack_locks[h2]);
		spin_lock_nested(&nf_conntrack_locks[h1],
				 SINGLE_DEPTH_NESTING);
	}
	if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
		nf_conntrack_double_unlock(h1, h2);
		return true;
	}
	return false;
}

static void nf_conntrack_all_lock(void)
{
	int i;

	spin_lock(&nf_conntrack_locks_all_lock);
	nf_conntrack_locks_all = true;

	/*
	 * Order the above store of 'nf_conntrack_locks_all' against
	 * the spin_unlock_wait() loads below, such that if
	 * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
	 * we must observe nf_conntrack_locks[] held:
	 */
	smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */

	for (i = 0; i < CONNTRACK_LOCKS; i++) {
		spin_unlock_wait(&nf_conntrack_locks[i]);
	}
}

static void nf_conntrack_all_unlock(void)
{
	/*
	 * All prior stores must be complete before we clear
	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
	 * might observe the false value but not the entire
	 * critical section:
	 */
	smp_store_release(&nf_conntrack_locks_all, false);
	spin_unlock(&nf_conntrack_locks_all_lock);
}

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
seqcount_t nf_conntrack_generation __read_mostly;
static unsigned int nf_conntrack_hash_rnd __read_mostly;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
			      const struct net *net)
{
	unsigned int n;
	u32 seed;

	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	return jhash2((u32 *)tuple, n, seed ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		      tuple->dst.protonum));
}

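/* The helpers below fold the full 32-bit jhash value into the current table
 * size with reciprocal_scale(), i.e. bucket = ((u64)hash * size) >> 32.
 * Rough worked example (numbers illustrative only): with 16384 buckets,
 * hash 0x80000000 maps to bucket 8192.
 */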
static u32 scale_hash(u32 hash)
{
	return reciprocal_scale(hash, nf_conntrack_htable_size);
}

static u32 __hash_conntrack(const struct net *net,
			    const struct nf_conntrack_tuple *tuple,
			    unsigned int size)
{
	return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

static u32 hash_conntrack(const struct net *net,
			  const struct nf_conntrack_tuple *tuple)
{
	return scale_hash(hash_conntrack_raw(tuple, net));
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct net *net,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num,
		       struct net *net, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) dying list */
	ct->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &pcpu->dying);
	spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) unconfirmed list */
	ct->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &pcpu->unconfirmed);
	spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* We overload first tuple to link into unconfirmed or dying list. */
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&pcpu->lock);
}

#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

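/* NFCT_ALIGN() rounds a value up to the next (NFCT_INFOMASK + 1) boundary so
 * the low bits of a conntrack pointer stay free for the ctinfo bits stored in
 * skb->_nfct.  Worked example (illustrative, assuming NFCT_INFOMASK == 7):
 * NFCT_ALIGN(0x1001) == (0x1001 + 7) & ~7 == 0x1008.
 */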
/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
				 const struct nf_conntrack_zone *zone,
				 gfp_t flags)
{
	struct nf_conn *tmpl, *p;

	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
		tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
		if (!tmpl)
			return NULL;

		p = tmpl;
		tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
		if (tmpl != p) {
			tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
			tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
		}
	} else {
		tmpl = kzalloc(sizeof(*tmpl), flags);
		if (!tmpl)
			return NULL;
	}

	tmpl->status = IPS_TEMPLATE;
	write_pnet(&tmpl->ct_net, net);
	nf_ct_zone_add(tmpl, zone);
	atomic_set(&tmpl->ct_general.use, 0);

	return tmpl;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
	nf_ct_ext_destroy(tmpl);
	nf_ct_ext_free(tmpl);

	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
	else
		kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);

	if (unlikely(nf_ct_is_template(ct))) {
		nf_ct_tmpl_free(ct);
		return;
	}
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->destroy)
		l4proto->destroy(ct);

	rcu_read_unlock();

	local_bh_disable();
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too.
	 */
	nf_ct_remove_expectations(ct);

	nf_ct_del_from_dying_or_unconfirmed_list(ct);

	local_bh_enable();

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, reply_hash;
	unsigned int sequence;

	nf_ct_helper_destroy(ct);

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	clean_from_lists(ct);
	nf_conntrack_double_unlock(hash, reply_hash);

	nf_ct_add_to_dying_list(ct);

	local_bh_enable();
}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
	struct nf_conn_tstamp *tstamp;

	if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
		return false;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_get_real_ns();

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				      portid, report) < 0) {
		/* destroy event was not delivered. nf_ct_put will
		 * be done by event cache worker on redelivery.
		 */
		nf_ct_delete_from_lists(ct);
		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
		return false;
	}

	nf_conntrack_ecache_work(nf_ct_net(ct));
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
	return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
		const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	/* A conntrack can be recreated with an equal tuple,
	 * so we need to check that the conntrack is confirmed
	 */
	return nf_ct_tuple_equal(tuple, &h->tuple) &&
	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
	       nf_ct_is_confirmed(ct) &&
	       net_eq(net, nf_ct_net(ct));
}

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
	if (!atomic_inc_not_zero(&ct->ct_general.use))
		return;

	if (nf_ct_should_gc(ct))
		nf_ct_kill(ct);

	nf_ct_put(ct);
}

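/* The lookup below runs locklessly under RCU.  Each hash chain is an
 * hlist_nulls list whose terminating "nulls" value encodes the bucket number,
 * so a reader that gets migrated to another chain (entries can be recycled
 * and re-hashed concurrently, as the cache is SLAB_TYPESAFE_BY_RCU) detects
 * it: the nulls value it reaches will not match the bucket it started in,
 * and it restarts the walk.
 */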
/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_head *ct_hash;
	struct hlist_nulls_node *n;
	unsigned int bucket, hsize;

begin:
	nf_conntrack_get_ht(&ct_hash, &hsize);
	bucket = reciprocal_scale(hash, hsize);

	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
		struct nf_conn *ct;

		ct = nf_ct_tuplehash_to_ctrack(h);
		if (nf_ct_is_expired(ct)) {
			nf_ct_gc_expired(ct);
			continue;
		}

		if (nf_ct_is_dying(ct))
			continue;

		if (nf_ct_key_equal(h, tuple, zone, net))
			return h;
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != bucket) {
		NF_CT_STAT_INC_ATOMIC(net, search_restart);
		goto begin;
	}

	return NULL;
}

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	return __nf_conntrack_find_get(net, zone, tuple,
				       hash_conntrack_raw(tuple, net));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int reply_hash)
{
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &nf_conntrack_hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &nf_conntrack_hash[reply_hash]);
}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);
	unsigned int hash, reply_hash;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int sequence;

	zone = nf_ct_zone(ct);

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* See if there's one in the list already, including reverse */
	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				    zone, net))
			goto out;

	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				    zone, net))
			goto out;

	smp_wmb();
	/* The caller holds a reference to this object */
	atomic_set(&ct->ct_general.use, 2);
	__nf_conntrack_hash_insert(ct, hash, reply_hash);
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert);
	local_bh_enable();
	return 0;

out:
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert_failed);
	local_bh_enable();
	return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

static inline void nf_ct_acct_update(struct nf_conn *ct,
				     enum ip_conntrack_info ctinfo,
				     unsigned int len)
{
	struct nf_conn_acct *acct;

	acct = nf_conn_acct_find(ct);
	if (acct) {
		struct nf_conn_counter *counter = acct->counter;

		atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
		atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
	}
}

static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			     const struct nf_conn *loser_ct)
{
	struct nf_conn_acct *acct;

	acct = nf_conn_acct_find(loser_ct);
	if (acct) {
		struct nf_conn_counter *counter = acct->counter;
		unsigned int bytes;

		/* u32 should be fine since we must have seen one packet. */
		bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
		nf_ct_acct_update(ct, ctinfo, bytes);
	}
}

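/* Two CPUs can create fresh conntracks for the same flow at the same time;
 * only one wins the race to insert into the hash.  For trackers that opt in
 * via l4proto->allow_clash (e.g. connectionless protocols such as UDP), the
 * loser's packet is simply re-attached to the winning entry below, after its
 * byte/packet counters have been merged, instead of being dropped.
 */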
/* Resolve race on insertion if this protocol allows this. */
static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
			       enum ip_conntrack_info ctinfo,
			       struct nf_conntrack_tuple_hash *h)
{
	/* This is the conntrack entry already in hashes that won race. */
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
	struct nf_conntrack_l4proto *l4proto;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->allow_clash &&
	    ((ct->status & IPS_NAT_DONE_MASK) == 0) &&
	    !nf_ct_is_dying(ct) &&
	    atomic_inc_not_zero(&ct->ct_general.use)) {
		enum ip_conntrack_info oldinfo;
		struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);

		nf_ct_acct_merge(ct, ctinfo, loser_ct);
		nf_conntrack_put(&loser_ct->ct_general);
		nf_ct_set(skb, ct, oldinfo);
		return NF_ACCEPT;
	}
	NF_CT_STAT_INC(net, drop);
	return NF_DROP;
}

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	const struct nf_conntrack_zone *zone;
	unsigned int hash, reply_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conn_tstamp *tstamp;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	unsigned int sequence;
	int ret = NF_DROP;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction. Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	local_bh_disable();

	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		/* reuse the hash saved before */
		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
		hash = scale_hash(hash);
		reply_hash = hash_conntrack(net,
					    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* We're not in hash table, and we refuse to set up related
	 * connections for unconfirmed conns.  But packet copies and
	 * REJECT will give spurious warnings here.
	 */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	 * confirmed us.
	 */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);
	/* We have to check the DYING flag after unlink to prevent
	 * a race against nf_ct_get_next_corpse() possibly called from
	 * user context, else we insert an already 'dead' hash, blocking
	 * further use of that particular connection -JM.
	 */
	nf_ct_del_from_dying_or_unconfirmed_list(ct);

	if (unlikely(nf_ct_is_dying(ct))) {
		nf_ct_add_to_dying_list(ct);
		goto dying;
	}

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				    zone, net))
			goto out;

	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				    zone, net))
			goto out;

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout += nfct_time_stamp;
	atomic_inc(&ct->ct_general.use);
	ct->status |= IPS_CONFIRMED;

	/* set conntrack timestamp, if enabled. */
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp) {
		if (skb->tstamp == 0)
			__net_timestamp(skb);

		tstamp->start = ktime_to_ns(skb->tstamp);
	}
	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, reply_hash);
	nf_conntrack_double_unlock(hash, reply_hash);
	local_bh_enable();

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	nf_ct_add_to_dying_list(ct);
	ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
dying:
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert_failed);
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_head *ct_hash;
	unsigned int hash, hsize;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;

	zone = nf_ct_zone(ignored_conntrack);

	rcu_read_lock();
begin:
	nf_conntrack_get_ht(&ct_hash, &hsize);
	hash = __hash_conntrack(net, tuple, hsize);

	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);

		if (ct == ignored_conntrack)
			continue;

		if (nf_ct_is_expired(ct)) {
			nf_ct_gc_expired(ct);
			continue;
		}

		if (nf_ct_key_equal(h, tuple, zone, net)) {
			NF_CT_STAT_INC_ATOMIC(net, found);
			rcu_read_unlock();
			return 1;
		}
	}

	if (get_nulls_value(n) != hash) {
		NF_CT_STAT_INC_ATOMIC(net, search_restart);
		goto begin;
	}

	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

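/* When the table is full, early_drop() below scans up to
 * NF_CT_EVICTION_RANGE consecutive hash buckets and evicts expired or
 * non-assured entries to make room for a new connection; entries with
 * IPS_ASSURED set are skipped here.
 */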
/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
				    struct hlist_nulls_head *head)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int drops = 0;
	struct nf_conn *tmp;

	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
		tmp = nf_ct_tuplehash_to_ctrack(h);

		if (nf_ct_is_expired(tmp)) {
			nf_ct_gc_expired(tmp);
			continue;
		}

		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
		    !net_eq(nf_ct_net(tmp), net) ||
		    nf_ct_is_dying(tmp))
			continue;

		if (!atomic_inc_not_zero(&tmp->ct_general.use))
			continue;

		/* kill only if still in same netns -- might have moved due to
		 * SLAB_TYPESAFE_BY_RCU rules.
		 *
		 * We steal the timer reference.  If that fails timer has
		 * already fired or someone else deleted it. Just drop ref
		 * and move to next entry.
		 */
		if (net_eq(nf_ct_net(tmp), net) &&
		    nf_ct_is_confirmed(tmp) &&
		    nf_ct_delete(tmp, 0, 0))
			drops++;

		nf_ct_put(tmp);
	}

	return drops;
}

static noinline int early_drop(struct net *net, unsigned int _hash)
{
	unsigned int i;

	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
		struct hlist_nulls_head *ct_hash;
		unsigned int hash, hsize, drops;

		rcu_read_lock();
		nf_conntrack_get_ht(&ct_hash, &hsize);
		hash = reciprocal_scale(_hash++, hsize);

		drops = early_drop_list(net, &ct_hash[hash]);
		rcu_read_unlock();

		if (drops) {
			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
			return true;
		}
	}

	return false;
}

static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
	return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}

static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;

	if (!test_bit(IPS_ASSURED_BIT, &ct->status))
		return true;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
		return true;

	return false;
}

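/* A rough feel for the gc cadence (numbers illustrative): with
 * GC_MAX_BUCKETS_DIV == 128 each run scans at most 1/128 of the table, so a
 * full sweep takes at least 128 runs.  While few expired entries are found,
 * next_gc_run grows by HZ/128 per run up to the cap of
 * GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV (16*HZ/128 == HZ/8); once more
 * than GC_EVICT_RATIO percent of scanned entries turn out to be expired, it
 * drops back to the minimum interval.
 */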
static void gc_worker(struct work_struct *work)
{
	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
	unsigned int i, goal, buckets = 0, expired_count = 0;
	unsigned int nf_conntrack_max95 = 0;
	struct conntrack_gc_work *gc_work;
	unsigned int ratio, scanned = 0;
	unsigned long next_run;

	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

	goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
	i = gc_work->last_bucket;
	if (gc_work->early_drop)
		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

	do {
		struct nf_conntrack_tuple_hash *h;
		struct hlist_nulls_head *ct_hash;
		struct hlist_nulls_node *n;
		unsigned int hashsz;
		struct nf_conn *tmp;

		i++;
		rcu_read_lock();

		nf_conntrack_get_ht(&ct_hash, &hashsz);
		if (i >= hashsz)
			i = 0;

		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
			struct net *net;

			tmp = nf_ct_tuplehash_to_ctrack(h);

			scanned++;
			if (nf_ct_is_expired(tmp)) {
				nf_ct_gc_expired(tmp);
				expired_count++;
				continue;
			}

			if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
				continue;

			net = nf_ct_net(tmp);
			if (atomic_read(&net->ct.count) < nf_conntrack_max95)
				continue;

			/* need to take reference to avoid possible races */
			if (!atomic_inc_not_zero(&tmp->ct_general.use))
				continue;

			if (gc_worker_skip_ct(tmp)) {
				nf_ct_put(tmp);
				continue;
			}

			if (gc_worker_can_early_drop(tmp))
				nf_ct_kill(tmp);

			nf_ct_put(tmp);
		}

		/* could check get_nulls_value() here and restart if ct
		 * was moved to another chain.  But given gc is best-effort
		 * we will just continue with next hash slot.
		 */
		rcu_read_unlock();
		cond_resched_rcu_qs();
	} while (++buckets < goal);

	if (gc_work->exiting)
		return;

	/*
	 * Eviction will normally happen from the packet path, and not
	 * from this gc worker.
	 *
	 * This worker is only here to reap expired entries when system went
	 * idle after a busy period.
	 *
	 * The heuristics below are supposed to balance conflicting goals:
	 *
	 * 1. Minimize time until we notice a stale entry
	 * 2. Maximize scan intervals to not waste cycles
	 *
	 * Normally, expire ratio will be close to 0.
	 *
	 * As soon as a sizeable fraction of the entries have expired
	 * increase scan frequency.
	 */
	ratio = scanned ? expired_count * 100 / scanned : 0;
	if (ratio > GC_EVICT_RATIO) {
		gc_work->next_gc_run = min_interval;
	} else {
		unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

		BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);

		gc_work->next_gc_run += min_interval;
		if (gc_work->next_gc_run > max)
			gc_work->next_gc_run = max;
	}

	next_run = gc_work->next_gc_run;
	gc_work->last_bucket = i;
	gc_work->early_drop = false;
	queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
}

static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
	INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
	gc_work->next_gc_run = HZ;
	gc_work->exiting = false;
}

static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{
	struct nf_conn *ct;

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		if (!early_drop(net, hash)) {
			if (!conntrack_gc_work.early_drop)
				conntrack_gc_work.early_drop = true;
			atomic_dec(&net->ct.count);
			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
	if (ct == NULL)
		goto out;

	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* save hash for reusing when confirming */
	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
	ct->status = 0;
	write_pnet(&ct->ct_net, net);
	memset(&ct->__nfct_init_offset[0], 0,
	       offsetof(struct nf_conn, proto) -
	       offsetof(struct nf_conn, __nfct_init_offset[0]));

	nf_ct_zone_add(ct, zone);

	/* Because we use RCU lookups, we set ct_general.use to zero before
	 * this is inserted in any list.
	 */
	atomic_set(&ct->ct_general.use, 0);
	return ct;
out:
	atomic_dec(&net->ct.count);
	return ERR_PTR(-ENOMEM);
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_zone *zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* A freed object has refcnt == 0, that's
	 * the golden rule for SLAB_TYPESAFE_BY_RCU
	 */
	NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);

	nf_ct_ext_destroy(ct);
	nf_ct_ext_free(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
	smp_mb__before_atomic();
	atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);


/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp = NULL;
	const struct nf_conntrack_zone *zone;
	struct nf_conn_timeout *timeout_ext;
	struct nf_conntrack_zone tmp;
	unsigned int *timeouts;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
				  hash);
	if (IS_ERR(ct))
		return (struct nf_conntrack_tuple_hash *)ct;

	if (!nf_ct_add_synproxy(ct, tmpl)) {
		nf_conntrack_free(ct);
		return ERR_PTR(-ENOMEM);
	}

	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
	if (timeout_ext) {
		timeouts = nf_ct_timeout_data(timeout_ext);
		if (unlikely(!timeouts))
			timeouts = l4proto->get_timeouts(net);
	} else {
		timeouts = l4proto->get_timeouts(net);
	}

	if (!l4proto->new(ct, skb, dataoff, timeouts)) {
		nf_conntrack_free(ct);
		pr_debug("can't track with proto module\n");
		return NULL;
	}

	if (timeout_ext)
		nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
				      GFP_ATOMIC);

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_labels_ext_add(ct);

	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
			     ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	local_bh_disable();
	if (net->ct.expect_count) {
		spin_lock(&nf_conntrack_expect_lock);
		exp = nf_ct_find_expectation(net, zone, tuple);
		if (exp) {
			pr_debug("expectation arrives ct=%p exp=%p\n",
				 ct, exp);
			/* Welcome, Mr. Bond.  We've been expecting you... */
			__set_bit(IPS_EXPECTED_BIT, &ct->status);
			/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
			ct->master = exp->master;
			if (exp->helper) {
				help = nf_ct_helper_ext_add(ct, exp->helper,
							    GFP_ATOMIC);
				if (help)
					rcu_assign_pointer(help->helper, exp->helper);
			}

#ifdef CONFIG_NF_CONNTRACK_MARK
			ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
			ct->secmark = exp->master->secmark;
#endif
			NF_CT_STAT_INC(net, expect_new);
		}
		spin_unlock(&nf_conntrack_expect_lock);
	}
	if (!exp)
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);

	/* Now it is inserted into the unconfirmed list, bump refcount */
	nf_conntrack_get(&ct->ct_general);
	nf_ct_add_to_unconfirmed_list(ct);

	local_bh_enable();

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

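/* ctinfo selection in resolve_normal_ct(), in brief: packets matching the
 * reply direction of an entry get IP_CT_ESTABLISHED_REPLY; in the original
 * direction the value is IP_CT_ESTABLISHED once a reply has been seen,
 * IP_CT_RELATED for expected connections, and IP_CT_NEW otherwise.
 */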
/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto)
{
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_zone tmp;
	struct nf_conn *ct;
	u32 hash;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, net, &tuple, l3proto,
			     l4proto)) {
		pr_debug("Can't get tuple\n");
		return 0;
	}

	/* look for tuple match */
	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	hash = hash_conntrack_raw(&tuple, net);
	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
	if (!h) {
		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
				   skb, dataoff, hash);
		if (!h)
			return 0;
		if (IS_ERR(h))
			return PTR_ERR(h);
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		ctinfo = IP_CT_ESTABLISHED_REPLY;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("normal packet for %p\n", ct);
			ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("related packet for %p\n", ct);
			ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("new packet for %p\n", ct);
			ctinfo = IP_CT_NEW;
		}
	}
	nf_ct_set(skb, ct, ctinfo);
	return 0;
}

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	struct nf_conn *ct, *tmpl;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int *timeouts;
	unsigned int dataoff;
	u_int8_t protonum;
	int ret;

	tmpl = nf_ct_get(skb, &ctinfo);
	if (tmpl || ctinfo == IP_CT_UNTRACKED) {
		/* Previously seen (loopback or untracked)?  Ignore. */
		if ((tmpl && !nf_ct_is_template(tmpl)) ||
		     ctinfo == IP_CT_UNTRACKED) {
			NF_CT_STAT_INC_ATOMIC(net, ignore);
			return NF_ACCEPT;
		}
		skb->_nfct = 0;
	}

	/* rcu_read_lock()ed by nf_hook_thresh */
	l3proto = __nf_ct_l3proto_find(pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = -ret;
		goto out;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL) {
		ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			ret = -ret;
			goto out;
		}
		/* ICMP[v6] protocol trackers may assign one conntrack. */
		if (skb->_nfct)
			goto out;
	}
repeat:
	ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
				l3proto, l4proto);
	if (ret < 0) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = NF_DROP;
		goto out;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = NF_ACCEPT;
		goto out;
	}

	/* Decide what timeout policy we want to apply to this flow. */
	timeouts = nf_ct_timeout_lookup(net, ct, l4proto);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
	if (ret <= 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(&ct->ct_general);
		skb->_nfct = 0;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		/* Special case: TCP tracker reports an attempt to reopen a
		 * closed/aborted connection. We have to go back and create a
		 * fresh conntrack.
		 */
		if (ret == -NF_REPEAT)
			goto repeat;
		ret = -ret;
		goto out;
	}

	if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
	    !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
	if (tmpl)
		nf_ct_put(tmpl);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

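/* Timeouts are kept as an absolute deadline in jiffies: for a confirmed
 * entry ct->timeout is set to "now + extra_jiffies" below, and
 * nf_ct_is_expired() later compares it against the current jiffies value.
 * Unconfirmed entries keep the relative value until __nf_conntrack_confirm()
 * adds the confirmation timestamp.
 */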
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(skb);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (nf_ct_is_confirmed(ct))
		extra_jiffies += nfct_time_stamp;

	ct->timeout = extra_jiffies;
acct:
	if (do_acct)
		nf_ct_acct_update(ct, ctinfo, skb->len);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool nf_ct_kill_acct(struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     const struct sk_buff *skb)
{
	nf_ct_acct_update(ct, ctinfo, skb->len);

	return nf_ct_delete(ct, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_kill_acct);

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nf_ct_set(nskb, ct, ctinfo);
	nf_conntrack_get(skb_nfct(nskb));
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;
	spinlock_t *lockp;

	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
		local_bh_disable();
		nf_conntrack_lock(lockp);
		if (*bucket < nf_conntrack_htable_size) {
			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
					continue;
				ct = nf_ct_tuplehash_to_ctrack(h);
				if (iter(ct, data))
					goto found;
			}
		}
		spin_unlock(lockp);
		local_bh_enable();
		cond_resched();
	}

	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock(lockp);
	local_bh_enable();
	return ct;
}

static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
				  void *data, u32 portid, int report)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	might_sleep();

	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */

		nf_ct_delete(ct, portid, report);
		nf_ct_put(ct);
		cond_resched();
	}
}

struct iter_data {
	int (*iter)(struct nf_conn *i, void *data);
	void *data;
	struct net *net;
};

static int iter_net_only(struct nf_conn *i, void *data)
{
	struct iter_data *d = data;

	if (!net_eq(d->net, nf_ct_net(i)))
		return 0;

	return d->iter(i, d->data);
}

static void
__nf_ct_unconfirmed_destroy(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct nf_conntrack_tuple_hash *h;
		struct hlist_nulls_node *n;
		struct ct_pcpu *pcpu;

		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

		spin_lock_bh(&pcpu->lock);
		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
			struct nf_conn *ct;

			ct = nf_ct_tuplehash_to_ctrack(h);

			/* we cannot call iter() on unconfirmed list, the
			 * owning cpu can reallocate ct->ext at any time.
			 */
			set_bit(IPS_DYING_BIT, &ct->status);
		}
		spin_unlock_bh(&pcpu->lock);
		cond_resched();
	}
}

void nf_ct_iterate_cleanup_net(struct net *net,
			       int (*iter)(struct nf_conn *i, void *data),
			       void *data, u32 portid, int report)
{
	struct iter_data d;

	might_sleep();

	if (atomic_read(&net->ct.count) == 0)
		return;

	__nf_ct_unconfirmed_destroy(net);

	d.iter = iter;
	d.data = data;
	d.net = net;

	synchronize_net();

	nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);

/**
 * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
 * @iter: callback to invoke for each conntrack
 * @data: data to pass to @iter
 *
 * Like nf_ct_iterate_cleanup, but first marks conntracks on the
 * unconfirmed list as dying (so they will not be inserted into
 * main table).
 */
void
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct net *net;

	rtnl_lock();
	for_each_net(net) {
		if (atomic_read(&net->ct.count) == 0)
			continue;
		__nf_ct_unconfirmed_destroy(net);
	}
	rtnl_unlock();

	/* a conntrack could have been unlinked from unconfirmed list
	 * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
	 * This makes sure it's inserted into conntrack table.
	 */
	synchronize_net();

	nf_ct_iterate_cleanup(iter, data, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);

static int kill_all(struct nf_conn *i, void *data)
{
	return net_eq(nf_ct_net(i), data);
}

void nf_ct_free_hashtable(void *hash, unsigned int size)
{
	if (is_vmalloc_addr(hash))
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_cleanup_start(void)
{
	conntrack_gc_work.exiting = true;
	RCU_INIT_POINTER(ip_ct_attach, NULL);
}

void nf_conntrack_cleanup_end(void)
{
	RCU_INIT_POINTER(nf_ct_destroy, NULL);

	cancel_delayed_work_sync(&conntrack_gc_work.dwork);
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);

	nf_conntrack_proto_fini();
	nf_conntrack_seqadj_fini();
	nf_conntrack_labels_fini();
	nf_conntrack_helper_fini();
	nf_conntrack_timeout_fini();
	nf_conntrack_ecache_fini();
	nf_conntrack_tstamp_fini();
	nf_conntrack_acct_fini();
	nf_conntrack_expect_fini();

	kmem_cache_destroy(nf_conntrack_cachep);
}

/*
 * Mishearing the voices in his head, our hero wonders how he's
 * supposed to kill the mall.
 */
void nf_conntrack_cleanup_net(struct net *net)
{
	LIST_HEAD(single);

	list_add(&net->exit_list, &single);
	nf_conntrack_cleanup_net_list(&single);
}

void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
	int busy;
	struct net *net;

	/*
	 * This makes sure all current packets have passed through
	 * netfilter framework.  Roll on, two-stage module
	 * delete...
	 */
	synchronize_net();
i_see_dead_people:
	busy = 0;
	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_ct_iterate_cleanup(kill_all, net, 0, 0);
		if (atomic_read(&net->ct.count) != 0)
			busy = 1;
	}
	if (busy) {
		schedule();
		goto i_see_dead_people;
	}

	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_conntrack_proto_pernet_fini(net);
		nf_conntrack_helper_pernet_fini(net);
		nf_conntrack_ecache_pernet_fini(net);
		nf_conntrack_tstamp_pernet_fini(net);
		nf_conntrack_acct_pernet_fini(net);
		nf_conntrack_expect_pernet_fini(net);
		free_percpu(net->ct.stat);
		free_percpu(net->ct.pcpu_lists);
	}
}

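/* Sizing note (illustrative): the table is rounded up to a whole number of
 * pages of hlist_nulls_head slots.  With 4 KiB pages and 8-byte pointers
 * that is a multiple of 512 buckets, so a request for, say, 1000 buckets is
 * rounded up to 1024.
 */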
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
	struct hlist_nulls_head *hash;
	unsigned int nr_slots, i;
	size_t sz;

	if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
		return NULL;

	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));

	if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
		return NULL;

	sz = nr_slots * sizeof(struct hlist_nulls_head);
	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					get_order(sz));
	if (!hash)
		hash = vzalloc(sz);

	if (hash && nulls)
		for (i = 0; i < nr_slots; i++)
			INIT_HLIST_NULLS_HEAD(&hash[i], i);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

int nf_conntrack_hash_resize(unsigned int hashsize)
{
	int i, bucket;
	unsigned int old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, 1);
	if (!hash)
		return -ENOMEM;

	old_size = nf_conntrack_htable_size;
	if (old_size == hashsize) {
		nf_ct_free_hashtable(hash, hashsize);
		return 0;
	}

	local_bh_disable();
	nf_conntrack_all_lock();
	write_seqcount_begin(&nf_conntrack_generation);

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the locks.
	 */

	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
			h = hlist_nulls_entry(nf_conntrack_hash[i].first,
					      struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(nf_ct_net(ct),
						  &h->tuple, hashsize);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_hash = nf_conntrack_hash;

	nf_conntrack_hash = hash;
	nf_conntrack_htable_size = hashsize;

	write_seqcount_end(&nf_conntrack_generation);
	nf_conntrack_all_unlock();
	local_bh_enable();

	synchronize_net();
	nf_ct_free_hashtable(old_hash, old_size);
	return 0;
}

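/* The table size is exposed as the "hashsize" module parameter (see
 * module_param_call() below) and can be changed at runtime from the initial
 * netns, roughly:
 *
 *	echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which ends up in nf_conntrack_set_hashsize() and triggers a live resize.
 */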
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	unsigned int hashsize;
	int rc;

	if (current->nsproxy->net_ns != &init_net)
		return -EOPNOTSUPP;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	rc = kstrtouint(val, 0, &hashsize);
	if (rc)
		return rc;

	return nf_conntrack_hash_resize(hashsize);
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

static __always_inline unsigned int total_extension_size(void)
{
	/* remember to add new extensions below */
	BUILD_BUG_ON(NF_CT_EXT_NUM > 9);

	return sizeof(struct nf_ct_ext) +
	       sizeof(struct nf_conn_help)
#if IS_ENABLED(CONFIG_NF_NAT)
		+ sizeof(struct nf_conn_nat)
#endif
		+ sizeof(struct nf_conn_seqadj)
		+ sizeof(struct nf_conn_acct)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
		+ sizeof(struct nf_conntrack_ecache)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
		+ sizeof(struct nf_conn_tstamp)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		+ sizeof(struct nf_conn_timeout)
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
		+ sizeof(struct nf_conn_labels)
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
		+ sizeof(struct nf_conn_synproxy)
#endif
	;
}

int nf_conntrack_init_start(void)
{
	int max_factor = 8;
	int ret = -ENOMEM;
	int i;

	/* struct nf_ct_ext uses u8 to store offsets/size */
	BUILD_BUG_ON(total_extension_size() > 255u);

	seqcount_init(&nf_conntrack_generation);

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_conntrack_locks[i]);

	if (!nf_conntrack_htable_size) {
		/* Idea from tcp.c: use 1/16384 of memory.
		 * On i386: 32MB machine has 512 buckets.
		 * >= 1GB machines have 16384 buckets.
		 * >= 4GB machines have 65536 buckets.
		 */
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
			nf_conntrack_htable_size = 65536;
		else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}

	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
	if (!nf_conntrack_hash)
		return -ENOMEM;

	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						NFCT_INFOMASK + 1,
						SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
	if (!nf_conntrack_cachep)
		goto err_cachep;

	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto err_expect;

	ret = nf_conntrack_acct_init();
	if (ret < 0)
		goto err_acct;

	ret = nf_conntrack_tstamp_init();
	if (ret < 0)
		goto err_tstamp;

	ret = nf_conntrack_ecache_init();
	if (ret < 0)
		goto err_ecache;

	ret = nf_conntrack_timeout_init();
	if (ret < 0)
		goto err_timeout;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

	ret = nf_conntrack_labels_init();
	if (ret < 0)
		goto err_labels;

	ret = nf_conntrack_seqadj_init();
	if (ret < 0)
		goto err_seqadj;

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	conntrack_gc_work_init(&conntrack_gc_work);
	queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);

	return 0;

err_proto:
	nf_conntrack_seqadj_fini();
err_seqadj:
	nf_conntrack_labels_fini();
err_labels:
	nf_conntrack_helper_fini();
err_helper:
	nf_conntrack_timeout_fini();
err_timeout:
	nf_conntrack_ecache_fini();
err_ecache:
	nf_conntrack_tstamp_fini();
err_tstamp:
	nf_conntrack_acct_fini();
err_acct:
	nf_conntrack_expect_fini();
err_expect:
	kmem_cache_destroy(nf_conntrack_cachep);
err_cachep:
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
	return ret;
}

void nf_conntrack_init_end(void)
{
	/* For use by REJECT target */
	RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
	RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
}

/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)
#define TEMPLATE_NULLS_VAL	((1<<30)+2)

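/* These nulls values sit well above any possible bucket number, so a
 * lockless walker that ends on one of them can tell it finished on a per-cpu
 * unconfirmed/dying list rather than on a hash chain (whose nulls value is
 * the bucket index).
 */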
int nf_conntrack_init_net(struct net *net)
{
	int ret = -ENOMEM;
	int cpu;

	BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
	atomic_set(&net->ct.count, 0);

	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
	if (!net->ct.pcpu_lists)
		goto err_stat;

	for_each_possible_cpu(cpu) {
		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

		spin_lock_init(&pcpu->lock);
		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
	}

	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat)
		goto err_pcpu_lists;

	ret = nf_conntrack_expect_pernet_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_pernet_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_tstamp_pernet_init(net);
	if (ret < 0)
		goto err_tstamp;
	ret = nf_conntrack_ecache_pernet_init(net);
	if (ret < 0)
		goto err_ecache;
	ret = nf_conntrack_helper_pernet_init(net);
	if (ret < 0)
		goto err_helper;
	ret = nf_conntrack_proto_pernet_init(net);
	if (ret < 0)
		goto err_proto;
	return 0;

err_proto:
	nf_conntrack_helper_pernet_fini(net);
err_helper:
	nf_conntrack_ecache_pernet_fini(net);
err_ecache:
	nf_conntrack_tstamp_pernet_fini(net);
err_tstamp:
	nf_conntrack_acct_pernet_fini(net);
err_acct:
	nf_conntrack_expect_pernet_fini(net);
err_expect:
	free_percpu(net->ct.stat);
err_pcpu_lists:
	free_percpu(net->ct.pcpu_lists);
err_stat:
	return ret;
}