// SPDX-License-Identifier: GPL-2.0-only
/* Connection state tracking for netfilter. This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>

#include "nf_internals.h"

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
	struct delayed_work	dwork;
	u32			last_bucket;
	bool			exiting;
	bool			early_drop;
	long			next_gc_run;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
#define GC_MAX_BUCKETS_DIV	128u
/* upper bound of full table scan */
#define GC_MAX_SCAN_JIFFIES	(16u * HZ)
/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO	50u

static struct conntrack_gc_work conntrack_gc_work;

void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
	/* 1) Acquire the lock */
	spin_lock(lock);

	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
	 */
	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
		return;

	/* fast path failed, unlock */
	spin_unlock(lock);

	/* Slow path 1) get global lock */
	spin_lock(&nf_conntrack_locks_all_lock);

	/* Slow path 2) get the lock we want */
	spin_lock(lock);

	/* Slow path 3) release the global lock */
	spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	spin_unlock(&nf_conntrack_locks[h1]);
	if (h1 != h2)
		spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
				     unsigned int h2, unsigned int sequence)
{
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	if (h1 <= h2) {
		nf_conntrack_lock(&nf_conntrack_locks[h1]);
		if (h1 != h2)
			spin_lock_nested(&nf_conntrack_locks[h2],
					 SINGLE_DEPTH_NESTING);
	} else {
		nf_conntrack_lock(&nf_conntrack_locks[h2]);
		spin_lock_nested(&nf_conntrack_locks[h1],
				 SINGLE_DEPTH_NESTING);
	}
	if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
		nf_conntrack_double_unlock(h1, h2);
		return true;
	}
	return false;
}

static void nf_conntrack_all_lock(void)
{
	int i;

	spin_lock(&nf_conntrack_locks_all_lock);

	nf_conntrack_locks_all = true;

	for (i = 0; i < CONNTRACK_LOCKS; i++) {
		spin_lock(&nf_conntrack_locks[i]);

		/* This spin_unlock provides the "release" to ensure that
		 * nf_conntrack_locks_all==true is visible to everyone that
		 * acquired spin_lock(&nf_conntrack_locks[]).
		 */
		spin_unlock(&nf_conntrack_locks[i]);
	}
}

static void nf_conntrack_all_unlock(void)
{
	/* All prior stores must be complete before we clear
	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
	 * might observe the false value but not the entire
	 * critical section.
	 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
	 */
	smp_store_release(&nf_conntrack_locks_all, false);
	spin_unlock(&nf_conntrack_locks_all_lock);
}

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_t nf_conntrack_generation __read_mostly;
static unsigned int nf_conntrack_hash_rnd __read_mostly;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
			      const struct net *net)
{
	unsigned int n;
	u32 seed;

	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	return jhash2((u32 *)tuple, n, seed ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		      tuple->dst.protonum));
}

static u32 scale_hash(u32 hash)
{
	return reciprocal_scale(hash, nf_conntrack_htable_size);
}

static u32 __hash_conntrack(const struct net *net,
			    const struct nf_conntrack_tuple *tuple,
			    unsigned int size)
{
	return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

static u32 hash_conntrack(const struct net *net,
			  const struct nf_conntrack_tuple *tuple)
{
	return scale_hash(hash_conntrack_raw(tuple, net));
}

static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
				  unsigned int dataoff,
				  struct nf_conntrack_tuple *tuple)
{
	struct {
		__be16 sport;
		__be16 dport;
	} _inet_hdr, *inet_hdr;

	/* Actually only need first 4 bytes to get ports. */
	inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
	if (!inet_hdr)
		return false;

	tuple->src.u.udp.port = inet_hdr->sport;
	tuple->dst.u.udp.port = inet_hdr->dport;
	return true;
}

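/* Build the original-direction tuple from the packet: l3 addresses plus,
 * where the l4 protocol has them, ports or equivalent identifiers.
 * Returns false if the needed headers cannot be read from the skb.
 */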
static bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct net *net,
		struct nf_conntrack_tuple *tuple)
{
	unsigned int size;
	const __be32 *ap;
	__be32 _addrs[8];

	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	switch (l3num) {
	case NFPROTO_IPV4:
		nhoff += offsetof(struct iphdr, saddr);
		size = 2 * sizeof(__be32);
		break;
	case NFPROTO_IPV6:
		nhoff += offsetof(struct ipv6hdr, saddr);
		size = sizeof(_addrs);
		break;
	default:
		return true;
	}

	ap = skb_header_pointer(skb, nhoff, size, _addrs);
	if (!ap)
		return false;

	switch (l3num) {
	case NFPROTO_IPV4:
		tuple->src.u3.ip = ap[0];
		tuple->dst.u3.ip = ap[1];
		break;
	case NFPROTO_IPV6:
		memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
		memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
		break;
	}

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	switch (protonum) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
	case IPPROTO_ICMP:
		return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		return gre_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
	case IPPROTO_TCP:
	case IPPROTO_UDP: /* fallthrough */
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
	case IPPROTO_UDPLITE:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
	case IPPROTO_SCTP:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
	case IPPROTO_DCCP:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
	default:
		break;
	}

	return true;
}

static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    u_int8_t *protonum)
{
	int dataoff = -1;
	const struct iphdr *iph;
	struct iphdr _iph;

	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
	if (!iph)
		return -1;

	/* Conntrack defragments packets, we might still see fragments
	 * inside ICMP packets though.
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		return -1;

	dataoff = nhoff + (iph->ihl << 2);
	*protonum = iph->protocol;

	/* Check bogus IP headers */
	if (dataoff > skb->len) {
		pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
			 nhoff, iph->ihl << 2, skb->len);
		return -1;
	}
	return dataoff;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    u8 *protonum)
{
	int protoff = -1;
	unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
	__be16 frag_off;
	u8 nexthdr;

	if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
			  &nexthdr, sizeof(nexthdr)) != 0) {
		pr_debug("can't get nexthdr\n");
		return -1;
	}
	protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
	/*
	 * (protoff == skb->len) means the packet has no data, just
	 * IPv6 and possibly extension headers, but it is tracked anyway
	 */
	if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
		pr_debug("can't find proto in pkt\n");
		return -1;
	}

	*protonum = nexthdr;
	return protoff;
}
#endif

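/* Return the offset of the l4 header within the skb and store the l4
 * protocol number in *l4num, or return -1 if it cannot be determined.
 */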
static int get_l4proto(const struct sk_buff *skb,
		       unsigned int nhoff, u8 pf, u8 *l4num)
{
	switch (pf) {
	case NFPROTO_IPV4:
		return ipv4_get_l4proto(skb, nhoff, l4num);
#if IS_ENABLED(CONFIG_IPV6)
	case NFPROTO_IPV6:
		return ipv6_get_l4proto(skb, nhoff, l4num);
#endif
	default:
		*l4num = 0;
		break;
	}
	return -1;
}

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num,
		       struct net *net, struct nf_conntrack_tuple *tuple)
{
	u8 protonum;
	int protoff;

	protoff = get_l4proto(skb, nhoff, l3num, &protonum);
	if (protoff <= 0)
		return false;

	return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;

	switch (orig->src.l3num) {
	case NFPROTO_IPV4:
		inverse->src.u3.ip = orig->dst.u3.ip;
		inverse->dst.u3.ip = orig->src.u3.ip;
		break;
	case NFPROTO_IPV6:
		inverse->src.u3.in6 = orig->dst.u3.in6;
		inverse->dst.u3.in6 = orig->src.u3.in6;
		break;
	default:
		break;
	}

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;

	switch (orig->dst.protonum) {
	case IPPROTO_ICMP:
		return nf_conntrack_invert_icmp_tuple(inverse, orig);
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
#endif
	}

	inverse->src.u.all = orig->dst.u.all;
	inverse->dst.u.all = orig->src.u.all;
	return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

/* Generate an almost-unique pseudo-id for a given conntrack.
 *
 * intentionally doesn't re-use any of the seeds used for hash
 * table location, we assume id gets exposed to userspace.
 *
 * Following nf_conn items do not change throughout lifetime
 * of the nf_conn:
 *
 * 1. nf_conn address
 * 2. nf_conn->master address (normally NULL)
 * 3. the associated net namespace
 * 4. the original direction tuple
 */
u32 nf_ct_get_id(const struct nf_conn *ct)
{
	static __read_mostly siphash_key_t ct_id_seed;
	unsigned long a, b, c, d;

	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));

	a = (unsigned long)ct;
	b = (unsigned long)ct->master;
	c = (unsigned long)nf_ct_net(ct);
	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
				   &ct_id_seed);
#ifdef CONFIG_64BIT
	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
#else
	return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_get_id);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) dying list */
	ct->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &pcpu->dying);
	spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) unconfirmed list */
	ct->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &pcpu->unconfirmed);
	spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* We overload first tuple to link into unconfirmed or dying list.*/
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&pcpu->lock);
}

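/* skb->_nfct packs the ctinfo value into the low NFCT_INFOMASK bits of the
 * conntrack pointer, so a template nf_conn must be aligned with those bits
 * clear; nf_ct_tmpl_alloc() over-allocates and aligns by hand when the
 * allocator cannot guarantee this on its own.
 */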
#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
				 const struct nf_conntrack_zone *zone,
				 gfp_t flags)
{
	struct nf_conn *tmpl, *p;

	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
		tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
		if (!tmpl)
			return NULL;

		p = tmpl;
		tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
		if (tmpl != p) {
			tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
			tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
		}
	} else {
		tmpl = kzalloc(sizeof(*tmpl), flags);
		if (!tmpl)
			return NULL;
	}

	tmpl->status = IPS_TEMPLATE;
	write_pnet(&tmpl->ct_net, net);
	nf_ct_zone_add(tmpl, zone);
	atomic_set(&tmpl->ct_general.use, 0);

	return tmpl;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
	nf_ct_ext_destroy(tmpl);

	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
	else
		kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);

static void destroy_gre_conntrack(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CT_PROTO_GRE
	struct nf_conn *master = ct->master;

	if (master)
		nf_ct_gre_keymap_destroy(master);
#endif
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;

	pr_debug("destroy_conntrack(%p)\n", ct);
	WARN_ON(atomic_read(&nfct->use) != 0);

	if (unlikely(nf_ct_is_template(ct))) {
		nf_ct_tmpl_free(ct);
		return;
	}

	if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
		destroy_gre_conntrack(ct);

	local_bh_disable();
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too.
	 */
	nf_ct_remove_expectations(ct);

	nf_ct_del_from_dying_or_unconfirmed_list(ct);

	local_bh_enable();

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, reply_hash;
	unsigned int sequence;

	nf_ct_helper_destroy(ct);

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	clean_from_lists(ct);
	nf_conntrack_double_unlock(hash, reply_hash);

	nf_ct_add_to_dying_list(ct);

	local_bh_enable();
}

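/* Kill a conntrack entry: mark it dying, record the stop timestamp and
 * deliver the destroy event. If event delivery fails, teardown is deferred
 * to the ecache worker, which drops the reference on redelivery.
 */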
bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
	struct nf_conn_tstamp *tstamp;

	if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
		return false;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_get_real_ns();

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				      portid, report) < 0) {
		/* destroy event was not delivered. nf_ct_put will
		 * be done by event cache worker on redelivery.
		 */
		nf_ct_delete_from_lists(ct);
		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
		return false;
	}

	nf_conntrack_ecache_work(nf_ct_net(ct));
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
	return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
		const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	/* A conntrack can be recreated with the equal tuple,
	 * so we need to check that the conntrack is confirmed
	 */
	return nf_ct_tuple_equal(tuple, &h->tuple) &&
	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
	       nf_ct_is_confirmed(ct) &&
	       net_eq(net, nf_ct_net(ct));
}

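/* Full identity check: both directions' tuples, both zones and the net
 * namespace must agree. Used by clash resolution to recognize two entries
 * that describe the very same flow.
 */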
static inline bool
nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
{
	return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
	       nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
				 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
	       net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
}

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
	if (!atomic_inc_not_zero(&ct->ct_general.use))
		return;

	if (nf_ct_should_gc(ct))
		nf_ct_kill(ct);

	nf_ct_put(ct);
}

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_head *ct_hash;
	struct hlist_nulls_node *n;
	unsigned int bucket, hsize;

begin:
	nf_conntrack_get_ht(&ct_hash, &hsize);
	bucket = reciprocal_scale(hash, hsize);

	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
		struct nf_conn *ct;

		ct = nf_ct_tuplehash_to_ctrack(h);
		if (nf_ct_is_expired(ct)) {
			nf_ct_gc_expired(ct);
			continue;
		}

		if (nf_ct_key_equal(h, tuple, zone, net))
			return h;
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != bucket) {
		NF_CT_STAT_INC_ATOMIC(net, search_restart);
		goto begin;
	}

	return NULL;
}

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();

	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		/* We have a candidate that matches the tuple we're interested
		 * in, try to obtain a reference and re-check tuple
		 */
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
			if (likely(nf_ct_key_equal(h, tuple, zone, net)))
				goto found;

			/* TYPESAFE_BY_RCU recycled the candidate */
			nf_ct_put(ct);
		}

		h = NULL;
	}
found:
	rcu_read_unlock();

	return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	return __nf_conntrack_find_get(net, zone, tuple,
				       hash_conntrack_raw(tuple, net));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

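/* Typical caller pattern, as a sketch (building the tuple is the caller's
 * job): h = nf_conntrack_find_get(net, zone, &tuple); a non-NULL result is
 * returned with a reference held, which the caller drops via
 * nf_ct_put(nf_ct_tuplehash_to_ctrack(h)) once done.
 */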
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int reply_hash)
{
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &nf_conntrack_hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &nf_conntrack_hash[reply_hash]);
}

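/* Insert a not-yet-hashed conntrack after checking that neither direction's
 * tuple is present already; returns -EEXIST if a concurrent insertion won.
 */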
int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);
	unsigned int hash, reply_hash;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int sequence;

	zone = nf_ct_zone(ct);

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* See if there's one in the list already, including reverse */
	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				    zone, net))
			goto out;

	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				    zone, net))
			goto out;

	smp_wmb();
	/* The caller holds a reference to this object */
	atomic_set(&ct->ct_general.use, 2);
	__nf_conntrack_hash_insert(ct, hash, reply_hash);
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert);
	local_bh_enable();
	return 0;

out:
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert_failed);
	local_bh_enable();
	return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

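/* Charge one packet of 'len' bytes to the flow's per-direction counters,
 * if the accounting extension is enabled.
 */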
static inline void nf_ct_acct_update(struct nf_conn *ct,
				     enum ip_conntrack_info ctinfo,
				     unsigned int len)
{
	struct nf_conn_acct *acct;

	acct = nf_conn_acct_find(ct);
	if (acct) {
		struct nf_conn_counter *counter = acct->counter;

		atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
		atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
	}
}

static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			     const struct nf_conn *loser_ct)
{
	struct nf_conn_acct *acct;

	acct = nf_conn_acct_find(loser_ct);
	if (acct) {
		struct nf_conn_counter *counter = acct->counter;
		unsigned int bytes;

		/* u32 should be fine since we must have seen one packet. */
		bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
		nf_ct_acct_update(ct, ctinfo, bytes);
	}
}

/* Resolve race on insertion if this protocol allows this. */
static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
			       enum ip_conntrack_info ctinfo,
			       struct nf_conntrack_tuple_hash *h)
{
	/* This is the conntrack entry already in hashes that won race. */
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
	const struct nf_conntrack_l4proto *l4proto;
	enum ip_conntrack_info oldinfo;
	struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (l4proto->allow_clash &&
	    !nf_ct_is_dying(ct) &&
	    atomic_inc_not_zero(&ct->ct_general.use)) {
		if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
		    nf_ct_match(ct, loser_ct)) {
			nf_ct_acct_merge(ct, ctinfo, loser_ct);
			nf_conntrack_put(&loser_ct->ct_general);
			nf_ct_set(skb, ct, oldinfo);
			return NF_ACCEPT;
		}
		nf_ct_put(ct);
	}
	NF_CT_STAT_INC(net, drop);
	return NF_DROP;
}

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	const struct nf_conntrack_zone *zone;
	unsigned int hash, reply_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conn_tstamp *tstamp;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	unsigned int sequence;
	int ret = NF_DROP;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction. Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	local_bh_disable();

	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		/* reuse the hash saved before */
		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
		hash = scale_hash(hash);
		reply_hash = hash_conntrack(net,
					    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* We're not in hash table, and we refuse to set up related
	 * connections for unconfirmed conns. But packet copies and
	 * REJECT will give spurious warnings here.
	 */

	/* Another skb with the same unconfirmed conntrack may
	 * win the race. This may happen for bridge(br_flood) or
	 * broadcast/multicast packets, which do skb_clone with an
	 * unconfirmed conntrack.
	 */
	if (unlikely(nf_ct_is_confirmed(ct))) {
		WARN_ON_ONCE(1);
		nf_conntrack_double_unlock(hash, reply_hash);
		local_bh_enable();
		return NF_DROP;
	}

	pr_debug("Confirming conntrack %p\n", ct);
	/* We have to check the DYING flag after unlink to prevent
	 * a race against nf_ct_get_next_corpse() possibly called from
	 * user context, else we insert an already 'dead' hash, blocking
	 * further use of that particular connection -JM.
	 */
	nf_ct_del_from_dying_or_unconfirmed_list(ct);

	if (unlikely(nf_ct_is_dying(ct))) {
		nf_ct_add_to_dying_list(ct);
		goto dying;
	}

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash. If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				    zone, net))
			goto out;

	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				    zone, net))
			goto out;

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout += nfct_time_stamp;
	atomic_inc(&ct->ct_general.use);
	ct->status |= IPS_CONFIRMED;

	/* set conntrack timestamp, if enabled. */
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp)
		tstamp->start = ktime_get_real_ns();

	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, reply_hash);
	nf_conntrack_double_unlock(hash, reply_hash);
	local_bh_enable();

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	nf_ct_add_to_dying_list(ct);
	ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
dying:
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert_failed);
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_head *ct_hash;
	unsigned int hash, hsize;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;

	zone = nf_ct_zone(ignored_conntrack);

	rcu_read_lock();
 begin:
	nf_conntrack_get_ht(&ct_hash, &hsize);
	hash = __hash_conntrack(net, tuple, hsize);

	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);

		if (ct == ignored_conntrack)
			continue;

		if (nf_ct_is_expired(ct)) {
			nf_ct_gc_expired(ct);
			continue;
		}

		if (nf_ct_key_equal(h, tuple, zone, net)) {
			/* Tuple is taken already, so caller will need to find
			 * a new source port to use.
			 *
			 * Only exception:
			 * If the *original tuples* are identical, then both
			 * conntracks refer to the same flow.
			 * This is a rare situation, it can occur e.g. when
			 * more than one UDP packet is sent from same socket
			 * in different threads.
			 *
			 * Let nf_ct_resolve_clash() deal with this later.
			 */
			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
				continue;

			NF_CT_STAT_INC_ATOMIC(net, found);
			rcu_read_unlock();
			return 1;
		}
	}

	if (get_nulls_value(n) != hash) {
		NF_CT_STAT_INC_ATOMIC(net, search_restart);
		goto begin;
	}

	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

PM
1113#define NF_CT_EVICTION_RANGE 8
1114
9fb9cbb1
YK
1115/* There's a small race here where we may free a just-assured
1116 connection. Too bad: we're in trouble anyway. */
242922a0
FW
1117static unsigned int early_drop_list(struct net *net,
1118 struct hlist_nulls_head *head)
9fb9cbb1 1119{
9fb9cbb1 1120 struct nf_conntrack_tuple_hash *h;
ea781f19 1121 struct hlist_nulls_node *n;
242922a0
FW
1122 unsigned int drops = 0;
1123 struct nf_conn *tmp;
3e86638e 1124
242922a0
FW
1125 hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
1126 tmp = nf_ct_tuplehash_to_ctrack(h);
9fb9cbb1 1127
90964016
PNA
1128 if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
1129 continue;
1130
f330a7fd
FW
1131 if (nf_ct_is_expired(tmp)) {
1132 nf_ct_gc_expired(tmp);
1133 continue;
1134 }
1135
242922a0
FW
1136 if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
1137 !net_eq(nf_ct_net(tmp), net) ||
1138 nf_ct_is_dying(tmp))
1139 continue;
76507f69 1140
242922a0
FW
1141 if (!atomic_inc_not_zero(&tmp->ct_general.use))
1142 continue;
76507f69 1143
242922a0 1144 /* kill only if still in same netns -- might have moved due to
5f0d5a3a 1145 * SLAB_TYPESAFE_BY_RCU rules.
242922a0
FW
1146 *
1147 * We steal the timer reference. If that fails timer has
1148 * already fired or someone else deleted it. Just drop ref
1149 * and move to next entry.
1150 */
1151 if (net_eq(nf_ct_net(tmp), net) &&
1152 nf_ct_is_confirmed(tmp) &&
242922a0
FW
1153 nf_ct_delete(tmp, 0, 0))
1154 drops++;
1155
1156 nf_ct_put(tmp);
9fb9cbb1 1157 }
3e86638e 1158
242922a0
FW
1159 return drops;
1160}
9fb9cbb1 1161
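/* Table full: scan up to NF_CT_EVICTION_RANGE buckets, starting at the one
 * the new entry hashes to, and evict droppable (unassured) entries found
 * there. Returns true if at least one entry was dropped.
 */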
static noinline int early_drop(struct net *net, unsigned int hash)
{
	unsigned int i, bucket;

	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
		struct hlist_nulls_head *ct_hash;
		unsigned int hsize, drops;

		rcu_read_lock();
		nf_conntrack_get_ht(&ct_hash, &hsize);
		if (!i)
			bucket = reciprocal_scale(hash, hsize);
		else
			bucket = (bucket + 1) % hsize;

		drops = early_drop_list(net, &ct_hash[bucket]);
		rcu_read_unlock();

		if (drops) {
			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
			return true;
		}
	}

	return false;
}

static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
	return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}

static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;

	if (!test_bit(IPS_ASSURED_BIT, &ct->status))
		return true;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
		return true;

	return false;
}

#define	DAY	(86400 * HZ)

/* Set an arbitrary timeout large enough not to ever expire, this saves
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
 */
static void nf_ct_offload_timeout(struct nf_conn *ct)
{
	if (nf_ct_expires(ct) < DAY / 2)
		ct->timeout = nfct_time_stamp + DAY;
}

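/* Background garbage collection: scan a bounded slice of the table per run,
 * reaping expired entries; when the table exceeds roughly 95% of
 * nf_conntrack_max, also early-drop entries the l4 tracker permits.
 * The next run is scheduled adaptively, sooner when many entries were
 * found expired and later when the table is quiet.
 */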
static void gc_worker(struct work_struct *work)
{
	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
	unsigned int i, goal, buckets = 0, expired_count = 0;
	unsigned int nf_conntrack_max95 = 0;
	struct conntrack_gc_work *gc_work;
	unsigned int ratio, scanned = 0;
	unsigned long next_run;

	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

	goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
	i = gc_work->last_bucket;
	if (gc_work->early_drop)
		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

	do {
		struct nf_conntrack_tuple_hash *h;
		struct hlist_nulls_head *ct_hash;
		struct hlist_nulls_node *n;
		unsigned int hashsz;
		struct nf_conn *tmp;

		i++;
		rcu_read_lock();

		nf_conntrack_get_ht(&ct_hash, &hashsz);
		if (i >= hashsz)
			i = 0;

		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
			struct net *net;

			tmp = nf_ct_tuplehash_to_ctrack(h);

			scanned++;
			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
				nf_ct_offload_timeout(tmp);
				continue;
			}

			if (nf_ct_is_expired(tmp)) {
				nf_ct_gc_expired(tmp);
				expired_count++;
				continue;
			}

			if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
				continue;

			net = nf_ct_net(tmp);
			if (atomic_read(&net->ct.count) < nf_conntrack_max95)
				continue;

			/* need to take reference to avoid possible races */
			if (!atomic_inc_not_zero(&tmp->ct_general.use))
				continue;

			if (gc_worker_skip_ct(tmp)) {
				nf_ct_put(tmp);
				continue;
			}

			if (gc_worker_can_early_drop(tmp))
				nf_ct_kill(tmp);

			nf_ct_put(tmp);
		}

		/* could check get_nulls_value() here and restart if ct
		 * was moved to another chain. But given gc is best-effort
		 * we will just continue with next hash slot.
		 */
		rcu_read_unlock();
		cond_resched();
	} while (++buckets < goal);

	if (gc_work->exiting)
		return;

	/*
	 * Eviction will normally happen from the packet path, and not
	 * from this gc worker.
	 *
	 * This worker is only here to reap expired entries when system went
	 * idle after a busy period.
	 *
	 * The heuristics below are supposed to balance conflicting goals:
	 *
	 * 1. Minimize time until we notice a stale entry
	 * 2. Maximize scan intervals to not waste cycles
	 *
	 * Normally, expire ratio will be close to 0.
	 *
	 * As soon as a sizeable fraction of the entries have expired
	 * increase scan frequency.
	 */
	ratio = scanned ? expired_count * 100 / scanned : 0;
	if (ratio > GC_EVICT_RATIO) {
		gc_work->next_gc_run = min_interval;
	} else {
		unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

		BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);

		gc_work->next_gc_run += min_interval;
		if (gc_work->next_gc_run > max)
			gc_work->next_gc_run = max;
	}

	next_run = gc_work->next_gc_run;
	gc_work->last_bucket = i;
	gc_work->early_drop = false;
	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}

static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
	gc_work->next_gc_run = HZ;
	gc_work->exiting = false;
}

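/* Common allocation path. Lookups are lockless (SLAB_TYPESAFE_BY_RCU),
 * so the refcount starts at zero and the object only becomes visible as
 * live once a reference is taken: at confirm time, or explicitly in
 * nf_conntrack_hash_check_insert().
 */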
static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{
	struct nf_conn *ct;

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		if (!early_drop(net, hash)) {
			if (!conntrack_gc_work.early_drop)
				conntrack_gc_work.early_drop = true;
			atomic_dec(&net->ct.count);
			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
	if (ct == NULL)
		goto out;

	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* save hash for reusing when confirming */
	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
	ct->status = 0;
	ct->timeout = 0;
	write_pnet(&ct->ct_net, net);
	memset(&ct->__nfct_init_offset[0], 0,
	       offsetof(struct nf_conn, proto) -
	       offsetof(struct nf_conn, __nfct_init_offset[0]));

	nf_ct_zone_add(ct, zone);

	/* Because we use RCU lookups, we set ct_general.use to zero before
	 * this is inserted in any list.
	 */
	atomic_set(&ct->ct_general.use, 0);
	return ct;
out:
	atomic_dec(&net->ct.count);
	return ERR_PTR(-ENOMEM);
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_zone *zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* A freed object has refcnt == 0, that's
	 * the golden rule for SLAB_TYPESAFE_BY_RCU
	 */
	WARN_ON(atomic_read(&ct->ct_general.use) != 0);

	nf_ct_ext_destroy(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
	smp_mb__before_atomic();
	atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress. Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp = NULL;
	const struct nf_conntrack_zone *zone;
	struct nf_conn_timeout *timeout_ext;
	struct nf_conntrack_zone tmp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
				  hash);
	if (IS_ERR(ct))
		return (struct nf_conntrack_tuple_hash *)ct;

	if (!nf_ct_add_synproxy(ct, tmpl)) {
		nf_conntrack_free(ct);
		return ERR_PTR(-ENOMEM);
	}

	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;

	if (timeout_ext)
		nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
				      GFP_ATOMIC);

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_labels_ext_add(ct);

	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
			     ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	local_bh_disable();
	if (net->ct.expect_count) {
		spin_lock(&nf_conntrack_expect_lock);
		exp = nf_ct_find_expectation(net, zone, tuple);
		if (exp) {
			pr_debug("expectation arrives ct=%p exp=%p\n",
				 ct, exp);
			/* Welcome, Mr. Bond. We've been expecting you... */
			__set_bit(IPS_EXPECTED_BIT, &ct->status);
			/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
			ct->master = exp->master;
			if (exp->helper) {
				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
				if (help)
					rcu_assign_pointer(help->helper, exp->helper);
			}

#ifdef CONFIG_NF_CONNTRACK_MARK
			ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
			ct->secmark = exp->master->secmark;
#endif
			NF_CT_STAT_INC(net, expect_new);
		}
		spin_unlock(&nf_conntrack_expect_lock);
	}
	if (!exp)
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);

	/* Now it is inserted into the unconfirmed list, bump refcount */
	nf_conntrack_get(&ct->ct_general);
	nf_ct_add_to_unconfirmed_list(ct);

	local_bh_enable();

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int8_t protonum,
		  const struct nf_hook_state *state)
{
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_zone tmp;
	struct nf_conn *ct;
	u32 hash;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, state->pf, protonum, state->net,
			     &tuple)) {
		pr_debug("Can't get tuple\n");
		return 0;
	}

	/* look for tuple match */
	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	hash = hash_conntrack_raw(&tuple, state->net);
	h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
	if (!h) {
		h = init_conntrack(state->net, tmpl, &tuple,
				   skb, dataoff, hash);
		if (!h)
			return 0;
		if (IS_ERR(h))
			return PTR_ERR(h);
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		ctinfo = IP_CT_ESTABLISHED_REPLY;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("normal packet for %p\n", ct);
			ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("related packet for %p\n", ct);
			ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("new packet for %p\n", ct);
			ctinfo = IP_CT_NEW;
		}
	}
	nf_ct_set(skb, ct, ctinfo);
	return 0;
}

/*
 * icmp packets need special treatment to handle error messages that are
 * related to a connection.
 *
 * Callers need to check if skb has a conntrack assigned when this
 * helper returns; in such case skb belongs to an already known connection.
 */
static unsigned int __cold
nf_conntrack_handle_icmp(struct nf_conn *tmpl,
			 struct sk_buff *skb,
			 unsigned int dataoff,
			 u8 protonum,
			 const struct nf_hook_state *state)
{
	int ret;

	if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
		ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
#if IS_ENABLED(CONFIG_IPV6)
	else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
		ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
#endif
	else
		return NF_ACCEPT;

	if (ret <= 0) {
		NF_CT_STAT_INC_ATOMIC(state->net, error);
		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
	}

	return ret;
}

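/* Fallback for protocols without a dedicated tracker: refresh the timeout,
 * taken from the timeout extension if one is attached, else from the
 * per-netns generic default, and accept the packet.
 */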
static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
			  enum ip_conntrack_info ctinfo)
{
	const unsigned int *timeout = nf_ct_timeout_lookup(ct);

	if (!timeout)
		timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;

	nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
	return NF_ACCEPT;
}

/* Returns verdict for packet, or -1 for invalid. */
static int nf_conntrack_handle_packet(struct nf_conn *ct,
				      struct sk_buff *skb,
				      unsigned int dataoff,
				      enum ip_conntrack_info ctinfo,
				      const struct nf_hook_state *state)
{
	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		return nf_conntrack_tcp_packet(ct, skb, dataoff,
					       ctinfo, state);
	case IPPROTO_UDP:
		return nf_conntrack_udp_packet(ct, skb, dataoff,
					       ctinfo, state);
	case IPPROTO_ICMP:
		return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
	case IPPROTO_UDPLITE:
		return nf_conntrack_udplite_packet(ct, skb, dataoff,
						   ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
	case IPPROTO_SCTP:
		return nf_conntrack_sctp_packet(ct, skb, dataoff,
						ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
	case IPPROTO_DCCP:
		return nf_conntrack_dccp_packet(ct, skb, dataoff,
						ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		return nf_conntrack_gre_packet(ct, skb, dataoff,
					       ctinfo, state);
#endif
	}

	return generic_packet(ct, skb, ctinfo);
}

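/* Main conntrack hook: resolve (or create) the conntrack entry for this
 * skb, run the l4 tracker on it and cache the resulting events. Runs under
 * the rcu read lock taken by nf_hook_thresh.
 */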
9fb9cbb1 1666unsigned int
93e66024 1667nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
9fb9cbb1 1668{
9fb9cbb1 1669 enum ip_conntrack_info ctinfo;
93e66024 1670 struct nf_conn *ct, *tmpl;
9fb9cbb1 1671 u_int8_t protonum;
6816d931 1672 int dataoff, ret;
9fb9cbb1 1673
97a6ad13 1674 tmpl = nf_ct_get(skb, &ctinfo);
cc41c84b 1675 if (tmpl || ctinfo == IP_CT_UNTRACKED) {
b2a15a60 1676 /* Previously seen (loopback or untracked)? Ignore. */
cc41c84b
FW
1677 if ((tmpl && !nf_ct_is_template(tmpl)) ||
1678 ctinfo == IP_CT_UNTRACKED) {
93e66024 1679 NF_CT_STAT_INC_ATOMIC(state->net, ignore);
b2a15a60
PM
1680 return NF_ACCEPT;
1681 }
a9e419dc 1682 skb->_nfct = 0;
9fb9cbb1
YK
1683 }
1684
e2361cb9 1685 /* rcu_read_lock()ed by nf_hook_thresh */
93e66024 1686 dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
6816d931 1687 if (dataoff <= 0) {
25985edc 1688 pr_debug("not prepared to track yet or error occurred\n");
93e66024
FW
1689 NF_CT_STAT_INC_ATOMIC(state->net, error);
1690 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
6816d931 1691 ret = NF_ACCEPT;
b2a15a60 1692 goto out;
9fb9cbb1
YK
1693 }
1694
6fe78fa4
FW
1695 if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
1696 ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
1697 protonum, state);
74c51a14 1698 if (ret <= 0) {
b2a15a60
PM
1699 ret = -ret;
1700 goto out;
74c51a14 1701 }
88ed01d1 1702 /* ICMP[v6] protocol trackers may assign one conntrack. */
a9e419dc 1703 if (skb->_nfct)
88ed01d1 1704 goto out;
9fb9cbb1 1705 }
08733a0c 1706repeat:
93e66024 1707 ret = resolve_normal_ct(tmpl, skb, dataoff,
303e0c55 1708 protonum, state);
fc09e4a7 1709 if (ret < 0) {
9fb9cbb1 1710 /* Too stressed to deal. */
93e66024 1711 NF_CT_STAT_INC_ATOMIC(state->net, drop);
b2a15a60
PM
1712 ret = NF_DROP;
1713 goto out;
9fb9cbb1
YK
1714 }
1715
fc09e4a7
FW
1716 ct = nf_ct_get(skb, &ctinfo);
1717 if (!ct) {
1718 /* Not a valid part of a connection */
93e66024 1719 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
fc09e4a7
FW
1720 ret = NF_ACCEPT;
1721 goto out;
1722 }
9fb9cbb1 1723
44fb87f6 1724 ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
ec8d5409 1725 if (ret <= 0) {
9fb9cbb1
YK
1726 /* Invalid: inverse of the return code tells
1727 * the netfilter core what to do */
0d53778e 1728 pr_debug("nf_conntrack_in: Can't track with proto module\n");
97a6ad13 1729 nf_conntrack_put(&ct->ct_general);
a9e419dc 1730 skb->_nfct = 0;
93e66024 1731 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
7d1e0459 1732 if (ret == -NF_DROP)
93e66024 1733 NF_CT_STAT_INC_ATOMIC(state->net, drop);
56a62e22
AB
1734 /* Special case: TCP tracker reports an attempt to reopen a
1735 * closed/aborted connection. We have to go back and create a
1736 * fresh conntrack.
1737 */
1738 if (ret == -NF_REPEAT)
1739 goto repeat;
b2a15a60
PM
1740 ret = -ret;
1741 goto out;
9fb9cbb1
YK
1742 }
1743
fc09e4a7
FW
1744 if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
1745 !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
858b3133 1746 nf_conntrack_event_cache(IPCT_REPLY, ct);
b2a15a60 1747out:
56a62e22
AB
1748 if (tmpl)
1749 nf_ct_put(tmpl);
9fb9cbb1
YK
1750
1751 return ret;
1752}
13b18339 1753EXPORT_SYMBOL_GPL(nf_conntrack_in);
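For context, nf_conntrack_in() is not called by the stack directly; thin per-family wrappers register it as a netfilter hook at conntrack priority. A sketch simplified from nf_conntrack_proto.c (not part of this file):

static unsigned int ipv4_conntrack_in(void *priv, struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	return nf_conntrack_in(skb, state);
}

static const struct nf_hook_ops example_conntrack_op = {
	.hook		= ipv4_conntrack_in,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_CONNTRACK,
};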
9fb9cbb1 1754
5b1158e9
JK
1755/* Alter reply tuple (maybe alter helper). This is for NAT, and is
1756 implicitly racy: see __nf_conntrack_confirm */
1757void nf_conntrack_alter_reply(struct nf_conn *ct,
1758 const struct nf_conntrack_tuple *newreply)
1759{
1760 struct nf_conn_help *help = nfct_help(ct);
1761
5b1158e9 1762 /* Should be unconfirmed, so not in hash table yet */
44d6e2f2 1763 WARN_ON(nf_ct_is_confirmed(ct));
5b1158e9 1764
0d53778e 1765 pr_debug("Altering reply tuple of %p to ", ct);
3c9fba65 1766 nf_ct_dump_tuple(newreply);
5b1158e9
JK
1767
1768 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
ef1a5a50 1769 if (ct->master || (help && !hlist_empty(&help->expectations)))
c52fbb41 1770 return;
ceceae1b 1771
c52fbb41 1772 rcu_read_lock();
b2a15a60 1773 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
c52fbb41 1774 rcu_read_unlock();
5b1158e9 1775}
13b18339 1776EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
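A hedged sketch of the typical caller (the shape nf_nat_setup_info() in nf_nat_core.c uses): once NAT has picked a new original-direction tuple, the reply tuple is simply its inverse.

static void example_install_nat_reply(struct nf_conn *ct,
				      const struct nf_conntrack_tuple *new_orig)
{
	struct nf_conntrack_tuple reply;

	/* reply direction is the inverse of the (mangled) original tuple */
	nf_ct_invert_tuple(&reply, new_orig);
	nf_conntrack_alter_reply(ct, &reply);
}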
5b1158e9 1777
9fb9cbb1
YK
1778/* Refresh conntrack for this many jiffies and do accounting if do_acct is true */
1779void __nf_ct_refresh_acct(struct nf_conn *ct,
1780 enum ip_conntrack_info ctinfo,
1781 const struct sk_buff *skb,
cc169213
FW
1782 u32 extra_jiffies,
1783 bool do_acct)
9fb9cbb1 1784{
997ae831 1785 /* Only update if this is not a fixed timeout */
47d95045
PM
1786 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1787 goto acct;
997ae831 1788
9fb9cbb1 1789 /* If not in hash table, timer will not be active yet */
f330a7fd
FW
1790 if (nf_ct_is_confirmed(ct))
1791 extra_jiffies += nfct_time_stamp;
9fb9cbb1 1792
e37542ba
ED
1793 if (READ_ONCE(ct->timeout) != extra_jiffies)
1794 WRITE_ONCE(ct->timeout, extra_jiffies);
47d95045 1795acct:
ba76738c
PNA
1796 if (do_acct)
1797 nf_ct_acct_update(ct, ctinfo, skb->len);
9fb9cbb1 1798}
13b18339 1799EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
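Callers almost never pass do_acct themselves; the inline wrappers in <net/netfilter/nf_conntrack.h> follow this shape (reproduced here as a sketch):

static inline void nf_ct_refresh_acct(struct nf_conn *ct,
				      enum ip_conntrack_info ctinfo,
				      const struct sk_buff *skb,
				      u32 extra_jiffies)
{
	__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
}

/* Refresh without doing accounting */
static inline void nf_ct_refresh(struct nf_conn *ct,
				 const struct sk_buff *skb,
				 u32 extra_jiffies)
{
	__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
}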
9fb9cbb1 1800
ad66713f
FW
1801bool nf_ct_kill_acct(struct nf_conn *ct,
1802 enum ip_conntrack_info ctinfo,
1803 const struct sk_buff *skb)
51091764 1804{
ad66713f 1805 nf_ct_acct_update(ct, ctinfo, skb->len);
58401572 1806
f330a7fd 1807 return nf_ct_delete(ct, 0, 0);
51091764 1808}
ad66713f 1809EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
51091764 1810
c0cd1156 1811#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
c1d10adb
PNA
1812
1813#include <linux/netfilter/nfnetlink.h>
1814#include <linux/netfilter/nfnetlink_conntrack.h>
57b47a53
IM
1815#include <linux/mutex.h>
1816
05ba4c89 1817/* Generic function for tcp/udp/sctp/dccp and the like. */
fdf70832 1818int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
c1d10adb
PNA
1819 const struct nf_conntrack_tuple *tuple)
1820{
bae65be8
DM
1821 if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1822 nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1823 goto nla_put_failure;
c1d10adb
PNA
1824 return 0;
1825
df6fb868 1826nla_put_failure:
c1d10adb
PNA
1827 return -1;
1828}
fdf70832 1829EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
c1d10adb 1830
f73e924c
PM
1831const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1832 [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
1833 [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
c1d10adb 1834};
f73e924c 1835EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
c1d10adb 1836
fdf70832 1837int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
c1d10adb
PNA
1838 struct nf_conntrack_tuple *t)
1839{
df6fb868 1840 if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
c1d10adb
PNA
1841 return -EINVAL;
1842
77236b6e
PM
1843 t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1844 t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
c1d10adb
PNA
1845
1846 return 0;
1847}
fdf70832 1848EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
5c0de29d 1849
5caaed15 1850unsigned int nf_ct_port_nlattr_tuple_size(void)
5c0de29d 1851{
5caaed15
FW
1852 static unsigned int size __read_mostly;
1853
1854 if (!size)
1855 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1856
1857 return size;
5c0de29d
HE
1858}
1859EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
c1d10adb
PNA
1860#endif
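These four symbols are meant to be plugged into a port-based tracker's ops; a sketch of the wiring (the UDP and TCP trackers use this shape):

static const struct nf_conntrack_l4proto example_l4proto = {
	.l4proto	   = IPPROTO_UDP,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.tuple_to_nlattr   = nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple   = nf_ct_port_nlattr_to_tuple,
	.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
	.nla_policy	   = nf_ct_port_nla_policy,
#endif
};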
1861
9fb9cbb1 1862/* Used by ipt_REJECT and ip6t_REJECT. */
312a0c16 1863static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
9fb9cbb1
YK
1864{
1865 struct nf_conn *ct;
1866 enum ip_conntrack_info ctinfo;
1867
1868 /* This ICMP is in the reverse direction to the packet which caused it */
1869 ct = nf_ct_get(skb, &ctinfo);
1870 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
fb048833 1871 ctinfo = IP_CT_RELATED_REPLY;
9fb9cbb1
YK
1872 else
1873 ctinfo = IP_CT_RELATED;
1874
1875 /* Attach to new skbuff, and increment count */
c74454fa 1876 nf_ct_set(nskb, ct, ctinfo);
cb9c6836 1877 nf_conntrack_get(skb_nfct(nskb));
9fb9cbb1
YK
1878}
1879
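The REJECT targets do not call this function directly; they go through nf_ct_attach() in net/netfilter/core.c, which dereferences the ip_ct_attach pointer that nf_conntrack_init_end() (further down) points at nf_conntrack_attach(). A sketch of that dispatcher:

void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->_nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}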
368982cd
PNA
1880static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
1881{
368982cd
PNA
1882 struct nf_conntrack_tuple_hash *h;
1883 struct nf_conntrack_tuple tuple;
1884 enum ip_conntrack_info ctinfo;
1885 struct nf_nat_hook *nat_hook;
6816d931 1886 unsigned int status;
368982cd 1887 struct nf_conn *ct;
6816d931 1888 int dataoff;
368982cd
PNA
1889 u16 l3num;
1890 u8 l4num;
1891
1892 ct = nf_ct_get(skb, &ctinfo);
1893 if (!ct || nf_ct_is_confirmed(ct))
1894 return 0;
1895
1896 l3num = nf_ct_l3num(ct);
368982cd 1897
6816d931
FW
1898 dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
1899 if (dataoff <= 0)
368982cd
PNA
1900 return -1;
1901
368982cd 1902 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
303e0c55 1903 l4num, net, &tuple))
368982cd
PNA
1904 return -1;
1905
1906 if (ct->status & IPS_SRC_NAT) {
1907 memcpy(tuple.src.u3.all,
1908 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
1909 sizeof(tuple.src.u3.all));
1910 tuple.src.u.all =
1911 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
1912 }
1913
1914 if (ct->status & IPS_DST_NAT) {
1915 memcpy(tuple.dst.u3.all,
1916 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
1917 sizeof(tuple.dst.u3.all));
1918 tuple.dst.u.all =
1919 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
1920 }
1921
1922 h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
1923 if (!h)
1924 return 0;
1925
1926 /* Store status bits of the clashing conntrack so that NAT mangling
1927 * can be re-done according to what has already been done to this packet.
1928 */
1929 status = ct->status;
1930
1931 nf_ct_put(ct);
1932 ct = nf_ct_tuplehash_to_ctrack(h);
1933 nf_ct_set(skb, ct, ctinfo);
1934
1935 nat_hook = rcu_dereference(nf_nat_hook);
1936 if (!nat_hook)
1937 return 0;
1938
1939 if (status & IPS_SRC_NAT &&
1940 nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
1941 IP_CT_DIR_ORIGINAL) == NF_DROP)
1942 return -1;
1943
1944 if (status & IPS_DST_NAT &&
1945 nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
1946 IP_CT_DIR_ORIGINAL) == NF_DROP)
1947 return -1;
1948
1949 return 0;
1950}
1951
b60a6040
THJ
1952static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
1953 const struct sk_buff *skb)
1954{
1955 const struct nf_conntrack_tuple *src_tuple;
1956 const struct nf_conntrack_tuple_hash *hash;
1957 struct nf_conntrack_tuple srctuple;
1958 enum ip_conntrack_info ctinfo;
1959 struct nf_conn *ct;
1960
1961 ct = nf_ct_get(skb, &ctinfo);
1962 if (ct) {
1963 src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
1964 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1965 return true;
1966 }
1967
1968 if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
1969 NFPROTO_IPV4, dev_net(skb->dev),
1970 &srctuple))
1971 return false;
1972
1973 hash = nf_conntrack_find_get(dev_net(skb->dev),
1974 &nf_ct_zone_dflt,
1975 &srctuple);
1976 if (!hash)
1977 return false;
1978
1979 ct = nf_ct_tuplehash_to_ctrack(hash);
1980 src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
1981 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1982 nf_ct_put(ct);
1983
1984 return true;
1985}
1986
9fb9cbb1 1987/* Bring out ya dead! */
df0933dc 1988static struct nf_conn *
2843fb69 1989get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
9fb9cbb1
YK
1990 void *data, unsigned int *bucket)
1991{
df0933dc
PM
1992 struct nf_conntrack_tuple_hash *h;
1993 struct nf_conn *ct;
ea781f19 1994 struct hlist_nulls_node *n;
93bb0ceb 1995 spinlock_t *lockp;
9fb9cbb1 1996
56d52d48 1997 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
93bb0ceb
JDB
1998 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1999 local_bh_disable();
b16c2919 2000 nf_conntrack_lock(lockp);
56d52d48
FW
2001 if (*bucket < nf_conntrack_htable_size) {
2002 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
93bb0ceb
JDB
2003 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
2004 continue;
2005 ct = nf_ct_tuplehash_to_ctrack(h);
2843fb69 2006 if (iter(ct, data))
93bb0ceb
JDB
2007 goto found;
2008 }
df0933dc 2009 }
93bb0ceb
JDB
2010 spin_unlock(lockp);
2011 local_bh_enable();
d93c6258 2012 cond_resched();
601e68e1 2013 }
b7779d06 2014
b0feacaa
FW
2015 return NULL;
2016found:
2017 atomic_inc(&ct->ct_general.use);
2018 spin_unlock(lockp);
2019 local_bh_enable();
2020 return ct;
2021}
2022
2843fb69
FW
2023static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
2024 void *data, u32 portid, int report)
2025{
0d02d564 2026 unsigned int bucket = 0, sequence;
2843fb69 2027 struct nf_conn *ct;
2843fb69
FW
2028
2029 might_sleep();
2030
0d02d564
FW
2031 for (;;) {
2032 sequence = read_seqcount_begin(&nf_conntrack_generation);
2843fb69 2033
0d02d564
FW
2034 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
2035 /* Time to push up daisies... */
2036
2037 nf_ct_delete(ct, portid, report);
2038 nf_ct_put(ct);
2039 cond_resched();
2040 }
2041
2042 if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
2043 break;
2044 bucket = 0;
2843fb69
FW
2045 }
2046}
2047
2048struct iter_data {
2049 int (*iter)(struct nf_conn *i, void *data);
2050 void *data;
2051 struct net *net;
2052};
2053
2054static int iter_net_only(struct nf_conn *i, void *data)
2055{
2056 struct iter_data *d = data;
2057
2058 if (!net_eq(d->net, nf_ct_net(i)))
2059 return 0;
2060
2061 return d->iter(i, d->data);
2062}
2063
b0feacaa
FW
2064static void
2065__nf_ct_unconfirmed_destroy(struct net *net)
2066{
2067 int cpu;
2068
b7779d06 2069 for_each_possible_cpu(cpu) {
b0feacaa
FW
2070 struct nf_conntrack_tuple_hash *h;
2071 struct hlist_nulls_node *n;
2072 struct ct_pcpu *pcpu;
2073
2074 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
b7779d06
JDB
2075
2076 spin_lock_bh(&pcpu->lock);
2077 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
b0feacaa
FW
2078 struct nf_conn *ct;
2079
b7779d06 2080 ct = nf_ct_tuplehash_to_ctrack(h);
b0feacaa
FW
2081
2082 /* we cannot call iter() on the unconfirmed list; the
2083 * owning cpu can reallocate ct->ext at any time.
2084 */
2085 set_bit(IPS_DYING_BIT, &ct->status);
b7779d06
JDB
2086 }
2087 spin_unlock_bh(&pcpu->lock);
d93c6258 2088 cond_resched();
b7779d06 2089 }
9fb9cbb1
YK
2090}
2091
84657984
FW
2092void nf_ct_unconfirmed_destroy(struct net *net)
2093{
2094 might_sleep();
2095
2096 if (atomic_read(&net->ct.count) > 0) {
2097 __nf_ct_unconfirmed_destroy(net);
e2a75007 2098 nf_queue_nf_hook_drop(net);
84657984
FW
2099 synchronize_net();
2100 }
2101}
2102EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
2103
9fd6452d
FW
2104void nf_ct_iterate_cleanup_net(struct net *net,
2105 int (*iter)(struct nf_conn *i, void *data),
2106 void *data, u32 portid, int report)
9fb9cbb1 2107{
2843fb69 2108 struct iter_data d;
9fb9cbb1 2109
d93c6258
FW
2110 might_sleep();
2111
88b68bc5
FW
2112 if (atomic_read(&net->ct.count) == 0)
2113 return;
2114
2843fb69
FW
2115 d.iter = iter;
2116 d.data = data;
2117 d.net = net;
2118
2843fb69
FW
2119 nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
2120}
2121EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
9fb9cbb1 2122
2843fb69
FW
2123/**
2124 * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
2125 * @iter: callback to invoke for each conntrack
2126 * @data: data to pass to @iter
2127 *
2128 * Like nf_ct_iterate_cleanup, but first marks conntracks on the
2129 * unconfirmed list as dying (so they will not be inserted into
2130 * the main table).
7866cc57
FW
2131 *
2132 * Can only be called from the module exit path.
2843fb69
FW
2133 */
2134void
2135nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
2136{
2137 struct net *net;
2138
f0b07bb1 2139 down_read(&net_rwsem);
2843fb69
FW
2140 for_each_net(net) {
2141 if (atomic_read(&net->ct.count) == 0)
2142 continue;
2143 __nf_ct_unconfirmed_destroy(net);
e2a75007 2144 nf_queue_nf_hook_drop(net);
9fb9cbb1 2145 }
f0b07bb1 2146 up_read(&net_rwsem);
2843fb69 2147
7866cc57
FW
2148 /* Need to wait for the netns cleanup worker to finish, if it's
2149 * running -- it might have deleted a net namespace from
2150 * the global list, so our __nf_ct_unconfirmed_destroy() might
2151 * not have affected all namespaces.
2152 */
2153 net_ns_barrier();
2154
2843fb69
FW
2155 /* a conntrack could have been unlinked from the unconfirmed list
2156 * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
2157 * This makes sure it's inserted into the conntrack table.
2158 */
2159 synchronize_net();
2160
2161 nf_ct_iterate_cleanup(iter, data, 0, 0);
9fb9cbb1 2162}
2843fb69 2163EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
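A hedged sketch of the intended use (nf_nat_core.c exits this way with its nf_nat_proto_clean() iterator): the callback returns nonzero for every conntrack the departing module wants deleted.

static int example_kill_all(struct nf_conn *ct, void *data)
{
	return 1;	/* nonzero: delete this conntrack */
}

static void __exit example_module_exit(void)
{
	nf_ct_iterate_destroy(example_kill_all, NULL);
}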
9fb9cbb1 2164
274d383b
PNA
2165static int kill_all(struct nf_conn *i, void *data)
2166{
2843fb69 2167 return net_eq(nf_ct_net(i), data);
274d383b
PNA
2168}
2169
f94161c1 2170void nf_conntrack_cleanup_start(void)
9fb9cbb1 2171{
b87a2f91 2172 conntrack_gc_work.exiting = true;
f94161c1
G
2173 RCU_INIT_POINTER(ip_ct_attach, NULL);
2174}
2175
2176void nf_conntrack_cleanup_end(void)
2177{
1f4b2439 2178 RCU_INIT_POINTER(nf_ct_hook, NULL);
b87a2f91 2179 cancel_delayed_work_sync(&conntrack_gc_work.dwork);
285189c7 2180 kvfree(nf_conntrack_hash);
56d52d48 2181
04d87001 2182 nf_conntrack_proto_fini();
41d73ec0 2183 nf_conntrack_seqadj_fini();
5f69b8f5 2184 nf_conntrack_labels_fini();
5e615b22 2185 nf_conntrack_helper_fini();
8684094c 2186 nf_conntrack_timeout_fini();
3fe0f943 2187 nf_conntrack_ecache_fini();
73f4001a 2188 nf_conntrack_tstamp_fini();
b7ff3a1f 2189 nf_conntrack_acct_fini();
83b4dbe1 2190 nf_conntrack_expect_fini();
77571149
FW
2191
2192 kmem_cache_destroy(nf_conntrack_cachep);
08f6547d 2193}
9fb9cbb1 2194
f94161c1
G
2195/*
2196 * Mishearing the voices in his head, our hero wonders how he's
2197 * supposed to kill the mall.
2198 */
2199void nf_conntrack_cleanup_net(struct net *net)
08f6547d 2200{
dece40e8
VD
2201 LIST_HEAD(single);
2202
2203 list_add(&net->exit_list, &single);
2204 nf_conntrack_cleanup_net_list(&single);
2205}
2206
2207void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
2208{
2209 int busy;
2210 struct net *net;
2211
f94161c1
G
2212 /*
2213 * This makes sure all current packets have passed through
2214 * the netfilter framework. Roll on, two-stage module
2215 * delete...
2216 */
2217 synchronize_net();
dece40e8
VD
2218i_see_dead_people:
2219 busy = 0;
2220 list_for_each_entry(net, net_exit_list, exit_list) {
2843fb69 2221 nf_ct_iterate_cleanup(kill_all, net, 0, 0);
dece40e8
VD
2222 if (atomic_read(&net->ct.count) != 0)
2223 busy = 1;
2224 }
2225 if (busy) {
9fb9cbb1
YK
2226 schedule();
2227 goto i_see_dead_people;
2228 }
2229
dece40e8 2230 list_for_each_entry(net, net_exit_list, exit_list) {
dece40e8 2231 nf_conntrack_proto_pernet_fini(net);
dece40e8 2232 nf_conntrack_ecache_pernet_fini(net);
dece40e8 2233 nf_conntrack_expect_pernet_fini(net);
dece40e8 2234 free_percpu(net->ct.stat);
b7779d06 2235 free_percpu(net->ct.pcpu_lists);
dece40e8 2236 }
08f6547d
AD
2237}
2238
d862a662 2239void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
9fb9cbb1 2240{
ea781f19
ED
2241 struct hlist_nulls_head *hash;
2242 unsigned int nr_slots, i;
9fb9cbb1 2243
9cc1c73a
FW
2244 if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
2245 return NULL;
2246
ea781f19
ED
2247 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
2248 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
9cc1c73a 2249
285189c7
LR
2250 hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
2251 GFP_KERNEL | __GFP_ZERO);
9fb9cbb1 2252
ea781f19
ED
2253 if (hash && nulls)
2254 for (i = 0; i < nr_slots; i++)
2255 INIT_HLIST_NULLS_HEAD(&hash[i], i);
9fb9cbb1
YK
2256
2257 return hash;
2258}
ac565e5f 2259EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
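A hedged sketch of an external user (the shape nf_nat_core.c uses for its bysource table): with nulls == 0 the buckets are plain hlist heads and no nulls end markers are installed.

static struct hlist_head *example_table;
static unsigned int example_size;

static int __init example_table_init(void)
{
	/* size the secondary table off the main conntrack table */
	example_size = nf_conntrack_htable_size;
	example_table = nf_ct_alloc_hashtable(&example_size, 0);

	return example_table ? 0 : -ENOMEM;
}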
9fb9cbb1 2260
3183ab89 2261int nf_conntrack_hash_resize(unsigned int hashsize)
9fb9cbb1 2262{
3183ab89
FW
2263 int i, bucket;
2264 unsigned int old_size;
ea781f19 2265 struct hlist_nulls_head *hash, *old_hash;
9fb9cbb1 2266 struct nf_conntrack_tuple_hash *h;
5d0aa2cc 2267 struct nf_conn *ct;
9fb9cbb1 2268
9fb9cbb1
YK
2269 if (!hashsize)
2270 return -EINVAL;
2271
d862a662 2272 hash = nf_ct_alloc_hashtable(&hashsize, 1);
9fb9cbb1
YK
2273 if (!hash)
2274 return -ENOMEM;
2275
3183ab89
FW
2276 old_size = nf_conntrack_htable_size;
2277 if (old_size == hashsize) {
285189c7 2278 kvfree(hash);
3183ab89
FW
2279 return 0;
2280 }
2281
93bb0ceb
JDB
2282 local_bh_disable();
2283 nf_conntrack_all_lock();
a3efd812 2284 write_seqcount_begin(&nf_conntrack_generation);
93bb0ceb 2285
76507f69
PM
2286 /* Lookups in the old hash might happen in parallel, which means we
2287 * might get false negatives during connection lookup. New connections
2288 * created because of a false negative won't make it into the hash
93bb0ceb 2289 * though, since that requires taking the locks.
76507f69 2290 */
93bb0ceb 2291
56d52d48
FW
2292 for (i = 0; i < nf_conntrack_htable_size; i++) {
2293 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
2294 h = hlist_nulls_entry(nf_conntrack_hash[i].first,
2295 struct nf_conntrack_tuple_hash, hnnode);
5d0aa2cc 2296 ct = nf_ct_tuplehash_to_ctrack(h);
ea781f19 2297 hlist_nulls_del_rcu(&h->hnnode);
1b8c8a9f
FW
2298 bucket = __hash_conntrack(nf_ct_net(ct),
2299 &h->tuple, hashsize);
ea781f19 2300 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
9fb9cbb1
YK
2301 }
2302 }
56d52d48
FW
2303 old_size = nf_conntrack_htable_size;
2304 old_hash = nf_conntrack_hash;
9fb9cbb1 2305
56d52d48
FW
2306 nf_conntrack_hash = hash;
2307 nf_conntrack_htable_size = hashsize;
93bb0ceb 2308
a3efd812 2309 write_seqcount_end(&nf_conntrack_generation);
93bb0ceb
JDB
2310 nf_conntrack_all_unlock();
2311 local_bh_enable();
9fb9cbb1 2312
5e3c61f9 2313 synchronize_net();
285189c7 2314 kvfree(old_hash);
9fb9cbb1
YK
2315 return 0;
2316}
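The reader side of the seqcount dance mentioned above is a retry loop; earlier in this file, nf_conntrack_get_ht() snapshots the table pointer and size like this (sketch):

static void example_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
{
	struct hlist_nulls_head *hptr;
	unsigned int sequence, hsz;

	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hsz = nf_conntrack_htable_size;
		hptr = nf_conntrack_hash;
	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));

	*hash = hptr;	/* consistent pair: table and its size */
	*hsize = hsz;
}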
3183ab89 2317
e4dca7b7 2318int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
3183ab89
FW
2319{
2320 unsigned int hashsize;
2321 int rc;
2322
2323 if (current->nsproxy->net_ns != &init_net)
2324 return -EOPNOTSUPP;
2325
2326 /* On boot, we can set this without any fancy locking. */
2045cdfa 2327 if (!nf_conntrack_hash)
3183ab89
FW
2328 return param_set_uint(val, kp);
2329
2330 rc = kstrtouint(val, 0, &hashsize);
2331 if (rc)
2332 return rc;
2333
2334 return nf_conntrack_hash_resize(hashsize);
2335}
fae718dd 2336EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
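This setter is bound to the module parameter in nf_conntrack_standalone.c, which makes the table resizable at runtime through sysfs; a sketch of the binding and its use:

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

/* runtime resize (init_net only):
 *   echo 131072 > /sys/module/nf_conntrack/parameters/hashsize
 */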
9fb9cbb1 2337
ab71632c 2338static __always_inline unsigned int total_extension_size(void)
b3a5db10
FW
2339{
2340 /* remember to add new extensions below */
2341 BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
2342
2343 return sizeof(struct nf_ct_ext) +
2344 sizeof(struct nf_conn_help)
2345#if IS_ENABLED(CONFIG_NF_NAT)
2346 + sizeof(struct nf_conn_nat)
2347#endif
2348 + sizeof(struct nf_conn_seqadj)
2349 + sizeof(struct nf_conn_acct)
2350#ifdef CONFIG_NF_CONNTRACK_EVENTS
2351 + sizeof(struct nf_conntrack_ecache)
2352#endif
2353#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
2354 + sizeof(struct nf_conn_tstamp)
2355#endif
2356#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2357 + sizeof(struct nf_conn_timeout)
2358#endif
2359#ifdef CONFIG_NF_CONNTRACK_LABELS
2360 + sizeof(struct nf_conn_labels)
2361#endif
2362#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
2363 + sizeof(struct nf_conn_synproxy)
2364#endif
2365 ;
2366};
2367
f94161c1 2368int nf_conntrack_init_start(void)
9fb9cbb1 2369{
ca79b0c2 2370 unsigned long nr_pages = totalram_pages();
f205c5e0 2371 int max_factor = 8;
0c5366b3 2372 int ret = -ENOMEM;
cc41c84b 2373 int i;
93bb0ceb 2374
b3a5db10
FW
2375 /* struct nf_ct_ext uses u8 to store offsets/size */
2376 BUILD_BUG_ON(total_extension_size() > 255u);
2377
a3efd812
FW
2378 seqcount_init(&nf_conntrack_generation);
2379
d5d20912 2380 for (i = 0; i < CONNTRACK_LOCKS; i++)
93bb0ceb 2381 spin_lock_init(&nf_conntrack_locks[i]);
9fb9cbb1 2382
9fb9cbb1 2383 if (!nf_conntrack_htable_size) {
88eab472
ML
2384 /* Idea from tcp.c: use 1/16384 of memory.
2385 * On i386: 32MB machine has 512 buckets.
2386 * >= 1GB machines have 16384 buckets.
2387 * >= 4GB machines have 65536 buckets.
2388 */
9fb9cbb1 2389 nf_conntrack_htable_size
3d6357de 2390 = (((nr_pages << PAGE_SHIFT) / 16384)
f205c5e0 2391 / sizeof(struct hlist_head));
3d6357de 2392 if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
88eab472 2393 nf_conntrack_htable_size = 65536;
3d6357de 2394 else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
f205c5e0
PM
2395 nf_conntrack_htable_size = 16384;
2396 if (nf_conntrack_htable_size < 32)
2397 nf_conntrack_htable_size = 32;
2398
2399 /* Use a max. factor of four by default to get the same max as
2400 * with the old struct list_heads. When a table size is given
2401 * we use the old value of 8 to avoid reducing the maximum
2402 * number of entries. */
2403 max_factor = 4;
9fb9cbb1 2404 }
56d52d48
FW
2405
2406 nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2407 if (!nf_conntrack_hash)
2408 return -ENOMEM;
2409
f205c5e0 2410 nf_conntrack_max = max_factor * nf_conntrack_htable_size;
8e5105a0 2411
0c5366b3 2412 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
a9e419dc
FW
2413 sizeof(struct nf_conn),
2414 NFCT_INFOMASK + 1,
5f0d5a3a 2415 SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
0c5366b3
FW
2416 if (!nf_conntrack_cachep)
2417 goto err_cachep;
2418
83b4dbe1
G
2419 ret = nf_conntrack_expect_init();
2420 if (ret < 0)
2421 goto err_expect;
2422
b7ff3a1f
G
2423 ret = nf_conntrack_acct_init();
2424 if (ret < 0)
2425 goto err_acct;
2426
73f4001a
G
2427 ret = nf_conntrack_tstamp_init();
2428 if (ret < 0)
2429 goto err_tstamp;
2430
3fe0f943
G
2431 ret = nf_conntrack_ecache_init();
2432 if (ret < 0)
2433 goto err_ecache;
2434
8684094c
G
2435 ret = nf_conntrack_timeout_init();
2436 if (ret < 0)
2437 goto err_timeout;
2438
5e615b22
G
2439 ret = nf_conntrack_helper_init();
2440 if (ret < 0)
2441 goto err_helper;
2442
5f69b8f5
G
2443 ret = nf_conntrack_labels_init();
2444 if (ret < 0)
2445 goto err_labels;
2446
41d73ec0
PM
2447 ret = nf_conntrack_seqadj_init();
2448 if (ret < 0)
2449 goto err_seqadj;
2450
04d87001
G
2451 ret = nf_conntrack_proto_init();
2452 if (ret < 0)
2453 goto err_proto;
2454
b87a2f91 2455 conntrack_gc_work_init(&conntrack_gc_work);
0984d427 2456 queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
b87a2f91 2457
08f6547d
AD
2458 return 0;
2459
04d87001 2460err_proto:
41d73ec0
PM
2461 nf_conntrack_seqadj_fini();
2462err_seqadj:
04d87001 2463 nf_conntrack_labels_fini();
5f69b8f5
G
2464err_labels:
2465 nf_conntrack_helper_fini();
5e615b22
G
2466err_helper:
2467 nf_conntrack_timeout_fini();
8684094c
G
2468err_timeout:
2469 nf_conntrack_ecache_fini();
3fe0f943
G
2470err_ecache:
2471 nf_conntrack_tstamp_fini();
73f4001a
G
2472err_tstamp:
2473 nf_conntrack_acct_fini();
b7ff3a1f
G
2474err_acct:
2475 nf_conntrack_expect_fini();
83b4dbe1 2476err_expect:
0c5366b3
FW
2477 kmem_cache_destroy(nf_conntrack_cachep);
2478err_cachep:
285189c7 2479 kvfree(nf_conntrack_hash);
08f6547d
AD
2480 return ret;
2481}
2482
1f4b2439 2483static struct nf_ct_hook nf_conntrack_hook = {
368982cd 2484 .update = nf_conntrack_update,
1f4b2439 2485 .destroy = destroy_conntrack,
b60a6040 2486 .get_tuple_skb = nf_conntrack_get_tuple_skb,
1f4b2439
PNA
2487};
2488
f94161c1
G
2489void nf_conntrack_init_end(void)
2490{
2491 /* For use by REJECT target */
2492 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1f4b2439 2493 RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
f94161c1
G
2494}
2495
8cc20198
ED
2496/*
2497 * We need to use special "null" values, not used in the hash table
2498 */
2499#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
2500#define DYING_NULLS_VAL ((1<<30)+1)
252b3e8c 2501#define TEMPLATE_NULLS_VAL ((1<<30)+2)
8cc20198 2502
f94161c1 2503int nf_conntrack_init_net(struct net *net)
08f6547d 2504{
b7779d06
JDB
2505 int ret = -ENOMEM;
2506 int cpu;
ceceae1b 2507
cc41c84b 2508 BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2e7b162c 2509 BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
08f6547d 2510 atomic_set(&net->ct.count, 0);
b7779d06
JDB
2511
2512 net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2513 if (!net->ct.pcpu_lists)
08f6547d 2514 goto err_stat;
b7779d06
JDB
2515
2516 for_each_possible_cpu(cpu) {
2517 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2518
2519 spin_lock_init(&pcpu->lock);
2520 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2521 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
08f6547d 2522 }
5b3501fa 2523
b7779d06
JDB
2524 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2525 if (!net->ct.stat)
2526 goto err_pcpu_lists;
2527
83b4dbe1 2528 ret = nf_conntrack_expect_pernet_init(net);
08f6547d
AD
2529 if (ret < 0)
2530 goto err_expect;
fc3893fd
FW
2531
2532 nf_conntrack_acct_pernet_init(net);
2533 nf_conntrack_tstamp_pernet_init(net);
2534 nf_conntrack_ecache_pernet_init(net);
2535 nf_conntrack_helper_pernet_init(net);
4a60dc74 2536 nf_conntrack_proto_pernet_init(net);
fc3893fd 2537
08f6547d 2538 return 0;
c539f017 2539
08f6547d 2540err_expect:
0d55af87 2541 free_percpu(net->ct.stat);
b7779d06
JDB
2542err_pcpu_lists:
2543 free_percpu(net->ct.pcpu_lists);
0d55af87 2544err_stat:
08f6547d
AD
2545 return ret;
2546}