/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
                                                __read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
                                                __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
        return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
        return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);
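
/* Note: both lookup helpers above use rcu_dereference(), so callers
 * must be inside an RCU read-side critical section; get_unique_tuple(),
 * for example, brackets its lookups with rcu_read_lock() and
 * rcu_read_unlock().
 */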

#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        enum ip_conntrack_dir dir;
        unsigned long statusbit;
        u8 family;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL)
                return;

        family = nf_ct_l3num(ct);
        l3proto = __nf_nat_l3proto_find(family);
        if (l3proto == NULL)
                return;

        dir = CTINFO2DIR(ctinfo);
        if (dir == IP_CT_DIR_ORIGINAL)
                statusbit = IPS_DST_NAT;
        else
                statusbit = IPS_SRC_NAT;

        l3proto->decode_session(skb, ct, dir, statusbit, fl);
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
        struct flowi fl;
        unsigned int hh_len;
        struct dst_entry *dst;
        int err;

        err = xfrm_decode_session(skb, &fl, family);
        if (err < 0)
                return err;

        dst = skb_dst(skb);
        if (dst->xfrm)
                dst = ((struct xfrm_dst *)dst)->route;
        dst_hold(dst);

        dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
        if (IS_ERR(dst))
                return PTR_ERR(dst);

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
            pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */
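
/* Illustrative sketch (not part of this file): the per-family NAT hooks
 * call nf_xfrm_me_harder() when NAT has rewritten an address on an
 * IPsec path and the packet must be re-routed, roughly like this
 * (the shape of nf_nat_ipv4_out()):
 *
 *      if (ct->tuplehash[dir].tuple.src.u3.ip !=
 *          ct->tuplehash[!dir].tuple.dst.u3.ip) {
 *              err = nf_xfrm_me_harder(state->net, skb, AF_INET);
 *              if (err < 0)
 *                      ret = NF_DROP_ERR(err);
 *      }
 */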

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

        /* Original src, to ensure we map it consistently if possible. */
        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
                      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

        return reciprocal_scale(hash, nf_nat_htable_size);
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
{
        /* Conntrack doesn't keep track of outgoing tuples; only
         * incoming ones. NAT means they don't have a fixed mapping,
         * so we invert the tuple and look for the incoming reply.
         *
         * We could keep a separate hash if this proves too slow.
         */
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuplepr(&reply, tuple);
        return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of the range?
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
                    const struct nf_nat_l4proto *l4proto,
                    const struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range)
{
        /* If we are supposed to map IPs, then we must be in the
         * range specified, otherwise let this drag us onto a new src IP.
         */
        if (range->flags & NF_NAT_RANGE_MAP_IPS &&
            !l3proto->in_range(tuple, range))
                return 0;

        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
            l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
                              &range->min_proto, &range->max_proto))
                return 1;

        return 0;
}

static inline int
same_src(const struct nf_conn *ct,
         const struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_tuple *t;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
                nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
                t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_nat_l3proto *l3proto,
                     const struct nf_nat_l4proto *l4proto,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
{
        unsigned int h = hash_by_src(net, tuple);
        const struct nf_conn *ct;

        hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
                if (same_src(ct, tuple) &&
                    net_eq(net, nf_ct_net(ct)) &&
                    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
                                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
                        result->dst = tuple->dst;

                        if (in_range(l3proto, l4proto, result, range))
                                return 1;
                }
        }
        return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
                    struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
{
        union nf_inet_addr *var_ipp;
        unsigned int i, max;
        /* Host order */
        u32 minip, maxip, j, dist;
        bool full_range;

        /* No IP mapping? Do nothing. */
        if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
                return;

        if (maniptype == NF_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3;
        else
                var_ipp = &tuple->dst.u3;

        /* Fast path: only one choice. */
        if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
                *var_ipp = range->min_addr;
                return;
        }

        if (nf_ct_l3num(ct) == NFPROTO_IPV4)
                max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
        else
                max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway). The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
         * like this), even across reboots.
         */
        j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
                   range->flags & NF_NAT_RANGE_PERSISTENT ?
                        0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

        full_range = false;
        for (i = 0; i <= max; i++) {
                /* If first bytes of the address are at the maximum, use the
                 * distance. Otherwise use the full range.
                 */
                if (!full_range) {
                        minip = ntohl((__force __be32)range->min_addr.all[i]);
                        maxip = ntohl((__force __be32)range->max_addr.all[i]);
                        dist = maxip - minip + 1;
                } else {
                        minip = 0;
                        dist = ~0;
                }

                var_ipp->all[i] = (__force __u32)
                        htonl(minip + reciprocal_scale(j, dist));
                if (var_ipp->all[i] != range->max_addr.all[i])
                        full_range = true;

                if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
                        j ^= (__force u32)tuple->dst.u3.all[i];
        }
}
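
/* Worked example (added for illustration): reciprocal_scale(j, dist)
 * maps the 32-bit hash j into [0, dist) as ((u64)j * dist) >> 32.
 * For an IPv4 range 10.0.0.1-10.0.0.4, dist is 4; a hash of 0x80000000
 * scales to offset 2, selecting 10.0.0.3. Because the same source
 * always produces the same hash, the chosen address is stable across
 * connections from that client.
 */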

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
                 const struct nf_nat_range *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
{
        const struct nf_conntrack_zone *zone;
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);

        zone = nf_ct_zone(ct);

        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
        l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
                                        orig_tuple->dst.protonum);

        /* 1) If this srcip/proto/src-proto-part is currently mapped,
         * and that same mapping gives a unique tuple within the given
         * range, use that.
         *
         * This is only required for source (i.e. NAT/masq) mappings.
         * So far, we don't do local source mappings, so multiple
         * manips are not an issue.
         */
        if (maniptype == NF_NAT_MANIP_SRC &&
            !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                /* try the original tuple first */
                if (in_range(l3proto, l4proto, orig_tuple, range)) {
                        if (!nf_nat_used_tuple(orig_tuple, ct)) {
                                *tuple = *orig_tuple;
                                goto out;
                        }
                } else if (find_appropriate_src(net, zone, l3proto, l4proto,
                                                orig_tuple, tuple, range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
                                goto out;
                }
        }

        /* 2) Select the least-used IP/proto combination in the given range */
        *tuple = *orig_tuple;
        find_best_ips_proto(zone, tuple, range, ct, maniptype);

        /* 3) The per-protocol part of the manip is made to map into
         * the range to make a unique tuple.
         */

        /* Only bother mapping if it's not already in range and unique */
        if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
                        if (l4proto->in_range(tuple, maniptype,
                                              &range->min_proto,
                                              &range->max_proto) &&
                            (range->min_proto.all == range->max_proto.all ||
                             !nf_nat_used_tuple(tuple, ct)))
                                goto out;
                } else if (!nf_nat_used_tuple(tuple, ct)) {
                        goto out;
                }
        }
        /* Last chance: get the protocol to try to obtain a unique tuple. */
        l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
        rcu_read_unlock();
}
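
/* For orientation: per HOOK2MANIP() in nf_nat.h, NF_INET_POST_ROUTING
 * and NF_INET_LOCAL_IN yield NF_NAT_MANIP_SRC, while NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT yield NF_NAT_MANIP_DST.
 */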

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nfct_nat(ct);
        if (nat)
                return nat;

        if (!nf_ct_is_confirmed(ct))
                nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

        return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype)
{
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_tuple curr_tuple, new_tuple;

        /* Can't setup nat info for confirmed ct. */
        if (nf_ct_is_confirmed(ct))
                return NF_ACCEPT;

        WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
                maniptype != NF_NAT_MANIP_DST);

        if (WARN_ON(nf_nat_initialized(ct, maniptype)))
                return NF_DROP;

        /* What we've got will look like the inverse of the reply. Normally
         * this is what is in the conntrack, except for prior
         * manipulations (future optimization: if num_manips == 0,
         * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
         */
        nf_ct_invert_tuplepr(&curr_tuple,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

        if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
                struct nf_conntrack_tuple reply;

                /* Alter conntrack table so will recognize replies. */
                nf_ct_invert_tuplepr(&reply, &new_tuple);
                nf_conntrack_alter_reply(ct, &reply);

                /* Non-atomic: we own this at the moment. */
                if (maniptype == NF_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;

                if (nfct_help(ct) && !nfct_seqadj(ct))
                        if (!nfct_seqadj_ext_add(ct))
                                return NF_DROP;
        }

        if (maniptype == NF_NAT_MANIP_SRC) {
                unsigned int srchash;
                spinlock_t *lock;

                srchash = hash_by_src(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
                spin_lock_bh(lock);
                hlist_add_head_rcu(&ct->nat_bysource,
                                   &nf_nat_bysource[srchash]);
                spin_unlock_bh(lock);
        }

        /* It's done. */
        if (maniptype == NF_NAT_MANIP_DST)
                ct->status |= IPS_DST_NAT_DONE;
        else
                ct->status |= IPS_SRC_NAT_DONE;

        return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
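
/* Illustrative sketch (not part of this file): a SNAT-style target pins
 * both ends of the range to a single address and calls
 * nf_nat_setup_info() on the first packet of a flow; "example_snat" and
 * "newsrc" below are made-up names:
 *
 *      static unsigned int example_snat(struct nf_conn *ct,
 *                                       const union nf_inet_addr *newsrc)
 *      {
 *              struct nf_nat_range range = {
 *                      .flags    = NF_NAT_RANGE_MAP_IPS,
 *                      .min_addr = *newsrc,
 *                      .max_addr = *newsrc,
 *              };
 *
 *              return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 *      }
 */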

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
        /* Force range to this IP; let proto decide mapping for
         * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
         * Use reply in case it's already been mangled (eg local packet).
         */
        union nf_inet_addr ip =
                (manip == NF_NAT_MANIP_SRC ?
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
        struct nf_nat_range range = {
                .flags          = NF_NAT_RANGE_MAP_IPS,
                .min_addr       = ip,
                .max_addr       = ip,
        };
        return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
        return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

        if (mtype == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        /* Non-atomic: these bits don't change. */
        if (ct->status & statusbit) {
                struct nf_conntrack_tuple target;

                /* We are aiming to look like inverse of other direction. */
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

                l3proto = __nf_nat_l3proto_find(target.src.l3num);
                l4proto = __nf_nat_l4proto_find(target.src.l3num,
                                                target.dst.protonum);
                if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
                        return NF_DROP;
        }
        return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
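
/* Illustrative sketch (not part of this file): once a binding exists,
 * the per-family hook (e.g. nf_nat_ipv4_fn()) translates every packet
 * of the flow, roughly:
 *
 *      ct = nf_ct_get(skb, &ctinfo);
 *      if (!ct)
 *              return NF_ACCEPT;
 *      ...
 *      return nf_nat_packet(ct, ctinfo, state->hook, skb);
 */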

struct nf_nat_proto_clean {
        u8      l3proto;
        u8      l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
        const struct nf_nat_proto_clean *clean = data;

        if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
            (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;

        return i->status & IPS_NAT_MASK ? 1 : 0;
}

static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        unsigned int h;

        h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
        hlist_del_rcu(&ct->nat_bysource);
        spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
        if (nf_nat_proto_remove(ct, data))
                return 1;

        /* This module is being removed and the conntrack has a NAT null
         * binding. Remove it from the bysource hash, as the table will
         * be freed soon.
         *
         * Otherwise, when the conntrack is destroyed,
         * nf_nat_cleanup_conntrack() would delete the entry from an
         * already-freed table.
         */
        if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
                __nf_nat_cleanup_conntrack(ct);

        /* don't delete conntrack. Although that would make things a lot
         * simpler, we'd end up flushing all conntracks on nat rmmod.
         */
        return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
                .l4proto = l4proto,
        };

        nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
        };

        nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
        const struct nf_nat_l4proto **l4protos;
        unsigned int i;
        int ret = 0;

        mutex_lock(&nf_nat_proto_mutex);
        if (nf_nat_l4protos[l3proto] == NULL) {
                l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
                                   GFP_KERNEL);
                if (l4protos == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                for (i = 0; i < IPPROTO_MAX; i++)
                        RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

                /* Before making proto_array visible to lockless readers,
                 * we must make sure its content is committed to memory.
                 */
                smp_wmb();

                nf_nat_l4protos[l3proto] = l4protos;
        }

        if (rcu_dereference_protected(
                        nf_nat_l4protos[l3proto][l4proto->l4proto],
                        lockdep_is_held(&nf_nat_proto_mutex)
                        ) != &nf_nat_l4proto_unknown) {
                ret = -EBUSY;
                goto out;
        }
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
out:
        mutex_unlock(&nf_nat_proto_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
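
/* Illustrative sketch (not part of this file): an L4 protocol module
 * registers itself per family from its init path; nf_nat_proto_gre.c,
 * for instance, does roughly:
 *
 *      static int __init nf_nat_proto_gre_init(void)
 *      {
 *              return nf_nat_l4proto_register(NFPROTO_IPV4, &gre);
 *      }
 *
 *      static void __exit nf_nat_proto_gre_fini(void)
 *      {
 *              nf_nat_l4proto_unregister(NFPROTO_IPV4, &gre);
 *      }
 */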

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
                         &nf_nat_l4proto_unknown);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
        int err;

        err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
        if (err < 0)
                return err;

        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
                         &nf_nat_l4proto_tcp);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
                         &nf_nat_l4proto_udp);
#ifdef CONFIG_NF_NAT_PROTO_DCCP
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP],
                         &nf_nat_l4proto_dccp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_SCTP
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP],
                         &nf_nat_l4proto_sctp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE],
                         &nf_nat_l4proto_udplite);
#endif
        mutex_unlock(&nf_nat_proto_mutex);

        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);
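
/* Illustrative sketch (not part of this file): the per-family modules
 * hand their ops to this core; nf_nat_l3proto_ipv4.c, for instance,
 * ends its init path with roughly:
 *
 *      return nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
 */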

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l3proto_clean(l3proto->l3proto);
        nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        if (ct->status & IPS_SRC_NAT_DONE)
                __nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
        .len            = sizeof(struct nf_conn_nat),
        .align          = __alignof__(struct nf_conn_nat),
        .destroy        = nf_nat_cleanup_conntrack,
        .id             = NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
        [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
        [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
                                     const struct nf_conn *ct,
                                     struct nf_nat_range *range)
{
        struct nlattr *tb[CTA_PROTONAT_MAX+1];
        const struct nf_nat_l4proto *l4proto;
        int err;

        err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
                               protonat_nla_policy, NULL);
        if (err < 0)
                return err;

        l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->nlattr_to_range)
                err = l4proto->nlattr_to_range(tb, range);

        return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
        [CTA_NAT_V4_MINIP]      = { .type = NLA_U32 },
        [CTA_NAT_V4_MAXIP]      = { .type = NLA_U32 },
        [CTA_NAT_V6_MINIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_V6_MAXIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
                    const struct nf_conn *ct, struct nf_nat_range *range,
                    const struct nf_nat_l3proto *l3proto)
{
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;

        memset(range, 0, sizeof(*range));

        err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
        if (err < 0)
                return err;

        err = l3proto->nlattr_to_range(tb, range);
        if (err < 0)
                return err;

        if (!tb[CTA_NAT_PROTO])
                return 0;

        return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        struct nf_nat_range range;
        const struct nf_nat_l3proto *l3proto;
        int err;

        /* Should not happen, restricted to creating new conntracks
         * via ctnetlink.
         */
        if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
                return -EEXIST;

        /* Make sure that L3 NAT is there by the time we call nf_nat_setup_info
         * to attach the null binding, otherwise this may oops.
         */
        l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
        if (l3proto == NULL)
                return -EAGAIN;

        /* No NAT information has been passed, allocate the null-binding */
        if (attr == NULL)
                return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

        err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
        if (err < 0)
                return err;

        return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
        .name           = "nat-follow-master",
        .expectfn       = nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
        int ret, i;

        /* Leave them the same for the moment. */
        nf_nat_htable_size = nf_conntrack_htable_size;
        if (nf_nat_htable_size < CONNTRACK_LOCKS)
                nf_nat_htable_size = CONNTRACK_LOCKS;

        nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
        if (!nf_nat_bysource)
                return -ENOMEM;

        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
                nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
                printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
                return ret;
        }

        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_init(&nf_nat_locks[i]);

        nf_ct_helper_expectfn_register(&follow_master_nat);

        BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
                         nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
        BUG_ON(nf_nat_decode_session_hook != NULL);
        RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
        return 0;
}

static void __exit nf_nat_cleanup(void)
{
        struct nf_nat_proto_clean clean = {};
        unsigned int i;

        nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

        nf_ct_extend_unregister(&nat_extend);
        nf_ct_helper_expectfn_unregister(&follow_master_nat);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
        RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
        synchronize_rcu();

        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                kfree(nf_nat_l4protos[i]);
        synchronize_net();
        nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);