/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
						__read_mostly;

struct nf_nat_conn_key {
	const struct net *net;
	const struct nf_conntrack_tuple *tuple;
	const struct nf_conntrack_zone *zone;
};

static struct rhashtable nf_nat_bysource_table;

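/* The lookup helpers below return RCU-protected pointers: callers must
 * be inside an rcu_read_lock() section for as long as the result is in
 * use.
 */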
inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
	return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		goto out;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
out:
	rcu_read_unlock();
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
{
	const struct nf_conntrack_tuple *t;
	const struct nf_conn *ct = data;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	/* Original src, to ensure we map it consistently if poss. */

	seed ^= net_hash_mix(nf_ct_net(ct));
	return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
		      t->dst.protonum ^ seed);
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
		    const struct nf_nat_l4proto *l4proto,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !l3proto->in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	    l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
			      &range->min_proto, &range->max_proto))
		return 1;

	return 0;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
			       const void *obj)
{
	const struct nf_nat_conn_key *key = arg->key;
	const struct nf_conn *ct = obj;

	return same_src(ct, key->tuple) &&
	       net_eq(nf_ct_net(ct), key->net) &&
	       nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL);
}

static struct rhashtable_params nf_nat_bysource_params = {
	.head_offset = offsetof(struct nf_conn, nat_bysource),
	.obj_hashfn = nf_nat_bysource_hash,
	.obj_cmpfn = nf_nat_bysource_cmp,
	.nelem_hint = 256,
	.min_size = 1024,
	.nulls_base = (1U << RHT_BASE_SHIFT),
};

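/* NAT keeps a second index over conntracks, keyed by the original source
 * (see nf_nat_bysource_hash() above), so that a new connection from the
 * same source can reuse an existing mapping instead of allocating a
 * fresh one.
 */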
/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_nat_l3proto *l3proto,
		     const struct nf_nat_l4proto *l4proto,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	const struct nf_conn *ct;
	struct nf_nat_conn_key key = {
		.net = net,
		.tuple = tuple,
		.zone = zone
	};

	ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key,
				    nf_nat_bysource_params);
	if (!ct)
		return 0;

	nf_ct_invert_tuplepr(result,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	result->dst = tuple->dst;

	return in_range(l3proto, l4proto, result, range);
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping? Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist = maxip - minip + 1;
		} else {
			minip = 0;
			dist = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
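
/* Illustrative example (not from the original source): for an IPv4 SRC
 * map onto 10.0.0.1-10.0.0.4, max is 0, so a single loop pass computes
 * minip = 0x0a000001 and dist = 4, and the new address becomes
 * htonl(0x0a000001 + reciprocal_scale(j, 4)), i.e. one of the four
 * addresses, chosen consistently per client by the jhash2() above.
 */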

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet.
 */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
	l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
					orig_tuple->dst.protonum);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(l3proto, l4proto, orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				goto out;
			}
		} else if (find_appropriate_src(net, zone, l3proto, l4proto,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				goto out;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (l4proto->in_range(tuple, maniptype,
					      &range->min_proto,
					      &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

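/* Establish an SRC or DST NAT binding for @ct as constrained by @range.
 * Returns an NF verdict: NF_ACCEPT on success (the conntrack's reply
 * tuple is rewritten and, for SRC manips, the entry is added to the
 * bysource table), or NF_DROP if that bysource insertion fails.
 */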
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;

	/* nat helper or nfctnetlink also setup binding */
	nat = nf_ct_nat_ext_add(ct);
	if (nat == NULL)
		return NF_ACCEPT;

	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
		     maniptype == NF_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct))
			nfct_seqadj_ext_add(ct);
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		int err;

		err = rhashtable_insert_fast(&nf_nat_bysource_table,
					     &ct->nat_bysource,
					     nf_nat_bysource_params);
		if (err)
			return NF_DROP;
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
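
/* Usage sketch (illustrative only, not part of this file): roughly how a
 * masquerade-style SRC NAT target builds a single-IP range and hands it
 * to nf_nat_setup_info().  "example_snat" and "newsrc" are hypothetical;
 * the snippet assumes this file's headers.
 */
#if 0
static unsigned int example_snat(struct nf_conn *ct, __be32 newsrc)
{
	struct nf_nat_range newrange;

	memset(&newrange, 0, sizeof(newrange));
	newrange.flags	     = NF_NAT_RANGE_MAP_IPS;	/* map address only */
	newrange.min_addr.ip = newsrc;			/* single-IP range */
	newrange.max_addr.ip = newsrc;

	/* Hand the mapping to the core; returns NF_ACCEPT or NF_DROP. */
	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
#endif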

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		l3proto = __nf_nat_l3proto_find(target.src.l3num);
		l4proto = __nf_nat_l4proto_find(target.src.l3num,
						target.dst.protonum);
		if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
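
/* Note on the statusbit inversion above: on an SNAT'ed connection
 * (IPS_SRC_NAT set), an original-direction packet has its source
 * rewritten at POST_ROUTING, while a reply packet needs its
 * *destination* rewritten back at PRE_ROUTING; XOR-ing with
 * IPS_NAT_MASK flips SRC<->DST for the reply direction.
 */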

struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nf_nat_proto_remove(ct, data))
		return 1;

	if (!nat)
		return 0;

	/* This netns is being destroyed, and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	if (!del_timer(&ct->timeout))
		return 1;

	ct->status &= ~IPS_NAT_DONE_MASK;

	rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
			       nf_nat_bysource_params);

	add_timer(&ct->timeout);

	/* don't delete conntrack. Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
		.l4proto = l4proto,
	};
	struct net *net;

	rtnl_lock();
	for_each_net(net)
		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
	rtnl_unlock();
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};
	struct net *net;

	rtnl_lock();
	for_each_net(net)
		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
	rtnl_unlock();
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	const struct nf_nat_l4proto **l4protos;
	unsigned int i;
	int ret = 0;

	mutex_lock(&nf_nat_proto_mutex);
	if (nf_nat_l4protos[l3proto] == NULL) {
		l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
				   GFP_KERNEL);
		if (l4protos == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < IPPROTO_MAX; i++)
			RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

		/* Before making proto_array visible to lockless readers,
		 * we must make sure its content is committed to memory.
		 */
		smp_wmb();

		nf_nat_l4protos[l3proto] = l4protos;
	}

	if (rcu_dereference_protected(
			nf_nat_l4protos[l3proto][l4proto->l4proto],
			lockdep_is_held(&nf_nat_proto_mutex)
			) != &nf_nat_l4proto_unknown) {
		ret = -EBUSY;
		goto out;
	}
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
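
/* Illustrative sketch (not part of this file): how a protocol module
 * registers its NAT handler for IPv4, modeled on the in-tree DCCP module
 * (nf_nat_proto_dccp.c); nf_nat_l4proto_dccp stands in for whatever
 * struct nf_nat_l4proto the module provides.
 */
#if 0
static int __init example_nat_proto_init(void)
{
	/* Fails with -EBUSY if a handler is already registered. */
	return nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
}
#endif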

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
			 &nf_nat_l4proto_unknown);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	int err;

	err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
	if (err < 0)
		return err;

	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
			 &nf_nat_l4proto_tcp);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
			 &nf_nat_l4proto_udp);
	mutex_unlock(&nf_nat_proto_mutex);

	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
	nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (!nat)
		return;

	rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
			       nf_nat_bysource_params);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_l4proto *l4proto;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
	if (err < 0)
		return err;

	l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->nlattr_to_range)
		err = l4proto->nlattr_to_range(tb, range);

	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by the time we call
	 * nf_nat_setup_info to attach the null binding, otherwise
	 * this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip);

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

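/* Note: nf_nat_setup_info() only returns NF_DROP when the bysource
 * rhashtable insertion fails, so nfnetlink_parse_nat_setup() above maps
 * that verdict to -ENOMEM for its netlink caller.
 */
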
static void __net_exit nf_nat_net_exit(struct net *net)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
}

static struct pernet_operations nf_nat_net_ops = {
	.exit = nf_nat_net_exit,
};

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
	int ret;

	ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
	if (ret)
		return ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		rhashtable_destroy(&nf_nat_bysource_table);
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	nf_ct_helper_expectfn_register(&follow_master_nat);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;

 cleanup_extend:
	rhashtable_destroy(&nf_nat_bysource_table);
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	unsigned int i;

	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);

	rhashtable_destroy(&nf_nat_bysource_table);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);