/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
						__read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
	return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		return;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

	return reciprocal_scale(hash, nf_nat_htable_size);
}

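/* Illustrative note (not from the original source): reciprocal_scale()
 * maps the 32-bit hash into [0, nf_nat_htable_size) without a modulo,
 * computing (u32)(((u64)hash * nf_nat_htable_size) >> 32).  For example,
 * with nf_nat_htable_size == 4096, hash == 0x80000000 lands in bucket
 * 2048.
 */
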
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

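/* Illustrative example (not from the original source): when testing a
 * proposed SNAT tuple 203.0.113.1:5000 -> 8.8.8.8:53, the inverted
 * tuple is 8.8.8.8:53 -> 203.0.113.1:5000; if another connection
 * already claims that incoming tuple, the proposed mapping would
 * collide and nf_nat_used_tuple() reports it as taken.
 */
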
/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range.
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
		    const struct nf_nat_l4proto *l4proto,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !l3proto->in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	    l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
			      &range->min_proto, &range->max_proto))
		return 1;

	return 0;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_nat_l3proto *l3proto,
		     const struct nf_nat_l4proto *l4proto,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(net, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(l3proto, l4proto, result, range))
				return 1;
		}
	}
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}

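/* Worked example (illustrative, not from the original source): for an
 * IPv4 range 10.0.0.1-10.0.0.4 the loop runs once (max == 0), with
 * minip = ntohl(10.0.0.1), maxip = ntohl(10.0.0.4) and dist == 4, so
 * reciprocal_scale(j, 4) selects one of the four addresses.  Because j
 * is derived from the source address, the same client is consistently
 * mapped to the same external IP.
 */
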
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
	l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
					orig_tuple->dst.protonum);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(l3proto, l4proto, orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				goto out;
			}
		} else if (find_appropriate_src(net, zone, l3proto, l4proto,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				goto out;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (l4proto->in_range(tuple, maniptype,
					      &range->min_proto,
					      &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last change: get protocol to try to obtain unique tuple. */
	l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
		     maniptype == NF_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;

		srchash = hash_by_src(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		spin_lock_bh(&nf_nat_lock);
		hlist_add_head_rcu(&ct->nat_bysource,
				   &nf_nat_bysource[srchash]);
		spin_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

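/* Illustrative caller sketch (an assumption, not part of this file):
 * a source-NAT target such as masquerade ends up here with a
 * single-address range, roughly as follows, where "newsrc" is a
 * hypothetical address chosen from the outgoing interface:
 *
 *	struct nf_nat_range range = {
 *		.flags    = NF_NAT_RANGE_MAP_IPS,
 *		.min_addr = newsrc,
 *		.max_addr = newsrc,
 *	};
 *	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 */
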
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

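/* Note (illustrative, based on the HOOK2MANIP definition in
 * <net/netfilter/nf_nat.h>): NF_INET_POST_ROUTING and NF_INET_LOCAL_IN
 * map to NF_NAT_MANIP_SRC, while NF_INET_PRE_ROUTING and
 * NF_INET_LOCAL_OUT map to NF_NAT_MANIP_DST.
 */
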
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		l3proto = __nf_nat_l3proto_find(target.src.l3num);
		l4proto = __nf_nat_l4proto_find(target.src.l3num,
						target.dst.protonum);
		if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

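/* Worked example (illustrative, not from the original source): on
 * POST_ROUTING, mtype is NF_NAT_MANIP_SRC, so statusbit starts as
 * IPS_SRC_NAT.  For a reply packet, the XOR with IPS_NAT_MASK
 * (IPS_SRC_NAT | IPS_DST_NAT) flips it to IPS_DST_NAT: a connection
 * that was source-NATed on the way out needs its destination rewritten
 * on the way back.
 */
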
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	if ((ct->status & IPS_SRC_NAT_DONE) == 0)
		return 0;

	/* This netns is being destroyed, and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&ct->nat_bysource);
	spin_unlock_bh(&nf_nat_lock);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
		.l4proto = l4proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	const struct nf_nat_l4proto **l4protos;
	unsigned int i;
	int ret = 0;

	mutex_lock(&nf_nat_proto_mutex);
	if (nf_nat_l4protos[l3proto] == NULL) {
		l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
				   GFP_KERNEL);
		if (l4protos == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < IPPROTO_MAX; i++)
			RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

		/* Before making proto_array visible to lockless readers,
		 * we must make sure its content is committed to memory.
		 */
		smp_wmb();

		nf_nat_l4protos[l3proto] = l4protos;
	}

	if (rcu_dereference_protected(
			nf_nat_l4protos[l3proto][l4proto->l4proto],
			lockdep_is_held(&nf_nat_proto_mutex)
			) != &nf_nat_l4proto_unknown) {
		ret = -EBUSY;
		goto out;
	}
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);

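/* Illustrative usage (an assumption, not part of this file): a module
 * providing NAT for an extra transport protocol registers its ops per
 * family, roughly as below, where "my_l4proto" is a hypothetical
 * nf_nat_l4proto instance:
 *
 *	err = nf_nat_l4proto_register(NFPROTO_IPV4, &my_l4proto);
 *	if (err < 0)
 *		return err;
 */
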
/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
			 &nf_nat_l4proto_unknown);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	int err;

	err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
	if (err < 0)
		return err;

	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
			 &nf_nat_l4proto_tcp);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
			 &nf_nat_l4proto_udp);
#ifdef CONFIG_NF_NAT_PROTO_DCCP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP],
			 &nf_nat_l4proto_dccp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_SCTP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP],
			 &nf_nat_l4proto_sctp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE],
			 &nf_nat_l4proto_udplite);
#endif
	mutex_unlock(&nf_nat_proto_mutex);

	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
	nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one using conntrack by the time this called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE) {
		spin_lock_bh(&nf_nat_lock);
		hlist_del_rcu(&ct->nat_bysource);
		spin_unlock_bh(&nf_nat_lock);
	}
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_l4proto *l4proto;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
			       protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->nlattr_to_range)
		err = l4proto->nlattr_to_range(tb, range);

	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
	 * attach the null binding, otherwise this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
	int ret;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};
	unsigned int i;

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	synchronize_rcu();

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);

	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);