/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
                                                __read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
                                                __read_mostly;

struct nf_nat_conn_key {
        const struct net *net;
        const struct nf_conntrack_tuple *tuple;
        const struct nf_conntrack_zone *zone;
};

static struct rhltable nf_nat_bysource_table;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
        return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
        return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

#ifdef CONFIG_XFRM
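/* Fill in the xfrm flow key (@fl) from the conntrack attached to @skb,
 * using the NAT status bit that applies to the packet's direction, so
 * that policy lookups see the NATed addresses.
 */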
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        enum ip_conntrack_dir dir;
        unsigned long statusbit;
        u8 family;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL)
                return;

        family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(family);
        if (l3proto == NULL)
                goto out;

        dir = CTINFO2DIR(ctinfo);
        if (dir == IP_CT_DIR_ORIGINAL)
                statusbit = IPS_DST_NAT;
        else
                statusbit = IPS_SRC_NAT;

        l3proto->decode_session(skb, ct, dir, statusbit, fl);
out:
        rcu_read_unlock();
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
        struct flowi fl;
        unsigned int hh_len;
        struct dst_entry *dst;
        int err;

        err = xfrm_decode_session(skb, &fl, family);
        if (err < 0)
                return err;

        dst = skb_dst(skb);
        if (dst->xfrm)
                dst = ((struct xfrm_dst *)dst)->route;
        dst_hold(dst);

        dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
        if (IS_ERR(dst))
                return PTR_ERR(dst);

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
            pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
{
        const struct nf_conntrack_tuple *t;
        const struct nf_conn *ct = data;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        /* Original src, to ensure we map it consistently if poss. */

        seed ^= net_hash_mix(nf_ct_net(ct));
        return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
                      t->dst.protonum ^ seed);
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
{
        /* Conntrack tracking doesn't keep track of outgoing tuples; only
         * incoming ones.  NAT means they don't have a fixed mapping,
         * so we invert the tuple and look for the incoming reply.
         *
         * We could keep a separate hash if this proves too slow.
         */
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuplepr(&reply, tuple);
        return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range.
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
                    const struct nf_nat_l4proto *l4proto,
                    const struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range)
{
        /* If we are supposed to map IPs, then we must be in the
         * range specified, otherwise let this drag us onto a new src IP.
         */
        if (range->flags & NF_NAT_RANGE_MAP_IPS &&
            !l3proto->in_range(tuple, range))
                return 0;

        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
            l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
                              &range->min_proto, &range->max_proto))
                return 1;

        return 0;
}

static inline int
same_src(const struct nf_conn *ct,
         const struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_tuple *t;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
                nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
                t->src.u.all == tuple->src.u.all);
}

static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
                               const void *obj)
{
        const struct nf_nat_conn_key *key = arg->key;
        const struct nf_conn *ct = obj;

        if (!same_src(ct, key->tuple) ||
            !net_eq(nf_ct_net(ct), key->net) ||
            !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
                return 1;

        return 0;
}

static struct rhashtable_params nf_nat_bysource_params = {
        .head_offset = offsetof(struct nf_conn, nat_bysource),
        .obj_hashfn = nf_nat_bysource_hash,
        .obj_cmpfn = nf_nat_bysource_cmp,
        .nelem_hint = 256,
        .min_size = 1024,
};

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_nat_l3proto *l3proto,
                     const struct nf_nat_l4proto *l4proto,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
{
        const struct nf_conn *ct;
        struct nf_nat_conn_key key = {
                .net = net,
                .tuple = tuple,
                .zone = zone
        };
        struct rhlist_head *hl;

        hl = rhltable_lookup(&nf_nat_bysource_table, &key,
                             nf_nat_bysource_params);
        if (!hl)
                return 0;

        ct = container_of(hl, typeof(*ct), nat_bysource);

        nf_ct_invert_tuplepr(result,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        result->dst = tuple->dst;

        return in_range(l3proto, l4proto, result, range);
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
                    struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
{
        union nf_inet_addr *var_ipp;
        unsigned int i, max;
        /* Host order */
        u32 minip, maxip, j, dist;
        bool full_range;

        /* No IP mapping?  Do nothing. */
        if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
                return;

        if (maniptype == NF_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3;
        else
                var_ipp = &tuple->dst.u3;

        /* Fast path: only one choice. */
        if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
                *var_ipp = range->min_addr;
                return;
        }

        if (nf_ct_l3num(ct) == NFPROTO_IPV4)
                max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
        else
                max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway).  The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
         * like this), even across reboots.
         */
        j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
                   range->flags & NF_NAT_RANGE_PERSISTENT ?
                        0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

        full_range = false;
        for (i = 0; i <= max; i++) {
                /* If first bytes of the address are at the maximum, use the
                 * distance. Otherwise use the full range.
                 */
                if (!full_range) {
                        minip = ntohl((__force __be32)range->min_addr.all[i]);
                        maxip = ntohl((__force __be32)range->max_addr.all[i]);
                        dist  = maxip - minip + 1;
                } else {
                        minip = 0;
                        dist  = ~0;
                }

                var_ipp->all[i] = (__force __u32)
                        htonl(minip + reciprocal_scale(j, dist));
                if (var_ipp->all[i] != range->max_addr.all[i])
                        full_range = true;

                if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
                        j ^= (__force u32)tuple->dst.u3.all[i];
        }
}

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
                 const struct nf_nat_range *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
{
        const struct nf_conntrack_zone *zone;
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);

        zone = nf_ct_zone(ct);

        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
        l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
                                        orig_tuple->dst.protonum);

        /* 1) If this srcip/proto/src-proto-part is currently mapped,
         * and that same mapping gives a unique tuple within the given
         * range, use that.
         *
         * This is only required for source (ie. NAT/masq) mappings.
         * So far, we don't do local source mappings, so multiple
         * manips not an issue.
         */
        if (maniptype == NF_NAT_MANIP_SRC &&
            !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                /* try the original tuple first */
                if (in_range(l3proto, l4proto, orig_tuple, range)) {
                        if (!nf_nat_used_tuple(orig_tuple, ct)) {
                                *tuple = *orig_tuple;
                                goto out;
                        }
                } else if (find_appropriate_src(net, zone, l3proto, l4proto,
                                                orig_tuple, tuple, range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
                                goto out;
                }
        }

        /* 2) Select the least-used IP/proto combination in the given range */
        *tuple = *orig_tuple;
        find_best_ips_proto(zone, tuple, range, ct, maniptype);

        /* 3) The per-protocol part of the manip is made to map into
         * the range to make a unique tuple.
         */

        /* Only bother mapping if it's not already in range and unique */
        if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
                        if (l4proto->in_range(tuple, maniptype,
                                              &range->min_proto,
                                              &range->max_proto) &&
                            (range->min_proto.all == range->max_proto.all ||
                             !nf_nat_used_tuple(tuple, ct)))
                                goto out;
                } else if (!nf_nat_used_tuple(tuple, ct)) {
                        goto out;
                }
        }

        /* Last chance: get protocol to try to obtain unique tuple. */
        l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
        rcu_read_unlock();
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nfct_nat(ct);

        if (nat)
                return nat;

        if (!nf_ct_is_confirmed(ct))
                nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

        return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype)
{
        struct nf_conntrack_tuple curr_tuple, new_tuple;
        struct nf_conn_nat *nat;

        /* nat helper or nfctnetlink also setup binding */
        nat = nf_ct_nat_ext_add(ct);
        if (nat == NULL)
                return NF_ACCEPT;

        NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
                     maniptype == NF_NAT_MANIP_DST);
        BUG_ON(nf_nat_initialized(ct, maniptype));

        /* What we've got will look like inverse of reply. Normally
         * this is what is in the conntrack, except for prior
         * manipulations (future optimization: if num_manips == 0,
         * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
         */
        nf_ct_invert_tuplepr(&curr_tuple,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

        if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
                struct nf_conntrack_tuple reply;

                /* Alter conntrack table so will recognize replies. */
                nf_ct_invert_tuplepr(&reply, &new_tuple);
                nf_conntrack_alter_reply(ct, &reply);

                /* Non-atomic: we own this at the moment. */
                if (maniptype == NF_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;

                if (nfct_help(ct))
                        if (!nfct_seqadj_ext_add(ct))
                                return NF_DROP;
        }

        if (maniptype == NF_NAT_MANIP_SRC) {
                struct nf_nat_conn_key key = {
                        .net = nf_ct_net(ct),
                        .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                        .zone = nf_ct_zone(ct),
                };
                int err;

                err = rhltable_insert_key(&nf_nat_bysource_table,
                                          &key,
                                          &ct->nat_bysource,
                                          nf_nat_bysource_params);
                if (err)
                        return NF_DROP;
        }

        /* It's done. */
        if (maniptype == NF_NAT_MANIP_DST)
                ct->status |= IPS_DST_NAT_DONE;
        else
                ct->status |= IPS_SRC_NAT_DONE;

        return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
        /* Force range to this IP; let proto decide mapping for
         * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
         * Use reply in case it's already been mangled (eg local packet).
         */
        union nf_inet_addr ip =
                (manip == NF_NAT_MANIP_SRC ?
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
        struct nf_nat_range range = {
                .flags          = NF_NAT_RANGE_MAP_IPS,
                .min_addr       = ip,
                .max_addr       = ip,
        };
        return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
        return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

        if (mtype == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        /* Non-atomic: these bits don't change. */
        if (ct->status & statusbit) {
                struct nf_conntrack_tuple target;

                /* We are aiming to look like inverse of other direction. */
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

                l3proto = __nf_nat_l3proto_find(target.src.l3num);
                l4proto = __nf_nat_l4proto_find(target.src.l3num,
                                                target.dst.protonum);
                if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
                        return NF_DROP;
        }
        return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

struct nf_nat_proto_clean {
        u8      l3proto;
        u8      l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
        const struct nf_nat_proto_clean *clean = data;
        struct nf_conn_nat *nat = nfct_nat(i);

        if (!nat)
                return 0;

        if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
            (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;

        return i->status & IPS_NAT_MASK ? 1 : 0;
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
        struct nf_conn_nat *nat = nfct_nat(ct);

        if (nf_nat_proto_remove(ct, data))
                return 1;

        if (!nat)
                return 0;

        /* This netns is being destroyed, and conntrack has nat null binding.
         * Remove it from bysource hash, as the table will be freed soon.
         *
         * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
         * will delete entry from already-freed table.
         */
        ct->status &= ~IPS_NAT_DONE_MASK;
        rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
                        nf_nat_bysource_params);

        /* don't delete conntrack.  Although that would make things a lot
         * simpler, we'd end up flushing all conntracks on nat rmmod.
         */
        return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
                .l4proto = l4proto,
        };
        struct net *net;

        rtnl_lock();
        for_each_net(net)
                nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
        rtnl_unlock();
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
        };
        struct net *net;

        rtnl_lock();
        for_each_net(net)
                nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
        rtnl_unlock();
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
        const struct nf_nat_l4proto **l4protos;
        unsigned int i;
        int ret = 0;

        mutex_lock(&nf_nat_proto_mutex);
        if (nf_nat_l4protos[l3proto] == NULL) {
                l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
                                   GFP_KERNEL);
                if (l4protos == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                for (i = 0; i < IPPROTO_MAX; i++)
                        RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

                /* Before making proto_array visible to lockless readers,
                 * we must make sure its content is committed to memory.
                 */
                smp_wmb();

                nf_nat_l4protos[l3proto] = l4protos;
        }

        if (rcu_dereference_protected(
                        nf_nat_l4protos[l3proto][l4proto->l4proto],
                        lockdep_is_held(&nf_nat_proto_mutex)
                        ) != &nf_nat_l4proto_unknown) {
                ret = -EBUSY;
                goto out;
        }
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
        mutex_unlock(&nf_nat_proto_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
                         &nf_nat_l4proto_unknown);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
        int err;

        err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
        if (err < 0)
                return err;

        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
                         &nf_nat_l4proto_tcp);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
                         &nf_nat_l4proto_udp);
#ifdef CONFIG_NF_NAT_PROTO_DCCP
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP],
                         &nf_nat_l4proto_dccp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_SCTP
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP],
                         &nf_nat_l4proto_sctp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE],
                         &nf_nat_l4proto_udplite);
#endif
        mutex_unlock(&nf_nat_proto_mutex);

        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l3proto_clean(l3proto->l3proto);
        nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

        if (!nat)
                return;

        rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
                        nf_nat_bysource_params);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
        .len            = sizeof(struct nf_conn_nat),
        .align          = __alignof__(struct nf_conn_nat),
        .destroy        = nf_nat_cleanup_conntrack,
        .id             = NF_CT_EXT_NAT,
        .flags          = NF_CT_EXT_F_PREALLOC,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
        [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
        [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
                                     const struct nf_conn *ct,
                                     struct nf_nat_range *range)
{
        struct nlattr *tb[CTA_PROTONAT_MAX+1];
        const struct nf_nat_l4proto *l4proto;
        int err;

        err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
        if (err < 0)
                return err;

        l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->nlattr_to_range)
                err = l4proto->nlattr_to_range(tb, range);

        return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
        [CTA_NAT_V4_MINIP]      = { .type = NLA_U32 },
        [CTA_NAT_V4_MAXIP]      = { .type = NLA_U32 },
        [CTA_NAT_V6_MINIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_V6_MAXIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
                    const struct nf_conn *ct, struct nf_nat_range *range,
                    const struct nf_nat_l3proto *l3proto)
{
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;

        memset(range, 0, sizeof(*range));

        err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
        if (err < 0)
                return err;

        err = l3proto->nlattr_to_range(tb, range);
        if (err < 0)
                return err;

        if (!tb[CTA_NAT_PROTO])
                return 0;

        return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        struct nf_nat_range range;
        const struct nf_nat_l3proto *l3proto;
        int err;

        /* Should not happen, restricted to creating new conntracks
         * via ctnetlink.
         */
        if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
                return -EEXIST;

        /* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
         * attach the null binding, otherwise this may oops.
         */
        l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
        if (l3proto == NULL)
                return -EAGAIN;

        /* No NAT information has been passed, allocate the null-binding */
        if (attr == NULL)
                return __nf_nat_alloc_null_binding(ct, manip);

        err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
        if (err < 0)
                return err;

        return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        return -EOPNOTSUPP;
}
#endif

static void __net_exit nf_nat_net_exit(struct net *net)
{
        struct nf_nat_proto_clean clean = {};

        nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
}

static struct pernet_operations nf_nat_net_ops = {
        .exit = nf_nat_net_exit,
};

static struct nf_ct_helper_expectfn follow_master_nat = {
        .name           = "nat-follow-master",
        .expectfn       = nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
        int ret;

        ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
        if (ret)
                return ret;

        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
                rhltable_destroy(&nf_nat_bysource_table);
                printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
                return ret;
        }

        ret = register_pernet_subsys(&nf_nat_net_ops);
        if (ret < 0)
                goto cleanup_extend;

        nf_ct_helper_expectfn_register(&follow_master_nat);

        /* Initialize fake conntrack so that NAT will skip it */
        nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

        BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
                         nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
        BUG_ON(nf_nat_decode_session_hook != NULL);
        RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
        return 0;

 cleanup_extend:
        rhltable_destroy(&nf_nat_bysource_table);
        nf_ct_extend_unregister(&nat_extend);
        return ret;
}

static void __exit nf_nat_cleanup(void)
{
        unsigned int i;

        unregister_pernet_subsys(&nf_nat_net_ops);
        nf_ct_extend_unregister(&nat_extend);
        nf_ct_helper_expectfn_unregister(&follow_master_nat);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
        RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
        synchronize_rcu();

        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                kfree(nf_nat_l4protos[i]);

        rhltable_destroy(&nf_nat_bysource_table);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);