net/ipv4/netfilter/nf_nat_l3proto_ipv4.c

/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/secure_seq.h>
#include <net/checksum.h>
#include <net/route.h>
#include <net/ip.h>

#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>

static const struct nf_nat_l3proto nf_nat_l3proto_ipv4;

#ifdef CONFIG_XFRM
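/* Fill in the IPv4 flow key used for XFRM policy lookups from the
 * conntrack tuple of @dir, so that IPsec sees the addresses and ports
 * the packet carries after NAT.  @statusbit (IPS_SRC_NAT or IPS_DST_NAT)
 * selects whether the destination side was translated; the bit is then
 * flipped to handle the source side the same way.
 */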
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
                                       const struct nf_conn *ct,
                                       enum ip_conntrack_dir dir,
                                       unsigned long statusbit,
                                       struct flowi *fl)
{
        const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
        struct flowi4 *fl4 = &fl->u.ip4;

        if (ct->status & statusbit) {
                fl4->daddr = t->dst.u3.ip;
                if (t->dst.protonum == IPPROTO_TCP ||
                    t->dst.protonum == IPPROTO_UDP ||
                    t->dst.protonum == IPPROTO_UDPLITE ||
                    t->dst.protonum == IPPROTO_DCCP ||
                    t->dst.protonum == IPPROTO_SCTP)
                        fl4->fl4_dport = t->dst.u.all;
        }

        statusbit ^= IPS_NAT_MASK;

        if (ct->status & statusbit) {
                fl4->saddr = t->src.u3.ip;
                if (t->dst.protonum == IPPROTO_TCP ||
                    t->dst.protonum == IPPROTO_UDP ||
                    t->dst.protonum == IPPROTO_UDPLITE ||
                    t->dst.protonum == IPPROTO_DCCP ||
                    t->dst.protonum == IPPROTO_SCTP)
                        fl4->fl4_sport = t->src.u.all;
        }
}
#endif /* CONFIG_XFRM */

static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
                                 const struct nf_nat_range *range)
{
        return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
               ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
}

static u32 nf_nat_ipv4_secure_port(const struct nf_conntrack_tuple *t,
                                   __be16 dport)
{
        return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport);
}

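/* Rewrite one end of the packet to match @target: the l4proto callback
 * mangles the transport header first, then the source or destination
 * address in the IP header is replaced and the IP header checksum is
 * updated incrementally with csum_replace4().
 */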
static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
                                  unsigned int iphdroff,
                                  const struct nf_nat_l4proto *l4proto,
                                  const struct nf_conntrack_tuple *target,
                                  enum nf_nat_manip_type maniptype)
{
        struct iphdr *iph;
        unsigned int hdroff;

        if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
                return false;

        iph = (void *)skb->data + iphdroff;
        hdroff = iphdroff + iph->ihl * 4;

        if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff,
                                target, maniptype))
                return false;
        iph = (void *)skb->data + iphdroff;

        if (maniptype == NF_NAT_MANIP_SRC) {
                csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
                iph->saddr = target->src.u3.ip;
        } else {
                csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
                iph->daddr = target->dst.u3.ip;
        }
        return true;
}

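/* Incrementally adjust a transport-layer checksum (*check) for the
 * address change implied by @t and @maniptype, taking the old address
 * still present in the IP header at @iphdroff.
 */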
static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
                                    unsigned int iphdroff, __sum16 *check,
                                    const struct nf_conntrack_tuple *t,
                                    enum nf_nat_manip_type maniptype)
{
        struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
        __be32 oldip, newip;

        if (maniptype == NF_NAT_MANIP_SRC) {
                oldip = iph->saddr;
                newip = t->src.u3.ip;
        } else {
                oldip = iph->daddr;
                newip = t->dst.u3.ip;
        }
        inet_proto_csum_replace4(check, skb, oldip, newip, true);
}

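/* Recompute a transport checksum after the payload length has changed.
 * If the skb is not already CHECKSUM_PARTIAL, switch it over and seed
 * the pseudo-header checksum so the final sum is filled in later;
 * otherwise replace the old length with the new one in the existing
 * checksum.
 */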
static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
                                    u8 proto, void *data, __sum16 *check,
                                    int datalen, int oldlen)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                const struct iphdr *iph = ip_hdr(skb);

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
                        ip_hdrlen(skb);
                skb->csum_offset = (void *)check - data;
                *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
                                            proto, 0);
        } else
                inet_proto_csum_replace2(check, skb,
                                         htons(oldlen), htons(datalen), true);
}

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
                                       struct nf_nat_range *range)
{
        if (tb[CTA_NAT_V4_MINIP]) {
                range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
                range->flags |= NF_NAT_RANGE_MAP_IPS;
        }

        if (tb[CTA_NAT_V4_MAXIP])
                range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
        else
                range->max_addr.ip = range->min_addr.ip;

        return 0;
}
#endif

static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
        .l3proto          = NFPROTO_IPV4,
        .in_range         = nf_nat_ipv4_in_range,
        .secure_port      = nf_nat_ipv4_secure_port,
        .manip_pkt        = nf_nat_ipv4_manip_pkt,
        .csum_update      = nf_nat_ipv4_csum_update,
        .csum_recalc      = nf_nat_ipv4_csum_recalc,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range  = nf_nat_ipv4_nlattr_to_range,
#endif
#ifdef CONFIG_XFRM
        .decode_session   = nf_nat_ipv4_decode_session,
#endif
};

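/* NAT an ICMP error packet that relates to an existing connection: the
 * embedded (inner) packet is mangled with the reverse tuple and the
 * opposite manip type, its ICMP checksum is recomputed if necessary,
 * and the outer IP header is then rewritten to look like a reply to an
 * incoming packet.  Returns 1 on success, 0 if the packet should be
 * dropped.
 */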
int nf_nat_icmp_reply_translation(struct sk_buff *skb,
                                  struct nf_conn *ct,
                                  enum ip_conntrack_info ctinfo,
                                  unsigned int hooknum)
{
        struct {
                struct icmphdr icmp;
                struct iphdr ip;
        } *inside;
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
        unsigned int hdrlen = ip_hdrlen(skb);
        const struct nf_nat_l4proto *l4proto;
        struct nf_conntrack_tuple target;
        unsigned long statusbit;

        NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);

        if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
                return 0;
        if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
                return 0;

        inside = (void *)skb->data + hdrlen;
        if (inside->icmp.type == ICMP_REDIRECT) {
                if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
                        return 0;
                if (ct->status & IPS_NAT_MASK)
                        return 0;
        }

        if (manip == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply direction */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        if (!(ct->status & statusbit))
                return 1;

        l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol);
        if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
                                   l4proto, &ct->tuplehash[!dir].tuple, !manip))
                return 0;

        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Reloading "inside" here since manip_pkt may reallocate */
                inside = (void *)skb->data + hdrlen;
                inside->icmp.checksum = 0;
                inside->icmp.checksum =
                        csum_fold(skb_checksum(skb, hdrlen,
                                               skb->len - hdrlen, 0));
        }

        /* Change outer to look like the reply to an incoming packet */
        nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
        l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, 0);
        if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip))
                return 0;

        return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

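/* Common body of the IPv4 NAT hooks: look up the conntrack entry, set
 * up a NAT binding for new connections through the @do_chain callback
 * supplied by the caller (falling back to a null binding), and finally
 * mangle the packet according to the binding stored in the conntrack
 * entry.
 */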
unsigned int
nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
               const struct nf_hook_state *state,
               unsigned int (*do_chain)(void *priv,
                                        struct sk_buff *skb,
                                        const struct nf_hook_state *state,
                                        struct nf_conn *ct))
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conn_nat *nat;
        /* maniptype == SRC for postrouting. */
        enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

        /* We never see fragments: conntrack defrags on pre-routing
         * and local-out, and nf_nat_out protects post-routing.
         */
        NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));

        ct = nf_ct_get(skb, &ctinfo);
        /* Can't track? It's not due to stress, or conntrack would
         * have dropped it. Hence it's the user's responsibility to
         * packet filter it out, or implement conntrack/NAT for that
         * protocol. 8) --RR
         */
        if (!ct)
                return NF_ACCEPT;

        /* Don't try to NAT if this packet is not conntracked */
        if (nf_ct_is_untracked(ct))
                return NF_ACCEPT;

        nat = nf_ct_nat_ext_add(ct);
        if (nat == NULL)
                return NF_ACCEPT;

        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
                if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
                                                           state->hook))
                                return NF_DROP;
                        else
                                return NF_ACCEPT;
                }
                /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
        case IP_CT_NEW:
                /* Seen it before? This can happen for loopback, retrans,
                 * or local packets.
                 */
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;

                        ret = do_chain(priv, skb, state, ct);
                        if (ret != NF_ACCEPT)
                                return ret;

                        if (nf_nat_initialized(ct, HOOK2MANIP(state->hook)))
                                break;

                        ret = nf_nat_alloc_null_binding(ct, state->hook);
                        if (ret != NF_ACCEPT)
                                return ret;
                } else {
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
                        if (nf_nat_oif_changed(state->hook, ctinfo, nat,
                                               state->out))
                                goto oif_changed;
                }
                break;

        default:
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
                if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
                        goto oif_changed;
        }

        return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
        nf_ct_kill_acct(ct, ctinfo, skb);
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);

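/* PRE_ROUTING wrapper: if DNAT rewrote the destination address, drop
 * any cached dst entry so the packet is looked up again for its new
 * destination.
 */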
unsigned int
nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
               const struct nf_hook_state *state,
               unsigned int (*do_chain)(void *priv,
                                        struct sk_buff *skb,
                                        const struct nf_hook_state *state,
                                        struct nf_conn *ct))
{
        unsigned int ret;
        __be32 daddr = ip_hdr(skb)->daddr;

        ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            daddr != ip_hdr(skb)->daddr)
                skb_dst_drop(skb);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);

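/* POST_ROUTING wrapper: when the source address or port was translated
 * and the packet has not already been through XFRM, redo the IPsec
 * policy lookup (nf_xfrm_me_harder) so a policy matching the new
 * source can still be applied.
 */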
unsigned int
nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
                const struct nf_hook_state *state,
                unsigned int (*do_chain)(void *priv,
                                         struct sk_buff *skb,
                                         const struct nf_hook_state *state,
                                         struct nf_conn *ct))
{
#ifdef CONFIG_XFRM
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        int err;
#endif
        unsigned int ret;

        /* root is playing with raw sockets. */
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;

        ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
#ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

                if ((ct->tuplehash[dir].tuple.src.u3.ip !=
                     ct->tuplehash[!dir].tuple.dst.u3.ip) ||
                    (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
                     ct->tuplehash[dir].tuple.src.u.all !=
                     ct->tuplehash[!dir].tuple.dst.u.all)) {
                        err = nf_xfrm_me_harder(state->net, skb, AF_INET);
                        if (err < 0)
                                ret = NF_DROP_ERR(err);
                }
        }
#endif
        return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);

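/* LOCAL_OUT wrapper: if DNAT changed the destination address of a
 * locally generated packet, re-route it (ip_route_me_harder); under
 * CONFIG_XFRM, a destination port change alone also triggers a fresh
 * IPsec policy lookup.
 */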
unsigned int
nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
                     const struct nf_hook_state *state,
                     unsigned int (*do_chain)(void *priv,
                                              struct sk_buff *skb,
                                              const struct nf_hook_state *state,
                                              struct nf_conn *ct))
{
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        unsigned int ret;
        int err;

        /* root is playing with raw sockets. */
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;

        ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

                if (ct->tuplehash[dir].tuple.dst.u3.ip !=
                    ct->tuplehash[!dir].tuple.src.u3.ip) {
                        err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
                        if (err < 0)
                                ret = NF_DROP_ERR(err);
                }
#ifdef CONFIG_XFRM
                else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
                         ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
                         ct->tuplehash[dir].tuple.dst.u.all !=
                         ct->tuplehash[!dir].tuple.src.u.all) {
                        err = nf_xfrm_me_harder(state->net, skb, AF_INET);
                        if (err < 0)
                                ret = NF_DROP_ERR(err);
                }
#endif
        }
        return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_local_fn);

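/* Register the ICMP l4proto and then the IPv4 l3proto; unwind in the
 * opposite order on failure and on module unload.
 */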
static int __init nf_nat_l3proto_ipv4_init(void)
{
        int err;

        err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
        if (err < 0)
                goto err1;
        err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
        if (err < 0)
                goto err2;
        return err;

err2:
        nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
err1:
        return err;
}

static void __exit nf_nat_l3proto_ipv4_exit(void)
{
        nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
        nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
}

MODULE_LICENSE("GPL");
MODULE_ALIAS("nf-nat-" __stringify(AF_INET));

module_init(nf_nat_l3proto_ipv4_init);
module_exit(nf_nat_l3proto_ipv4_exit);