/*
 * net/ipv4/netfilter/nf_nat_helper.c
 * (mirrored from the Ubuntu zesty kernel tree)
 */
1 /* ip_nat_helper.c - generic support functions for NAT helpers
2 *
3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 #include <linux/module.h>
11 #include <linux/kmod.h>
12 #include <linux/types.h>
13 #include <linux/timer.h>
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
16 #include <linux/udp.h>
17 #include <net/checksum.h>
18 #include <net/tcp.h>
19 #include <net/route.h>
20
21 #include <linux/netfilter_ipv4.h>
22 #include <net/netfilter/nf_conntrack.h>
23 #include <net/netfilter/nf_conntrack_helper.h>
24 #include <net/netfilter/nf_conntrack_ecache.h>
25 #include <net/netfilter/nf_conntrack_expect.h>
26 #include <net/netfilter/nf_nat.h>
27 #include <net/netfilter/nf_nat_protocol.h>
28 #include <net/netfilter/nf_nat_core.h>
29 #include <net/netfilter/nf_nat_helper.h>
30
/* Dump the before/after offsets and correction position of one
 * struct nf_nat_seq for debugging.
 *
 * Wrapped in do { } while (0) so the macro acts as a single statement
 * (safe inside an unbraced if/else), and defined without a trailing
 * semicolon so that "DUMP_OFFSET(x);" does not expand to a stray empty
 * statement.  The argument is parenthesized against operator-precedence
 * surprises.
 */
#define DUMP_OFFSET(x) \
	do { \
		pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
			 (x)->offset_before, (x)->offset_after, \
			 (x)->correction_pos); \
	} while (0)

/* Serializes updates to the per-conntrack nf_nat_seq offsets
 * (see adjust_tcp_sequence()). */
static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
36
37 /* Setup TCP sequence correction given this change at this sequence */
38 static inline void
39 adjust_tcp_sequence(u32 seq,
40 int sizediff,
41 struct nf_conn *ct,
42 enum ip_conntrack_info ctinfo)
43 {
44 int dir;
45 struct nf_nat_seq *this_way, *other_way;
46 struct nf_conn_nat *nat = nfct_nat(ct);
47
48 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n", seq, seq);
49
50 dir = CTINFO2DIR(ctinfo);
51
52 this_way = &nat->seq[dir];
53 other_way = &nat->seq[!dir];
54
55 pr_debug("nf_nat_resize_packet: Seq_offset before: ");
56 DUMP_OFFSET(this_way);
57
58 spin_lock_bh(&nf_nat_seqofs_lock);
59
60 /* SYN adjust. If it's uninitialized, or this is after last
61 * correction, record it: we don't handle more than one
62 * adjustment in the window, but do deal with common case of a
63 * retransmit */
64 if (this_way->offset_before == this_way->offset_after ||
65 before(this_way->correction_pos, seq)) {
66 this_way->correction_pos = seq;
67 this_way->offset_before = this_way->offset_after;
68 this_way->offset_after += sizediff;
69 }
70 spin_unlock_bh(&nf_nat_seqofs_lock);
71
72 pr_debug("nf_nat_resize_packet: Seq_offset after: ");
73 DUMP_OFFSET(this_way);
74 }
75
76 /* Frobs data inside this packet, which is linear. */
77 static void mangle_contents(struct sk_buff *skb,
78 unsigned int dataoff,
79 unsigned int match_offset,
80 unsigned int match_len,
81 const char *rep_buffer,
82 unsigned int rep_len)
83 {
84 unsigned char *data;
85
86 BUG_ON(skb_is_nonlinear(skb));
87 data = skb_network_header(skb) + dataoff;
88
89 /* move post-replacement */
90 memmove(data + match_offset + rep_len,
91 data + match_offset + match_len,
92 skb->tail - (skb->network_header + dataoff +
93 match_offset + match_len));
94
95 /* insert data from buffer */
96 memcpy(data + match_offset, rep_buffer, rep_len);
97
98 /* update skb info */
99 if (rep_len > match_len) {
100 pr_debug("nf_nat_mangle_packet: Extending packet by "
101 "%u from %u bytes\n", rep_len - match_len, skb->len);
102 skb_put(skb, rep_len - match_len);
103 } else {
104 pr_debug("nf_nat_mangle_packet: Shrinking packet from "
105 "%u from %u bytes\n", match_len - rep_len, skb->len);
106 __skb_trim(skb, skb->len + rep_len - match_len);
107 }
108
109 /* fix IP hdr checksum information */
110 ip_hdr(skb)->tot_len = htons(skb->len);
111 ip_send_check(ip_hdr(skb));
112 }
113
114 /* Unusual, but possible case. */
115 static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
116 {
117 if (skb->len + extra > 65535)
118 return 0;
119
120 if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
121 return 0;
122
123 return 1;
124 }
125
/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Replaces match_len bytes at match_offset (relative to the start of
 * the TCP payload) with the rep_len bytes at rep_buffer.  Returns 1 on
 * success, 0 on failure (skb not writable or not enlargeable).  When
 * the payload size changes, a sequence-number correction is recorded
 * on the conntrack for later packets.
 */
int
nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	/* Linearize and make the whole packet writable before mangling. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Growing: make sure there is room for the extra bytes. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	/* L4 (TCP header + payload) length before/after the mangle. */
	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - iph->ihl*4;
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Recompute the TCP checksum; hand it to hardware when the
		 * device supports IPv4 checksum offload and the packet is
		 * not destined for the local stack. */
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct tcphdr, check);
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial(tcph,
								datalen, 0));
		}
	} else
		/* Checksum completed later; just patch the pseudo-header
		 * length difference into the partial checksum. */
		inet_proto_csum_replace2(&tcph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	if (rep_len != match_len) {
		/* Payload size changed: record the seq offset so the rest
		 * of the connection can be corrected. */
		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
		adjust_tcp_sequence(ntohl(tcph->seq),
				    (int)rep_len - (int)match_len,
				    ct, ctinfo);
		/* Tell TCP window tracking about seq change */
		nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
					ct, CTINFO2DIR(ctinfo));

		nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
	}
	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
201
/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 * should be fairly easy to do.
 *
 * Returns 1 on success, 0 on failure (packet too short for the claimed
 * match region, not writable, or not enlargeable).
 */
int
nf_nat_mangle_udp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* UDP helpers might accidentally mangle the wrong packet */
	iph = ip_hdr(skb);
	if (skb->len < iph->ihl*4 + sizeof(*udph) +
			       match_offset + match_len)
		return 0;

	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Growing: make sure there is room for the extra bytes. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	/* Re-read the header pointer: the writable/enlarge calls above may
	 * have reallocated the skb data. */
	iph = ip_hdr(skb);
	udph = (void *)iph + iph->ihl*4;

	/* L4 (UDP header + payload) length before/after the mangle. */
	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = skb->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated */
	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
		return 1;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Recompute the UDP checksum; offload to hardware when the
		 * device supports it and the packet is not local. */
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct udphdr, check);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 datalen, IPPROTO_UDP,
							 0);
		} else {
			udph->check = 0;
			udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							datalen, IPPROTO_UDP,
							csum_partial(udph,
								     datalen, 0));
			/* A computed zero checksum would mean "no checksum"
			 * on the wire; substitute the all-ones form. */
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
	} else
		/* Checksum completed later; patch the pseudo-header length
		 * difference into the partial checksum. */
		inet_proto_csum_replace2(&udph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
282
283 /* Adjust one found SACK option including checksum correction */
284 static void
285 sack_adjust(struct sk_buff *skb,
286 struct tcphdr *tcph,
287 unsigned int sackoff,
288 unsigned int sackend,
289 struct nf_nat_seq *natseq)
290 {
291 while (sackoff < sackend) {
292 struct tcp_sack_block_wire *sack;
293 __be32 new_start_seq, new_end_seq;
294
295 sack = (void *)skb->data + sackoff;
296 if (after(ntohl(sack->start_seq) - natseq->offset_before,
297 natseq->correction_pos))
298 new_start_seq = htonl(ntohl(sack->start_seq)
299 - natseq->offset_after);
300 else
301 new_start_seq = htonl(ntohl(sack->start_seq)
302 - natseq->offset_before);
303
304 if (after(ntohl(sack->end_seq) - natseq->offset_before,
305 natseq->correction_pos))
306 new_end_seq = htonl(ntohl(sack->end_seq)
307 - natseq->offset_after);
308 else
309 new_end_seq = htonl(ntohl(sack->end_seq)
310 - natseq->offset_before);
311
312 pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
313 ntohl(sack->start_seq), new_start_seq,
314 ntohl(sack->end_seq), new_end_seq);
315
316 inet_proto_csum_replace4(&tcph->check, skb,
317 sack->start_seq, new_start_seq, 0);
318 inet_proto_csum_replace4(&tcph->check, skb,
319 sack->end_seq, new_end_seq, 0);
320 sack->start_seq = new_start_seq;
321 sack->end_seq = new_end_seq;
322 sackoff += sizeof(*sack);
323 }
324 }
325
/* TCP SACK sequence number adjustment.
 *
 * Scans the TCP option area for SACK options and rewrites their
 * sequence numbers by the reply-direction NAT offsets (a SACK
 * acknowledges the peer's data, hence nat->seq[!dir]).  Returns 1 on
 * success or when no adjustment is needed, 0 when the option area is
 * not writable or is malformed.
 */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	/* Option bytes run from the end of the fixed TCP header to doff*4. */
	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			/* Only well-formed SACK options: at least one block
			 * and a whole number of 8-byte blocks. */
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
370
/* TCP sequence number adjustment. Returns 1 on success, 0 on failure.
 *
 * Rewrites this packet's seq by this direction's NAT offset and its
 * ack_seq by the other direction's offset, fixes the TCP checksum
 * incrementally, adjusts any SACK options, and informs TCP window
 * tracking of the new numbers.
 */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
	/* Outgoing seq: segments past the correction point use the
	 * post-change offset, earlier ones (retransmits) the old one. */
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
	else
		newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);

	/* Incoming ack acknowledges the peer's (adjusted) data, so undo
	 * the other direction's offset. */
	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
	else
		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);

	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
		return 0;

	nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir);

	return 1;
}
420
421 /* Setup NAT on this expected conntrack so it follows master. */
422 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
423 void nf_nat_follow_master(struct nf_conn *ct,
424 struct nf_conntrack_expect *exp)
425 {
426 struct nf_nat_range range;
427
428 /* This must be a fresh one. */
429 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
430
431 /* Change src to where master sends to */
432 range.flags = IP_NAT_RANGE_MAP_IPS;
433 range.min_ip = range.max_ip
434 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
435 nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
436
437 /* For DST manip, map port here to where it's expected. */
438 range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
439 range.min = range.max = exp->saved_proto;
440 range.min_ip = range.max_ip
441 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
442 nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
443 }
444 EXPORT_SYMBOL(nf_nat_follow_master);