/*
 * This is a module which is used for setting the MSS option in TCP packets.
 *
 * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
 * Copyright (C) 2007 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
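/*
 * Typical usage, for illustration only: the target is normally attached
 * to TCP SYN packets in the mangle table, e.g.
 *
 *   iptables -t mangle -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
 *            -j TCPMSS --clamp-mss-to-pmtu
 */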
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <net/tcp.h>

#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter/xt_TCPMSS.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("Xtables: TCP Maximum Segment Size (MSS) adjustment");
MODULE_ALIAS("ipt_TCPMSS");
MODULE_ALIAS("ip6t_TCPMSS");

static inline unsigned int
optlen(const u_int8_t *opt, unsigned int offset)
{
        /* Beware zero-length options: make finite progress */
        if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
                return 1;
        else
                return opt[offset+1];
}

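/*
 * tcpmss_reverse_mtu - look up the MTU of the route back to the sender.
 *
 * Path-MTU clamping considers both directions: the MTU of the outgoing
 * route (dst_mtu(skb_dst(skb))) and the MTU of the route leading back to
 * the packet's source, computed here.  Returns ~0U when no reverse route
 * is found, so it never lowers the minimum taken by the caller.
 */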
static u_int32_t tcpmss_reverse_mtu(struct net *net,
                                    const struct sk_buff *skb,
                                    unsigned int family)
{
        struct flowi fl;
        const struct nf_afinfo *ai;
        struct rtable *rt = NULL;
        u_int32_t mtu = ~0U;

        if (family == PF_INET) {
                struct flowi4 *fl4 = &fl.u.ip4;
                memset(fl4, 0, sizeof(*fl4));
                fl4->daddr = ip_hdr(skb)->saddr;
        } else {
                struct flowi6 *fl6 = &fl.u.ip6;

                memset(fl6, 0, sizeof(*fl6));
                fl6->daddr = ipv6_hdr(skb)->saddr;
        }
        rcu_read_lock();
        ai = nf_get_afinfo(family);
        if (ai != NULL)
                ai->route(net, (struct dst_entry **)&rt, &fl, false);
        rcu_read_unlock();

        if (rt != NULL) {
                mtu = dst_mtu(&rt->dst);
                dst_release(&rt->dst);
        }
        return mtu;
}

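/*
 * tcpmss_mangle_packet - rewrite or insert the TCP MSS option.
 *
 * Return value convention, as used by the callers below:
 *   < 0  - malformed packet or allocation failure, caller should drop it
 *   0    - packet accepted unmodified, or MSS rewritten in place
 *   > 0  - number of bytes added to the TCP header (TCPOLEN_MSS); the
 *          caller must grow the IP length field accordingly.
 */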
static int
tcpmss_mangle_packet(struct sk_buff *skb,
                     const struct xt_action_param *par,
                     unsigned int family,
                     unsigned int tcphoff,
                     unsigned int minlen)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        struct tcphdr *tcph;
        int len, tcp_hdrlen;
        unsigned int i;
        __be16 oldval;
        u16 newmss;
        u8 *opt;

        /* This is a fragment, no TCP header is available */
        if (par->fragoff != 0)
                return 0;

        if (!skb_make_writable(skb, skb->len))
                return -1;

        len = skb->len - tcphoff;
        if (len < (int)sizeof(struct tcphdr))
                return -1;

        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        tcp_hdrlen = tcph->doff * 4;

        if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
                return -1;

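        /*
         * Clamp to the path MTU: take the smaller of the forward and
         * reverse route MTUs and subtract minlen (IP header plus minimal
         * TCP header) to get the largest MSS that still fits.
         */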
        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
                struct net *net = xt_net(par);
                unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
                unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);

                if (min_mtu <= minlen) {
                        net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
                                            min_mtu);
                        return -1;
                }
                newmss = min_mtu - minlen;
        } else
                newmss = info->mss;

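        /*
         * Walk the TCP options.  If an MSS option is already present,
         * only ever lower it to newmss and fix the checksum incrementally.
         */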
        opt = (u_int8_t *)tcph;
        for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
                if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
                        u_int16_t oldmss;

                        oldmss = (opt[i+2] << 8) | opt[i+3];

                        /* Never increase MSS, even when setting it, as
                         * doing so results in problems for hosts that rely
                         * on MSS being set correctly.
                         */
                        if (oldmss <= newmss)
                                return 0;

                        opt[i+2] = (newmss & 0xff00) >> 8;
                        opt[i+3] = newmss & 0x00ff;

                        inet_proto_csum_replace2(&tcph->check, skb,
                                                 htons(oldmss), htons(newmss),
                                                 false);
                        return 0;
                }
        }

        /* There is data after the header so the option can't be added
         * without moving it, and doing so may make the SYN packet
         * itself too large. Accept the packet unmodified instead.
         */
        if (len > tcp_hdrlen)
                return 0;

        /* tcph->doff has 4 bits, do not wrap it to 0 */
        if (tcp_hdrlen >= 15 * 4)
                return 0;

        /*
         * No MSS option was found, so add one.
         */
        if (skb_tailroom(skb) < TCPOLEN_MSS) {
                if (pskb_expand_head(skb, 0,
                                     TCPOLEN_MSS - skb_tailroom(skb),
                                     GFP_ATOMIC))
                        return -1;
                tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        }

        skb_put(skb, TCPOLEN_MSS);

        /*
         * IPv4: RFC 1122 states "If an MSS option is not received at
         * connection setup, TCP MUST assume a default send MSS of 536".
         * IPv6: RFC 2460 specifies a minimum MTU of 1280; after the 40-byte
         * IPv6 header and 20-byte TCP header, that leaves a default MSS
         * of 1220.
         * Since the peer sent no MSS option, never insert one larger than
         * these defaults.
         */
        if (xt_family(par) == NFPROTO_IPV4)
                newmss = min(newmss, (u16)536);
        else
                newmss = min(newmss, (u16)1220);

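        /*
         * Shift the existing options four bytes towards the tail to make
         * room for the new MSS option right after the fixed TCP header,
         * then patch the checksum: once for the larger TCP length in the
         * pseudo-header, once for the inserted option bytes, and once for
         * the updated data offset.
         */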
        opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
        memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));

        inet_proto_csum_replace2(&tcph->check, skb,
                                 htons(len), htons(len + TCPOLEN_MSS), true);
        opt[0] = TCPOPT_MSS;
        opt[1] = TCPOLEN_MSS;
        opt[2] = (newmss & 0xff00) >> 8;
        opt[3] = newmss & 0x00ff;

        inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);

        oldval = ((__be16 *)tcph)[6];
        tcph->doff += TCPOLEN_MSS/4;
        inet_proto_csum_replace2(&tcph->check, skb,
                                 oldval, ((__be16 *)tcph)[6], false);
        return TCPOLEN_MSS;
}

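/*
 * IPv4 target hook: mangle the MSS and, if the TCP header grew, adjust
 * tot_len and the IP header checksum to match.
 */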
static unsigned int
tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
        struct iphdr *iph = ip_hdr(skb);
        __be16 newlen;
        int ret;

        ret = tcpmss_mangle_packet(skb, par,
                                   PF_INET,
                                   iph->ihl * 4,
                                   sizeof(*iph) + sizeof(struct tcphdr));
        if (ret < 0)
                return NF_DROP;
        if (ret > 0) {
                iph = ip_hdr(skb);
                newlen = htons(ntohs(iph->tot_len) + ret);
                csum_replace2(&iph->check, iph->tot_len, newlen);
                iph->tot_len = newlen;
        }
        return XT_CONTINUE;
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
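/*
 * IPv6 target hook: skip any extension headers to find the TCP header,
 * then mangle the MSS and fix up payload_len (and skb->csum when the
 * packet carries CHECKSUM_COMPLETE).
 */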
static unsigned int
tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        u8 nexthdr;
        __be16 frag_off, oldlen, newlen;
        int tcphoff;
        int ret;

        nexthdr = ipv6h->nexthdr;
        tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
        ret = tcpmss_mangle_packet(skb, par,
                                   PF_INET6,
                                   tcphoff,
                                   sizeof(*ipv6h) + sizeof(struct tcphdr));
        if (ret < 0)
                return NF_DROP;
        if (ret > 0) {
                ipv6h = ipv6_hdr(skb);
                oldlen = ipv6h->payload_len;
                newlen = htons(ntohs(oldlen) + ret);
                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->csum = csum_add(csum_sub(skb->csum, oldlen),
                                             newlen);
                ipv6h->payload_len = newlen;
        }
        return XT_CONTINUE;
}
#endif

/* Must specify -p tcp --syn */
static inline bool find_syn_match(const struct xt_entry_match *m)
{
        const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;

        if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
            tcpinfo->flg_cmp & TCPHDR_SYN &&
            !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
                return true;

        return false;
}

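/*
 * checkentry for IPv4: reject --clamp-mss-to-pmtu outside the FORWARD,
 * OUTPUT and POSTROUTING hooks, and (except in nft compat mode) insist
 * that the rule also matches TCP SYN packets, since the MSS option is
 * only exchanged during connection setup.
 */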
static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        const struct ipt_entry *e = par->entryinfo;
        const struct xt_entry_match *ematch;

        if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                                (1 << NF_INET_LOCAL_OUT) |
                                (1 << NF_INET_POST_ROUTING))) != 0) {
                pr_info("path-MTU clamping only supported in "
                        "FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
        if (par->nft_compat)
                return 0;

        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
        pr_info("Only works on TCP SYN packets\n");
        return -EINVAL;
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        const struct ip6t_entry *e = par->entryinfo;
        const struct xt_entry_match *ematch;

        if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                                (1 << NF_INET_LOCAL_OUT) |
                                (1 << NF_INET_POST_ROUTING))) != 0) {
                pr_info("path-MTU clamping only supported in "
                        "FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
        if (par->nft_compat)
                return 0;

        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
        pr_info("Only works on TCP SYN packets\n");
        return -EINVAL;
}
#endif

static struct xt_target tcpmss_tg_reg[] __read_mostly = {
        {
                .family         = NFPROTO_IPV4,
                .name           = "TCPMSS",
                .checkentry     = tcpmss_tg4_check,
                .target         = tcpmss_tg4,
                .targetsize     = sizeof(struct xt_tcpmss_info),
                .proto          = IPPROTO_TCP,
                .me             = THIS_MODULE,
        },
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .family         = NFPROTO_IPV6,
                .name           = "TCPMSS",
                .checkentry     = tcpmss_tg6_check,
                .target         = tcpmss_tg6,
                .targetsize     = sizeof(struct xt_tcpmss_info),
                .proto          = IPPROTO_TCP,
                .me             = THIS_MODULE,
        },
#endif
};

static int __init tcpmss_tg_init(void)
{
        return xt_register_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}

static void __exit tcpmss_tg_exit(void)
{
        xt_unregister_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}

module_init(tcpmss_tg_init);
module_exit(tcpmss_tg_exit);