net/netfilter/xt_TCPMSS.c
/*
 * This is a module which is used for setting the MSS option in TCP packets.
 *
 * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
 * Copyright (C) 2007 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <net/tcp.h>

#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter/xt_TCPMSS.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("Xtables: TCP Maximum Segment Size (MSS) adjustment");
MODULE_ALIAS("ipt_TCPMSS");
MODULE_ALIAS("ip6t_TCPMSS");
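
/*
 * Example usage (illustrative only): the target is normally attached to TCP
 * SYN packets in the mangle table, e.g.
 *
 *   iptables -t mangle -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
 *            -j TCPMSS --clamp-mss-to-pmtu
 *   iptables -t mangle -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
 *            -j TCPMSS --set-mss 1400
 *
 * --clamp-mss-to-pmtu corresponds to XT_TCPMSS_CLAMP_PMTU below;
 * --set-mss supplies a fixed value in info->mss.
 */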
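/*
 * Return the length of the TCP option starting at opt[offset].  TCPOPT_EOL
 * (0) and TCPOPT_NOP (1) are single-byte options with no length field; for
 * every other option the length lives in the second byte.  A length of zero
 * would make the option-walking loop below spin forever, so it is treated
 * as length 1 to guarantee forward progress.
 */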
static inline unsigned int
optlen(const u_int8_t *opt, unsigned int offset)
{
        /* Beware zero-length options: make finite progress */
        if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
                return 1;
        else
                return opt[offset+1];
}

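/*
 * Look up the MTU of the route back towards the packet's source ("reverse
 * path").  Path-MTU clamping uses the smaller of this and the outgoing
 * route's MTU so that the advertised MSS works in both directions.
 * Returns ~0U if no route can be found.
 */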
static u_int32_t tcpmss_reverse_mtu(struct net *net,
                                    const struct sk_buff *skb,
                                    unsigned int family)
{
        struct flowi fl;
        const struct nf_afinfo *ai;
        struct rtable *rt = NULL;
        u_int32_t mtu = ~0U;

        if (family == PF_INET) {
                struct flowi4 *fl4 = &fl.u.ip4;
                memset(fl4, 0, sizeof(*fl4));
                fl4->daddr = ip_hdr(skb)->saddr;
        } else {
                struct flowi6 *fl6 = &fl.u.ip6;

                memset(fl6, 0, sizeof(*fl6));
                fl6->daddr = ipv6_hdr(skb)->saddr;
        }
        ai = nf_get_afinfo(family);
        if (ai != NULL)
                ai->route(net, (struct dst_entry **)&rt, &fl, false);

        if (rt != NULL) {
                mtu = dst_mtu(&rt->dst);
                dst_release(&rt->dst);
        }
        return mtu;
}

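/*
 * Rewrite (or insert) the MSS option of the TCP header at tcphoff.  The MSS
 * is only ever lowered, never raised.  Returns the number of bytes added to
 * the packet (TCPOLEN_MSS when a missing option is inserted), 0 if the
 * packet was left alone or only rewritten in place, or -1 on error (the
 * caller then drops the packet).  minlen is the family's header overhead
 * (network header plus minimal TCP header) subtracted from the MTU when
 * clamping.
 */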
static int
tcpmss_mangle_packet(struct sk_buff *skb,
                     const struct xt_action_param *par,
                     unsigned int family,
                     unsigned int tcphoff,
                     unsigned int minlen)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        struct tcphdr *tcph;
        int len, tcp_hdrlen;
        unsigned int i;
        __be16 oldval;
        u16 newmss;
        u8 *opt;

        /* This is a fragment, no TCP header is available */
        if (par->fragoff != 0)
                return 0;

        if (!skb_make_writable(skb, skb->len))
                return -1;

        len = skb->len - tcphoff;
        if (len < (int)sizeof(struct tcphdr))
                return -1;

        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        tcp_hdrlen = tcph->doff * 4;

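        /* Reject packets whose data is shorter than the header length
         * claimed by doff, or whose doff is below the TCP minimum.
         */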
        if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
                return -1;

        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
                struct net *net = xt_net(par);
                unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
                unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);

                if (min_mtu <= minlen) {
                        net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
                                            min_mtu);
                        return -1;
                }
                newmss = min_mtu - minlen;
        } else
                newmss = info->mss;

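        /* Walk the TCP options looking for an existing MSS option. */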
        opt = (u_int8_t *)tcph;
        for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
                if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
                        u_int16_t oldmss;

                        oldmss = (opt[i+2] << 8) | opt[i+3];

                        /* Never increase MSS, even when setting it, as
                         * doing so results in problems for hosts that rely
                         * on MSS being set correctly.
                         */
                        if (oldmss <= newmss)
                                return 0;

                        opt[i+2] = (newmss & 0xff00) >> 8;
                        opt[i+3] = newmss & 0x00ff;

                        inet_proto_csum_replace2(&tcph->check, skb,
                                                 htons(oldmss), htons(newmss),
                                                 false);
                        return 0;
                }
        }

        /* There is data after the header so the option can't be added
         * without moving it, and doing so may make the SYN packet
         * itself too large. Accept the packet unmodified instead.
         */
        if (len > tcp_hdrlen)
                return 0;

        /* tcph->doff has 4 bits, do not wrap it to 0 */
        if (tcp_hdrlen >= 15 * 4)
                return 0;

        /*
         * MSS Option not found ?! add it..
         */
        if (skb_tailroom(skb) < TCPOLEN_MSS) {
                if (pskb_expand_head(skb, 0,
                                     TCPOLEN_MSS - skb_tailroom(skb),
                                     GFP_ATOMIC))
                        return -1;
                tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        }

        skb_put(skb, TCPOLEN_MSS);

        /*
         * IPv4: RFC 1122 states "If an MSS option is not received at
         * connection setup, TCP MUST assume a default send MSS of 536".
         * IPv6: RFC 2460 specifies a minimum MTU of 1280; minus the 40-byte
         * IPv6 header and the 20-byte TCP header, the default MSS is 1220.
         * Since no MSS was provided, we must not advertise more than these
         * defaults.
         */
        if (xt_family(par) == NFPROTO_IPV4)
                newmss = min(newmss, (u16)536);
        else
                newmss = min(newmss, (u16)1220);

        opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
        memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));

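        /* Three incremental checksum updates follow: one for the larger TCP
         * length in the pseudo-header, one for the four newly written option
         * bytes, and one for the header word containing the increased data
         * offset.
         */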
        inet_proto_csum_replace2(&tcph->check, skb,
                                 htons(len), htons(len + TCPOLEN_MSS), true);
        opt[0] = TCPOPT_MSS;
        opt[1] = TCPOLEN_MSS;
        opt[2] = (newmss & 0xff00) >> 8;
        opt[3] = newmss & 0x00ff;

        inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);

        oldval = ((__be16 *)tcph)[6];
        tcph->doff += TCPOLEN_MSS/4;
        inet_proto_csum_replace2(&tcph->check, skb,
                                 oldval, ((__be16 *)tcph)[6], false);
        return TCPOLEN_MSS;
}

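/*
 * IPv4 target hook: after mangling, any size change must be reflected in the
 * IP total length and the IP header checksum.
 */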
static unsigned int
tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
        struct iphdr *iph = ip_hdr(skb);
        __be16 newlen;
        int ret;

        ret = tcpmss_mangle_packet(skb, par,
                                   PF_INET,
                                   iph->ihl * 4,
                                   sizeof(*iph) + sizeof(struct tcphdr));
        if (ret < 0)
                return NF_DROP;
        if (ret > 0) {
                iph = ip_hdr(skb);
                newlen = htons(ntohs(iph->tot_len) + ret);
                csum_replace2(&iph->check, iph->tot_len, newlen);
                iph->tot_len = newlen;
        }
        return XT_CONTINUE;
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
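/*
 * IPv6 target hook: locate the TCP header behind any extension headers, then
 * mangle.  IPv6 has no header checksum, so only the payload length field
 * (and, for CHECKSUM_COMPLETE packets, skb->csum) needs adjusting when bytes
 * were added.
 */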
static unsigned int
tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        u8 nexthdr;
        __be16 frag_off, oldlen, newlen;
        int tcphoff;
        int ret;

        nexthdr = ipv6h->nexthdr;
        tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
        ret = tcpmss_mangle_packet(skb, par,
                                   PF_INET6,
                                   tcphoff,
                                   sizeof(*ipv6h) + sizeof(struct tcphdr));
        if (ret < 0)
                return NF_DROP;
        if (ret > 0) {
                ipv6h = ipv6_hdr(skb);
                oldlen = ipv6h->payload_len;
                newlen = htons(ntohs(oldlen) + ret);
                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->csum = csum_add(csum_sub(skb->csum, oldlen),
                                             newlen);
                ipv6h->payload_len = newlen;
        }
        return XT_CONTINUE;
}
#endif

/* Must specify -p tcp --syn */
static inline bool find_syn_match(const struct xt_entry_match *m)
{
        const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;

        if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
            tcpinfo->flg_cmp & TCPHDR_SYN &&
            !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
                return true;

        return false;
}

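/*
 * Sanity-check a rule at insertion time: path-MTU clamping only makes sense
 * in hooks where a route is attached to the skb (FORWARD, OUTPUT,
 * POSTROUTING), and unless the rule comes from the nft compat layer it must
 * carry a tcp match restricting it to SYN packets.
 */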
static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        const struct ipt_entry *e = par->entryinfo;
        const struct xt_entry_match *ematch;

        if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                                (1 << NF_INET_LOCAL_OUT) |
                                (1 << NF_INET_POST_ROUTING))) != 0) {
                pr_info("path-MTU clamping only supported in "
                        "FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
        if (par->nft_compat)
                return 0;

        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
        pr_info("Only works on TCP SYN packets\n");
        return -EINVAL;
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        const struct ip6t_entry *e = par->entryinfo;
        const struct xt_entry_match *ematch;

        if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                                (1 << NF_INET_LOCAL_OUT) |
                                (1 << NF_INET_POST_ROUTING))) != 0) {
                pr_info("path-MTU clamping only supported in "
                        "FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
        if (par->nft_compat)
                return 0;

        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
        pr_info("Only works on TCP SYN packets\n");
        return -EINVAL;
}
#endif

static struct xt_target tcpmss_tg_reg[] __read_mostly = {
        {
                .family         = NFPROTO_IPV4,
                .name           = "TCPMSS",
                .checkentry     = tcpmss_tg4_check,
                .target         = tcpmss_tg4,
                .targetsize     = sizeof(struct xt_tcpmss_info),
                .proto          = IPPROTO_TCP,
                .me             = THIS_MODULE,
        },
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .family         = NFPROTO_IPV6,
                .name           = "TCPMSS",
                .checkentry     = tcpmss_tg6_check,
                .target         = tcpmss_tg6,
                .targetsize     = sizeof(struct xt_tcpmss_info),
                .proto          = IPPROTO_TCP,
                .me             = THIS_MODULE,
        },
#endif
};

static int __init tcpmss_tg_init(void)
{
        return xt_register_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}

static void __exit tcpmss_tg_exit(void)
{
        xt_unregister_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}

module_init(tcpmss_tg_init);
module_exit(tcpmss_tg_exit);