/*
 * net/bridge/netfilter/nft_reject_bridge.c
 * (mirrored from the mirror_ubuntu-artful-kernel tree at git.proxmox.com;
 *  tree state includes "networking: introduce and use skb_put_data()")
 */
1 /*
2 * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/module.h>
12 #include <linux/netlink.h>
13 #include <linux/netfilter.h>
14 #include <linux/netfilter/nf_tables.h>
15 #include <net/netfilter/nf_tables.h>
16 #include <net/netfilter/nft_reject.h>
17 #include <net/netfilter/ipv4/nf_reject.h>
18 #include <net/netfilter/ipv6/nf_reject.h>
19 #include <linux/ip.h>
20 #include <net/ip.h>
21 #include <net/ip6_checksum.h>
22 #include <linux/netfilter_bridge.h>
23 #include <linux/netfilter_ipv6.h>
24 #include "../br_private.h"
25
26 static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
27 struct sk_buff *nskb)
28 {
29 struct ethhdr *eth;
30
31 eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
32 skb_reset_mac_header(nskb);
33 ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
34 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
35 eth->h_proto = eth_hdr(oldskb)->h_proto;
36 skb_pull(nskb, ETH_HLEN);
37 }
38
39 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
40 {
41 struct iphdr *iph;
42 u32 len;
43
44 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
45 return 0;
46
47 iph = ip_hdr(skb);
48 if (iph->ihl < 5 || iph->version != 4)
49 return 0;
50
51 len = ntohs(iph->tot_len);
52 if (skb->len < len)
53 return 0;
54 else if (len < (iph->ihl*4))
55 return 0;
56
57 if (!pskb_may_pull(skb, iph->ihl*4))
58 return 0;
59
60 return 1;
61 }
62
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
 * or the bridge port (NF_BRIDGE PREROUTING).
 *
 * Build a TCP RST in reply to @oldskb and send it straight back out of
 * the bridge port the original frame arrived on.
 */
static void nft_reject_br_send_v4_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _oth;

	/* Never reply to a malformed/truncated IPv4 packet. */
	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	/* NOTE(review): helper defined elsewhere; presumably returns NULL
	 * when there is no usable TCP header to answer (fragment, non-TCP,
	 * or RST already set) — confirm against nf_reject_ip_tcphdr_get().
	 */
	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	/* Leave room for the link-layer header pushed later. */
	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   net->ipv4.sysctl_ip_default_ttl);
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	niph->ttl = net->ipv4.sysctl_ip_default_ttl;
	/* tot_len is only final once the TCP header is in place; fix it up
	 * and recompute the IP checksum afterwards.
	 */
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
100
/* Build an ICMPv4 destination-unreachable (subtype @code) in reply to
 * @oldskb and forward it back out of the bridge port it arrived on.
 */
static void nft_reject_br_send_v4_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	void *payload;
	__wsum csum;
	u8 proto;

	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* Drop any padding beyond the IP total length before checksumming. */
	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return;

	/* proto = 0 makes nf_ip_checksum() skip the pseudo-header check for
	 * protocols other than TCP/UDP.
	 */
	if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
	    ip_hdr(oldskb)->protocol == IPPROTO_UDP)
		proto = ip_hdr(oldskb)->protocol;
	else
		proto = 0;

	/* Do not reply to packets whose transport checksum is broken. */
	if (!skb_csum_unnecessary(oldskb) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   net->ipv4.sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;

	/* Embed the offending packet (starting at its IP header) as the
	 * ICMP payload.
	 */
	payload = skb_put_data(nskb, skb_network_header(oldskb), len);

	/* ICMP checksum covers the ICMP header plus the embedded payload. */
	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
166
167 static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
168 {
169 struct ipv6hdr *hdr;
170 u32 pkt_len;
171
172 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
173 return 0;
174
175 hdr = ipv6_hdr(skb);
176 if (hdr->version != 6)
177 return 0;
178
179 pkt_len = ntohs(hdr->payload_len);
180 if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
181 return 0;
182
183 return 1;
184 }
185
/* Build an IPv6 TCP RST in reply to @oldskb and forward it back out of
 * the bridge port the original frame arrived on.
 */
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	const struct tcphdr *oth;
	struct tcphdr _oth;
	unsigned int otcplen;
	struct ipv6hdr *nip6h;

	/* Never reply to a malformed/truncated IPv6 packet. */
	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	/* NOTE(review): helper defined elsewhere; presumably returns NULL
	 * when there is no answerable TCP header (fragment, non-TCP, or RST
	 * already set) — confirm against nf_reject_ip6_tcphdr_get().
	 */
	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	/* Leave room for the link-layer header pushed later. */
	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				     net->ipv6.devconf_all->hop_limit);
	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
	/* payload_len is only final once the TCP header is in place. */
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
219
/* Return true when @skb's transport checksum is known-good (or already
 * verified), so it is safe to reply to the packet.  Also rejects
 * non-first fragments and packets whose extension-header chain cannot
 * be walked.
 */
static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	int thoff;
	__be16 fo;
	u8 proto = ip6h->nexthdr;

	if (skb_csum_unnecessary(skb))
		return true;

	/* Trim trailing padding to the declared payload length; a zero
	 * payload_len (jumbogram) is left untouched.
	 */
	if (ip6h->payload_len &&
	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
		return false;

	/* Locate the transport header past any extension headers; fo is the
	 * fragment offset — any non-zero offset means a non-first fragment.
	 */
	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
		return false;

	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
240
241 static void nft_reject_br_send_v6_unreach(struct net *net,
242 struct sk_buff *oldskb,
243 const struct net_device *dev,
244 int hook, u8 code)
245 {
246 struct sk_buff *nskb;
247 struct ipv6hdr *nip6h;
248 struct icmp6hdr *icmp6h;
249 unsigned int len;
250 void *payload;
251
252 if (!nft_bridge_ip6hdr_validate(oldskb))
253 return;
254
255 /* Include "As much of invoking packet as possible without the ICMPv6
256 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
257 */
258 len = min_t(unsigned int, 1220, oldskb->len);
259
260 if (!pskb_may_pull(oldskb, len))
261 return;
262
263 if (!reject6_br_csum_ok(oldskb, hook))
264 return;
265
266 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
267 LL_MAX_HEADER + len, GFP_ATOMIC);
268 if (!nskb)
269 return;
270
271 skb_reserve(nskb, LL_MAX_HEADER);
272 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
273 net->ipv6.devconf_all->hop_limit);
274
275 skb_reset_transport_header(nskb);
276 icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
277 icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
278 icmp6h->icmp6_code = code;
279
280 payload = skb_put_data(nskb, skb_network_header(oldskb), len);
281 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
282
283 icmp6h->icmp6_cksum =
284 csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
285 nskb->len - sizeof(struct ipv6hdr),
286 IPPROTO_ICMPV6,
287 csum_partial(icmp6h,
288 nskb->len - sizeof(struct ipv6hdr),
289 0));
290
291 nft_reject_br_push_etherhdr(oldskb, nskb);
292
293 br_forward(br_port_get_rcu(dev), nskb, false, true);
294 }
295
/* Expression eval hook: emit the configured reject packet (ICMP(v6)
 * unreachable or TCP RST) for IPv4/IPv6 frames, then unconditionally
 * drop the original packet.
 */
static void nft_reject_bridge_eval(const struct nft_expr *expr,
				   struct nft_regs *regs,
				   const struct nft_pktinfo *pkt)
{
	struct nft_reject *priv = nft_expr_priv(expr);
	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;

	/* Never answer broadcast/multicast frames — just drop them. */
	if (is_broadcast_ether_addr(dest) ||
	    is_multicast_ether_addr(dest))
		goto out;

	switch (eth_hdr(pkt->skb)->h_proto) {
	case htons(ETH_P_IP):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
							nft_in(pkt),
							nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			/* Translate the family-independent ICMPX code into
			 * the concrete ICMPv4 code.
			 */
			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      nft_reject_icmp_code(priv->icmp_code));
			break;
		}
		break;
	case htons(ETH_P_IPV6):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
							nft_in(pkt),
							nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			/* Translate the family-independent ICMPX code into
			 * the concrete ICMPv6 code.
			 */
			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      nft_reject_icmpv6_code(priv->icmp_code));
			break;
		}
		break;
	default:
		/* No explicit way to reject this protocol, drop it. */
		break;
	}
out:
	/* The original packet is always dropped, whether or not a reject
	 * packet was sent.
	 */
	regs->verdict.code = NF_DROP;
}
357
358 static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
359 const struct nft_expr *expr,
360 const struct nft_data **data)
361 {
362 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
363 (1 << NF_BR_LOCAL_IN));
364 }
365
366 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
367 const struct nft_expr *expr,
368 const struct nlattr * const tb[])
369 {
370 struct nft_reject *priv = nft_expr_priv(expr);
371 int icmp_code;
372
373 if (tb[NFTA_REJECT_TYPE] == NULL)
374 return -EINVAL;
375
376 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
377 switch (priv->type) {
378 case NFT_REJECT_ICMP_UNREACH:
379 case NFT_REJECT_ICMPX_UNREACH:
380 if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
381 return -EINVAL;
382
383 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
384 if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
385 icmp_code > NFT_REJECT_ICMPX_MAX)
386 return -EINVAL;
387
388 priv->icmp_code = icmp_code;
389 break;
390 case NFT_REJECT_TCP_RST:
391 break;
392 default:
393 return -EINVAL;
394 }
395 return 0;
396 }
397
398 static int nft_reject_bridge_dump(struct sk_buff *skb,
399 const struct nft_expr *expr)
400 {
401 const struct nft_reject *priv = nft_expr_priv(expr);
402
403 if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
404 goto nla_put_failure;
405
406 switch (priv->type) {
407 case NFT_REJECT_ICMP_UNREACH:
408 case NFT_REJECT_ICMPX_UNREACH:
409 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
410 goto nla_put_failure;
411 break;
412 default:
413 break;
414 }
415
416 return 0;
417
418 nla_put_failure:
419 return -1;
420 }
421
/* Forward declaration: ops and type reference each other. */
static struct nft_expr_type nft_reject_bridge_type;

/* Expression operations; private data layout is the generic
 * struct nft_reject shared with the other reject backends.
 */
static const struct nft_expr_ops nft_reject_bridge_ops = {
	.type		= &nft_reject_bridge_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
	.eval		= nft_reject_bridge_eval,
	.init		= nft_reject_bridge_init,
	.dump		= nft_reject_bridge_dump,
	.validate	= nft_reject_bridge_validate,
};

/* Registration record for the bridge-family "reject" expression. */
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
	.family		= NFPROTO_BRIDGE,
	.name		= "reject",
	.ops		= &nft_reject_bridge_ops,
	.policy		= nft_reject_policy,
	.maxattr	= NFTA_REJECT_MAX,
	.owner		= THIS_MODULE,
};
440
/* Register the bridge-family "reject" expression with nf_tables. */
static int __init nft_reject_bridge_module_init(void)
{
	return nft_register_expr(&nft_reject_bridge_type);
}

/* Unregister the expression on module removal. */
static void __exit nft_reject_bridge_module_exit(void)
{
	nft_unregister_expr(&nft_reject_bridge_type);
}

module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
/* Allows automatic module loading when a bridge "reject" rule is added. */
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");