1 /*
2 * Handle firewalling
3 * Linux ethernet bridge
4 *
5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org>
7 * Bart De Schuymer <bdschuym@pandora.be>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 * Lennert dedicates this file to Kerstin Wurdinger.
15 */
16
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/ip.h>
21 #include <linux/netdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_ether.h>
25 #include <linux/if_vlan.h>
26 #include <linux/if_pppox.h>
27 #include <linux/ppp_defs.h>
28 #include <linux/netfilter_bridge.h>
29 #include <linux/netfilter_ipv4.h>
30 #include <linux/netfilter_ipv6.h>
31 #include <linux/netfilter_arp.h>
32 #include <linux/in_route.h>
33 #include <linux/inetdevice.h>
34
35 #include <net/ip.h>
36 #include <net/ipv6.h>
37 #include <net/route.h>
38
39 #include <asm/uaccess.h>
40 #include "br_private.h"
41 #ifdef CONFIG_SYSCTL
42 #include <linux/sysctl.h>
43 #endif
44
45 #define skb_origaddr(skb) (((struct bridge_skb_cb *) \
46 (skb->nf_bridge->data))->daddr.ipv4)
47 #define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr)
48 #define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr)
49
50 #ifdef CONFIG_SYSCTL
51 static struct ctl_table_header *brnf_sysctl_header;
52 static int brnf_call_iptables __read_mostly = 1;
53 static int brnf_call_ip6tables __read_mostly = 1;
54 static int brnf_call_arptables __read_mostly = 1;
55 static int brnf_filter_vlan_tagged __read_mostly = 0;
56 static int brnf_filter_pppoe_tagged __read_mostly = 0;
57 static int brnf_pass_vlan_indev __read_mostly = 0;
58 #else
59 #define brnf_call_iptables 1
60 #define brnf_call_ip6tables 1
61 #define brnf_call_arptables 1
62 #define brnf_filter_vlan_tagged 0
63 #define brnf_filter_pppoe_tagged 0
64 #define brnf_pass_vlan_indev 0
65 #endif
66
67 #define IS_IP(skb) \
68 (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
69
70 #define IS_IPV6(skb) \
71 (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
72
73 #define IS_ARP(skb) \
74 (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
75
76 static inline __be16 vlan_proto(const struct sk_buff *skb)
77 {
78 if (vlan_tx_tag_present(skb))
79 return skb->protocol;
80 else if (skb->protocol == htons(ETH_P_8021Q))
81 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
82 else
83 return 0;
84 }
85
86 #define IS_VLAN_IP(skb) \
87 (vlan_proto(skb) == htons(ETH_P_IP) && \
88 brnf_filter_vlan_tagged)
89
90 #define IS_VLAN_IPV6(skb) \
91 (vlan_proto(skb) == htons(ETH_P_IPV6) && \
92 brnf_filter_vlan_tagged)
93
94 #define IS_VLAN_ARP(skb) \
95 (vlan_proto(skb) == htons(ETH_P_ARP) && \
96 brnf_filter_vlan_tagged)
97
98 static inline __be16 pppoe_proto(const struct sk_buff *skb)
99 {
100 return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
101 sizeof(struct pppoe_hdr)));
102 }
103
104 #define IS_PPPOE_IP(skb) \
105 (skb->protocol == htons(ETH_P_PPP_SES) && \
106 pppoe_proto(skb) == htons(PPP_IP) && \
107 brnf_filter_pppoe_tagged)
108
109 #define IS_PPPOE_IPV6(skb) \
110 (skb->protocol == htons(ETH_P_PPP_SES) && \
111 pppoe_proto(skb) == htons(PPP_IPV6) && \
112 brnf_filter_pppoe_tagged)
113
114 static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
115 struct sk_buff *skb, u32 mtu)
116 {
117 }
118
119 static void fake_redirect(struct dst_entry *dst, struct sock *sk,
120 struct sk_buff *skb)
121 {
122 }
123
124 static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
125 {
126 return NULL;
127 }
128
129 static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
130 struct sk_buff *skb,
131 const void *daddr)
132 {
133 return NULL;
134 }
135
136 static unsigned int fake_mtu(const struct dst_entry *dst)
137 {
138 return dst->dev->mtu;
139 }
140
141 static struct dst_ops fake_dst_ops = {
142 .family = AF_INET,
143 .protocol = cpu_to_be16(ETH_P_IP),
144 .update_pmtu = fake_update_pmtu,
145 .redirect = fake_redirect,
146 .cow_metrics = fake_cow_metrics,
147 .neigh_lookup = fake_neigh_lookup,
148 .mtu = fake_mtu,
149 };
150
151 /*
152 * Initialize bogus route table used to keep netfilter happy.
153 * Currently, we fill in the PMTU entry because netfilter
154 * refragmentation needs it, and the rt_flags entry because
155 * ipt_REJECT needs it. Future netfilter modules might
156 * require us to fill additional fields.
157 */
158 static const u32 br_dst_default_metrics[RTAX_MAX] = {
159 [RTAX_MTU - 1] = 1500,
160 };
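/* Note: only the MTU metric is pre-populated here; the final 'true'
 * passed to dst_init_metrics() below marks the metrics as read-only.
 * Everything else in the fake rtable stays zero, which so far has been
 * enough for the netfilter code that dereferences skb_dst() on bridged
 * traffic.
 */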
161
162 void br_netfilter_rtable_init(struct net_bridge *br)
163 {
164 struct rtable *rt = &br->fake_rtable;
165
166 atomic_set(&rt->dst.__refcnt, 1);
167 rt->dst.dev = br->dev;
168 rt->dst.path = &rt->dst;
169 dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
170 rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE;
171 rt->dst.ops = &fake_dst_ops;
172 }
173
174 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
175 {
176 struct net_bridge_port *port;
177
178 port = br_port_get_rcu(dev);
179 return port ? &port->br->fake_rtable : NULL;
180 }
181
182 static inline struct net_device *bridge_parent(const struct net_device *dev)
183 {
184 struct net_bridge_port *port;
185
186 port = br_port_get_rcu(dev);
187 return port ? port->br->dev : NULL;
188 }
189
190 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
191 {
192 skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
193 if (likely(skb->nf_bridge))
194 atomic_set(&(skb->nf_bridge->use), 1);
195
196 return skb->nf_bridge;
197 }
198
199 static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
200 {
201 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
202
203 if (atomic_read(&nf_bridge->use) > 1) {
204 struct nf_bridge_info *tmp = nf_bridge_alloc(skb);
205
206 if (tmp) {
207 memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
208 atomic_set(&tmp->use, 1);
209 }
210 nf_bridge_put(nf_bridge);
211 nf_bridge = tmp;
212 }
213 return nf_bridge;
214 }
215
216 static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
217 {
218 unsigned int len = nf_bridge_encap_header_len(skb);
219
220 skb_push(skb, len);
221 skb->network_header -= len;
222 }
223
224 static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
225 {
226 unsigned int len = nf_bridge_encap_header_len(skb);
227
228 skb_pull(skb, len);
229 skb->network_header += len;
230 }
231
232 static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
233 {
234 unsigned int len = nf_bridge_encap_header_len(skb);
235
236 skb_pull_rcsum(skb, len);
237 skb->network_header += len;
238 }
239
240 static inline void nf_bridge_save_header(struct sk_buff *skb)
241 {
242 int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
243
244 skb_copy_from_linear_data_offset(skb, -header_size,
245 skb->nf_bridge->data, header_size);
246 }
247
248 static inline void nf_bridge_update_protocol(struct sk_buff *skb)
249 {
250 if (skb->nf_bridge->mask & BRNF_8021Q)
251 skb->protocol = htons(ETH_P_8021Q);
252 else if (skb->nf_bridge->mask & BRNF_PPPoE)
253 skb->protocol = htons(ETH_P_PPP_SES);
254 }
255
256 /* When handing a packet over to the IP layer
257 * check whether we have a skb that is in the
258 * expected format
259 */
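/* The checks below mirror those done by ip_rcv(): the header must be
 * pullable, have version 4 and ihl >= 5, and pass ip_fast_csum();
 * tot_len must be consistent with skb->len, and the skb is trimmed to
 * tot_len.  IP options are compiled, and the SRR option is only
 * honoured if the in-device allows source routing.
 */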
260
261 static int br_parse_ip_options(struct sk_buff *skb)
262 {
263 struct ip_options *opt;
264 const struct iphdr *iph;
265 struct net_device *dev = skb->dev;
266 u32 len;
267
268 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
269 goto inhdr_error;
270
271 iph = ip_hdr(skb);
272 opt = &(IPCB(skb)->opt);
273
274 /* Basic sanity checks */
275 if (iph->ihl < 5 || iph->version != 4)
276 goto inhdr_error;
277
278 if (!pskb_may_pull(skb, iph->ihl*4))
279 goto inhdr_error;
280
281 iph = ip_hdr(skb);
282 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
283 goto inhdr_error;
284
285 len = ntohs(iph->tot_len);
286 if (skb->len < len) {
287 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
288 goto drop;
289 } else if (len < (iph->ihl*4))
290 goto inhdr_error;
291
292 if (pskb_trim_rcsum(skb, len)) {
293 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
294 goto drop;
295 }
296
297 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
298 if (iph->ihl == 5)
299 return 0;
300
301 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
302 if (ip_options_compile(dev_net(dev), opt, skb))
303 goto inhdr_error;
304
305 /* Check correct handling of SRR option */
306 if (unlikely(opt->srr)) {
307 struct in_device *in_dev = __in_dev_get_rcu(dev);
308 if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
309 goto drop;
310
311 if (ip_options_rcv_srr(skb))
312 goto drop;
313 }
314
315 return 0;
316
317 inhdr_error:
318 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
319 drop:
320 return -1;
321 }
322
323 /* Fill in the header for fragmented IP packets handled by
324 * the IPv4 connection tracking code.
325 */
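/* The header bytes were stashed in skb->nf_bridge->data by
 * nf_bridge_save_header() at POST_ROUTING; here they are copied back in
 * front of the network header and the VLAN/PPPoE encapsulation (if any)
 * is pushed again, so fragments go out with the original MAC header.
 */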
326 int nf_bridge_copy_header(struct sk_buff *skb)
327 {
328 int err;
329 unsigned int header_size;
330
331 nf_bridge_update_protocol(skb);
332 header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
333 err = skb_cow_head(skb, header_size);
334 if (err)
335 return err;
336
337 skb_copy_to_linear_data_offset(skb, -header_size,
338 skb->nf_bridge->data, header_size);
339 __skb_push(skb, nf_bridge_encap_header_len(skb));
340 return 0;
341 }
342
343 /* PF_BRIDGE/PRE_ROUTING *********************************************/
344 /* Undo the changes made for ip6tables PREROUTING and continue the
345 * bridge PRE_ROUTING hook. */
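/* Restores the packet type and device mangled by setup_pre_routing(),
 * attaches the bridge's fake rtable as the dst, and re-runs the bridge
 * PRE_ROUTING hook with a threshold of 1, which skips this module's own
 * hook (registered at NF_BR_PRI_BRNF) and ends up in
 * br_handle_frame_finish().
 */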
346 static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
347 {
348 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
349 struct rtable *rt;
350
351 if (nf_bridge->mask & BRNF_PKT_TYPE) {
352 skb->pkt_type = PACKET_OTHERHOST;
353 nf_bridge->mask ^= BRNF_PKT_TYPE;
354 }
355 nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
356
357 rt = bridge_parent_rtable(nf_bridge->physindev);
358 if (!rt) {
359 kfree_skb(skb);
360 return 0;
361 }
362 skb_dst_set_noref(skb, &rt->dst);
363
364 skb->dev = nf_bridge->physindev;
365 nf_bridge_update_protocol(skb);
366 nf_bridge_push_encap_header(skb);
367 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
368 br_handle_frame_finish, 1);
369
370 return 0;
371 }
372
373 /* Obtain the correct destination MAC address, while preserving the original
374 * source MAC address. If we already know this address, we just copy it. If we
375 * don't, we use the neighbour framework to find out. In both cases, we make
376 * sure that br_handle_frame_finish() is called afterwards.
377 */
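/* Two cases below: if the neighbour already has a cached hardware header
 * (hh_len != 0) it is copied in directly and br_handle_frame_finish() is
 * called; otherwise neigh->output() resolves the address first, and
 * BRNF_BRIDGED_DNAT tells br_dev_xmit() to continue with forwarding once
 * the MAC header has been rewritten.
 */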
378 static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
379 {
380 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
381 struct neighbour *neigh;
382 struct dst_entry *dst;
383
384 skb->dev = bridge_parent(skb->dev);
385 if (!skb->dev)
386 goto free_skb;
387 dst = skb_dst(skb);
388 neigh = dst_neigh_lookup_skb(dst, skb);
389 if (neigh) {
390 int ret;
391
392 if (neigh->hh.hh_len) {
393 neigh_hh_bridge(&neigh->hh, skb);
394 skb->dev = nf_bridge->physindev;
395 ret = br_handle_frame_finish(skb);
396 } else {
397 /* the neighbour function below overwrites the complete
398 * MAC header, so we save the Ethernet source address and
399 * protocol number.
400 */
401 skb_copy_from_linear_data_offset(skb,
402 -(ETH_HLEN-ETH_ALEN),
403 skb->nf_bridge->data,
404 ETH_HLEN-ETH_ALEN);
405 /* tell br_dev_xmit to continue with forwarding */
406 nf_bridge->mask |= BRNF_BRIDGED_DNAT;
407 ret = neigh->output(neigh, skb);
408 }
409 neigh_release(neigh);
410 return ret;
411 }
412 free_skb:
413 kfree_skb(skb);
414 return 0;
415 }
416
417 /* This requires some explaining. If DNAT has taken place,
418 * we will need to fix up the destination Ethernet address.
419 *
420 * There are two cases to consider:
421 * 1. The packet was DNAT'ed to a device in the same bridge
422 * port group as it was received on. We can still bridge
423 * the packet.
424 * 2. The packet was DNAT'ed to a different device, either
425 * a non-bridged device or another bridge port group.
426 * The packet will need to be routed.
427 *
428 * The correct way of distinguishing between these two cases is to
429 * call ip_route_input() and to look at skb->dst->dev, which is
430 * changed to the destination device if ip_route_input() succeeds.
431 *
432 * Let's first consider the case that ip_route_input() succeeds:
433 *
434 * If the output device equals the logical bridge device the packet
435 * came in on, we can consider this bridging. The corresponding MAC
436 * address will be obtained in br_nf_pre_routing_finish_bridge.
437 * Otherwise, the packet is considered to be routed and we just
438 * change the destination MAC address so that the packet will
439  * later be passed up to the IP stack to be routed. For a redirected
440  * packet, ip_route_input() will give back the loopback device as the
441  * output device, which differs from the bridge device.
442 *
443 * Let's now consider the case that ip_route_input() fails:
444 *
445 * This can be because the destination address is martian, in which case
446 * the packet will be dropped.
447 * If IP forwarding is disabled, ip_route_input() will fail, while
448 * ip_route_output_key() can return success. The source
449 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
450 * thinks we're handling a locally generated packet and won't care
451  * if IP forwarding is enabled. If the output device equals the logical bridge
452  * device, we proceed as if ip_route_input() succeeded. If it differs from the
453  * logical bridge device, or if ip_route_output_key() fails, we drop the packet.
454 */
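/* For instance, a packet received on a port of br0 and DNAT'ed to a host
 * sitting behind another port of br0 matches case 1 above and is bridged
 * as usual.  If the new destination is instead reached through a routed
 * interface, case 2 applies: the destination MAC is rewritten to the
 * bridge device's own address and pkt_type is set to PACKET_HOST, so the
 * IP stack picks the packet up and routes it.
 */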
455 static int br_nf_pre_routing_finish(struct sk_buff *skb)
456 {
457 struct net_device *dev = skb->dev;
458 struct iphdr *iph = ip_hdr(skb);
459 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
460 struct rtable *rt;
461 int err;
462
463 if (nf_bridge->mask & BRNF_PKT_TYPE) {
464 skb->pkt_type = PACKET_OTHERHOST;
465 nf_bridge->mask ^= BRNF_PKT_TYPE;
466 }
467 nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
468 if (dnat_took_place(skb)) {
469 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
470 struct in_device *in_dev = __in_dev_get_rcu(dev);
471
472 /* If err equals -EHOSTUNREACH the error is due to a
473 * martian destination or due to the fact that
474 * forwarding is disabled. For most martian packets,
475 * ip_route_output_key() will fail. It won't fail for 2 types of
476 * martian destinations: loopback destinations and destination
477 * 0.0.0.0. In both cases the packet will be dropped because the
478 * destination is the loopback device and not the bridge. */
479 if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
480 goto free_skb;
481
482 rt = ip_route_output(dev_net(dev), iph->daddr, 0,
483 RT_TOS(iph->tos), 0);
484 if (!IS_ERR(rt)) {
485 /* - Bridged-and-DNAT'ed traffic doesn't
486 * require ip_forwarding. */
487 if (rt->dst.dev == dev) {
488 skb_dst_set(skb, &rt->dst);
489 goto bridged_dnat;
490 }
491 ip_rt_put(rt);
492 }
493 free_skb:
494 kfree_skb(skb);
495 return 0;
496 } else {
497 if (skb_dst(skb)->dev == dev) {
498 bridged_dnat:
499 skb->dev = nf_bridge->physindev;
500 nf_bridge_update_protocol(skb);
501 nf_bridge_push_encap_header(skb);
502 NF_HOOK_THRESH(NFPROTO_BRIDGE,
503 NF_BR_PRE_ROUTING,
504 skb, skb->dev, NULL,
505 br_nf_pre_routing_finish_bridge,
506 1);
507 return 0;
508 }
509 ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
510 skb->pkt_type = PACKET_HOST;
511 }
512 } else {
513 rt = bridge_parent_rtable(nf_bridge->physindev);
514 if (!rt) {
515 kfree_skb(skb);
516 return 0;
517 }
518 skb_dst_set_noref(skb, &rt->dst);
519 }
520
521 skb->dev = nf_bridge->physindev;
522 nf_bridge_update_protocol(skb);
523 nf_bridge_push_encap_header(skb);
524 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
525 br_handle_frame_finish, 1);
526
527 return 0;
528 }
529
530 static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
531 {
532 struct net_device *vlan, *br;
533
534 br = bridge_parent(dev);
535 if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
536 return br;
537
538 vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
539 vlan_tx_tag_get(skb) & VLAN_VID_MASK);
540
541 return vlan ? vlan : br;
542 }
543
544 /* Some common code for IPv4/IPv6 */
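/* Records the receiving port as physindev, rewrites skb->dev to the
 * logical bridge (or VLAN-on-bridge) device, and notes the encapsulation
 * type (802.1Q or PPPoE session) in the mask so the headers can be
 * pushed back later.  PACKET_OTHERHOST is temporarily turned into
 * PACKET_HOST and remembered via BRNF_PKT_TYPE.
 */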
545 static struct net_device *setup_pre_routing(struct sk_buff *skb)
546 {
547 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
548
549 if (skb->pkt_type == PACKET_OTHERHOST) {
550 skb->pkt_type = PACKET_HOST;
551 nf_bridge->mask |= BRNF_PKT_TYPE;
552 }
553
554 nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
555 nf_bridge->physindev = skb->dev;
556 skb->dev = brnf_get_logical_dev(skb, skb->dev);
557 if (skb->protocol == htons(ETH_P_8021Q))
558 nf_bridge->mask |= BRNF_8021Q;
559 else if (skb->protocol == htons(ETH_P_PPP_SES))
560 nf_bridge->mask |= BRNF_PPPoE;
561
562 /* Must drop socket now because of tproxy. */
563 skb_orphan(skb);
564 return skb->dev;
565 }
566
567 /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
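/* Walks the hop-by-hop option TLVs only to validate their lengths; the
 * one option acted upon is the jumbo payload option, for which the
 * packet is trimmed to the jumbogram length.
 */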
568 static int check_hbh_len(struct sk_buff *skb)
569 {
570 unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
571 u32 pkt_len;
572 const unsigned char *nh = skb_network_header(skb);
573 int off = raw - nh;
574 int len = (raw[1] + 1) << 3;
575
576 if ((raw + len) - skb->data > skb_headlen(skb))
577 goto bad;
578
579 off += 2;
580 len -= 2;
581
582 while (len > 0) {
583 int optlen = nh[off + 1] + 2;
584
585 switch (nh[off]) {
586 case IPV6_TLV_PAD1:
587 optlen = 1;
588 break;
589
590 case IPV6_TLV_PADN:
591 break;
592
593 case IPV6_TLV_JUMBO:
594 if (nh[off + 1] != 4 || (off & 3) != 2)
595 goto bad;
596 pkt_len = ntohl(*(__be32 *) (nh + off + 2));
597 if (pkt_len <= IPV6_MAXPLEN ||
598 ipv6_hdr(skb)->payload_len)
599 goto bad;
600 if (pkt_len > skb->len - sizeof(struct ipv6hdr))
601 goto bad;
602 if (pskb_trim_rcsum(skb,
603 pkt_len + sizeof(struct ipv6hdr)))
604 goto bad;
605 nh = skb_network_header(skb);
606 break;
607 default:
608 if (optlen > len)
609 goto bad;
610 break;
611 }
612 off += optlen;
613 len -= optlen;
614 }
615 if (len == 0)
616 return 0;
617 bad:
618 return -1;
619
620 }
621
622 /* Replicate the checks that IPv6 does on packet reception and pass the packet
623 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
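/* Basic sanity checks only: the version must be 6 and payload_len must
 * be consistent with skb->len (or describe a valid jumbogram, see
 * check_hbh_len() above).  On success the packet is handed to
 * NF_INET_PRE_ROUTING and NF_STOLEN is returned.
 */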
624 static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
625 struct sk_buff *skb,
626 const struct net_device *in,
627 const struct net_device *out,
628 int (*okfn)(struct sk_buff *))
629 {
630 const struct ipv6hdr *hdr;
631 u32 pkt_len;
632
633 if (skb->len < sizeof(struct ipv6hdr))
634 return NF_DROP;
635
636 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
637 return NF_DROP;
638
639 hdr = ipv6_hdr(skb);
640
641 if (hdr->version != 6)
642 return NF_DROP;
643
644 pkt_len = ntohs(hdr->payload_len);
645
646 if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
647 if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
648 return NF_DROP;
649 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
650 return NF_DROP;
651 }
652 if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
653 return NF_DROP;
654
655 nf_bridge_put(skb->nf_bridge);
656 if (!nf_bridge_alloc(skb))
657 return NF_DROP;
658 if (!setup_pre_routing(skb))
659 return NF_DROP;
660
661 skb->protocol = htons(ETH_P_IPV6);
662 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
663 br_nf_pre_routing_finish_ipv6);
664
665 return NF_STOLEN;
666 }
667
668 /* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
669 * Replicate the checks that IPv4 does on packet reception.
670 * Set skb->dev to the bridge device (i.e. parent of the
671 * receiving device) to make netfilter happy, the REDIRECT
672 * target in particular. Save the original destination IP
673 * address to be able to detect DNAT afterwards. */
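/* This runs from the bridge's PRE_ROUTING hook.  IPv6 (plain, VLAN- or
 * PPPoE-encapsulated) is handed to br_nf_pre_routing_ipv6(); IPv4 is
 * sanity-checked by br_parse_ip_options(), the original destination
 * address is stored so br_nf_pre_routing_finish() can detect DNAT, and
 * the packet is stolen into NF_INET_PRE_ROUTING.
 */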
674 static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
675 struct sk_buff *skb,
676 const struct net_device *in,
677 const struct net_device *out,
678 int (*okfn)(struct sk_buff *))
679 {
680 struct net_bridge_port *p;
681 struct net_bridge *br;
682 __u32 len = nf_bridge_encap_header_len(skb);
683
684 if (unlikely(!pskb_may_pull(skb, len)))
685 return NF_DROP;
686
687 p = br_port_get_rcu(in);
688 if (p == NULL)
689 return NF_DROP;
690 br = p->br;
691
692 if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
693 if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
694 return NF_ACCEPT;
695
696 nf_bridge_pull_encap_header_rcsum(skb);
697 return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
698 }
699
700 if (!brnf_call_iptables && !br->nf_call_iptables)
701 return NF_ACCEPT;
702
703 if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
704 return NF_ACCEPT;
705
706 nf_bridge_pull_encap_header_rcsum(skb);
707
708 if (br_parse_ip_options(skb))
709 return NF_DROP;
710
711 nf_bridge_put(skb->nf_bridge);
712 if (!nf_bridge_alloc(skb))
713 return NF_DROP;
714 if (!setup_pre_routing(skb))
715 return NF_DROP;
716 store_orig_dstaddr(skb);
717 skb->protocol = htons(ETH_P_IP);
718
719 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
720 br_nf_pre_routing_finish);
721
722 return NF_STOLEN;
723 }
724
725
726 /* PF_BRIDGE/LOCAL_IN ************************************************/
727 /* The packet is locally destined, which requires a real
728 * dst_entry, so detach the fake one. On the way up, the
729 * packet would pass through PRE_ROUTING again (which already
730 * took place when the packet entered the bridge), but we
731 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
732 * prevent this from happening. */
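/* br_drop_fake_rtable() only detaches the dst if it is the bridge's
 * DST_FAKE_RTABLE entry, so a real route attached earlier is left alone.
 */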
733 static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
734 struct sk_buff *skb,
735 const struct net_device *in,
736 const struct net_device *out,
737 int (*okfn)(struct sk_buff *))
738 {
739 br_drop_fake_rtable(skb);
740 return NF_ACCEPT;
741 }
742
743 /* PF_BRIDGE/FORWARD *************************************************/
744 static int br_nf_forward_finish(struct sk_buff *skb)
745 {
746 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
747 struct net_device *in;
748
749 if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
750 in = nf_bridge->physindev;
751 if (nf_bridge->mask & BRNF_PKT_TYPE) {
752 skb->pkt_type = PACKET_OTHERHOST;
753 nf_bridge->mask ^= BRNF_PKT_TYPE;
754 }
755 nf_bridge_update_protocol(skb);
756 } else {
757 in = *((struct net_device **)(skb->cb));
758 }
759 nf_bridge_push_encap_header(skb);
760
761 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
762 skb->dev, br_forward_finish, 1);
763 return 0;
764 }
765
766
767 /* This is the 'purely bridged' case. For IP, we pass the packet to
768 * netfilter with indev and outdev set to the bridge device,
769 * but we are still able to filter on the 'real' indev/outdev
770 * because of the physdev module. For ARP, indev and outdev are the
771 * bridge ports. */
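/* Requires a private nf_bridge_info copy (nf_bridge_unshare()) because
 * the skb may have been cloned towards several out-ports.  physoutdev is
 * recorded, BRNF_BRIDGED is set for br_nf_post_routing(), and the packet
 * traverses NF_INET_FORWARD with the logical bridge devices as in/out.
 */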
772 static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
773 struct sk_buff *skb,
774 const struct net_device *in,
775 const struct net_device *out,
776 int (*okfn)(struct sk_buff *))
777 {
778 struct nf_bridge_info *nf_bridge;
779 struct net_device *parent;
780 u_int8_t pf;
781
782 if (!skb->nf_bridge)
783 return NF_ACCEPT;
784
785 /* Need exclusive nf_bridge_info since we might have multiple
786 * different physoutdevs. */
787 if (!nf_bridge_unshare(skb))
788 return NF_DROP;
789
790 parent = bridge_parent(out);
791 if (!parent)
792 return NF_DROP;
793
794 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
795 pf = NFPROTO_IPV4;
796 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
797 pf = NFPROTO_IPV6;
798 else
799 return NF_ACCEPT;
800
801 nf_bridge_pull_encap_header(skb);
802
803 nf_bridge = skb->nf_bridge;
804 if (skb->pkt_type == PACKET_OTHERHOST) {
805 skb->pkt_type = PACKET_HOST;
806 nf_bridge->mask |= BRNF_PKT_TYPE;
807 }
808
809 if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
810 return NF_DROP;
811
812 /* The physdev module checks on this */
813 nf_bridge->mask |= BRNF_BRIDGED;
814 nf_bridge->physoutdev = skb->dev;
815 if (pf == NFPROTO_IPV4)
816 skb->protocol = htons(ETH_P_IP);
817 else
818 skb->protocol = htons(ETH_P_IPV6);
819
820 NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
821 br_nf_forward_finish);
822
823 return NF_STOLEN;
824 }
825
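/* ARP counterpart of the above: the frame is passed to the arptables
 * FORWARD chain with the real bridge ports as in/out devices.  The input
 * device is stashed in skb->cb so br_nf_forward_finish() can restore it.
 * Only IPv4 ARP (ar_pln == 4) is filtered.
 */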
826 static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
827 struct sk_buff *skb,
828 const struct net_device *in,
829 const struct net_device *out,
830 int (*okfn)(struct sk_buff *))
831 {
832 struct net_bridge_port *p;
833 struct net_bridge *br;
834 struct net_device **d = (struct net_device **)(skb->cb);
835
836 p = br_port_get_rcu(out);
837 if (p == NULL)
838 return NF_ACCEPT;
839 br = p->br;
840
841 if (!brnf_call_arptables && !br->nf_call_arptables)
842 return NF_ACCEPT;
843
844 if (!IS_ARP(skb)) {
845 if (!IS_VLAN_ARP(skb))
846 return NF_ACCEPT;
847 nf_bridge_pull_encap_header(skb);
848 }
849
850 if (arp_hdr(skb)->ar_pln != 4) {
851 if (IS_VLAN_ARP(skb))
852 nf_bridge_push_encap_header(skb);
853 return NF_ACCEPT;
854 }
855 *d = (struct net_device *)in;
856 NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
857 (struct net_device *)out, br_nf_forward_finish);
858
859 return NF_STOLEN;
860 }
861
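/* When IPv4 conntrack has reassembled a packet that is now larger than
 * the outgoing device's MTU (and it is not GSO), it must be refragmented
 * here before transmission; otherwise the frame is queued for xmit
 * unchanged.
 */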
862 #if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
863 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
864 {
865 int ret;
866
867 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
868 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
869 !skb_is_gso(skb)) {
870 if (br_parse_ip_options(skb))
871 /* Drop invalid packet */
872 return NF_DROP;
873 ret = ip_fragment(skb, br_dev_queue_push_xmit);
874 } else
875 ret = br_dev_queue_push_xmit(skb);
876
877 return ret;
878 }
879 #else
880 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
881 {
882 return br_dev_queue_push_xmit(skb);
883 }
884 #endif
885
886 /* PF_BRIDGE/POST_ROUTING ********************************************/
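/* Runs for traffic that was bridged (BRNF_BRIDGED set in the FORWARD
 * hook above).  The MAC header, plus any VLAN/PPPoE encapsulation, is
 * saved so nf_bridge_copy_header() can restore it on refragmented
 * packets, and the skb is passed to NF_INET_POST_ROUTING with the real
 * out-device.
 */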
887 static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
888 struct sk_buff *skb,
889 const struct net_device *in,
890 const struct net_device *out,
891 int (*okfn)(struct sk_buff *))
892 {
893 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
894 struct net_device *realoutdev = bridge_parent(skb->dev);
895 u_int8_t pf;
896
897 if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
898 return NF_ACCEPT;
899
900 if (!realoutdev)
901 return NF_DROP;
902
903 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
904 pf = NFPROTO_IPV4;
905 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
906 pf = NFPROTO_IPV6;
907 else
908 return NF_ACCEPT;
909
910 /* We assume any code from br_dev_queue_push_xmit onwards doesn't care
911 * about the value of skb->pkt_type. */
912 if (skb->pkt_type == PACKET_OTHERHOST) {
913 skb->pkt_type = PACKET_HOST;
914 nf_bridge->mask |= BRNF_PKT_TYPE;
915 }
916
917 nf_bridge_pull_encap_header(skb);
918 nf_bridge_save_header(skb);
919 if (pf == NFPROTO_IPV4)
920 skb->protocol = htons(ETH_P_IP);
921 else
922 skb->protocol = htons(ETH_P_IPV6);
923
924 NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
925 br_nf_dev_queue_xmit);
926
927 return NF_STOLEN;
928 }
929
930 /* IP/SABOTAGE *****************************************************/
931 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
932 * for the second time. */
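/* BRNF_NF_BRIDGE_PREROUTING is set in setup_pre_routing() and cleared by
 * the finish functions, so it is only still set while the packet is
 * inside the bridge-initiated PRE_ROUTING pass.  Once it has been
 * cleared, a second pass through PF_INET(6)/PRE_ROUTING is cut short
 * with NF_STOP, which ends traversal of the remaining hooks.
 */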
933 static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
934 struct sk_buff *skb,
935 const struct net_device *in,
936 const struct net_device *out,
937 int (*okfn)(struct sk_buff *))
938 {
939 if (skb->nf_bridge &&
940 !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
941 return NF_STOP;
942 }
943
944 return NF_ACCEPT;
945 }
946
947 /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
948 * br_dev_queue_push_xmit is called afterwards */
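/* br_nf_forward_ip is registered at NF_BR_PRI_BRNF - 1 so it runs before
 * br_nf_forward_arp on the same FORWARD hook; the two ip_sabotage_in
 * entries sit at the very front of the IPv4/IPv6 PRE_ROUTING chains.
 */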
949 static struct nf_hook_ops br_nf_ops[] __read_mostly = {
950 {
951 .hook = br_nf_pre_routing,
952 .owner = THIS_MODULE,
953 .pf = NFPROTO_BRIDGE,
954 .hooknum = NF_BR_PRE_ROUTING,
955 .priority = NF_BR_PRI_BRNF,
956 },
957 {
958 .hook = br_nf_local_in,
959 .owner = THIS_MODULE,
960 .pf = NFPROTO_BRIDGE,
961 .hooknum = NF_BR_LOCAL_IN,
962 .priority = NF_BR_PRI_BRNF,
963 },
964 {
965 .hook = br_nf_forward_ip,
966 .owner = THIS_MODULE,
967 .pf = NFPROTO_BRIDGE,
968 .hooknum = NF_BR_FORWARD,
969 .priority = NF_BR_PRI_BRNF - 1,
970 },
971 {
972 .hook = br_nf_forward_arp,
973 .owner = THIS_MODULE,
974 .pf = NFPROTO_BRIDGE,
975 .hooknum = NF_BR_FORWARD,
976 .priority = NF_BR_PRI_BRNF,
977 },
978 {
979 .hook = br_nf_post_routing,
980 .owner = THIS_MODULE,
981 .pf = NFPROTO_BRIDGE,
982 .hooknum = NF_BR_POST_ROUTING,
983 .priority = NF_BR_PRI_LAST,
984 },
985 {
986 .hook = ip_sabotage_in,
987 .owner = THIS_MODULE,
988 .pf = NFPROTO_IPV4,
989 .hooknum = NF_INET_PRE_ROUTING,
990 .priority = NF_IP_PRI_FIRST,
991 },
992 {
993 .hook = ip_sabotage_in,
994 .owner = THIS_MODULE,
995 .pf = NFPROTO_IPV6,
996 .hooknum = NF_INET_PRE_ROUTING,
997 .priority = NF_IP6_PRI_FIRST,
998 },
999 };
1000
1001 #ifdef CONFIG_SYSCTL
1002 static
1003 int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
1004 void __user *buffer, size_t *lenp, loff_t *ppos)
1005 {
1006 int ret;
1007
1008 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1009
1010 if (write && *(int *)(ctl->data))
1011 *(int *)(ctl->data) = 1;
1012 return ret;
1013 }
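/* The knobs in the table below end up under /proc/sys/net/bridge/, e.g.
 *   sysctl -w net.bridge.bridge-nf-call-iptables=1
 *   sysctl -w net.bridge.bridge-nf-filter-vlan-tagged=0
 * Any non-zero value written is normalised to 1 by the handler above.
 */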
1014
1015 static struct ctl_table brnf_table[] = {
1016 {
1017 .procname = "bridge-nf-call-arptables",
1018 .data = &brnf_call_arptables,
1019 .maxlen = sizeof(int),
1020 .mode = 0644,
1021 .proc_handler = brnf_sysctl_call_tables,
1022 },
1023 {
1024 .procname = "bridge-nf-call-iptables",
1025 .data = &brnf_call_iptables,
1026 .maxlen = sizeof(int),
1027 .mode = 0644,
1028 .proc_handler = brnf_sysctl_call_tables,
1029 },
1030 {
1031 .procname = "bridge-nf-call-ip6tables",
1032 .data = &brnf_call_ip6tables,
1033 .maxlen = sizeof(int),
1034 .mode = 0644,
1035 .proc_handler = brnf_sysctl_call_tables,
1036 },
1037 {
1038 .procname = "bridge-nf-filter-vlan-tagged",
1039 .data = &brnf_filter_vlan_tagged,
1040 .maxlen = sizeof(int),
1041 .mode = 0644,
1042 .proc_handler = brnf_sysctl_call_tables,
1043 },
1044 {
1045 .procname = "bridge-nf-filter-pppoe-tagged",
1046 .data = &brnf_filter_pppoe_tagged,
1047 .maxlen = sizeof(int),
1048 .mode = 0644,
1049 .proc_handler = brnf_sysctl_call_tables,
1050 },
1051 {
1052 .procname = "bridge-nf-pass-vlan-input-dev",
1053 .data = &brnf_pass_vlan_indev,
1054 .maxlen = sizeof(int),
1055 .mode = 0644,
1056 .proc_handler = brnf_sysctl_call_tables,
1057 },
1058 { }
1059 };
1060 #endif
1061
1062 int __init br_netfilter_init(void)
1063 {
1064 int ret;
1065
1066 ret = dst_entries_init(&fake_dst_ops);
1067 if (ret < 0)
1068 return ret;
1069
1070 ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1071 if (ret < 0) {
1072 dst_entries_destroy(&fake_dst_ops);
1073 return ret;
1074 }
1075 #ifdef CONFIG_SYSCTL
1076 brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
1077 if (brnf_sysctl_header == NULL) {
1078 printk(KERN_WARNING
1079 "br_netfilter: can't register to sysctl.\n");
1080 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1081 dst_entries_destroy(&fake_dst_ops);
1082 return -ENOMEM;
1083 }
1084 #endif
1085 printk(KERN_NOTICE "Bridge firewalling registered\n");
1086 return 0;
1087 }
1088
1089 void br_netfilter_fini(void)
1090 {
1091 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1092 #ifdef CONFIG_SYSCTL
1093 unregister_net_sysctl_table(brnf_sysctl_header);
1094 #endif
1095 dst_entries_destroy(&fake_dst_ops);
1096 }