/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
13 | ||
14 | #include <linux/err.h> | |
15 | #include <linux/slab.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/netdevice.h> | |
18 | #include <linux/netpoll.h> | |
19 | #include <linux/skbuff.h> | |
20 | #include <linux/if_vlan.h> | |
21 | #include <linux/netfilter_bridge.h> | |
22 | #include "br_private.h" | |
23 | ||
24 | static int deliver_clone(const struct net_bridge_port *prev, | |
25 | struct sk_buff *skb, | |
26 | void (*__packet_hook)(const struct net_bridge_port *p, | |
27 | struct sk_buff *skb)); | |
28 | ||
29 | /* Don't forward packets to originating port or forwarding disabled */ | |
30 | static inline int should_deliver(const struct net_bridge_port *p, | |
31 | const struct sk_buff *skb) | |
32 | { | |
33 | return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && | |
34 | br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) && | |
35 | p->state == BR_STATE_FORWARDING; | |
36 | } | |
37 | ||
38 | int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) | |
39 | { | |
40 | if (!is_skb_forwardable(skb->dev, skb)) | |
41 | goto drop; | |
42 | ||
43 | skb_push(skb, ETH_HLEN); | |
44 | br_drop_fake_rtable(skb); | |
45 | skb_sender_cpu_clear(skb); | |
46 | ||
47 | if (skb->ip_summed == CHECKSUM_PARTIAL && | |
48 | (skb->protocol == htons(ETH_P_8021Q) || | |
49 | skb->protocol == htons(ETH_P_8021AD))) { | |
50 | int depth; | |
51 | ||
52 | if (!__vlan_get_protocol(skb, skb->protocol, &depth)) | |
53 | goto drop; | |
54 | ||
55 | skb_set_network_header(skb, depth); | |
56 | } | |
57 | ||
58 | dev_queue_xmit(skb); | |
59 | ||
60 | return 0; | |
61 | ||
62 | drop: | |
63 | kfree_skb(skb); | |
64 | return 0; | |
65 | } | |
66 | EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); | |
67 | ||
68 | int br_forward_finish(struct sock *sk, struct sk_buff *skb) | |
69 | { | |
70 | struct net *net = dev_net(skb->dev); | |
71 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, | |
72 | net, sk, skb, NULL, skb->dev, | |
73 | br_dev_queue_push_xmit); | |
74 | ||
75 | } | |
76 | EXPORT_SYMBOL_GPL(br_forward_finish); | |
77 | ||
78 | static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) | |
79 | { | |
80 | skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb); | |
81 | if (!skb) | |
82 | return; | |
83 | ||
84 | skb->dev = to->dev; | |
85 | ||
86 | if (unlikely(netpoll_tx_running(to->br->dev))) { | |
87 | if (!is_skb_forwardable(skb->dev, skb)) | |
88 | kfree_skb(skb); | |
89 | else { | |
90 | skb_push(skb, ETH_HLEN); | |
91 | br_netpoll_send_skb(to, skb); | |
92 | } | |
93 | return; | |
94 | } | |
95 | ||
96 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, | |
97 | dev_net(skb->dev), NULL, skb,NULL, skb->dev, | |
98 | br_forward_finish); | |
99 | } | |
100 | ||
101 | static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) | |
102 | { | |
103 | struct net_device *indev; | |
104 | ||
105 | if (skb_warn_if_lro(skb)) { | |
106 | kfree_skb(skb); | |
107 | return; | |
108 | } | |
109 | ||
110 | skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb); | |
111 | if (!skb) | |
112 | return; | |
113 | ||
114 | indev = skb->dev; | |
115 | skb->dev = to->dev; | |
116 | skb_forward_csum(skb); | |
117 | ||
118 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, | |
119 | dev_net(indev), NULL, skb, indev, skb->dev, | |
120 | br_forward_finish); | |
121 | } | |
122 | ||
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	/* No port, or the port must not receive this frame: drop it. */
	if (!to || !should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_deliver(to, skb);
}
EXPORT_SYMBOL_GPL(br_deliver);
134 | ||
135 | /* called with rcu_read_lock */ | |
136 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) | |
137 | { | |
138 | if (should_deliver(to, skb)) { | |
139 | if (skb0) | |
140 | deliver_clone(to, skb, __br_forward); | |
141 | else | |
142 | __br_forward(to, skb); | |
143 | return; | |
144 | } | |
145 | ||
146 | if (!skb0) | |
147 | kfree_skb(skb); | |
148 | } | |
149 | ||
150 | static int deliver_clone(const struct net_bridge_port *prev, | |
151 | struct sk_buff *skb, | |
152 | void (*__packet_hook)(const struct net_bridge_port *p, | |
153 | struct sk_buff *skb)) | |
154 | { | |
155 | struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; | |
156 | ||
157 | skb = skb_clone(skb, GFP_ATOMIC); | |
158 | if (!skb) { | |
159 | dev->stats.tx_dropped++; | |
160 | return -ENOMEM; | |
161 | } | |
162 | ||
163 | __packet_hook(prev, skb); | |
164 | return 0; | |
165 | } | |
166 | ||
/* Lazy-delivery helper: instead of transmitting to @p immediately, send a
 * clone to the previously remembered port @prev (if any) and return @p as
 * the new "pending" port.  This lets the final port receive the original
 * skb without an extra clone.  Returns @prev unchanged when @p must not
 * receive the frame, or ERR_PTR on clone failure.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	if (!should_deliver(p, skb))
		return prev;

	if (prev) {
		int err = deliver_clone(prev, skb, __packet_hook);

		if (err)
			return ERR_PTR(err);
	}

	return p;
}
188 | ||
189 | /* called under bridge lock */ | |
190 | static void br_flood(struct net_bridge *br, struct sk_buff *skb, | |
191 | struct sk_buff *skb0, | |
192 | void (*__packet_hook)(const struct net_bridge_port *p, | |
193 | struct sk_buff *skb), | |
194 | bool unicast) | |
195 | { | |
196 | struct net_bridge_port *p; | |
197 | struct net_bridge_port *prev; | |
198 | ||
199 | prev = NULL; | |
200 | ||
201 | list_for_each_entry_rcu(p, &br->port_list, list) { | |
202 | /* Do not flood unicast traffic to ports that turn it off */ | |
203 | if (unicast && !(p->flags & BR_FLOOD)) | |
204 | continue; | |
205 | ||
206 | /* Do not flood to ports that enable proxy ARP */ | |
207 | if (p->flags & BR_PROXYARP) | |
208 | continue; | |
209 | if ((p->flags & BR_PROXYARP_WIFI) && | |
210 | BR_INPUT_SKB_CB(skb)->proxyarp_replied) | |
211 | continue; | |
212 | ||
213 | prev = maybe_deliver(prev, p, skb, __packet_hook); | |
214 | if (IS_ERR(prev)) | |
215 | goto out; | |
216 | } | |
217 | ||
218 | if (!prev) | |
219 | goto out; | |
220 | ||
221 | if (skb0) | |
222 | deliver_clone(prev, skb, __packet_hook); | |
223 | else | |
224 | __packet_hook(prev, skb); | |
225 | return; | |
226 | ||
227 | out: | |
228 | if (!skb0) | |
229 | kfree_skb(skb); | |
230 | } | |
231 | ||
232 | ||
/* called with rcu_read_lock */
/* Flood a locally-originated frame to all ports.  No local copy is kept
 * (skb0 == NULL), so br_flood() consumes skb.
 */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}
238 | ||
/* called under bridge lock */
/* Flood a received frame to all ports.  A non-NULL skb2 tells br_flood()
 * to clone for the final port and leave skb intact for the caller.
 */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}
245 | ||
246 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | |
/* called with rcu_read_lock */
/* Deliver a multicast frame to the union of the MDB entry's port group
 * list and the bridge's multicast-router port list, exactly once per port.
 *
 * Both lists are walked simultaneously, merge-style, comparing ports by
 * pointer value: at each step the numerically larger port pointer is
 * delivered to, and whichever list(s) held that port advance.  A port
 * present on both lists therefore advances both cursors in one step and
 * is delivered to only once.  NOTE(review): this relies on both lists
 * being kept sorted by descending port pointer — confirm against the
 * insertion sites in br_multicast.c.
 *
 * Like br_flood(): intermediate ports get clones via maybe_deliver(),
 * the last port gets the original skb (or a clone when skb0 is non-NULL,
 * in which case the caller keeps skb).
 */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		/* Current candidate from each list (NULL when exhausted). */
		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		/* Pick the larger pointer; NULL always loses. */
		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		/* Advance every list whose head matched the chosen port. */
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	/* Deliver to the last pending port. */
	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
295 | ||
/* called with rcu_read_lock */
/* Deliver a locally-originated multicast frame to the MDB group and router
 * ports.  skb0 == NULL, so br_multicast_flood() consumes skb.
 */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
302 | ||
/* called with rcu_read_lock */
/* Forward a received multicast frame to the MDB group and router ports.
 * A non-NULL skb2 makes br_multicast_flood() clone for the final port and
 * leave skb intact for the caller.
 */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
309 | #endif |