/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#include <net/llc.h>

#include "checksum.h"
#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

#include "compat.h"

/* If the native device stats aren't 64 bit, use the vport stats tracking instead. */
#define USE_VPORT_STATS (sizeof(((struct net_device_stats *)0)->rx_bytes) < sizeof(u64))

static void netdev_port_receive(struct vport *vport, struct sk_buff *skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *netdev_frame_hook(struct sk_buff *skb)
{
        struct vport *vport;

        if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
                return skb;

        vport = netdev_get_vport(skb->dev);

        netdev_port_receive(vport, skb);

        return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/*
 * Used as br_handle_frame_hook.  (Cannot run the bridge at the same time,
 * even on a different set of devices!)
 */
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *netdev_frame_hook(struct net_bridge_port *p,
                                         struct sk_buff *skb)
{
        netdev_port_receive((struct vport *)p, skb);
        return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/*
 * Used as br_handle_frame_hook.  (Cannot run the bridge at the same time,
 * even on a different set of devices!)
 */
/* Called with rcu_read_lock and bottom-halves disabled. */
static int netdev_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
        netdev_port_receive((struct vport *)p, *pskb);
        return 1;
}
#else
#error "Unsupported Linux kernel version"
#endif

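/*
 * On kernels 2.6.36 and later, packets are intercepted per device via the
 * rx_handler registered in netdev_create(), so the module-wide init/exit
 * hooks have nothing to do.  On older kernels we instead take over the
 * bridge module's single global br_handle_frame_hook.
 */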
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
static int netdev_init(void) { return 0; }
static void netdev_exit(void) { }
#else
static int netdev_init(void)
{
        /* Hook into callback used by the bridge to intercept packets.
         * Parasites we are. */
        br_handle_frame_hook = netdev_frame_hook;

        return 0;
}

static void netdev_exit(void)
{
        br_handle_frame_hook = NULL;
}
#endif

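/*
 * Attaches an existing network device to the datapath: takes a reference on
 * the device, rejects loopback, non-Ethernet, and internal devices, seeds the
 * vport stats if we are tracking them ourselves, registers the receive hook,
 * and puts the device into promiscuous mode with LRO disabled.
 */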
static struct vport *netdev_create(const struct vport_parms *parms)
{
        struct vport *vport;
        struct netdev_vport *netdev_vport;
        int err;

        vport = vport_alloc(sizeof(struct netdev_vport), &netdev_vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        netdev_vport = netdev_vport_priv(vport);

        netdev_vport->dev = dev_get_by_name(&init_net, parms->name);
        if (!netdev_vport->dev) {
                err = -ENODEV;
                goto error_free_vport;
        }

        if (netdev_vport->dev->flags & IFF_LOOPBACK ||
            netdev_vport->dev->type != ARPHRD_ETHER ||
            is_internal_dev(netdev_vport->dev)) {
                err = -EINVAL;
                goto error_put;
        }

        /* If we are using the vport stats layer, initialize it to the current
         * values so we are roughly consistent with the device stats. */
        if (USE_VPORT_STATS) {
                struct rtnl_link_stats64 stats;

                err = netdev_get_stats(vport, &stats);
                if (!err)
                        vport_set_stats(vport, &stats);
        }

        err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
                                         vport);
        if (err)
                goto error_put;

        dev_set_promiscuity(netdev_vport->dev, 1);
        dev_disable_lro(netdev_vport->dev);
        netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;

        return vport;

error_put:
        dev_put(netdev_vport->dev);
error_free_vport:
        vport_free(vport);
error:
        return ERR_PTR(err);
}

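/*
 * Undoes netdev_create(): clears the datapath flag, unregisters the receive
 * hook, drops promiscuous mode, waits for in-flight RCU readers, and releases
 * the device reference and the vport.
 */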
static int netdev_destroy(struct vport *vport)
{
        struct netdev_vport *netdev_vport = netdev_vport_priv(vport);

        netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
        netdev_rx_handler_unregister(netdev_vport->dev);
        dev_set_promiscuity(netdev_vport->dev, -1);

        synchronize_rcu();

        dev_put(netdev_vport->dev);
        vport_free(vport);

        return 0;
}

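/*
 * The setters and getters below are thin pass-throughs to the underlying
 * net_device.
 */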
int netdev_set_mtu(struct vport *vport, int mtu)
{
        struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return dev_set_mtu(netdev_vport->dev, mtu);
}

int netdev_set_addr(struct vport *vport, const unsigned char *addr)
{
        struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        struct sockaddr sa;

        sa.sa_family = ARPHRD_ETHER;
        memcpy(sa.sa_data, addr, ETH_ALEN);

        return dev_set_mac_address(netdev_vport->dev, &sa);
}

const char *netdev_get_name(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return netdev_vport->dev->name;
}

const unsigned char *netdev_get_addr(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return netdev_vport->dev->dev_addr;
}

struct kobject *netdev_get_kobj(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return &netdev_vport->dev->NETDEV_DEV_MEMBER.kobj;
}

int netdev_get_stats(const struct vport *vport, struct rtnl_link_stats64 *stats)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        dev_get_stats(netdev_vport->dev, stats);
        return 0;
}

unsigned netdev_get_dev_flags(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return dev_get_flags(netdev_vport->dev);
}

int netdev_is_running(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return netif_running(netdev_vport->dev);
}

unsigned char netdev_get_operstate(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return netdev_vport->dev->operstate;
}

int netdev_get_ifindex(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return netdev_vport->dev->ifindex;
}

int netdev_get_iflink(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return netdev_vport->dev->iflink;
}

int netdev_get_mtu(const struct vport *vport)
{
        const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        return netdev_vport->dev->mtu;
}

/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
        /* Make our own copy of the packet.  Otherwise we will mangle the
         * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
         * (No one comes after us, since we tell handle_bridge() that we took
         * the packet.) */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return;

        skb_warn_if_lro(skb);

        skb_push(skb, ETH_HLEN);
        compute_ip_summed(skb, false);

        vport_receive(vport, skb);
}

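/*
 * Sends a packet out the attached device.  Returns the number of bytes
 * handed to the stack; dev_queue_xmit()'s own return value is ignored.
 */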
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
        struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
        int len = skb->len;

        skb->dev = netdev_vport->dev;
        forward_ip_summed(skb);
        dev_queue_xmit(skb);

        return len;
}

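/*
 * How a device's vport is found depends on the kernel: with rx_handler
 * support the vport is stored as the device's rx_handler_data (guarded by
 * either IFF_OVS_DATAPATH or a check that our hook is installed), while on
 * older bridge-hook kernels it lives in dev->br_port.
 */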
/* Returns NULL if this device is not attached to a datapath. */
struct vport *netdev_get_vport(struct net_device *dev)
{
#ifdef IFF_BRIDGE_PORT
#if IFF_BRIDGE_PORT != IFF_OVS_DATAPATH
        if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
#else
        if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
#endif
                return (struct vport *)rcu_dereference_rtnl(dev->rx_handler_data);
        else
                return NULL;
#else
        return (struct vport *)rcu_dereference_rtnl(dev->br_port);
#endif
}

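/* Vport operations for ports backed by an existing network device. */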
const struct vport_ops netdev_vport_ops = {
        .type = "netdev",
        .flags = (VPORT_F_REQUIRED |
                  (USE_VPORT_STATS ? VPORT_F_GEN_STATS : 0)),
        .init = netdev_init,
        .exit = netdev_exit,
        .create = netdev_create,
        .destroy = netdev_destroy,
        .set_mtu = netdev_set_mtu,
        .set_addr = netdev_set_addr,
        .get_name = netdev_get_name,
        .get_addr = netdev_get_addr,
        .get_kobj = netdev_get_kobj,
        .get_stats = netdev_get_stats,
        .get_dev_flags = netdev_get_dev_flags,
        .is_running = netdev_is_running,
        .get_operstate = netdev_get_operstate,
        .get_ifindex = netdev_get_ifindex,
        .get_iflink = netdev_get_iflink,
        .get_mtu = netdev_get_mtu,
        .send = netdev_send,
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
/*
 * In kernels earlier than 2.6.36, Open vSwitch cannot safely coexist with
 * the Linux bridge module on any released version of Linux, because there
 * is only a single bridge hook function and only a single br_port member
 * in struct net_device.
 *
 * Declaring and exporting this symbol enforces mutual exclusion.  The bridge
 * module also exports the same symbol, so the module loader will refuse to
 * load both modules at the same time (e.g. "bridge: exports duplicate symbol
 * br_should_route_hook (owned by openvswitch_mod)").
 *
 * The use of "typeof" here avoids the need to track changes in the type of
 * br_should_route_hook over various kernel versions.
 */
typeof(br_should_route_hook) br_should_route_hook;
EXPORT_SYMBOL(br_should_route_hook);
#endif