]> git.proxmox.com Git - mirror_ovs.git/blob - datapath/vport-internal_dev.c
tunneling: Internal dev vport can be NULL.
[mirror_ovs.git] / datapath / vport-internal_dev.c
1 /*
2 * Copyright (c) 2009, 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
4 *
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/rcupdate.h>
14 #include <linux/skbuff.h>
15
16 #include "datapath.h"
17 #include "vport-generic.h"
18 #include "vport-internal_dev.h"
19 #include "vport-netdev.h"
20
/* Private data for an internal device, stored in the net_device private
 * area (retrieved with internal_dev_priv()).
 *
 * 'vport' is assigned once in internal_dev_create() and never cleared.
 * 'attached_vport' mirrors 'vport' while the vport is attached to a
 * datapath and is NULL otherwise; it is published and cleared with
 * rcu_assign_pointer() in internal_dev_attach()/internal_dev_detach()
 * and read with rcu_dereference(). */
struct internal_dev {
	struct vport *attached_vport, *vport;
	struct net_device_stats stats;	/* Stats reported to the host stack. */
};
25
/* Returns the internal_dev private area embedded in 'netdev'. */
static inline struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
	void *priv = netdev_priv(netdev);

	return priv;
}
30
31 /* This function is only called by the kernel network layer. It is not a vport
32 * get_stats() function. If a vport get_stats() function is defined that
33 * results in this being called it will cause infinite recursion. */
34 static struct net_device_stats *internal_dev_sys_stats(struct net_device *netdev)
35 {
36 struct vport *vport = internal_dev_get_vport(netdev);
37 struct net_device_stats *stats = &internal_dev_priv(netdev)->stats;
38
39 if (vport) {
40 struct odp_vport_stats vport_stats;
41
42 vport_get_stats(vport, &vport_stats);
43
44 /* The tx and rx stats need to be swapped because the switch
45 * and host OS have opposite perspectives. */
46 stats->rx_packets = vport_stats.tx_packets;
47 stats->tx_packets = vport_stats.rx_packets;
48 stats->rx_bytes = vport_stats.tx_bytes;
49 stats->tx_bytes = vport_stats.rx_bytes;
50 stats->rx_errors = vport_stats.tx_errors;
51 stats->tx_errors = vport_stats.rx_errors;
52 stats->rx_dropped = vport_stats.tx_dropped;
53 stats->tx_dropped = vport_stats.rx_dropped;
54 stats->collisions = vport_stats.collisions;
55 }
56
57 return stats;
58 }
59
60 static int internal_dev_mac_addr(struct net_device *dev, void *p)
61 {
62 struct sockaddr *addr = p;
63
64 if (!is_valid_ether_addr(addr->sa_data))
65 return -EADDRNOTAVAIL;
66 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
67 return 0;
68 }
69
/* Called with rcu_read_lock and bottom-halves disabled.
 *
 * ndo_start_xmit handler: a packet transmitted by the host stack on the
 * internal device enters the switch here via vport_receive().
 *
 * NOTE(review): this reads 'vport' (assigned once at creation), not
 * 'attached_vport', so it presumably relies on the transmit queue being
 * stopped while the vport is detached -- confirm against
 * internal_dev_attach()/internal_dev_detach(). */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct internal_dev *internal_dev = internal_dev_priv(netdev);
	struct vport *vport = rcu_dereference(internal_dev->vport);

	/* We need our own clone. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		vport_record_error(vport, VPORT_E_RX_DROPPED);
		return 0;
	}

	skb_reset_mac_header(skb);
	compute_ip_summed(skb, true);	/* Normalize checksum state on entry
					 * to the switch. */
	OVS_CB(skb)->flow = NULL;	/* No flow has been looked up yet. */

	vport_receive(vport, skb);

	return 0;
}
91
/* ndo_open handler: allow the stack to start transmitting. */
static int internal_dev_open(struct net_device *netdev)
{
	netif_start_queue(netdev);
	return 0;
}
97
/* ndo_stop handler: stop the transmit queue when the device goes down. */
static int internal_dev_stop(struct net_device *netdev)
{
	netif_stop_queue(netdev);
	return 0;
}
103
104 static void internal_dev_getinfo(struct net_device *netdev,
105 struct ethtool_drvinfo *info)
106 {
107 struct vport *vport = internal_dev_get_vport(netdev);
108 struct dp_port *dp_port;
109
110 strcpy(info->driver, "openvswitch");
111
112 if (!vport)
113 return;
114
115 dp_port = vport_get_dp_port(vport);
116 if (dp_port)
117 sprintf(info->bus_info, "%d.%d", dp_port->dp->dp_idx, dp_port->port_no);
118 }
119
/* ethtool handlers for internal devices: generic feature-flag helpers plus
 * our drvinfo callback. */
static struct ethtool_ops internal_dev_ethtool_ops = {
	.get_drvinfo = internal_dev_getinfo,
	.get_link = ethtool_op_get_link,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
};
130
131 static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
132 {
133 struct vport *vport = internal_dev_get_vport(netdev);
134
135 if (new_mtu < 68)
136 return -EINVAL;
137
138 if (vport) {
139 struct dp_port *dp_port = vport_get_dp_port(vport);
140
141 if (dp_port) {
142 if (new_mtu > dp_min_mtu(dp_port->dp))
143 return -EINVAL;
144 }
145 }
146
147 netdev->mtu = new_mtu;
148 return 0;
149 }
150
/* net_device destructor: runs after the kernel drops its last reference to
 * the unregistered device (installed in do_setup()). */
static void internal_dev_free(struct net_device *netdev)
{
	free_netdev(netdev);
}
155
156 static int internal_dev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
157 {
158 if (dp_ioctl_hook)
159 return dp_ioctl_hook(dev, ifr, cmd);
160
161 return -EOPNOTSUPP;
162 }
163
#ifdef HAVE_NET_DEVICE_OPS
/* Device callbacks, for kernels that bundle them in net_device_ops (older
 * kernels assign the individual net_device fields in do_setup()). */
static const struct net_device_ops internal_dev_netdev_ops = {
	.ndo_open = internal_dev_open,
	.ndo_stop = internal_dev_stop,
	.ndo_start_xmit = internal_dev_xmit,
	.ndo_set_mac_address = internal_dev_mac_addr,
	.ndo_do_ioctl = internal_dev_do_ioctl,
	.ndo_change_mtu = internal_dev_change_mtu,
	.ndo_get_stats = internal_dev_sys_stats,
};
#endif
175
/* Initializes 'netdev' as an internal device.  Used as the setup callback
 * for alloc_netdev() in internal_dev_create(). */
static void do_setup(struct net_device *netdev)
{
	ether_setup(netdev);

#ifdef HAVE_NET_DEVICE_OPS
	netdev->netdev_ops = &internal_dev_netdev_ops;
#else
	/* Older kernels: each handler is an individual net_device field. */
	netdev->do_ioctl = internal_dev_do_ioctl;
	netdev->get_stats = internal_dev_sys_stats;
	netdev->hard_start_xmit = internal_dev_xmit;
	netdev->open = internal_dev_open;
	netdev->stop = internal_dev_stop;
	netdev->set_mac_address = internal_dev_mac_addr;
	netdev->change_mtu = internal_dev_change_mtu;
#endif

	netdev->destructor = internal_dev_free;
	SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
	netdev->tx_queue_len = 0;	/* Virtual device: no transmit queue. */

	netdev->flags = IFF_BROADCAST | IFF_MULTICAST;
	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_HIGHDMA
				| NETIF_F_HW_CSUM | NETIF_F_TSO;

	/* Random MAC; can be overridden later via set_mac_address. */
	vport_gen_rand_ether_addr(netdev->dev_addr);
}
202
/* vport create callback for internal devices.
 *
 * Allocates the vport and a backing net_device (with internal_dev private
 * data), links the private data back to the vport, and registers the
 * device with the kernel.  The vport pointer is published with
 * rcu_assign_pointer() before register_netdevice() so it is visible once
 * the device can run.
 *
 * Returns the new vport, or an ERR_PTR on failure; the goto-based unwind
 * releases everything acquired up to the failing step. */
static struct vport *internal_dev_create(const char *name,
					 const void __user *config)
{
	struct vport *vport;
	struct netdev_vport *netdev_vport;
	struct internal_dev *internal_dev;
	int err;

	vport = vport_alloc(sizeof(struct netdev_vport), &internal_vport_ops);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	netdev_vport = netdev_vport_priv(vport);

	netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev), name, do_setup);
	if (!netdev_vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	internal_dev = internal_dev_priv(netdev_vport->dev);
	rcu_assign_pointer(internal_dev->vport, vport);

	err = register_netdevice(netdev_vport->dev);
	if (err)
		goto error_free_netdev;

	return vport;

error_free_netdev:
	/* Registration failed, so free the device directly: the destructor
	 * path only runs for devices that were successfully registered. */
	free_netdev(netdev_vport->dev);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}
241
/* vport destroy callback: unregisters the backing net_device (which is
 * later freed by its destructor, internal_dev_free()) and releases the
 * vport itself.  Always returns 0. */
static int internal_dev_destroy(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);

	unregister_netdevice(netdev_vport->dev);
	vport_free(vport);

	return 0;
}
251
/* vport attach callback: publishes 'attached_vport' (read under RCU by
 * internal_dev_get_vport()), puts the device into promiscuous mode, and
 * starts the transmit queue.  Always returns 0. */
static int internal_dev_attach(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	struct internal_dev *internal_dev = internal_dev_priv(netdev_vport->dev);

	rcu_assign_pointer(internal_dev->attached_vport, internal_dev->vport);
	dev_set_promiscuity(netdev_vport->dev, 1);
	netif_start_queue(netdev_vport->dev);

	return 0;
}
263
/* vport detach callback: mirror of internal_dev_attach().  Stops the
 * transmit queue and drops promiscuity before clearing 'attached_vport',
 * so readers that still see the old pointer do so only briefly.
 *
 * NOTE(review): presumably the caller waits for an RCU grace period
 * before freeing the vport -- confirm in the vport framework. */
static int internal_dev_detach(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	struct internal_dev *internal_dev = internal_dev_priv(netdev_vport->dev);

	netif_stop_queue(netdev_vport->dev);
	dev_set_promiscuity(netdev_vport->dev, -1);
	rcu_assign_pointer(internal_dev->attached_vport, NULL);

	return 0;
}
275
276 static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
277 {
278 struct net_device *netdev = netdev_vport_priv(vport)->dev;
279 int len;
280
281 skb->dev = netdev;
282 len = skb->len;
283 skb->pkt_type = PACKET_HOST;
284 skb->protocol = eth_type_trans(skb, netdev);
285
286 if (in_interrupt())
287 netif_rx(skb);
288 else
289 netif_rx_ni(skb);
290 netdev->last_rx = jiffies;
291
292 return len;
293 }
294
/* Operations for "internal" vports, which hand packets to the local
 * network stack.  Most getters are shared with ordinary netdev vports;
 * note that the switch's "send" is the host's receive. */
struct vport_ops internal_vport_ops = {
	.type = "internal",
	.flags = VPORT_F_REQUIRED | VPORT_F_GEN_STATS | VPORT_F_FLOW,
	.create = internal_dev_create,
	.destroy = internal_dev_destroy,
	.attach = internal_dev_attach,
	.detach = internal_dev_detach,
	.set_mtu = netdev_set_mtu,
	.set_addr = netdev_set_addr,
	.get_name = netdev_get_name,
	.get_addr = netdev_get_addr,
	.get_kobj = netdev_get_kobj,
	.get_dev_flags = netdev_get_dev_flags,
	.is_running = netdev_is_running,
	.get_operstate = netdev_get_operstate,
	.get_ifindex = netdev_get_ifindex,
	.get_iflink = netdev_get_iflink,
	.get_mtu = netdev_get_mtu,
	.send = internal_dev_recv,
};
315
/* Returns nonzero if 'netdev' is an internal device created by this
 * module, identified by its ops pointer. */
int is_internal_dev(const struct net_device *netdev)
{
#ifdef HAVE_NET_DEVICE_OPS
	return netdev->netdev_ops == &internal_dev_netdev_ops;
#else
	/* Pre-net_device_ops kernels: compare an individual handler. */
	return netdev->open == internal_dev_open;
#endif
}
324
/* Returns nonzero if 'vport' is an internal vport. */
int is_internal_vport(const struct vport *vport)
{
	return vport->ops == &internal_vport_ops;
}
329
330 struct vport *internal_dev_get_vport(struct net_device *netdev)
331 {
332 struct internal_dev *internal_dev;
333
334 if (!is_internal_dev(netdev))
335 return NULL;
336
337 internal_dev = internal_dev_priv(netdev);
338 return rcu_dereference(internal_dev->attached_vport);
339 }