git.proxmox.com Git - mirror_ovs.git / blob - datapath/vport-internal_dev.c
datapath: Directly use free_netdev for internal devices.
/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

#include "datapath.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

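/* Private data for an internal device.  'vport' is set when the device is
 * created and never changes; 'attached_vport' mirrors it while the port is
 * attached to a datapath and is NULL otherwise (see internal_dev_attach()
 * and internal_dev_detach()). */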
struct internal_dev {
	struct vport *attached_vport, *vport;
	struct net_device_stats stats;
};

static inline struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
	return netdev_priv(netdev);
}

/* This function is only called by the kernel network layer.  It is not a
 * vport get_stats() function.  If a vport get_stats() function were defined
 * that ended up calling back into this function, the result would be
 * infinite recursion. */
static struct net_device_stats *internal_dev_sys_stats(struct net_device *netdev)
{
	struct vport *vport = internal_dev_get_vport(netdev);
	struct net_device_stats *stats = &internal_dev_priv(netdev)->stats;

	if (vport) {
		struct odp_vport_stats vport_stats;

		vport_get_stats(vport, &vport_stats);

		/* The tx and rx stats need to be swapped because the switch
		 * and host OS have opposite perspectives. */
		stats->rx_packets = vport_stats.tx_packets;
		stats->tx_packets = vport_stats.rx_packets;
		stats->rx_bytes = vport_stats.tx_bytes;
		stats->tx_bytes = vport_stats.rx_bytes;
		stats->rx_errors = vport_stats.tx_errors;
		stats->tx_errors = vport_stats.rx_errors;
		stats->rx_dropped = vport_stats.tx_dropped;
		stats->tx_dropped = vport_stats.rx_dropped;
		stats->collisions = vport_stats.collisions;
	}

	return stats;
}

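/* Sets the device's MAC address (for example via the SIOCSIFHWADDR ioctl),
 * rejecting multicast and all-zero addresses. */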
static int internal_dev_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

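/* Transmit path of the internal device: a frame sent by the host network
 * stack on this device is handed to the datapath as if it had been received
 * on the corresponding vport. */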
/* Called with rcu_read_lock and bottom-halves disabled. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct internal_dev *internal_dev = internal_dev_priv(netdev);
	struct vport *vport = rcu_dereference(internal_dev->vport);

	/* We need our own clone. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		vport_record_error(vport, VPORT_E_RX_DROPPED);
		return 0;
	}

	skb_reset_mac_header(skb);
	compute_ip_summed(skb, true);
	OVS_CB(skb)->flow = NULL;

	vport_receive(vport, skb);

	return 0;
}

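/* The open and stop callbacks only toggle the device's transmit queue; adding
 * the port to and removing it from a datapath is handled by the attach and
 * detach operations below. */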
static int internal_dev_open(struct net_device *netdev)
{
	netif_start_queue(netdev);
	return 0;
}

static int internal_dev_stop(struct net_device *netdev)
{
	netif_stop_queue(netdev);
	return 0;
}

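/* ethtool get_drvinfo handler: reports the driver name and, when the port is
 * attached to a datapath, fills bus_info with
 * "<datapath index>.<port number>". */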
static void internal_dev_getinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *info)
{
	struct vport *vport = internal_dev_get_vport(netdev);
	struct dp_port *dp_port;

	strcpy(info->driver, "openvswitch");

	if (!vport)
		return;

	dp_port = vport_get_dp_port(vport);
	if (dp_port)
		sprintf(info->bus_info, "%d.%d", dp_port->dp->dp_idx, dp_port->port_no);
}

static struct ethtool_ops internal_dev_ethtool_ops = {
	.get_drvinfo = internal_dev_getinfo,
	.get_link = ethtool_op_get_link,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
};

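/* Rejects MTUs below 68 (the minimum IPv4 MTU) and, while the port is
 * attached to a datapath, MTUs larger than the value that dp_min_mtu()
 * reports for that datapath. */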
static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vport *vport = internal_dev_get_vport(netdev);

	if (new_mtu < 68)
		return -EINVAL;

	if (vport) {
		struct dp_port *dp_port = vport_get_dp_port(vport);

		if (dp_port) {
			if (new_mtu > dp_min_mtu(dp_port->dp))
				return -EINVAL;
		}
	}

	netdev->mtu = new_mtu;
	return 0;
}

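/* Forwards device ioctls to dp_ioctl_hook if one has been registered;
 * otherwise reports the operation as unsupported. */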
static int internal_dev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (dp_ioctl_hook)
		return dp_ioctl_hook(dev, ifr, cmd);

	return -EOPNOTSUPP;
}

#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops internal_dev_netdev_ops = {
	.ndo_open = internal_dev_open,
	.ndo_stop = internal_dev_stop,
	.ndo_start_xmit = internal_dev_xmit,
	.ndo_set_mac_address = internal_dev_mac_addr,
	.ndo_do_ioctl = internal_dev_do_ioctl,
	.ndo_change_mtu = internal_dev_change_mtu,
	.ndo_get_stats = internal_dev_sys_stats,
};
#endif

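/* Setup callback passed to alloc_netdev(): initializes the device as an
 * Ethernet device, installs the callbacks above (through net_device_ops on
 * kernels that have it, individual net_device fields otherwise), arranges for
 * the device to be freed with free_netdev() by its destructor, and assigns a
 * random MAC address. */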
static void do_setup(struct net_device *netdev)
{
	ether_setup(netdev);

#ifdef HAVE_NET_DEVICE_OPS
	netdev->netdev_ops = &internal_dev_netdev_ops;
#else
	netdev->do_ioctl = internal_dev_do_ioctl;
	netdev->get_stats = internal_dev_sys_stats;
	netdev->hard_start_xmit = internal_dev_xmit;
	netdev->open = internal_dev_open;
	netdev->stop = internal_dev_stop;
	netdev->set_mac_address = internal_dev_mac_addr;
	netdev->change_mtu = internal_dev_change_mtu;
#endif

	netdev->destructor = free_netdev;
	SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
	netdev->tx_queue_len = 0;

	netdev->flags = IFF_BROADCAST | IFF_MULTICAST;
	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_HIGHDMA
			   | NETIF_F_HW_CSUM | NETIF_F_TSO;

	vport_gen_rand_ether_addr(netdev->dev_addr);
}

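/* Creates an internal vport: allocates the vport and its backing net_device,
 * points the device's private data back at the vport, and registers the
 * device, unwinding the allocations on failure. */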
static struct vport *internal_dev_create(const char *name,
					 const void __user *config)
{
	struct vport *vport;
	struct netdev_vport *netdev_vport;
	struct internal_dev *internal_dev;
	int err;

	vport = vport_alloc(sizeof(struct netdev_vport), &internal_vport_ops);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	netdev_vport = netdev_vport_priv(vport);

	netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev), name, do_setup);
	if (!netdev_vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	internal_dev = internal_dev_priv(netdev_vport->dev);
	rcu_assign_pointer(internal_dev->vport, vport);

	err = register_netdevice(netdev_vport->dev);
	if (err)
		goto error_free_netdev;

	return vport;

error_free_netdev:
	free_netdev(netdev_vport->dev);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}

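/* Unregisters the net_device.  The device itself is freed later by its
 * destructor (free_netdev, set in do_setup()) once unregistration has
 * completed. */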
static int internal_dev_destroy(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);

	unregister_netdevice(netdev_vport->dev);
	vport_free(vport);

	return 0;
}

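/* Marks the port as attached (so internal_dev_get_vport() starts returning
 * it), puts the device into promiscuous mode, and enables its transmit
 * queue. */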
static int internal_dev_attach(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	struct internal_dev *internal_dev = internal_dev_priv(netdev_vport->dev);

	rcu_assign_pointer(internal_dev->attached_vport, internal_dev->vport);
	dev_set_promiscuity(netdev_vport->dev, 1);
	netif_start_queue(netdev_vport->dev);

	return 0;
}

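/* Reverses internal_dev_attach(): stops the transmit queue, drops the
 * promiscuity reference, and clears the attached_vport pointer. */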
static int internal_dev_detach(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	struct internal_dev *internal_dev = internal_dev_priv(netdev_vport->dev);

	netif_stop_queue(netdev_vport->dev);
	dev_set_promiscuity(netdev_vport->dev, -1);
	rcu_assign_pointer(internal_dev->attached_vport, NULL);

	return 0;
}

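/* The vport send operation: a packet that the switch outputs to this port is
 * injected into the host network stack as if it had been received on the
 * internal device.  Returns the number of bytes delivered. */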
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

	skb->dev = netdev;
	len = skb->len;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);

	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);
	netdev->last_rx = jiffies;

	return len;
}

struct vport_ops internal_vport_ops = {
	.type = "internal",
	.flags = VPORT_F_REQUIRED | VPORT_F_GEN_STATS | VPORT_F_FLOW,
	.create = internal_dev_create,
	.destroy = internal_dev_destroy,
	.attach = internal_dev_attach,
	.detach = internal_dev_detach,
	.set_mtu = netdev_set_mtu,
	.set_addr = netdev_set_addr,
	.get_name = netdev_get_name,
	.get_addr = netdev_get_addr,
	.get_kobj = netdev_get_kobj,
	.get_dev_flags = netdev_get_dev_flags,
	.is_running = netdev_is_running,
	.get_operstate = netdev_get_operstate,
	.get_ifindex = netdev_get_ifindex,
	.get_iflink = netdev_get_iflink,
	.get_mtu = netdev_get_mtu,
	.send = internal_dev_recv,
};

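/* Returns nonzero if 'netdev' is one of our internal devices, identified by
 * the callbacks installed in do_setup(). */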
int is_internal_dev(const struct net_device *netdev)
{
#ifdef HAVE_NET_DEVICE_OPS
	return netdev->netdev_ops == &internal_dev_netdev_ops;
#else
	return netdev->open == internal_dev_open;
#endif
}

int is_internal_vport(const struct vport *vport)
{
	return vport->ops == &internal_vport_ops;
}

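/* Returns the vport currently attached to 'netdev', or NULL if the device is
 * not an internal device or is not attached to a datapath.  The
 * attached_vport pointer is RCU-protected, so callers are expected to be in
 * an RCU read-side critical section. */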
struct vport *internal_dev_get_vport(struct net_device *netdev)
{
	struct internal_dev *internal_dev;

	if (!is_internal_dev(netdev))
		return NULL;

	internal_dev = internal_dev_priv(netdev);
	return rcu_dereference(internal_dev->attached_vport);
}