]> git.proxmox.com Git - mirror_ovs.git/blame - datapath/vport-internal_dev.c
datapath: ip6_gre: Split up ip6gre_tnl_change()
[mirror_ovs.git] / datapath / vport-internal_dev.c
CommitLineData
f2459fe7 1/*
8063e095 2 * Copyright (c) 2007-2012 Nicira, Inc.
f2459fe7 3 *
a9a29d22
JG
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
f2459fe7
JG
17 */
18
6ce39213 19#include <linux/if_vlan.h>
f2459fe7
JG
20#include <linux/kernel.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/ethtool.h>
f2459fe7 24#include <linux/skbuff.h>
f2459fe7 25
53e6421b
JG
26#include <net/dst.h>
27#include <net/xfrm.h>
e23775f2 28#include <net/rtnetlink.h>
53e6421b 29
f2459fe7
JG
30#include "datapath.h"
31#include "vport-internal_dev.h"
32#include "vport-netdev.h"
33
/* Per-device private data for an OVS internal port, embedded in the
 * net_device's priv area (see internal_dev_priv()). */
struct internal_dev {
	struct vport *vport;	/* Back-pointer to the owning OVS vport. */
};
37
/* Forward declaration; the ops table is defined at the bottom of this file
 * and referenced by internal_dev_create(). */
static struct vport_ops ovs_internal_vport_ops;
/* Returns the internal_dev private area carried by @netdev. */
static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
	return netdev_priv(netdev);
}
44
/* Called with rcu_read_lock_bh. */
/* ndo_start_xmit handler: a packet transmitted on the internal device is
 * handed to the datapath as if it had been received on the vport.
 *
 * NOTE(review): despite the header comment above, this also takes a plain
 * rcu_read_lock() around ovs_vport_receive() -- presumably for kernels
 * where the BH lock alone is not sufficient for the RCU flavor used by
 * the datapath; confirm against the compat layer. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int len, err;

	/* Cache the length now; ovs_vport_receive() consumes the skb. */
	len = skb->len;
	rcu_read_lock();
	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();

	if (likely(!err)) {
		/* Successful hand-off counts as a transmit on this device. */
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		netdev->stats.tx_errors++;
	}
	return 0;
}
67
/* ndo_open: allow the stack to queue packets to this device. */
static int internal_dev_open(struct net_device *netdev)
{
	netif_start_queue(netdev);
	return 0;
}
73
/* ndo_stop: stop the stack from queuing packets to this device. */
static int internal_dev_stop(struct net_device *netdev)
{
	netif_stop_queue(netdev);
	return 0;
}
79
/* ethtool get_drvinfo: only the driver name is reported. */
static void internal_dev_getinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "openvswitch", sizeof(info->driver));
}
85
/* Minimal ethtool support: driver info plus generic link detection. */
static const struct ethtool_ops internal_dev_ethtool_ops = {
	.get_drvinfo	= internal_dev_getinfo,
	.get_link	= ethtool_op_get_link,
};
90
#if !defined(HAVE_NET_DEVICE_WITH_MAX_MTU) && !defined(HAVE_RHEL7_MAX_MTU)
/* ndo_change_mtu fallback for kernels whose net_device has no
 * min_mtu/max_mtu fields; newer kernels range-check the MTU in the core,
 * making this handler unnecessary (hence the compat guard). */
static int internal_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < ETH_MIN_MTU) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
				    dev->name, new_mtu, ETH_MIN_MTU);
		return -EINVAL;
	}

	if (new_mtu > ETH_MAX_MTU) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
				    dev->name, new_mtu, ETH_MAX_MTU);
		return -EINVAL;
	}

	dev->mtu = new_mtu;
	return 0;
}
#endif
f2459fe7 110
/* net_device destructor: releases the vport allocated alongside the
 * netdev in internal_dev_create().  On kernels without needs_free_netdev
 * the destructor is also responsible for freeing the netdev itself;
 * otherwise the core frees it after calling us. */
static void internal_dev_destructor(struct net_device *dev)
{
	struct vport *vport = ovs_internal_dev_get_vport(dev);

	ovs_vport_free(vport);
#ifndef HAVE_NEEDS_FREE_NETDEV
	free_netdev(dev);
#endif
}
120
3c82e35d 121static void
8063e095 122internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
e23775f2 123{
8063e095
PS
124 int i;
125
126 memset(stats, 0, sizeof(*stats));
127 stats->rx_errors = dev->stats.rx_errors;
128 stats->tx_errors = dev->stats.tx_errors;
129 stats->tx_dropped = dev->stats.tx_dropped;
130 stats->rx_dropped = dev->stats.rx_dropped;
131
132 for_each_possible_cpu(i) {
133 const struct pcpu_sw_netstats *percpu_stats;
134 struct pcpu_sw_netstats local_stats;
135 unsigned int start;
136
137 percpu_stats = per_cpu_ptr(dev->tstats, i);
138
139 do {
140 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
141 local_stats = *percpu_stats;
142 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
143
144 stats->rx_bytes += local_stats.rx_bytes;
145 stats->rx_packets += local_stats.rx_packets;
146 stats->tx_bytes += local_stats.tx_bytes;
147 stats->tx_packets += local_stats.tx_packets;
148 }
e23775f2 149}
e23775f2 150
/* Netdev callbacks for OVS internal devices. */
static const struct net_device_ops internal_dev_netdev_ops = {
	.ndo_open = internal_dev_open,
	.ndo_stop = internal_dev_stop,
	.ndo_start_xmit = internal_dev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
#if !defined(HAVE_NET_DEVICE_WITH_MAX_MTU) && !defined(HAVE_RHEL7_MAX_MTU)
	.ndo_change_mtu = internal_dev_change_mtu,
#endif
	/* NOTE(review): the (void *) cast presumably bridges the
	 * ndo_get_stats64 prototype difference across kernel versions
	 * (older kernels return the stats pointer, newer ones return
	 * void) -- confirm against the compat definitions. */
	.ndo_get_stats64 = (void *)internal_get_stats,
};
f2459fe7 161
/* rtnl_link identity so "ip link" reports these devices as openvswitch;
 * registered/unregistered by the helpers at the bottom of this file. */
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
	.kind = "openvswitch",
};
165
/* alloc_netdev() setup callback: configures an internal device as an
 * Ethernet device with OVS-specific flags, ops, offload features, and a
 * random MAC address. */
static void do_setup(struct net_device *netdev)
{
	ether_setup(netdev);

#ifdef HAVE_NET_DEVICE_WITH_MAX_MTU
	netdev->max_mtu = ETH_MAX_MTU;
#endif
	netdev->netdev_ops = &internal_dev_netdev_ops;

	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
			      IFF_NO_QUEUE;
	/* Destructor hookup differs by kernel: older kernels use a single
	 * ->destructor that must free the netdev, newer ones split it into
	 * ->priv_destructor plus needs_free_netdev. */
#ifndef HAVE_NEEDS_FREE_NETDEV
	netdev->destructor = internal_dev_destructor;
#else
	netdev->needs_free_netdev = true;
	netdev->priv_destructor = internal_dev_destructor;
#endif /* HAVE_NEEDS_FREE_NETDEV */
	netdev->ethtool_ops = &internal_dev_ethtool_ops;
	netdev->rtnl_link_ops = &internal_dev_link_ops;

#ifndef HAVE_IFF_NO_QUEUE
	/* Pre-IFF_NO_QUEUE kernels signal "no qdisc" via a zero length. */
	netdev->tx_queue_len = 0;
#endif

	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			   NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;

	netdev->vlan_features = netdev->features;
	netdev->hw_enc_features = netdev->features;
	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	/* LLTX is not toggleable from userspace, so keep it out of
	 * hw_features. */
	netdev->hw_features = netdev->features & ~NETIF_F_LLTX;

	eth_hw_addr_random(netdev);
}
202
/* vport_ops->create: allocates the vport and its backing net_device,
 * registers the netdev, and brings its queue up.
 *
 * Returns the new vport or an ERR_PTR.  On failure every resource
 * acquired so far is unwound via the goto chain below. */
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct internal_dev *internal_dev;
	int err;

	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	vport->dev = alloc_netdev(sizeof(struct internal_dev),
				  parms->name, NET_NAME_USER, do_setup);
	if (!vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}
	vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vport->dev->tstats) {
		err = -ENOMEM;
		goto error_free_netdev;
	}

	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
	internal_dev = internal_dev_priv(vport->dev);
	internal_dev->vport = vport;

	/* Restrict bridge port to current netns. */
	if (vport->port_no == OVSP_LOCAL)
		vport->dev->features |= NETIF_F_NETNS_LOCAL;

	rtnl_lock();
	err = register_netdevice(vport->dev);
	if (err)
		goto error_unlock;

	/* Promiscuous mode so the datapath sees all traffic on the port. */
	dev_set_promiscuity(vport->dev, 1);
	rtnl_unlock();
	netif_start_queue(vport->dev);

	return vport;

error_unlock:
	rtnl_unlock();
	free_percpu(vport->dev->tstats);
error_free_netdev:
	free_netdev(vport->dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
256
/* vport_ops->destroy: quiesce and unregister the backing net_device.
 * The vport itself is freed later by internal_dev_destructor() once the
 * netdev is torn down. */
static void internal_dev_destroy(struct vport *vport)
{
	netif_stop_queue(vport->dev);
	rtnl_lock();
	dev_set_promiscuity(vport->dev, -1);

	/* unregister_netdevice() waits for an RCU grace period. */
	unregister_netdevice(vport->dev);
	free_percpu(vport->dev->tstats);
	rtnl_unlock();
}
268
e23775f2 269static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
f2459fe7 270{
e23775f2 271 struct net_device *netdev = skb->dev;
e23775f2 272 struct pcpu_sw_netstats *stats;
f2459fe7 273
0077a780
CL
274 if (unlikely(!(netdev->flags & IFF_UP))) {
275 kfree_skb(skb);
e23775f2
PS
276 netdev->stats.rx_dropped++;
277 return NETDEV_TX_OK;
0077a780
CL
278 }
279
53e6421b
JG
280 skb_dst_drop(skb);
281 nf_reset(skb);
282 secpath_reset(skb);
283
f2459fe7
JG
284 skb->pkt_type = PACKET_HOST;
285 skb->protocol = eth_type_trans(skb, netdev);
3cfede14 286 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
f2459fe7 287
8063e095 288 stats = this_cpu_ptr(netdev->tstats);
e23775f2
PS
289 u64_stats_update_begin(&stats->syncp);
290 stats->rx_packets++;
291 stats->rx_bytes += skb->len;
292 u64_stats_update_end(&stats->syncp);
a5b7d883 293
e23775f2
PS
294 netif_rx(skb);
295 return NETDEV_TX_OK;
f2459fe7
JG
296}
297
/* Ops table for OVS_VPORT_TYPE_INTERNAL ports; note that ->send points at
 * internal_dev_recv(): sending out of the datapath means receiving on the
 * internal device. */
static struct vport_ops ovs_internal_vport_ops = {
	.type = OVS_VPORT_TYPE_INTERNAL,
	.create = internal_dev_create,
	.destroy = internal_dev_destroy,
	.send = internal_dev_recv,
};
304
/* Returns non-zero iff @netdev is an OVS internal device, identified by
 * its netdev_ops pointer. */
int ovs_is_internal_dev(const struct net_device *netdev)
{
	return netdev->netdev_ops == &internal_dev_netdev_ops;
}
309
850b6b3b 310struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
f2459fe7 311{
850b6b3b 312 if (!ovs_is_internal_dev(netdev))
17a07f9f 313 return NULL;
7237e4f4 314
c052da84 315 return internal_dev_priv(netdev)->vport;
f2459fe7 316}
5282e284
TG
317
318int ovs_internal_dev_rtnl_link_register(void)
319{
5a38795f
TG
320 int err;
321
322 err = rtnl_link_register(&internal_dev_link_ops);
323 if (err < 0)
324 return err;
325
326 err = ovs_vport_ops_register(&ovs_internal_vport_ops);
327 if (err < 0)
328 rtnl_link_unregister(&internal_dev_link_ops);
329
330 return err;
5282e284
TG
331}
332
/* Tears down what ovs_internal_dev_rtnl_link_register() set up, in
 * reverse order of registration. */
void ovs_internal_dev_rtnl_link_unregister(void)
{
	ovs_vport_ops_unregister(&ovs_internal_vport_ops);
	rtnl_link_unregister(&internal_dev_link_ops);
}