/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vlan.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;
EXPORT_SYMBOL_GPL(ovs_net_id);

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
 */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, genl_info_net(info),
		    info->snd_portid, GROUP_ID(grp), info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, e.g. writes to device state (add/remove datapath or port, set
 * operations on vports, etc.) and writes to other state (flow table
 * modifications, setting miscellaneous datapath parameters, etc.), are
 * protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
#endif

static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			return vport->dp;
	}

	return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return vport->ops->get_name(vport);
}

static int get_dpifindex(const struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = netdev_vport_priv(local)->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
					 &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;
		int error;

		memset(&upcall, 0, sizeof(upcall));
		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		error = ovs_dp_upcall(dp, skb, key, &upcall);
		if (unlikely(error))
			kfree_skb(skb);
		else
			consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	ovs_execute_actions(dp, skb, sf_acts, key);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->syncp);
}

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, key, upcall_info);
	else
		err = queue_gso_packets(dp, skb, key, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

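/* Software-segment a GSO packet and queue each segment to userspace,
 * switching to a "later fragment" flow key for trailing UDP fragments.
 */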
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	struct ovs_skb_cb ovs_cb;
	int err;

	ovs_cb = *OVS_CB(skb);
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	*OVS_CB(skb) = ovs_cb;
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	if (gso_type & SKB_GSO_UDP) {
		/* The initial flow key extracted by ovs_flow_key_extract()
		 * in this case is for a first fragment, so we need to
		 * properly mark later fragments.
		 */
		later_key = *key;
		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
	}

	/* Queue all of the segments. */
	skb = segs;
	do {
		*OVS_CB(skb) = ovs_cb;
		if (gso_type & SKB_GSO_UDP && skb != segs)
			key = &later_key;

		err = queue_userspace_packet(dp, skb, key, upcall_info);
		if (err)
			break;

	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}

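/* Estimate the Netlink message size needed for an upcall, so the reply
 * skb can be sized up front.
 */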
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	/* OVS_PACKET_ATTR_ACTIONS */
	if (upcall_info->actions_len)
		size += nla_total_size(upcall_info->actions_len);

	return size;
}

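/* Build an upcall Netlink message (flow key, optional userdata, egress
 * tunnel key and actions, plus the packet itself) and unicast it to the
 * configured upcall portid.
 */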
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	struct genl_info info = {
#ifdef HAVE_GENLMSG_NEW_UNICAST
		.dst_sk = ovs_dp_get_net(dp)->genl_sock,
#endif
		.snd_portid = upcall_info->portid,
	};
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (skb_vlan_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = vlan_insert_tag_set_proto(nskb, nskb->vlan_proto, skb_vlan_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		vlan_set_tci(nskb, 0);

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info, hlen);
	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
	BUG_ON(err);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	if (upcall_info->egress_tun_info) {
		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
		err = ovs_nla_put_egress_tunnel_key(user_skb,
						    upcall_info->egress_tun_info);
		BUG_ON(err);
		nla_nest_end(user_skb, nla);
	}

	if (upcall_info->actions_len) {
		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
		err = ovs_nla_put_actions(upcall_info->actions,
					  upcall_info->actions_len,
					  user_skb);
		if (!err)
			nla_nest_end(user_skb, nla);
		else
			nla_nest_cancel(user_skb, nla);
	}

	/* Only reserve room for attribute header, packet data is added
	 * in skb_zerocopy()
	 */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len);

	err = skb_zerocopy(user_skb, skb, skb->len, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;

		if (plen > 0)
			memset(skb_put(user_skb, plen), 0, plen);
	}

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	kfree_skb(user_skb);
	kfree_skb(nskb);
	return err;
}

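/* OVS_PACKET_CMD_EXECUTE: reconstruct a packet supplied by userspace and
 * run the given actions on it through the datapath.
 */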
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct ethhdr *eth;
	struct vport *input_vport;
	int len;
	int err;
	bool log = !a[OVS_PACKET_ATTR_PROBE];

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have.
	 */
	if (eth_proto_is_802_3(eth->h_proto))
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
					     &flow->key, log);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts, log);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	OVS_CB(packet)->egress_tun_info = NULL;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
};

static struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_packet_genl_ops,
	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};

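/* Fold the per-CPU datapath counters into 'stats' and 'mega_stats'. */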
static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
	return ovs_identifier_is_ufid(sfid) &&
	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
				    const struct sw_flow_id *sfid,
				    uint32_t ufid_flags)
{
	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

	/* OVS_FLOW_ATTR_UFID */
	if (sfid && ovs_identifier_is_ufid(sfid))
		len += nla_total_size(sfid->ufid_len);

	/* OVS_FLOW_ATTR_KEY */
	if (!sfid || should_fill_key(sfid, ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_MASK */
	if (should_fill_mask(ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_ACTIONS */
	if (should_fill_actions(ufid_flags))
		len += nla_total_size(acts->actions_len);

	return len
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'. This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them. (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_identifier(flow, skb);
	if (err)
		goto error;

	if (should_fill_key(&flow->id, ufid_flags)) {
		err = ovs_nla_put_masked_key(flow, skb);
		if (err)
			goto error;
	}

	if (should_fill_mask(ufid_flags)) {
		err = ovs_nla_put_mask(flow, skb);
		if (err)
			goto error;
	}

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	if (should_fill_actions(ufid_flags)) {
		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
		if (err)
			goto error;
	}

	genlmsg_end(skb, ovs_header);
	return 0;

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       const struct sw_flow_id *sfid,
					       struct genl_info *info,
					       bool always,
					       uint32_t ufid_flags)
{
	struct sk_buff *skb;
	size_t len;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
					GROUP_ID(&ovs_dp_flow_multicast_group)))
		return NULL;

	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
	skb = genlmsg_new_unicast(len, info, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always, u32 ufid_flags)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
				      &flow->id, info, always, ufid_flags);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd, ufid_flags);
	BUG_ON(retval < 0);
	return skb;
}

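/* OVS_FLOW_CMD_NEW: insert a new flow, or update the actions of an existing
 * flow when the UFID/key already matches.
 */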
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow = NULL, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_key key;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attr not present in new flow.");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR(log, "Flow actions attr not present in new flow.");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	ovs_match_init(&match, &key, &mask);
	error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto err_kfree_flow;

	ovs_flow_mask_key(&new_flow->key, &key, &mask);

	/* Extract flow identifier. */
	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
				       &key, log);
	if (error)
		goto err_kfree_flow;

	/* Validate actions. */
	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
				     &acts, log);
	if (error) {
		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
		goto err_kfree_flow;
	}

	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
					ufid_flags);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}

	/* Check if this is a duplicate flow */
	if (ovs_identifier_is_ufid(&new_flow->id))
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
	if (!flow)
		flow = ovs_flow_tbl_lookup(&dp->table, &key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request. We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}
		/* The flow identifier has to be the same for flow updates.
		 * Look for any overlapping flow.
		 */
		if (unlikely(!ovs_flow_cmp(flow, &match))) {
			if (ovs_identifier_is_key(&flow->id))
				flow = ovs_flow_tbl_lookup_exact(&dp->table,
								 &match);
			else /* UFID matches but key is different */
				flow = NULL;
			if (!flow) {
				error = -ENOENT;
				goto err_unlock_ovs;
			}
		}
		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();

		ovs_nla_free_flow_actions(old_acts);
		ovs_flow_free(new_flow, false);
	}

	if (reply)
		ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	kfree(acts);
err_kfree_flow:
	ovs_flow_free(new_flow, false);
error:
	return error;
}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
						const struct sw_flow_key *key,
						const struct sw_flow_mask *mask,
						bool log)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	ovs_flow_mask_key(&masked_key, key, mask);
	error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
	if (error) {
		OVS_NLERR(log,
			  "Actions may not be safe on all matching packets");
		return ERR_PTR(error);
	}

	return acts;
}

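/* OVS_FLOW_CMD_SET: update the actions and/or clear the stats of an existing
 * flow, looked up by UFID or by exact key match.
 */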
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	struct sw_flow_id sfid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attribute not present in set flow.");
		goto error;
	}

	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
	ovs_match_init(&match, &key, &mask);
	error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto error;

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask,
					log);
		if (IS_ERR(acts)) {
			error = PTR_ERR(acts);
			goto error;
		}

		/* Can allocate before locking if have acts. */
		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
						ufid_flags);
		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_kfree_acts;
		}
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check that the flow exists. */
	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		error = -ENOENT;
		goto err_unlock_ovs;
	}

	/* Update actions, if present. */
	if (likely(acts)) {
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
	} else {
		/* Could not alloc without acts before locking. */
		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
						info, OVS_FLOW_CMD_NEW, false,
						ufid_flags);

		if (unlikely(IS_ERR(reply))) {
			error = PTR_ERR(reply);
			goto err_unlock_ovs;
		}
	}

	/* Clear stats. */
	if (a[OVS_FLOW_ATTR_CLEAR])
		ovs_flow_stats_clear(flow);
	ovs_unlock();

	if (reply)
		ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
	if (old_acts)
		ovs_nla_free_flow_actions(old_acts);

	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	kfree(acts);
error:
	return error;
}

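/* OVS_FLOW_CMD_GET: look up a single flow by UFID or exact key and reply
 * with its key, mask, stats and actions.
 */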
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, NULL);
		err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
					log);
	} else if (!ufid_present) {
		OVS_NLERR(log,
			  "Flow get message rejected, Key attribute missing.");
		err = -EINVAL;
	}
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (!flow) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
					OVS_FLOW_CMD_NEW, true, ufid_flags);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

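/* OVS_FLOW_CMD_DEL: delete one flow, or flush the whole table when neither
 * a key nor a UFID is supplied.
 */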
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow = NULL;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, NULL);
		err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
					log);
		if (unlikely(err))
			return err;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		err = -ENODEV;
		goto unlock;
	}

	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		err = -ENOENT;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);
	ovs_unlock();

	reply = ovs_flow_cmd_alloc_info(rcu_dereference_raw(flow->sf_acts),
					&flow->id, info, false, ufid_flags);

	if (likely(reply)) {
		if (likely(!IS_ERR(reply))) {
			rcu_read_lock();	/* To keep RCU checker happy. */
			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
						     reply, info->snd_portid,
						     info->snd_seq, 0,
						     OVS_FLOW_CMD_DEL,
						     ufid_flags);
			rcu_read_unlock();
			BUG_ON(err < 0);
			ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
		} else {
			genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
				     GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));
		}
	}

	ovs_flow_free(flow, true);
	return 0;
unlock:
	ovs_unlock();
	return err;
}

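/* Netlink dump callback: walk the flow table and emit one message per flow,
 * resuming from the bucket/entry saved in cb->args.
 */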
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *a[__OVS_FLOW_ATTR_MAX];
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;
	u32 ufid_flags;
	int err;

	err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
			    OVS_FLOW_ATTR_MAX, flow_policy);
	if (err)
		return err;
	ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW, ufid_flags) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
	[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};

static struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_set,
	},
};

static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_flow_genl_ops,
	.n_ops = ARRAY_SIZE(dp_flow_genl_ops),
	.mcgrps = &ovs_dp_flow_multicast_group,
	.n_mcgrps = 1,
};

static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

	return msgsize;
}

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
		    &dp_stats))
		goto nla_put_failure;

	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
		    sizeof(struct ovs_dp_megaflow_stats),
		    &dp_megaflow_stats))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
		goto nla_put_failure;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
{
	return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					const struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return;

	WARN(dp->user_features, "Dropping previously announced user features\n");
	dp->user_features = 0;
}

static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
	if (a[OVS_DP_ATTR_USER_FEATURES])
		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
}

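/* OVS_DP_CMD_NEW: create a datapath along with its flow table, per-CPU
 * stats and the OVSP_LOCAL internal port.
 */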
df2c07f4 1507static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
d6569377 1508{
aaff4b55 1509 struct nlattr **a = info->attrs;
d6569377 1510 struct vport_parms parms;
aaff4b55 1511 struct sk_buff *reply;
d6569377
BP
1512 struct datapath *dp;
1513 struct vport *vport;
2a4999f3 1514 struct ovs_net *ovs_net;
95b1d73a 1515 int err, i;
d6569377 1516
d6569377 1517 err = -EINVAL;
ea36840f 1518 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
aaff4b55
BP
1519 goto err;
1520
d81eef1b
JR
1521 reply = ovs_dp_cmd_alloc_info(info);
1522 if (!reply)
1523 return -ENOMEM;
d6569377 1524
d6569377
BP
1525 err = -ENOMEM;
1526 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1527 if (dp == NULL)
d81eef1b 1528 goto err_free_reply;
2a4999f3 1529
0ceaa66c
JG
1530 ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1531
d6569377 1532 /* Allocate table. */
994dc286
PS
1533 err = ovs_flow_tbl_init(&dp->table);
1534 if (err)
d6569377
BP
1535 goto err_free_dp;
1536
08fb1bbd 1537 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
99769a40
JG
1538 if (!dp->stats_percpu) {
1539 err = -ENOMEM;
1540 goto err_destroy_table;
1541 }
1542
95b1d73a
PS
1543 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1544 GFP_KERNEL);
1545 if (!dp->ports) {
1546 err = -ENOMEM;
1547 goto err_destroy_percpu;
1548 }
1549
1550 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1551 INIT_HLIST_HEAD(&dp->ports[i]);
1552
d6569377 1553 /* Set up our datapath device. */
df2c07f4
JP
1554 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1555 parms.type = OVS_VPORT_TYPE_INTERNAL;
d6569377
BP
1556 parms.options = NULL;
1557 parms.dp = dp;
df2c07f4 1558 parms.port_no = OVSP_LOCAL;
beb1c69a 1559 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
b063d9f0 1560
c58cc9a4
TG
1561 ovs_dp_change(dp, a);
1562
d81eef1b
JR
1563 /* So far only local changes have been made, now need the lock. */
1564 ovs_lock();
1565
d6569377
BP
1566 vport = new_vport(&parms);
1567 if (IS_ERR(vport)) {
1568 err = PTR_ERR(vport);
1569 if (err == -EBUSY)
1570 err = -EEXIST;
1571
94358dcf
TG
1572 if (err == -EEXIST) {
1573 /* An outdated user space instance that does not understand
1574 * the concept of user_features has attempted to create a new
1575 * datapath and is likely to reuse it. Drop all user features.
1576 */
1577 if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1578 ovs_dp_reset_user_features(skb, info);
1579 }
1580
95b1d73a 1581 goto err_destroy_ports_array;
d6569377 1582 }
d6569377 1583
d81eef1b
JR
1584 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1585 info->snd_seq, 0, OVS_DP_CMD_NEW);
1586 BUG_ON(err < 0);
aaff4b55 1587
2a4999f3 1588 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
fb93e9aa 1589 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
a0fb56c1 1590
cd2a59e9 1591 ovs_unlock();
d6569377 1592
cb25142c 1593 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
d6569377
BP
1594 return 0;
1595
95b1d73a 1596err_destroy_ports_array:
d81eef1b 1597 ovs_unlock();
95b1d73a 1598 kfree(dp->ports);
99769a40
JG
1599err_destroy_percpu:
1600 free_percpu(dp->stats_percpu);
d6569377 1601err_destroy_table:
e379e4d1 1602 ovs_flow_tbl_destroy(&dp->table);
d6569377 1603err_free_dp:
0ceaa66c 1604 release_net(ovs_dp_get_net(dp));
d6569377 1605 kfree(dp);
d81eef1b
JR
1606err_free_reply:
1607 kfree_skb(reply);
d6569377 1608err:
064af421
BP
1609 return err;
1610}
1611
cd2a59e9 1612/* Called with ovs_mutex. */
2a4999f3 1613static void __dp_destroy(struct datapath *dp)
44e05eca 1614{
95b1d73a 1615 int i;
44e05eca 1616
95b1d73a
PS
1617 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1618 struct vport *vport;
f8dfbcb7 1619 struct hlist_node *n;
95b1d73a 1620
f8dfbcb7 1621 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
95b1d73a
PS
1622 if (vport->port_no != OVSP_LOCAL)
1623 ovs_dp_detach_port(vport);
1624 }
ed099e92 1625
fb93e9aa 1626 list_del_rcu(&dp->list_node);
ed099e92 1627
cd2a59e9 1628 /* OVSP_LOCAL is datapath internal port. We need to make sure that
d103f479
AZ
1629 * all ports in datapath are destroyed first before freeing datapath.
1630 */
cd2a59e9 1631 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
99620d2c 1632
d103f479 1633 /* RCU destroy the flow table */
ed099e92 1634 call_rcu(&dp->rcu, destroy_dp_rcu);
2a4999f3
PS
1635}
1636
1637static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1638{
1639 struct sk_buff *reply;
1640 struct datapath *dp;
1641 int err;
1642
d81eef1b
JR
1643 reply = ovs_dp_cmd_alloc_info(info);
1644 if (!reply)
1645 return -ENOMEM;
1646
cd2a59e9 1647 ovs_lock();
2a4999f3
PS
1648 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1649 err = PTR_ERR(dp);
1650 if (IS_ERR(dp))
d81eef1b 1651 goto err_unlock_free;
2a4999f3 1652
d81eef1b
JR
1653 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1654 info->snd_seq, 0, OVS_DP_CMD_DEL);
1655 BUG_ON(err < 0);
2a4999f3
PS
1656
1657 __dp_destroy(dp);
d81eef1b 1658 ovs_unlock();
7d16c847 1659
cb25142c 1660 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
99620d2c 1661 return 0;
d81eef1b
JR
1662
1663err_unlock_free:
cd2a59e9 1664 ovs_unlock();
d81eef1b 1665 kfree_skb(reply);
cd2a59e9 1666 return err;
44e05eca
BP
1667}
1668
df2c07f4 1669static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
064af421 1670{
aaff4b55 1671 struct sk_buff *reply;
d6569377 1672 struct datapath *dp;
d6569377 1673 int err;
064af421 1674
d81eef1b
JR
1675 reply = ovs_dp_cmd_alloc_info(info);
1676 if (!reply)
1677 return -ENOMEM;
1678
cd2a59e9 1679 ovs_lock();
2a4999f3 1680 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
cd2a59e9 1681 err = PTR_ERR(dp);
d6569377 1682 if (IS_ERR(dp))
d81eef1b 1683 goto err_unlock_free;
38c6ecbc 1684
c58cc9a4
TG
1685 ovs_dp_change(dp, info->attrs);
1686
d81eef1b
JR
1687 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1688 info->snd_seq, 0, OVS_DP_CMD_NEW);
1689 BUG_ON(err < 0);
a0fb56c1 1690
cd2a59e9 1691 ovs_unlock();
7d16c847 1692
cb25142c 1693 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
aaff4b55 1694 return 0;
1695
1696err_unlock_free:
cd2a59e9 1697 ovs_unlock();
d81eef1b 1698 kfree_skb(reply);
cd2a59e9 1699 return err;
1700}
1701
df2c07f4 1702static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1dcf111b 1703{
aaff4b55 1704 struct sk_buff *reply;
d6569377 1705 struct datapath *dp;
d6569377 1706 int err;
1dcf111b 1707
1708 reply = ovs_dp_cmd_alloc_info(info);
1709 if (!reply)
1710 return -ENOMEM;
1711
d637497c 1712 ovs_lock();
2a4999f3 1713 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1714 if (IS_ERR(dp)) {
1715 err = PTR_ERR(dp);
d81eef1b 1716 goto err_unlock_free;
cd2a59e9 1717 }
1718 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1719 info->snd_seq, 0, OVS_DP_CMD_NEW);
1720 BUG_ON(err < 0);
d637497c 1721 ovs_unlock();
1722
1723 return genlmsg_reply(reply, info);
cd2a59e9 1724
d81eef1b 1725err_unlock_free:
d637497c 1726 ovs_unlock();
d81eef1b 1727 kfree_skb(reply);
cd2a59e9 1728 return err;
1729}
1730
df2c07f4 1731static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
a7786963 1732{
2a4999f3 1733 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1734 struct datapath *dp;
1735 int skip = cb->args[0];
1736 int i = 0;
a7786963 1737
1738 ovs_lock();
1739 list_for_each_entry(dp, &ovs_net->dps, list_node) {
a2bab2f0 1740 if (i >= skip &&
28aea917 1741 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
aaff4b55 1742 cb->nlh->nlmsg_seq, NLM_F_MULTI,
df2c07f4 1743 OVS_DP_CMD_NEW) < 0)
aaff4b55 1744 break;
254f2dc8 1745 i++;
a7786963 1746 }
d637497c 1747 ovs_unlock();
aaff4b55 1748
1749 cb->args[0] = i;
1750
aaff4b55 1751 return skb->len;
1752}
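/* Editor's note: the dump above walks the per-namespace dps list under
 * ovs_lock and records in cb->args[0] how many datapaths have already been
 * emitted, so a continued (multi-part) dump skips them on the next call.
 */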
1753
1754static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1755 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1756 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1757 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1758};
1759
18fd3a52 1760static struct genl_ops dp_datapath_genl_ops[] = {
df2c07f4 1761 { .cmd = OVS_DP_CMD_NEW,
1762 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1763 .policy = datapath_policy,
df2c07f4 1764 .doit = ovs_dp_cmd_new
aaff4b55 1765 },
df2c07f4 1766 { .cmd = OVS_DP_CMD_DEL,
1767 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1768 .policy = datapath_policy,
df2c07f4 1769 .doit = ovs_dp_cmd_del
aaff4b55 1770 },
df2c07f4 1771 { .cmd = OVS_DP_CMD_GET,
1772 .flags = 0, /* OK for unprivileged users. */
1773 .policy = datapath_policy,
1774 .doit = ovs_dp_cmd_get,
1775 .dumpit = ovs_dp_cmd_dump
aaff4b55 1776 },
df2c07f4 1777 { .cmd = OVS_DP_CMD_SET,
1778 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1779 .policy = datapath_policy,
df2c07f4 1780 .doit = ovs_dp_cmd_set,
1781 },
1782};
1783
cb25142c 1784static struct genl_family dp_datapath_genl_family = {
f0fef760 1785 .id = GENL_ID_GENERATE,
df2c07f4 1786 .hdrsize = sizeof(struct ovs_header),
1787 .name = OVS_DATAPATH_FAMILY,
1788 .version = OVS_DATAPATH_VERSION,
1789 .maxattr = OVS_DP_ATTR_MAX,
b3dcb73c 1790 .netnsok = true,
1791 .parallel_ops = true,
1792 .ops = dp_datapath_genl_ops,
1793 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1794 .mcgrps = &ovs_dp_datapath_multicast_group,
1795 .n_mcgrps = 1,
1796};
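/* Editor's note: GENL_ID_GENERATE asks the kernel to assign the family id at
 * registration time, .netnsok makes the family usable outside the initial
 * network namespace, and .parallel_ops lets the doit/dumpit handlers above run
 * concurrently (serialization comes from ovs_lock and RCU, not the generic
 * netlink mutex).
 *
 * A hypothetical userspace sketch (not part of this file) of how such a family
 * is typically consumed with libnl-3; the function name dump_datapaths() and
 * the omitted error handling are illustrative assumptions only:
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/openvswitch.h>
 *
 *	static int dump_datapaths(void)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *		struct nl_msg *msg = nlmsg_alloc();
 *		struct ovs_header *ovs_hdr;
 *		int family;
 *
 *		genl_connect(sk);
 *		family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
 *
 *		ovs_hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *				      sizeof(*ovs_hdr),
 *				      NLM_F_REQUEST | NLM_F_DUMP,
 *				      OVS_DP_CMD_GET, OVS_DATAPATH_VERSION);
 *		ovs_hdr->dp_ifindex = 0;	// 0: dump every datapath
 *
 *		nl_send_auto(sk, msg);
 *		nl_recvmsgs_default(sk);	// a real caller would install a
 *						// callback to parse attributes
 *		nlmsg_free(msg);
 *		nl_socket_free(sk);
 *		return 0;
 *	}
 */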
1797
cd2a59e9 1798/* Called with ovs_mutex or RCU read lock. */
df2c07f4 1799static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
28aea917 1800 u32 portid, u32 seq, u32 flags, u8 cmd)
064af421 1801{
df2c07f4 1802 struct ovs_header *ovs_header;
e926dfe3 1803 struct ovs_vport_stats vport_stats;
1804 int err;
1805
28aea917 1806 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
f0fef760 1807 flags, cmd);
df2c07f4 1808 if (!ovs_header)
f0fef760 1809 return -EMSGSIZE;
c19e6535 1810
99769a40 1811 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
c19e6535 1812
1813 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1814 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
beb1c69a 1815 nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)))
c3cc8c03 1816 goto nla_put_failure;
c19e6535 1817
850b6b3b 1818 ovs_vport_get_stats(vport, &vport_stats);
1819 if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1820 &vport_stats))
1821 goto nla_put_failure;
c19e6535 1822
1823 if (ovs_vport_get_upcall_portids(vport, skb))
1824 goto nla_put_failure;
1825
850b6b3b 1826 err = ovs_vport_get_options(vport, skb);
1827 if (err == -EMSGSIZE)
1828 goto error;
c19e6535 1829
1830 genlmsg_end(skb, ovs_header);
1831 return 0;
1832
1833nla_put_failure:
1834 err = -EMSGSIZE;
f0fef760 1835error:
df2c07f4 1836 genlmsg_cancel(skb, ovs_header);
f0fef760 1837 return err;
1838}
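/* Editor's note: a vport message carries OVS_VPORT_ATTR_PORT_NO, _TYPE, _NAME,
 * _STATS and the upcall portid(s), plus any type-specific
 * OVS_VPORT_ATTR_OPTIONS.  Only an options overflow (-EMSGSIZE) is treated as
 * fatal above; other errors from ovs_vport_get_options() are silently ignored
 * so the rest of the message still goes out.
 */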
1839
1840static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1841{
1842 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1843}
1844
1845/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
28aea917 1846struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
f14d8083 1847 u32 seq, u8 cmd)
064af421 1848{
c19e6535 1849 struct sk_buff *skb;
f0fef760 1850 int retval;
c19e6535 1851
f0fef760 1852 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1853 if (!skb)
1854 return ERR_PTR(-ENOMEM);
1855
28aea917 1856 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1857 BUG_ON(retval < 0);
1858
c19e6535 1859 return skb;
f0fef760 1860}
c19e6535 1861
cd2a59e9 1862/* Called with ovs_mutex or RCU read lock. */
2a4999f3 1863static struct vport *lookup_vport(struct net *net,
f1f60b85 1864 const struct ovs_header *ovs_header,
df2c07f4 1865 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1866{
1867 struct datapath *dp;
1868 struct vport *vport;
1869
df2c07f4 1870 if (a[OVS_VPORT_ATTR_NAME]) {
2a4999f3 1871 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
ed099e92 1872 if (!vport)
c19e6535 1873 return ERR_PTR(-ENODEV);
1874 if (ovs_header->dp_ifindex &&
1875 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1876 return ERR_PTR(-ENODEV);
c19e6535 1877 return vport;
1878 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1879 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1880
1881 if (port_no >= DP_MAX_PORTS)
f0fef760 1882 return ERR_PTR(-EFBIG);
c19e6535 1883
2a4999f3 1884 dp = get_dp(net, ovs_header->dp_ifindex);
1885 if (!dp)
1886 return ERR_PTR(-ENODEV);
f2459fe7 1887
cd2a59e9 1888 vport = ovs_vport_ovsl_rcu(dp, port_no);
ed099e92 1889 if (!vport)
17535c57 1890 return ERR_PTR(-ENODEV);
1891 return vport;
1892 } else
1893 return ERR_PTR(-EINVAL);
1894}
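/* Editor's note: lookup_vport() accepts either OVS_VPORT_ATTR_NAME (a
 * namespace-wide lookup, cross-checked against dp_ifindex when one is given)
 * or OVS_VPORT_ATTR_PORT_NO (a lookup within the datapath identified by
 * dp_ifindex); when both attributes are present the name takes precedence.
 */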
1895
df2c07f4 1896static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
c19e6535 1897{
f0fef760 1898 struct nlattr **a = info->attrs;
df2c07f4 1899 struct ovs_header *ovs_header = info->userhdr;
c19e6535 1900 struct vport_parms parms;
ed099e92 1901 struct sk_buff *reply;
c19e6535 1902 struct vport *vport;
c19e6535 1903 struct datapath *dp;
b0ec0f27 1904 u32 port_no;
c19e6535 1905 int err;
b0ec0f27 1906
1907 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1908 !a[OVS_VPORT_ATTR_UPCALL_PID])
1909 return -EINVAL;
1910
1911 port_no = a[OVS_VPORT_ATTR_PORT_NO]
1912 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1913 if (port_no >= DP_MAX_PORTS)
1914 return -EFBIG;
1915
1916 reply = ovs_vport_cmd_alloc_info();
1917 if (!reply)
1918 return -ENOMEM;
f0fef760 1919
cd2a59e9 1920 ovs_lock();
5a38795f 1921restart:
2a4999f3 1922 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
c19e6535
BP
1923 err = -ENODEV;
1924 if (!dp)
d81eef1b 1925 goto exit_unlock_free;
c19e6535 1926
d81eef1b 1927 if (port_no) {
cd2a59e9 1928 vport = ovs_vport_ovsl(dp, port_no);
1929 err = -EBUSY;
1930 if (vport)
d81eef1b 1931 goto exit_unlock_free;
1932 } else {
1933 for (port_no = 1; ; port_no++) {
1934 if (port_no >= DP_MAX_PORTS) {
1935 err = -EFBIG;
d81eef1b 1936 goto exit_unlock_free;
c19e6535 1937 }
cd2a59e9 1938 vport = ovs_vport_ovsl(dp, port_no);
1939 if (!vport)
1940 break;
51d4d598 1941 }
064af421 1942 }
b0ec0f27 1943
1944 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1945 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1946 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1947 parms.dp = dp;
1948 parms.port_no = port_no;
beb1c69a 1949 parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1950
1951 vport = new_vport(&parms);
1952 err = PTR_ERR(vport);
1953 if (IS_ERR(vport)) {
1954 if (err == -EAGAIN)
1955 goto restart;
d81eef1b 1956 goto exit_unlock_free;
5a38795f 1957 }
c19e6535 1958
1959 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1960 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1961 BUG_ON(err < 0);
1962 ovs_unlock();
e297c6b7 1963
cb25142c 1964 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
d81eef1b 1965 return 0;
c19e6535 1966
d81eef1b 1967exit_unlock_free:
cd2a59e9 1968 ovs_unlock();
d81eef1b 1969 kfree_skb(reply);
c19e6535 1970 return err;
1971}
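/* Editor's note: when userspace does not request a specific port number, the
 * loop above scans upward from port 1 for a free slot (port 0, OVSP_LOCAL, is
 * created together with the datapath).  An -EAGAIN from new_vport() indicates
 * that a vport module was just loaded on demand, so the whole lookup is
 * retried via the restart label.
 */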
1972
df2c07f4 1973static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
44e05eca 1974{
1975 struct nlattr **a = info->attrs;
1976 struct sk_buff *reply;
c19e6535 1977 struct vport *vport;
c19e6535 1978 int err;
44e05eca 1979
1980 reply = ovs_vport_cmd_alloc_info();
1981 if (!reply)
1982 return -ENOMEM;
1983
cd2a59e9 1984 ovs_lock();
2a4999f3 1985 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1986 err = PTR_ERR(vport);
1987 if (IS_ERR(vport))
d81eef1b 1988 goto exit_unlock_free;
44e05eca 1989
6455100f 1990 if (a[OVS_VPORT_ATTR_TYPE] &&
17ec1d04 1991 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
4879d4c7 1992 err = -EINVAL;
d81eef1b 1993 goto exit_unlock_free;
1994 }
1995
17ec1d04 1996 if (a[OVS_VPORT_ATTR_OPTIONS]) {
850b6b3b 1997 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
17ec1d04 1998 if (err)
d81eef1b 1999 goto exit_unlock_free;
17ec1d04 2000 }
1fc7083d 2001
beb1c69a 2002 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2003 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2004
2005 err = ovs_vport_set_upcall_portids(vport, ids);
2006 if (err)
2007 goto exit_unlock_free;
2008 }
c19e6535 2009
2010 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2011 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2012 BUG_ON(err < 0);
cd2a59e9 2013 ovs_unlock();
d81eef1b 2014
cb25142c 2015 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
2016 return 0;
2017
d81eef1b 2018exit_unlock_free:
cd2a59e9 2019 ovs_unlock();
d81eef1b 2020 kfree_skb(reply);
c19e6535 2021 return err;
2022}
2023
df2c07f4 2024static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2025{
2026 struct nlattr **a = info->attrs;
2027 struct sk_buff *reply;
c19e6535 2028 struct vport *vport;
2029 int err;
2030
2031 reply = ovs_vport_cmd_alloc_info();
2032 if (!reply)
2033 return -ENOMEM;
2034
cd2a59e9 2035 ovs_lock();
2a4999f3 2036 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535 2037 err = PTR_ERR(vport);
f0fef760 2038 if (IS_ERR(vport))
d81eef1b 2039 goto exit_unlock_free;
c19e6535 2040
df2c07f4 2041 if (vport->port_no == OVSP_LOCAL) {
f0fef760 2042 err = -EINVAL;
d81eef1b 2043 goto exit_unlock_free;
2044 }
2045
2046 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2047 info->snd_seq, 0, OVS_VPORT_CMD_DEL);
2048 BUG_ON(err < 0);
850b6b3b 2049 ovs_dp_detach_port(vport);
d81eef1b 2050 ovs_unlock();
f0fef760 2051
cb25142c 2052 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
d81eef1b 2053 return 0;
f0fef760 2054
d81eef1b 2055exit_unlock_free:
cd2a59e9 2056 ovs_unlock();
d81eef1b 2057 kfree_skb(reply);
c19e6535 2058 return err;
2059}
2060
df2c07f4 2061static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2062{
f0fef760 2063 struct nlattr **a = info->attrs;
df2c07f4 2064 struct ovs_header *ovs_header = info->userhdr;
ed099e92 2065 struct sk_buff *reply;
c19e6535 2066 struct vport *vport;
2067 int err;
2068
2069 reply = ovs_vport_cmd_alloc_info();
2070 if (!reply)
2071 return -ENOMEM;
2072
ed099e92 2073 rcu_read_lock();
2a4999f3 2074 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2075 err = PTR_ERR(vport);
2076 if (IS_ERR(vport))
2077 goto exit_unlock_free;
2078 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2079 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2080 BUG_ON(err < 0);
2081 rcu_read_unlock();
2082
2083 return genlmsg_reply(reply, info);
ed099e92 2084
d81eef1b 2085exit_unlock_free:
ed099e92 2086 rcu_read_unlock();
d81eef1b 2087 kfree_skb(reply);
2088 return err;
2089}
2090
df2c07f4 2091static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
c19e6535 2092{
df2c07f4 2093 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
c19e6535 2094 struct datapath *dp;
2095 int bucket = cb->args[0], skip = cb->args[1];
2096 int i, j = 0;
c19e6535 2097
03fc2881 2098 rcu_read_lock();
01ac0970 2099 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2100 if (!dp) {
2101 rcu_read_unlock();
f0fef760 2102 return -ENODEV;
03fc2881 2103 }
95b1d73a 2104 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
ed099e92 2105 struct vport *vport;
2106
2107 j = 0;
f8dfbcb7 2108 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2109 if (j >= skip &&
2110 ovs_vport_cmd_fill_info(vport, skb,
28aea917 2111 NETLINK_CB(cb->skb).portid,
2112 cb->nlh->nlmsg_seq,
2113 NLM_F_MULTI,
2114 OVS_VPORT_CMD_NEW) < 0)
2115 goto out;
2116
2117 j++;
2118 }
2119 skip = 0;
c19e6535 2120 }
95b1d73a 2121out:
ed099e92 2122 rcu_read_unlock();
c19e6535 2123
2124 cb->args[0] = i;
2125 cb->args[1] = j;
f0fef760 2126
95b1d73a 2127 return skb->len;
2128}
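/* Editor's note: the vport dump is resumable; cb->args[0] and cb->args[1]
 * remember the hash bucket and the offset within that bucket where the
 * previous pass stopped, and the whole walk runs under rcu_read_lock() only.
 */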
2129
2130static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2131 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2132 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2133 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2134 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2135 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2136 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2137};
2138
18fd3a52 2139static struct genl_ops dp_vport_genl_ops[] = {
df2c07f4 2140 { .cmd = OVS_VPORT_CMD_NEW,
2141 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2142 .policy = vport_policy,
df2c07f4 2143 .doit = ovs_vport_cmd_new
f0fef760 2144 },
df2c07f4 2145 { .cmd = OVS_VPORT_CMD_DEL,
2146 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2147 .policy = vport_policy,
df2c07f4 2148 .doit = ovs_vport_cmd_del
f0fef760 2149 },
df2c07f4 2150 { .cmd = OVS_VPORT_CMD_GET,
2151 .flags = 0, /* OK for unprivileged users. */
2152 .policy = vport_policy,
2153 .doit = ovs_vport_cmd_get,
2154 .dumpit = ovs_vport_cmd_dump
f0fef760 2155 },
df2c07f4 2156 { .cmd = OVS_VPORT_CMD_SET,
2157 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2158 .policy = vport_policy,
df2c07f4 2159 .doit = ovs_vport_cmd_set,
2160 },
2161};
2162
2163struct genl_family dp_vport_genl_family = {
2164 .id = GENL_ID_GENERATE,
2165 .hdrsize = sizeof(struct ovs_header),
2166 .name = OVS_VPORT_FAMILY,
2167 .version = OVS_VPORT_VERSION,
2168 .maxattr = OVS_VPORT_ATTR_MAX,
2169 .netnsok = true,
2170 .parallel_ops = true,
2171 .ops = dp_vport_genl_ops,
2172 .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2173 .mcgrps = &ovs_dp_vport_multicast_group,
2174 .n_mcgrps = 1,
982b8810 2175};
ed099e92 2176
18fd3a52 2177static struct genl_family *dp_genl_families[] = {
2178 &dp_datapath_genl_family,
2179 &dp_vport_genl_family,
2180 &dp_flow_genl_family,
2181 &dp_packet_genl_family,
982b8810 2182};
ed099e92 2183
2184static void dp_unregister_genl(int n_families)
2185{
2186 int i;
ed099e92 2187
b867ca75 2188 for (i = 0; i < n_families; i++)
cb25142c 2189 genl_unregister_family(dp_genl_families[i]);
2190}
2191
982b8810 2192static int dp_register_genl(void)
064af421 2193{
2194 int err;
2195 int i;
064af421 2196
982b8810 2197 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
064af421 2198
cb25142c 2199 err = genl_register_family(dp_genl_families[i]);
2200 if (err)
2201 goto error;
982b8810 2202 }
9cc8b4e4 2203
982b8810 2204 return 0;
2205
2206error:
cb25142c 2207 dp_unregister_genl(i);
982b8810 2208 return err;
2209}
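/* Editor's note: if registering family i fails, dp_unregister_genl(i) above
 * unwinds only the i families that were already registered, so the error path
 * never touches a family that was never set up.
 */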
2210
2211static int __net_init ovs_init_net(struct net *net)
2212{
2213 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2214
2215 INIT_LIST_HEAD(&ovs_net->dps);
cd2a59e9 2216 INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2217 return 0;
2218}
2219
2220static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2221 struct list_head *head)
2222{
2223 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2224 struct datapath *dp;
2225
2226 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2227 int i;
2228
2229 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2230 struct vport *vport;
2231
2232 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2233 struct netdev_vport *netdev_vport;
2234
2235 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2236 continue;
2237
2238 netdev_vport = netdev_vport_priv(vport);
2239 if (dev_net(netdev_vport->dev) == dnet)
2240 list_add(&vport->detach_list, head);
2241 }
2242 }
2243 }
2244}
2245
2246static void __net_exit ovs_exit_net(struct net *dnet)
2247{
2248 struct datapath *dp, *dp_next;
2249 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2250 struct vport *vport, *vport_next;
2251 struct net *net;
2252 LIST_HEAD(head);
2a4999f3 2253
2254 ovs_lock();
2255 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2256 __dp_destroy(dp);
2257
2258 rtnl_lock();
2259 for_each_net(net)
2260 list_vports_from_net(net, dnet, &head);
2261 rtnl_unlock();
2262
2263 /* Detach all vports from given namespace. */
2264 list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2265 list_del(&vport->detach_list);
2266 ovs_dp_detach_port(vport);
2267 }
2268
2269 ovs_unlock();
2270
2271 cancel_work_sync(&ovs_net->dp_notify_work);
2272}
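/* Editor's note: on namespace teardown every datapath owned by the exiting
 * namespace is destroyed, and in addition any internal-device vport that
 * lives in the exiting namespace but belongs to a datapath elsewhere is
 * detached, so no vport is left referencing a net device from a dead
 * namespace.  The notify workqueue is flushed last, after ovs_lock is dropped.
 */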
2273
2274static struct pernet_operations ovs_net_ops = {
2275 .init = ovs_init_net,
2276 .exit = ovs_exit_net,
2277 .id = &ovs_net_id,
2278 .size = sizeof(struct ovs_net),
2279};
2280
2281DEFINE_COMPAT_PNET_REG_FUNC(device);
2282
2283static int __init dp_init(void)
2284{
2285 int err;
2286
f3d85db3 2287 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
22d24ebf 2288
26bfaeaa 2289 pr_info("Open vSwitch switching datapath %s\n", VERSION);
064af421 2290
2c8c4fb7 2291 err = action_fifos_init();
3544358a 2292 if (err)
533e96e7 2293 goto error;
3544358a 2294
5282e284 2295 err = ovs_internal_dev_rtnl_link_register();
2296 if (err)
2297 goto error_action_fifos_exit;
2298
2299 err = ovs_flow_init();
2300 if (err)
2301 goto error_unreg_rtnl_link;
2302
850b6b3b 2303 err = ovs_vport_init();
2304 if (err)
2305 goto error_flow_exit;
2306
2a4999f3 2307 err = register_pernet_device(&ovs_net_ops);
2308 if (err)
2309 goto error_vport_exit;
2310
2311 err = register_netdevice_notifier(&ovs_dp_device_notifier);
2312 if (err)
2313 goto error_netns_exit;
2314
2315 err = ovs_netdev_init();
2316 if (err)
2317 goto error_unreg_notifier;
2318
2319 err = dp_register_genl();
2320 if (err < 0)
5a38795f 2321 goto error_unreg_netdev;
982b8810 2322
2323 return 0;
2324
2325error_unreg_netdev:
2326 ovs_netdev_exit();
064af421 2327error_unreg_notifier:
850b6b3b 2328 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2329error_netns_exit:
2330 unregister_pernet_device(&ovs_net_ops);
f2459fe7 2331error_vport_exit:
850b6b3b 2332 ovs_vport_exit();
064af421 2333error_flow_exit:
850b6b3b 2334 ovs_flow_exit();
2335error_unreg_rtnl_link:
2336 ovs_internal_dev_rtnl_link_unregister();
2337error_action_fifos_exit:
2338 action_fifos_exit();
2339error:
2340 return err;
2341}
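/* Editor's note: dp_init() unwinds in strict reverse order of initialization;
 * each error label undoes exactly the steps that completed before the failure,
 * and dp_cleanup() below mirrors the same sequence for module unload (with an
 * extra rcu_barrier() so in-flight RCU callbacks finish before the exit
 * routines run).
 */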
2342
2343static void dp_cleanup(void)
2344{
982b8810 2345 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
5a38795f 2346 ovs_netdev_exit();
850b6b3b 2347 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2348 unregister_pernet_device(&ovs_net_ops);
2349 rcu_barrier();
2350 ovs_vport_exit();
2351 ovs_flow_exit();
5282e284 2352 ovs_internal_dev_rtnl_link_unregister();
2c8c4fb7 2353 action_fifos_exit();
2354}
2355
2356module_init(dp_init);
2357module_exit(dp_cleanup);
2358
2359MODULE_DESCRIPTION("Open vSwitch switching datapath");
2360MODULE_LICENSE("GPL");
3d0666d2 2361MODULE_VERSION(VERSION);