[mirror_ovs.git] / datapath / datapath.c
064af421 1/*
e23775f2 2 * Copyright (c) 2007-2015 Nicira, Inc.
a14bc59f 3 *
a9a29d22
JG
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
064af421
BP
17 */
18
dfffaef1
JP
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
064af421
BP
21#include <linux/init.h>
22#include <linux/module.h>
064af421 23#include <linux/if_arp.h>
064af421
BP
24#include <linux/if_vlan.h>
25#include <linux/in.h>
26#include <linux/ip.h>
982b8810 27#include <linux/jhash.h>
064af421
BP
28#include <linux/delay.h>
29#include <linux/time.h>
30#include <linux/etherdevice.h>
ed099e92 31#include <linux/genetlink.h>
064af421
BP
32#include <linux/kernel.h>
33#include <linux/kthread.h>
064af421
BP
34#include <linux/mutex.h>
35#include <linux/percpu.h>
36#include <linux/rcupdate.h>
37#include <linux/tcp.h>
38#include <linux/udp.h>
39#include <linux/version.h>
40#include <linux/ethtool.h>
064af421 41#include <linux/wait.h>
064af421 42#include <asm/div64.h>
656a0e37 43#include <linux/highmem.h>
064af421
BP
44#include <linux/netfilter_bridge.h>
45#include <linux/netfilter_ipv4.h>
46#include <linux/inetdevice.h>
47#include <linux/list.h>
077257b8 48#include <linux/openvswitch.h>
064af421 49#include <linux/rculist.h>
064af421 50#include <linux/dmi.h>
36956a7d 51#include <net/genetlink.h>
2a4999f3
PS
52#include <net/net_namespace.h>
53#include <net/netns/generic.h>
064af421 54
064af421 55#include "datapath.h"
038e34ab 56#include "conntrack.h"
064af421 57#include "flow.h"
d103f479 58#include "flow_table.h"
a097c0b2 59#include "flow_netlink.h"
e23775f2 60#include "gso.h"
f2459fe7 61#include "vport-internal_dev.h"
d5de5b0d 62#include "vport-netdev.h"
064af421 63
2a4999f3 64int ovs_net_id __read_mostly;
5a38795f 65EXPORT_SYMBOL_GPL(ovs_net_id);
2a4999f3 66
cb25142c
PS
67static struct genl_family dp_packet_genl_family;
68static struct genl_family dp_flow_genl_family;
69static struct genl_family dp_datapath_genl_family;
70
bc619e29
JS
71static const struct nla_policy flow_policy[];
72
18fd3a52
PS
73static struct genl_multicast_group ovs_dp_flow_multicast_group = {
74 .name = OVS_FLOW_MCGROUP
cb25142c
PS
75};
76
18fd3a52
PS
77static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
78 .name = OVS_DATAPATH_MCGROUP
cb25142c
PS
79};
80
18fd3a52
PS
81struct genl_multicast_group ovs_dp_vport_multicast_group = {
82 .name = OVS_VPORT_MCGROUP
cb25142c
PS
83};
84
afad3556 85/* Check if we need to build a reply message.
af465b67
PS
86 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
87 */
114fce23
SG
88static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
89 unsigned int group)
afad3556
JR
90{
91 return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
6233a1bd 92 genl_has_listeners(family, genl_info_net(info), group);
afad3556
JR
93}
94
18fd3a52 95static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
cb25142c 96 struct sk_buff *skb, struct genl_info *info)
e297c6b7 97{
cb25142c
PS
98 genl_notify(family, skb, genl_info_net(info),
99 info->snd_portid, GROUP_ID(grp), info->nlhdr, GFP_KERNEL);
e297c6b7
TG
100}
101
ed099e92
BP
102/**
103 * DOC: Locking:
064af421 104 *
cd2a59e9
PS
105 * Writes to device state (add/remove datapath, port, set operations on
106 * vports, etc.) and writes to other state (flow table modifications,
107 * setting miscellaneous datapath parameters, etc.) are protected by
108 * ovs_lock.
ed099e92
BP
109 *
110 * Reads are protected by RCU.
111 *
112 * There are a few special cases (mostly stats) that have their own
113 * synchronization, but they nest under all of the above and don't interact with
114 * each other.
cd2a59e9
PS
115 *
116 * The RTNL lock nests inside ovs_mutex.
064af421 117 */
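/* Illustrative sketch, not part of the original source: a hypothetical helper
 * showing how callers apply the locking rules documented above.  It is kept
 * under "#if 0" because it exists only as an example; the helpers it uses
 * (get_dp_rcu(), get_dp(), ovs_vport_ovsl(), ovs_dp_name(), ovs_vport_name())
 * are defined later in this file or in the datapath headers.
 */
#if 0
static void ovs_locking_example(struct net *net, int dp_ifindex, u16 port_no)
{
	struct datapath *dp;

	/* Read side: rcu_read_lock() is sufficient for lookups. */
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	if (dp)
		pr_debug("datapath %s found\n", ovs_dp_name(dp));
	rcu_read_unlock();

	/* Write side: modifications of datapath state nest under ovs_lock(). */
	ovs_lock();
	dp = get_dp(net, dp_ifindex);
	if (dp) {
		struct vport *vport = ovs_vport_ovsl(dp, port_no);

		if (vport)
			pr_debug("vport %s may be modified here\n",
				 ovs_vport_name(vport));
	}
	ovs_unlock();
}
#endif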
ed099e92 118
cd2a59e9
PS
119static DEFINE_MUTEX(ovs_mutex);
120
121void ovs_lock(void)
122{
123 mutex_lock(&ovs_mutex);
124}
125
126void ovs_unlock(void)
127{
128 mutex_unlock(&ovs_mutex);
129}
130
131#ifdef CONFIG_LOCKDEP
132int lockdep_ovsl_is_held(void)
133{
134 if (debug_locks)
135 return lockdep_is_held(&ovs_mutex);
136 else
137 return 1;
138}
5a38795f 139EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
cd2a59e9
PS
140#endif
141
5ae440c3 142static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
f1f60b85 143 const struct sw_flow_key *,
4c7804f1
WT
144 const struct dp_upcall_info *,
145 uint32_t cutlen);
5ae440c3 146static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
7d16c847 147 const struct sw_flow_key *,
4c7804f1
WT
148 const struct dp_upcall_info *,
149 uint32_t cutlen);
064af421 150
01ac0970
AZ
151/* Must be called with rcu_read_lock. */
152static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
064af421 153{
01ac0970 154 struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
ed099e92 155
254f2dc8 156 if (dev) {
850b6b3b 157 struct vport *vport = ovs_internal_dev_get_vport(dev);
254f2dc8 158 if (vport)
01ac0970 159 return vport->dp;
254f2dc8 160 }
01ac0970
AZ
161
162 return NULL;
163}
164
165/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
af465b67
PS
166 * returned dp pointer valid.
167 */
01ac0970
AZ
168static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
169{
170 struct datapath *dp;
171
172 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
173 rcu_read_lock();
174 dp = get_dp_rcu(net, dp_ifindex);
254f2dc8
BP
175 rcu_read_unlock();
176
177 return dp;
064af421 178}
064af421 179
cd2a59e9 180/* Must be called with rcu_read_lock or ovs_mutex. */
850b6b3b 181const char *ovs_dp_name(const struct datapath *dp)
f2459fe7 182{
cd2a59e9 183 struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
e23775f2 184 return ovs_vport_name(vport);
f2459fe7
JG
185}
186
f1f60b85 187static int get_dpifindex(const struct datapath *dp)
99769a40
JG
188{
189 struct vport *local;
190 int ifindex;
191
192 rcu_read_lock();
193
95b1d73a 194 local = ovs_vport_rcu(dp, OVSP_LOCAL);
99769a40 195 if (local)
e23775f2 196 ifindex = local->dev->ifindex;
99769a40
JG
197 else
198 ifindex = 0;
199
200 rcu_read_unlock();
201
202 return ifindex;
203}
204
46c6a11d
JG
205static void destroy_dp_rcu(struct rcu_head *rcu)
206{
207 struct datapath *dp = container_of(rcu, struct datapath, rcu);
46c6a11d 208
e379e4d1 209 ovs_flow_tbl_destroy(&dp->table);
46c6a11d 210 free_percpu(dp->stats_percpu);
95b1d73a 211 kfree(dp->ports);
5ca1ba48 212 kfree(dp);
46c6a11d
JG
213}
214
95b1d73a
PS
215static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
216 u16 port_no)
217{
218 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
219}
220
aa917006 221/* Called with ovs_mutex or RCU read lock. */
95b1d73a
PS
222struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
223{
224 struct vport *vport;
95b1d73a
PS
225 struct hlist_head *head;
226
227 head = vport_hash_bucket(dp, port_no);
f8dfbcb7 228 hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
95b1d73a
PS
229 if (vport->port_no == port_no)
230 return vport;
231 }
232 return NULL;
233}
234
cd2a59e9 235/* Called with ovs_mutex. */
c19e6535 236static struct vport *new_vport(const struct vport_parms *parms)
064af421 237{
f2459fe7 238 struct vport *vport;
f2459fe7 239
850b6b3b 240 vport = ovs_vport_add(parms);
c19e6535
BP
241 if (!IS_ERR(vport)) {
242 struct datapath *dp = parms->dp;
95b1d73a 243 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
064af421 244
95b1d73a 245 hlist_add_head_rcu(&vport->dp_hash_node, head);
c19e6535 246 }
c19e6535 247 return vport;
064af421
BP
248}
249
850b6b3b 250void ovs_dp_detach_port(struct vport *p)
064af421 251{
cd2a59e9 252 ASSERT_OVSL();
064af421 253
064af421 254 /* First drop references to device. */
95b1d73a 255 hlist_del_rcu(&p->dp_hash_node);
f2459fe7 256
7237e4f4 257 /* Then destroy it. */
850b6b3b 258 ovs_vport_del(p);
064af421
BP
259}
260
fb66fbd1 261/* Must be called with rcu_read_lock. */
e74d4817 262void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
064af421 263{
a6059080 264 const struct vport *p = OVS_CB(skb)->input_vport;
064af421 265 struct datapath *dp = p->dp;
3544358a 266 struct sw_flow *flow;
ad50cb60 267 struct sw_flow_actions *sf_acts;
064af421 268 struct dp_stats_percpu *stats;
e9141eec 269 u64 *stats_counter;
4fa72a95 270 u32 n_mask_hit;
064af421 271
70dbc259 272 stats = this_cpu_ptr(dp->stats_percpu);
a063b0df 273
52a23d92 274 /* Look up flow. */
e74d4817 275 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
5604935e 276 &n_mask_hit);
52a23d92
JG
277 if (unlikely(!flow)) {
278 struct dp_upcall_info upcall;
a7d607c5 279 int error;
52a23d92 280
0e469d3b 281 memset(&upcall, 0, sizeof(upcall));
52a23d92 282 upcall.cmd = OVS_PACKET_CMD_MISS;
beb1c69a 283 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
a94ebc39 284 upcall.mru = OVS_CB(skb)->mru;
4c7804f1 285 error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
a7d607c5
LR
286 if (unlikely(error))
287 kfree_skb(skb);
288 else
289 consume_skb(skb);
52a23d92
JG
290 stats_counter = &stats->n_missed;
291 goto out;
292 }
293
e74d4817 294 ovs_flow_stats_update(flow, key->tp.flags, skb);
ad50cb60 295 sf_acts = rcu_dereference(flow->sf_acts);
7d16c847
PS
296 ovs_execute_actions(dp, skb, sf_acts, key);
297
b0b906cc 298 stats_counter = &stats->n_hit;
55574bb0 299
8819fac7 300out:
55574bb0 301 /* Update datapath statistics. */
b81deb15 302 u64_stats_update_begin(&stats->syncp);
e9141eec 303 (*stats_counter)++;
4fa72a95 304 stats->n_mask_hit += n_mask_hit;
b81deb15 305 u64_stats_update_end(&stats->syncp);
064af421
BP
306}
307
850b6b3b 308int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
f1f60b85 309 const struct sw_flow_key *key,
4c7804f1
WT
310 const struct dp_upcall_info *upcall_info,
311 uint32_t cutlen)
aa5a8fdc
JG
312{
313 struct dp_stats_percpu *stats;
314 int err;
315
28aea917 316 if (upcall_info->portid == 0) {
b063d9f0 317 err = -ENOTCONN;
b063d9f0
JG
318 goto err;
319 }
320
7257b535 321 if (!skb_is_gso(skb))
4c7804f1 322 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
7257b535 323 else
4c7804f1 324 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
d76195db
JG
325 if (err)
326 goto err;
327
328 return 0;
aa5a8fdc 329
aa5a8fdc 330err:
70dbc259 331 stats = this_cpu_ptr(dp->stats_percpu);
aa5a8fdc 332
b81deb15 333 u64_stats_update_begin(&stats->syncp);
aa5a8fdc 334 stats->n_lost++;
b81deb15 335 u64_stats_update_end(&stats->syncp);
aa5a8fdc 336
aa5a8fdc 337 return err;
982b8810
BP
338}
339
5ae440c3 340static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
f1f60b85 341 const struct sw_flow_key *key,
4c7804f1
WT
342 const struct dp_upcall_info *upcall_info,
343 uint32_t cutlen)
cb5087ca 344{
d4cba1f8 345 unsigned short gso_type = skb_shinfo(skb)->gso_type;
7257b535
BP
346 struct sw_flow_key later_key;
347 struct sk_buff *segs, *nskb;
b2a23c4e 348 struct ovs_skb_cb ovs_cb;
7257b535 349 int err;
cb5087ca 350
b2a23c4e 351 ovs_cb = *OVS_CB(skb);
1d04cd4e 352 segs = __skb_gso_segment(skb, NETIF_F_SG, false);
b2a23c4e 353 *OVS_CB(skb) = ovs_cb;
79089764
PS
354 if (IS_ERR(segs))
355 return PTR_ERR(segs);
d1da7669
PS
356 if (segs == NULL)
357 return -EINVAL;
99769a40 358
9b277b39 359 if (gso_type & SKB_GSO_UDP) {
c135bba1 360 /* The initial flow key extracted by ovs_flow_key_extract()
9b277b39
PS
361 * in this case is for a first fragment, so we need to
362 * properly mark later fragments.
363 */
e74d4817 364 later_key = *key;
9b277b39
PS
365 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
366 }
367
7257b535
BP
368 /* Queue all of the segments. */
369 skb = segs;
cb5087ca 370 do {
b2a23c4e 371 *OVS_CB(skb) = ovs_cb;
9b277b39 372 if (gso_type & SKB_GSO_UDP && skb != segs)
e74d4817 373 key = &later_key;
9b277b39 374
4c7804f1 375 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
982b8810 376 if (err)
7257b535 377 break;
856081f6 378
36ce148c 379 } while ((skb = skb->next));
cb5087ca 380
7257b535
BP
381 /* Free all of the segments. */
382 skb = segs;
383 do {
384 nskb = skb->next;
385 if (err)
386 kfree_skb(skb);
387 else
388 consume_skb(skb);
389 } while ((skb = nskb));
390 return err;
391}
392
8b7ea2d4 393static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
533bea51 394 unsigned int hdrlen)
0afa2373
TG
395{
396 size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
533bea51 397 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
4e25b8c1 398 + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */
0afa2373
TG
399
400 /* OVS_PACKET_ATTR_USERDATA */
8b7ea2d4
WZ
401 if (upcall_info->userdata)
402 size += NLA_ALIGN(upcall_info->userdata->nla_len);
403
404 /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
405 if (upcall_info->egress_tun_info)
406 size += nla_total_size(ovs_tun_key_attr_size());
0afa2373 407
0e469d3b
NM
408 /* OVS_PACKET_ATTR_ACTIONS */
409 if (upcall_info->actions_len)
410 size += nla_total_size(upcall_info->actions_len);
411
a94ebc39
JS
412 /* OVS_PACKET_ATTR_MRU */
413 if (upcall_info->mru)
414 size += nla_total_size(sizeof(upcall_info->mru));
415
0afa2373
TG
416 return size;
417}
418
a94ebc39
JS
419static void pad_packet(struct datapath *dp, struct sk_buff *skb)
420{
421 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
422 size_t plen = NLA_ALIGN(skb->len) - skb->len;
423
424 if (plen > 0)
425 memset(skb_put(skb, plen), 0, plen);
426 }
427}
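/* Worked example, added for illustration (assuming the mainline value
 * NLA_ALIGNTO == 4): if userspace did not negotiate OVS_DP_F_UNALIGNED and
 * the skb is 118 bytes long, pad_packet() appends NLA_ALIGN(118) - 118 = 2
 * zero bytes so that the final Netlink attribute ends on a 4-byte boundary.
 */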
428
5ae440c3 429static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
f1f60b85 430 const struct sw_flow_key *key,
4c7804f1
WT
431 const struct dp_upcall_info *upcall_info,
432 uint32_t cutlen)
7257b535
BP
433{
434 struct ovs_header *upcall;
6161d3fd 435 struct sk_buff *nskb = NULL;
82706a6f 436 struct sk_buff *user_skb = NULL; /* to be queued to userspace */
7257b535 437 struct nlattr *nla;
68eadcf0 438 struct genl_info info = {
705e9260 439#ifdef HAVE_GENLMSG_NEW_UNICAST
5ae440c3 440 .dst_sk = ovs_dp_get_net(dp)->genl_sock,
68eadcf0
TG
441#endif
442 .snd_portid = upcall_info->portid,
443 };
978188b2 444 size_t len;
533bea51 445 unsigned int hlen;
5ae440c3
TG
446 int err, dp_ifindex;
447
448 dp_ifindex = get_dpifindex(dp);
449 if (!dp_ifindex)
450 return -ENODEV;
7257b535 451
efd8a18e 452 if (skb_vlan_tag_present(skb)) {
6161d3fd
JG
453 nskb = skb_clone(skb, GFP_ATOMIC);
454 if (!nskb)
455 return -ENOMEM;
07ac71ea 456
8063e095 457 nskb = __vlan_hwaccel_push_inside(nskb);
07ac71ea
PS
458 if (!nskb)
459 return -ENOMEM;
460
6161d3fd
JG
461 skb = nskb;
462 }
463
464 if (nla_attr_size(skb->len) > USHRT_MAX) {
465 err = -EFBIG;
466 goto out;
467 }
7257b535 468
533bea51
TG
469 /* Complete checksum if needed */
470 if (skb->ip_summed == CHECKSUM_PARTIAL &&
471 (err = skb_checksum_help(skb)))
472 goto out;
473
474 /* Older versions of OVS user space enforce alignment of the last
475 * Netlink attribute to NLA_ALIGNTO which would require extensive
476 * padding logic. Only perform zerocopy if padding is not required.
477 */
478 if (dp->user_features & OVS_DP_F_UNALIGNED)
479 hlen = skb_zerocopy_headlen(skb);
480 else
481 hlen = skb->len;
482
4c7804f1 483 len = upcall_msg_size(upcall_info, hlen - cutlen);
68eadcf0 484 user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
6161d3fd
JG
485 if (!user_skb) {
486 err = -ENOMEM;
487 goto out;
488 }
7257b535
BP
489
490 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
491 0, upcall_info->cmd);
492 upcall->dp_ifindex = dp_ifindex;
493
db7f2238 494 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
9a621f82 495 BUG_ON(err);
7257b535
BP
496
497 if (upcall_info->userdata)
e995e3df 498 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
462a988b 499 nla_len(upcall_info->userdata),
e995e3df 500 nla_data(upcall_info->userdata));
7257b535 501
e23775f2 502
8b7ea2d4
WZ
503 if (upcall_info->egress_tun_info) {
504 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
505 err = ovs_nla_put_egress_tunnel_key(user_skb,
e23775f2
PS
506 upcall_info->egress_tun_info,
507 upcall_info->egress_tun_opts);
8b7ea2d4
WZ
508 BUG_ON(err);
509 nla_nest_end(user_skb, nla);
510 }
511
0e469d3b
NM
512 if (upcall_info->actions_len) {
513 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
514 err = ovs_nla_put_actions(upcall_info->actions,
515 upcall_info->actions_len,
516 user_skb);
517 if (!err)
518 nla_nest_end(user_skb, nla);
519 else
520 nla_nest_cancel(user_skb, nla);
521 }
522
a94ebc39
JS
523 /* Add OVS_PACKET_ATTR_MRU */
524 if (upcall_info->mru) {
525 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
526 upcall_info->mru)) {
527 err = -ENOBUFS;
528 goto out;
529 }
530 pad_packet(dp, user_skb);
531 }
532
533bea51 533 /* Only reserve room for the attribute header; packet data is added
af465b67
PS
534 * in skb_zerocopy()
535 */
533bea51
TG
536 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
537 err = -ENOBUFS;
538 goto out;
539 }
4c7804f1 540 nla->nla_len = nla_attr_size(skb->len - cutlen);
bed53bd1 541
4c7804f1 542 err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
2c272bd9
ZK
543 if (err)
544 goto out;
7257b535 545
ef507cec 546 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
a94ebc39 547 pad_packet(dp, user_skb);
ef507cec 548
533bea51 549 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
6161d3fd 550
533bea51 551 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
82706a6f 552 user_skb = NULL;
6161d3fd 553out:
2c272bd9
ZK
554 if (err)
555 skb_tx_error(skb);
82706a6f 556 kfree_skb(user_skb);
6161d3fd
JG
557 kfree_skb(nskb);
558 return err;
cb5087ca
BP
559}
560
df2c07f4 561static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
064af421 562{
df2c07f4 563 struct ovs_header *ovs_header = info->userhdr;
a94ebc39 564 struct net *net = sock_net(skb->sk);
982b8810 565 struct nlattr **a = info->attrs;
e0e57990 566 struct sw_flow_actions *acts;
982b8810 567 struct sk_buff *packet;
e0e57990 568 struct sw_flow *flow;
ad50cb60 569 struct sw_flow_actions *sf_acts;
f7cd0081 570 struct datapath *dp;
d6569377 571 struct ethhdr *eth;
a6059080 572 struct vport *input_vport;
a94ebc39 573 u16 mru = 0;
3f19d399 574 int len;
d6569377 575 int err;
2e460098 576 bool log = !a[OVS_PACKET_ATTR_PROBE];
064af421 577
f7cd0081 578 err = -EINVAL;
df2c07f4 579 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
7c3072cc 580 !a[OVS_PACKET_ATTR_ACTIONS])
e5cad958 581 goto err;
064af421 582
df2c07f4 583 len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
3f19d399 584 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
f7cd0081
BP
585 err = -ENOMEM;
586 if (!packet)
e5cad958 587 goto err;
3f19d399
BP
588 skb_reserve(packet, NET_IP_ALIGN);
589
bf3d6fce 590 nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
8d5ebd83 591
f7cd0081
BP
592 skb_reset_mac_header(packet);
593 eth = eth_hdr(packet);
064af421 594
d6569377
BP
595 /* Normally, setting the skb 'protocol' field would be handled by a
596 * call to eth_type_trans(), but it assumes there's a sending
af465b67
PS
597 * device, which we may not have.
598 */
935fc582 599 if (eth_proto_is_802_3(eth->h_proto))
f7cd0081 600 packet->protocol = eth->h_proto;
d6569377 601 else
f7cd0081 602 packet->protocol = htons(ETH_P_802_2);
d3c54451 603
a94ebc39
JS
604 /* Set packet's mru */
605 if (a[OVS_PACKET_ATTR_MRU]) {
606 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
607 packet->ignore_df = 1;
608 }
609 OVS_CB(packet)->mru = mru;
610
e0e57990 611 /* Build an sw_flow for sending this packet. */
df65fec1 612 flow = ovs_flow_alloc();
e0e57990
BP
613 err = PTR_ERR(flow);
614 if (IS_ERR(flow))
e5cad958 615 goto err_kfree_skb;
064af421 616
038e34ab
JS
617 err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
618 packet, &flow->key, log);
e0e57990 619 if (err)
9321954a 620 goto err_flow_free;
e0e57990 621
a94ebc39 622 err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
9233cef7 623 &flow->key, &acts, log);
9b405f1a
PS
624 if (err)
625 goto err_flow_free;
e0e57990 626
ff27161e 627 rcu_assign_pointer(flow->sf_acts, acts);
abff858b 628 packet->priority = flow->key.phy.priority;
3025a772 629 packet->mark = flow->key.phy.skb_mark;
e0e57990 630
d6569377 631 rcu_read_lock();
a94ebc39 632 dp = get_dp_rcu(net, ovs_header->dp_ifindex);
f7cd0081 633 err = -ENODEV;
e5cad958
BP
634 if (!dp)
635 goto err_unlock;
cc4015df 636
a6059080
AZ
637 input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
638 if (!input_vport)
639 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
640
641 if (!input_vport)
642 goto err_unlock;
643
e23775f2 644 packet->dev = input_vport->dev;
a6059080 645 OVS_CB(packet)->input_vport = input_vport;
ad50cb60 646 sf_acts = rcu_dereference(flow->sf_acts);
a6059080 647
e9141eec 648 local_bh_disable();
7d16c847 649 err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
e9141eec 650 local_bh_enable();
d6569377 651 rcu_read_unlock();
e0e57990 652
a1c564be 653 ovs_flow_free(flow, false);
e5cad958 654 return err;
064af421 655
e5cad958
BP
656err_unlock:
657 rcu_read_unlock();
9321954a 658err_flow_free:
a1c564be 659 ovs_flow_free(flow, false);
e5cad958
BP
660err_kfree_skb:
661 kfree_skb(packet);
662err:
d6569377 663 return err;
064af421
BP
664}
665
df2c07f4 666static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
7c3072cc 667 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
df2c07f4
JP
668 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
669 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
2e460098 670 [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
a94ebc39 671 [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
982b8810
BP
672};
673
18fd3a52 674static struct genl_ops dp_packet_genl_ops[] = {
df2c07f4 675 { .cmd = OVS_PACKET_CMD_EXECUTE,
982b8810
BP
676 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
677 .policy = packet_policy,
df2c07f4 678 .doit = ovs_packet_cmd_execute
982b8810
BP
679 }
680};
681
cb25142c
PS
682static struct genl_family dp_packet_genl_family = {
683 .id = GENL_ID_GENERATE,
684 .hdrsize = sizeof(struct ovs_header),
685 .name = OVS_PACKET_FAMILY,
686 .version = OVS_PACKET_VERSION,
687 .maxattr = OVS_PACKET_ATTR_MAX,
688 .netnsok = true,
689 .parallel_ops = true,
690 .ops = dp_packet_genl_ops,
691 .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
692};
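/* Illustrative note, not part of the original source: per packet_policy and
 * ovs_packet_cmd_execute() above, an OVS_PACKET_CMD_EXECUTE request must
 * carry OVS_PACKET_ATTR_PACKET (the raw frame, at least ETH_HLEN bytes) plus
 * nested OVS_PACKET_ATTR_KEY and OVS_PACKET_ATTR_ACTIONS attributes, and may
 * also carry the OVS_PACKET_ATTR_PROBE flag and a u16 OVS_PACKET_ATTR_MRU.
 */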
693
f1f60b85 694static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
4fa72a95 695 struct ovs_dp_megaflow_stats *mega_stats)
064af421 696{
d6569377 697 int i;
f180c2e2 698
4fa72a95
AZ
699 memset(mega_stats, 0, sizeof(*mega_stats));
700
994dc286 701 stats->n_flows = ovs_flow_tbl_count(&dp->table);
4fa72a95 702 mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
064af421 703
7257b535 704 stats->n_hit = stats->n_missed = stats->n_lost = 0;
4fa72a95 705
d6569377
BP
706 for_each_possible_cpu(i) {
707 const struct dp_stats_percpu *percpu_stats;
708 struct dp_stats_percpu local_stats;
821cb9fa 709 unsigned int start;
44e05eca 710
d6569377 711 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
064af421 712
d6569377 713 do {
b81deb15 714 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
d6569377 715 local_stats = *percpu_stats;
b81deb15 716 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
064af421 717
d6569377
BP
718 stats->n_hit += local_stats.n_hit;
719 stats->n_missed += local_stats.n_missed;
720 stats->n_lost += local_stats.n_lost;
4fa72a95 721 mega_stats->n_mask_hit += local_stats.n_mask_hit;
d6569377
BP
722 }
723}
064af421 724
bc619e29
JS
725static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
726{
727 return ovs_identifier_is_ufid(sfid) &&
728 !(ufid_flags & OVS_UFID_F_OMIT_KEY);
729}
730
731static bool should_fill_mask(uint32_t ufid_flags)
732{
733 return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
734}
735
736static bool should_fill_actions(uint32_t ufid_flags)
0afa2373 737{
bc619e29
JS
738 return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
739}
740
741static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
742 const struct sw_flow_id *sfid,
743 uint32_t ufid_flags)
744{
745 size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
746
747 /* OVS_FLOW_ATTR_UFID */
748 if (sfid && ovs_identifier_is_ufid(sfid))
749 len += nla_total_size(sfid->ufid_len);
750
751 /* OVS_FLOW_ATTR_KEY */
752 if (!sfid || should_fill_key(sfid, ufid_flags))
753 len += nla_total_size(ovs_key_attr_size());
754
755 /* OVS_FLOW_ATTR_MASK */
756 if (should_fill_mask(ufid_flags))
757 len += nla_total_size(ovs_key_attr_size());
758
759 /* OVS_FLOW_ATTR_ACTIONS */
760 if (should_fill_actions(ufid_flags))
c3bb15b3 761 len += nla_total_size(acts->orig_len);
bc619e29
JS
762
763 return len
0afa2373
TG
764 + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
765 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
bc619e29 766 + nla_total_size(8); /* OVS_FLOW_ATTR_USED */
0afa2373
TG
767}
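/* Worked example, added for illustration: for a UFID-identified flow queried
 * with OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK | OVS_UFID_F_OMIT_ACTIONS,
 * only the ovs_header, the OVS_FLOW_ATTR_UFID attribute and the fixed
 * stats/tcp_flags/used attributes are reserved; both ovs_key_attr_size()
 * terms and the acts->orig_len term drop out of the total above.
 */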
768
f1948bb9
JS
769/* Called with ovs_mutex or RCU read lock. */
770static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
771 struct sk_buff *skb)
772{
773 struct ovs_flow_stats stats;
774 __be16 tcp_flags;
775 unsigned long used;
776
b0f3a2fe 777 ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
f71db6b1 778
b0f3a2fe
PS
779 if (used &&
780 nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
f1948bb9 781 return -EMSGSIZE;
d6569377 782
b0f3a2fe
PS
783 if (stats.n_packets &&
784 nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
f1948bb9 785 return -EMSGSIZE;
b0b906cc 786
b0f3a2fe
PS
787 if ((u8)ntohs(tcp_flags) &&
788 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
f1948bb9
JS
789 return -EMSGSIZE;
790
791 return 0;
792}
793
794/* Called with ovs_mutex or RCU read lock. */
795static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
796 struct sk_buff *skb, int skb_orig_len)
797{
798 struct nlattr *start;
799 int err;
d6569377 800
df2c07f4 801 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
30053024
BP
802 * this is the first flow to be dumped into 'skb'. This is unusual for
803 * Netlink but individual action lists can be longer than
804 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
805 * The userspace caller can always fetch the actions separately if it
806 * really wants them. (Most userspace callers in fact don't care.)
807 *
808 * This can only fail for dump operations because the skb is always
809 * properly sized for single flows.
810 */
9b405f1a 811 start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
f6f481ee 812 if (start) {
f44ccce1
PS
813 const struct sw_flow_actions *sf_acts;
814
780ec6ae 815 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
a097c0b2
PS
816 err = ovs_nla_put_actions(sf_acts->actions,
817 sf_acts->actions_len, skb);
f71db6b1 818
0a25b039
BP
819 if (!err)
820 nla_nest_end(skb, start);
821 else {
822 if (skb_orig_len)
f1948bb9 823 return err;
0a25b039
BP
824
825 nla_nest_cancel(skb, start);
826 }
f1948bb9
JS
827 } else if (skb_orig_len) {
828 return -EMSGSIZE;
829 }
830
831 return 0;
832}
833
834/* Called with ovs_mutex or RCU read lock. */
2c622e5a 835static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
f1948bb9 836 struct sk_buff *skb, u32 portid,
bc619e29 837 u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
f1948bb9
JS
838{
839 const int skb_orig_len = skb->len;
840 struct ovs_header *ovs_header;
841 int err;
842
7d16c847
PS
843 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
844 flags, cmd);
f1948bb9
JS
845 if (!ovs_header)
846 return -EMSGSIZE;
7d16c847 847
f1948bb9
JS
848 ovs_header->dp_ifindex = dp_ifindex;
849
bc619e29 850 err = ovs_nla_put_identifier(flow, skb);
db7f2238
JS
851 if (err)
852 goto error;
853
bc619e29
JS
854 if (should_fill_key(&flow->id, ufid_flags)) {
855 err = ovs_nla_put_masked_key(flow, skb);
856 if (err)
857 goto error;
858 }
859
860 if (should_fill_mask(ufid_flags)) {
861 err = ovs_nla_put_mask(flow, skb);
862 if (err)
863 goto error;
864 }
f1948bb9
JS
865
866 err = ovs_flow_cmd_fill_stats(flow, skb);
867 if (err)
868 goto error;
869
bc619e29
JS
870 if (should_fill_actions(ufid_flags)) {
871 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
872 if (err)
873 goto error;
874 }
37a1300c 875
23b48dc1
TG
876 genlmsg_end(skb, ovs_header);
877 return 0;
d6569377 878
37a1300c 879error:
df2c07f4 880 genlmsg_cancel(skb, ovs_header);
d6569377 881 return err;
44e05eca
BP
882}
883
f71db6b1
JR
884/* May not be called with RCU read lock. */
885static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
bc619e29 886 const struct sw_flow_id *sfid,
afad3556 887 struct genl_info *info,
bc619e29
JS
888 bool always,
889 uint32_t ufid_flags)
44e05eca 890{
afad3556 891 struct sk_buff *skb;
bc619e29 892 size_t len;
d6569377 893
114fce23
SG
894 if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
895 GROUP_ID(&ovs_dp_flow_multicast_group)))
afad3556
JR
896 return NULL;
897
bc619e29
JS
898 len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
899 skb = genlmsg_new_unicast(len, info, GFP_KERNEL);
afad3556
JR
900 if (!skb)
901 return ERR_PTR(-ENOMEM);
902
903 return skb;
37a1300c 904}
8d5ebd83 905
f71db6b1 906/* Called with ovs_mutex. */
7d16c847 907static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
f71db6b1
JR
908 int dp_ifindex,
909 struct genl_info *info, u8 cmd,
bc619e29 910 bool always, u32 ufid_flags)
37a1300c
BP
911{
912 struct sk_buff *skb;
913 int retval;
d6569377 914
bc619e29
JS
915 skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
916 &flow->id, info, always, ufid_flags);
a6ddcc9a 917 if (IS_ERR_OR_NULL(skb))
afad3556 918 return skb;
d6569377 919
2c622e5a 920 retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
f71db6b1 921 info->snd_portid, info->snd_seq, 0,
bc619e29 922 cmd, ufid_flags);
37a1300c 923 BUG_ON(retval < 0);
d6569377 924 return skb;
064af421
BP
925}
926
0c9fd022 927static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
064af421 928{
a94ebc39 929 struct net *net = sock_net(skb->sk);
37a1300c 930 struct nlattr **a = info->attrs;
df2c07f4 931 struct ovs_header *ovs_header = info->userhdr;
bc619e29 932 struct sw_flow *flow = NULL, *new_flow;
a1c564be 933 struct sw_flow_mask mask;
37a1300c 934 struct sk_buff *reply;
9c52546b 935 struct datapath *dp;
bc619e29 936 struct sw_flow_key key;
0c9fd022 937 struct sw_flow_actions *acts;
a1c564be 938 struct sw_flow_match match;
bc619e29 939 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
bc4a05c6 940 int error;
9233cef7 941 bool log = !a[OVS_FLOW_ATTR_PROBE];
064af421 942
6740b721 943 /* Must have key and actions. */
37a1300c 944 error = -EINVAL;
a473df5b 945 if (!a[OVS_FLOW_ATTR_KEY]) {
7d16c847 946 OVS_NLERR(log, "Flow key attr not present in new flow.");
37a1300c 947 goto error;
a473df5b
JG
948 }
949 if (!a[OVS_FLOW_ATTR_ACTIONS]) {
7d16c847 950 OVS_NLERR(log, "Flow actions attr not present in new flow.");
6740b721 951 goto error;
a473df5b 952 }
a1c564be 953
6740b721 954 /* Most of the time we need to allocate a new flow, so do it before
af465b67
PS
955 * locking.
956 */
6740b721
JR
957 new_flow = ovs_flow_alloc();
958 if (IS_ERR(new_flow)) {
959 error = PTR_ERR(new_flow);
960 goto error;
961 }
962
963 /* Extract key. */
bc619e29 964 ovs_match_init(&match, &key, &mask);
038e34ab 965 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
9233cef7 966 a[OVS_FLOW_ATTR_MASK], log);
37a1300c 967 if (error)
6740b721 968 goto err_kfree_flow;
064af421 969
ad4adec2 970 ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
bc619e29
JS
971
972 /* Extract flow identifier. */
973 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
974 &key, log);
975 if (error)
976 goto err_kfree_flow;
9b405f1a 977
6740b721 978 /* Validate actions. */
a94ebc39
JS
979 error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
980 &new_flow->key, &acts, log);
0c9fd022 981 if (error) {
7d16c847 982 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
4f67b12a 983 goto err_kfree_flow;
6740b721
JR
984 }
985
bc619e29
JS
986 reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
987 ufid_flags);
6740b721
JR
988 if (IS_ERR(reply)) {
989 error = PTR_ERR(reply);
990 goto err_kfree_acts;
37a1300c
BP
991 }
992
cd2a59e9 993 ovs_lock();
a94ebc39 994 dp = get_dp(net, ovs_header->dp_ifindex);
6740b721
JR
995 if (unlikely(!dp)) {
996 error = -ENODEV;
cd2a59e9 997 goto err_unlock_ovs;
6740b721 998 }
bc619e29 999
a1c564be 1000 /* Check if this is a duplicate flow */
bc619e29
JS
1001 if (ovs_identifier_is_ufid(&new_flow->id))
1002 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
1003 if (!flow)
1004 flow = ovs_flow_tbl_lookup(&dp->table, &key);
6740b721
JR
1005 if (likely(!flow)) {
1006 rcu_assign_pointer(new_flow->sf_acts, acts);
d6569377 1007
d6569377 1008 /* Put flow in bucket. */
6740b721
JR
1009 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
1010 if (unlikely(error)) {
0585f7a8 1011 acts = NULL;
6740b721
JR
1012 goto err_unlock_ovs;
1013 }
1014
1015 if (unlikely(reply)) {
2c622e5a 1016 error = ovs_flow_cmd_fill_info(new_flow,
6740b721
JR
1017 ovs_header->dp_ifindex,
1018 reply, info->snd_portid,
1019 info->snd_seq, 0,
bc619e29
JS
1020 OVS_FLOW_CMD_NEW,
1021 ufid_flags);
6740b721 1022 BUG_ON(error < 0);
0585f7a8 1023 }
6740b721 1024 ovs_unlock();
d6569377 1025 } else {
0c9fd022
JR
1026 struct sw_flow_actions *old_acts;
1027
d6569377
BP
1028 /* Bail out if we're not allowed to modify an existing flow.
1029 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1030 * because Generic Netlink treats the latter as a dump
1031 * request. We also accept NLM_F_EXCL in case that bug ever
1032 * gets fixed.
1033 */
6740b721
JR
1034 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1035 | NLM_F_EXCL))) {
1036 error = -EEXIST;
cd2a59e9 1037 goto err_unlock_ovs;
6740b721 1038 }
bc619e29
JS
1039 /* The flow identifier has to be the same for flow updates.
1040 * Look for any overlapping flow.
1041 */
1042 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1043 if (ovs_identifier_is_key(&flow->id))
1044 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1045 &match);
1046 else /* UFID matches but key is different */
1047 flow = NULL;
3440e4bc
AW
1048 if (!flow) {
1049 error = -ENOENT;
1050 goto err_unlock_ovs;
1051 }
6740b721 1052 }
0c9fd022
JR
1053 /* Update actions. */
1054 old_acts = ovsl_dereference(flow->sf_acts);
1055 rcu_assign_pointer(flow->sf_acts, acts);
0c9fd022 1056
6740b721 1057 if (unlikely(reply)) {
2c622e5a 1058 error = ovs_flow_cmd_fill_info(flow,
6740b721
JR
1059 ovs_header->dp_ifindex,
1060 reply, info->snd_portid,
1061 info->snd_seq, 0,
bc619e29
JS
1062 OVS_FLOW_CMD_NEW,
1063 ufid_flags);
6740b721
JR
1064 BUG_ON(error < 0);
1065 }
1066 ovs_unlock();
0c9fd022 1067
e23775f2 1068 ovs_nla_free_flow_actions_rcu(old_acts);
6740b721 1069 ovs_flow_free(new_flow, false);
0c9fd022 1070 }
6740b721
JR
1071
1072 if (reply)
cb25142c 1073 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
0c9fd022
JR
1074 return 0;
1075
0c9fd022
JR
1076err_unlock_ovs:
1077 ovs_unlock();
6740b721
JR
1078 kfree_skb(reply);
1079err_kfree_acts:
e23775f2 1080 ovs_nla_free_flow_actions(acts);
6740b721
JR
1081err_kfree_flow:
1082 ovs_flow_free(new_flow, false);
0c9fd022
JR
1083error:
1084 return error;
1085}
1086
cc561abf 1087/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
a94ebc39
JS
1088static struct sw_flow_actions *get_flow_actions(struct net *net,
1089 const struct nlattr *a,
cc561abf 1090 const struct sw_flow_key *key,
9233cef7
JR
1091 const struct sw_flow_mask *mask,
1092 bool log)
cc561abf
PS
1093{
1094 struct sw_flow_actions *acts;
1095 struct sw_flow_key masked_key;
1096 int error;
1097
ad4adec2 1098 ovs_flow_mask_key(&masked_key, key, true, mask);
a94ebc39 1099 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
cc561abf 1100 if (error) {
9233cef7 1101 OVS_NLERR(log,
7d16c847 1102 "Actions may not be safe on all matching packets");
cc561abf
PS
1103 return ERR_PTR(error);
1104 }
1105
1106 return acts;
1107}
1108
0c9fd022
JR
1109static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1110{
a94ebc39 1111 struct net *net = sock_net(skb->sk);
0c9fd022
JR
1112 struct nlattr **a = info->attrs;
1113 struct ovs_header *ovs_header = info->userhdr;
1d2a1b5f 1114 struct sw_flow_key key;
0c9fd022
JR
1115 struct sw_flow *flow;
1116 struct sw_flow_mask mask;
1117 struct sk_buff *reply = NULL;
1118 struct datapath *dp;
6740b721 1119 struct sw_flow_actions *old_acts = NULL, *acts = NULL;
0c9fd022 1120 struct sw_flow_match match;
bc619e29
JS
1121 struct sw_flow_id sfid;
1122 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
0c9fd022 1123 int error;
9233cef7 1124 bool log = !a[OVS_FLOW_ATTR_PROBE];
bc619e29 1125 bool ufid_present;
0c9fd022
JR
1126
1127 /* Extract key. */
1128 error = -EINVAL;
a473df5b 1129 if (!a[OVS_FLOW_ATTR_KEY]) {
9233cef7 1130 OVS_NLERR(log, "Flow key attribute not present in set flow.");
0c9fd022 1131 goto error;
a473df5b 1132 }
0c9fd022 1133
bc619e29 1134 ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
0c9fd022 1135 ovs_match_init(&match, &key, &mask);
038e34ab 1136 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
9233cef7 1137 a[OVS_FLOW_ATTR_MASK], log);
0c9fd022
JR
1138 if (error)
1139 goto error;
d6569377 1140
0c9fd022
JR
1141 /* Validate actions. */
1142 if (a[OVS_FLOW_ATTR_ACTIONS]) {
a94ebc39
JS
1143 acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
1144 &mask, log);
cc561abf
PS
1145 if (IS_ERR(acts)) {
1146 error = PTR_ERR(acts);
0c9fd022 1147 goto error;
6740b721 1148 }
6740b721 1149
ff27161e 1150 /* Can allocate before locking if we have acts. */
bc619e29
JS
1151 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1152 ufid_flags);
6740b721
JR
1153 if (IS_ERR(reply)) {
1154 error = PTR_ERR(reply);
1155 goto err_kfree_acts;
90b8c2f7 1156 }
0c9fd022
JR
1157 }
1158
1159 ovs_lock();
a94ebc39 1160 dp = get_dp(net, ovs_header->dp_ifindex);
6740b721
JR
1161 if (unlikely(!dp)) {
1162 error = -ENODEV;
0c9fd022 1163 goto err_unlock_ovs;
6740b721 1164 }
0c9fd022 1165 /* Check that the flow exists. */
bc619e29
JS
1166 if (ufid_present)
1167 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1168 else
1169 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
6740b721
JR
1170 if (unlikely(!flow)) {
1171 error = -ENOENT;
0c9fd022 1172 goto err_unlock_ovs;
6740b721 1173 }
3440e4bc 1174
0c9fd022 1175 /* Update actions, if present. */
6740b721 1176 if (likely(acts)) {
0c9fd022
JR
1177 old_acts = ovsl_dereference(flow->sf_acts);
1178 rcu_assign_pointer(flow->sf_acts, acts);
6740b721
JR
1179
1180 if (unlikely(reply)) {
2c622e5a 1181 error = ovs_flow_cmd_fill_info(flow,
6740b721
JR
1182 ovs_header->dp_ifindex,
1183 reply, info->snd_portid,
1184 info->snd_seq, 0,
bc619e29
JS
1185 OVS_FLOW_CMD_NEW,
1186 ufid_flags);
6740b721
JR
1187 BUG_ON(error < 0);
1188 }
1189 } else {
1190 /* Could not alloc without acts before locking. */
7d16c847 1191 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
bc619e29
JS
1192 info, OVS_FLOW_CMD_NEW, false,
1193 ufid_flags);
1194
6740b721
JR
1195 if (unlikely(IS_ERR(reply))) {
1196 error = PTR_ERR(reply);
1197 goto err_unlock_ovs;
1198 }
9c52546b 1199 }
0c9fd022 1200
0c9fd022
JR
1201 /* Clear stats. */
1202 if (a[OVS_FLOW_ATTR_CLEAR])
1203 ovs_flow_stats_clear(flow);
cd2a59e9 1204 ovs_unlock();
37a1300c 1205
6740b721 1206 if (reply)
cb25142c 1207 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
6740b721 1208 if (old_acts)
e23775f2 1209 ovs_nla_free_flow_actions_rcu(old_acts);
7d16c847 1210
d6569377 1211 return 0;
704a1e09 1212
cd2a59e9
PS
1213err_unlock_ovs:
1214 ovs_unlock();
6740b721
JR
1215 kfree_skb(reply);
1216err_kfree_acts:
e23775f2 1217 ovs_nla_free_flow_actions(acts);
37a1300c 1218error:
9c52546b 1219 return error;
704a1e09
BP
1220}
1221
df2c07f4 1222static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
704a1e09 1223{
37a1300c 1224 struct nlattr **a = info->attrs;
df2c07f4 1225 struct ovs_header *ovs_header = info->userhdr;
038e34ab 1226 struct net *net = sock_net(skb->sk);
37a1300c 1227 struct sw_flow_key key;
37a1300c 1228 struct sk_buff *reply;
704a1e09 1229 struct sw_flow *flow;
9c52546b 1230 struct datapath *dp;
a1c564be 1231 struct sw_flow_match match;
bc619e29
JS
1232 struct sw_flow_id ufid;
1233 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1234 int err = 0;
9233cef7 1235 bool log = !a[OVS_FLOW_ATTR_PROBE];
bc619e29 1236 bool ufid_present;
704a1e09 1237
bc619e29
JS
1238 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1239 if (a[OVS_FLOW_ATTR_KEY]) {
1240 ovs_match_init(&match, &key, NULL);
038e34ab 1241 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
bc619e29
JS
1242 log);
1243 } else if (!ufid_present) {
9233cef7
JR
1244 OVS_NLERR(log,
1245 "Flow get message rejected, Key attribute missing.");
bc619e29 1246 err = -EINVAL;
1b936472 1247 }
37a1300c
BP
1248 if (err)
1249 return err;
704a1e09 1250
cd2a59e9 1251 ovs_lock();
2a4999f3 1252 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
cd2a59e9
PS
1253 if (!dp) {
1254 err = -ENODEV;
1255 goto unlock;
1256 }
704a1e09 1257
bc619e29
JS
1258 if (ufid_present)
1259 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1260 else
1261 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
3440e4bc 1262 if (!flow) {
cd2a59e9
PS
1263 err = -ENOENT;
1264 goto unlock;
1265 }
d6569377 1266
7d16c847 1267 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
bc619e29 1268 OVS_FLOW_CMD_NEW, true, ufid_flags);
cd2a59e9
PS
1269 if (IS_ERR(reply)) {
1270 err = PTR_ERR(reply);
1271 goto unlock;
1272 }
36956a7d 1273
cd2a59e9 1274 ovs_unlock();
37a1300c 1275 return genlmsg_reply(reply, info);
cd2a59e9
PS
1276unlock:
1277 ovs_unlock();
1278 return err;
d6569377 1279}
9c52546b 1280
df2c07f4 1281static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
d6569377 1282{
37a1300c 1283 struct nlattr **a = info->attrs;
df2c07f4 1284 struct ovs_header *ovs_header = info->userhdr;
038e34ab 1285 struct net *net = sock_net(skb->sk);
37a1300c 1286 struct sw_flow_key key;
37a1300c 1287 struct sk_buff *reply;
bc619e29 1288 struct sw_flow *flow = NULL;
d6569377 1289 struct datapath *dp;
a1c564be 1290 struct sw_flow_match match;
bc619e29
JS
1291 struct sw_flow_id ufid;
1292 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
d6569377 1293 int err;
9233cef7 1294 bool log = !a[OVS_FLOW_ATTR_PROBE];
bc619e29 1295 bool ufid_present;
36956a7d 1296
bc619e29
JS
1297 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1298 if (a[OVS_FLOW_ATTR_KEY]) {
cde7f3ba 1299 ovs_match_init(&match, &key, NULL);
038e34ab
JS
1300 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1301 NULL, log);
cde7f3ba
JR
1302 if (unlikely(err))
1303 return err;
1304 }
1305
cd2a59e9 1306 ovs_lock();
2a4999f3 1307 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
cde7f3ba 1308 if (unlikely(!dp)) {
cd2a59e9
PS
1309 err = -ENODEV;
1310 goto unlock;
1311 }
7d16c847 1312
bc619e29 1313 if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
994dc286 1314 err = ovs_flow_tbl_flush(&dp->table);
cd2a59e9
PS
1315 goto unlock;
1316 }
7d16c847 1317
bc619e29
JS
1318 if (ufid_present)
1319 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1320 else
1321 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
3440e4bc 1322 if (unlikely(!flow)) {
cd2a59e9
PS
1323 err = -ENOENT;
1324 goto unlock;
1325 }
d6569377 1326
994dc286 1327 ovs_flow_tbl_remove(&dp->table, flow);
cde7f3ba 1328 ovs_unlock();
37a1300c 1329
46051cf8 1330 reply = ovs_flow_cmd_alloc_info(rcu_dereference_raw(flow->sf_acts),
bc619e29 1331 &flow->id, info, false, ufid_flags);
cde7f3ba
JR
1332
1333 if (likely(reply)) {
1334 if (likely(!IS_ERR(reply))) {
7d16c847
PS
1335 rcu_read_lock(); /* To keep RCU checker happy. */
1336 err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
cde7f3ba
JR
1337 reply, info->snd_portid,
1338 info->snd_seq, 0,
bc619e29
JS
1339 OVS_FLOW_CMD_DEL,
1340 ufid_flags);
cde7f3ba
JR
1341 rcu_read_unlock();
1342 BUG_ON(err < 0);
cb25142c 1343 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
cde7f3ba 1344 } else {
cb25142c
PS
1345 genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
1346 GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));
1347
cde7f3ba 1348 }
afad3556 1349 }
37a1300c 1350
a1c564be 1351 ovs_flow_free(flow, true);
37a1300c 1352 return 0;
cd2a59e9
PS
1353unlock:
1354 ovs_unlock();
1355 return err;
37a1300c
BP
1356}
1357
df2c07f4 1358static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
37a1300c 1359{
bc619e29 1360 struct nlattr *a[__OVS_FLOW_ATTR_MAX];
df2c07f4 1361 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
994dc286 1362 struct table_instance *ti;
37a1300c 1363 struct datapath *dp;
bc619e29
JS
1364 u32 ufid_flags;
1365 int err;
1366
1367 err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
1368 OVS_FLOW_ATTR_MAX, flow_policy);
1369 if (err)
1370 return err;
1371 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
37a1300c 1372
f44ccce1 1373 rcu_read_lock();
01ac0970 1374 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
cd2a59e9 1375 if (!dp) {
f44ccce1 1376 rcu_read_unlock();
37a1300c 1377 return -ENODEV;
cd2a59e9 1378 }
37a1300c 1379
994dc286 1380 ti = rcu_dereference(dp->table.ti);
37a1300c 1381 for (;;) {
37a1300c
BP
1382 struct sw_flow *flow;
1383 u32 bucket, obj;
1384
1385 bucket = cb->args[0];
1386 obj = cb->args[1];
994dc286 1387 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
3544358a 1388 if (!flow)
37a1300c
BP
1389 break;
1390
2c622e5a 1391 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
28aea917 1392 NETLINK_CB(cb->skb).portid,
37a1300c 1393 cb->nlh->nlmsg_seq, NLM_F_MULTI,
bc619e29 1394 OVS_FLOW_CMD_NEW, ufid_flags) < 0)
37a1300c
BP
1395 break;
1396
1397 cb->args[0] = bucket;
1398 cb->args[1] = obj;
1399 }
f44ccce1 1400 rcu_read_unlock();
37a1300c 1401 return skb->len;
704a1e09
BP
1402}
1403
cb25142c
PS
1404static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1405 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
9233cef7 1406 [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
cb25142c
PS
1407 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1408 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
9233cef7 1409 [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
bc619e29
JS
1410 [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1411 [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
cb25142c
PS
1412};
1413
18fd3a52 1414static struct genl_ops dp_flow_genl_ops[] = {
df2c07f4 1415 { .cmd = OVS_FLOW_CMD_NEW,
37a1300c
BP
1416 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1417 .policy = flow_policy,
0c9fd022 1418 .doit = ovs_flow_cmd_new
37a1300c 1419 },
df2c07f4 1420 { .cmd = OVS_FLOW_CMD_DEL,
37a1300c
BP
1421 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1422 .policy = flow_policy,
df2c07f4 1423 .doit = ovs_flow_cmd_del
37a1300c 1424 },
df2c07f4 1425 { .cmd = OVS_FLOW_CMD_GET,
37a1300c
BP
1426 .flags = 0, /* OK for unprivileged users. */
1427 .policy = flow_policy,
df2c07f4
JP
1428 .doit = ovs_flow_cmd_get,
1429 .dumpit = ovs_flow_cmd_dump
37a1300c 1430 },
df2c07f4 1431 { .cmd = OVS_FLOW_CMD_SET,
37a1300c
BP
1432 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1433 .policy = flow_policy,
0c9fd022 1434 .doit = ovs_flow_cmd_set,
37a1300c
BP
1435 },
1436};
1437
cb25142c 1438static struct genl_family dp_flow_genl_family = {
aaff4b55 1439 .id = GENL_ID_GENERATE,
df2c07f4 1440 .hdrsize = sizeof(struct ovs_header),
cb25142c
PS
1441 .name = OVS_FLOW_FAMILY,
1442 .version = OVS_FLOW_VERSION,
1443 .maxattr = OVS_FLOW_ATTR_MAX,
b3dcb73c 1444 .netnsok = true,
cb25142c
PS
1445 .parallel_ops = true,
1446 .ops = dp_flow_genl_ops,
1447 .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1448 .mcgrps = &ovs_dp_flow_multicast_group,
1449 .n_mcgrps = 1,
aaff4b55
BP
1450};
1451
0afa2373
TG
1452static size_t ovs_dp_cmd_msg_size(void)
1453{
1454 size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1455
1456 msgsize += nla_total_size(IFNAMSIZ);
1457 msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
4fa72a95 1458 msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
300af20a 1459 msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
0afa2373
TG
1460
1461 return msgsize;
1462}
1463
d637497c 1464/* Called with ovs_mutex. */
df2c07f4 1465static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
28aea917 1466 u32 portid, u32 seq, u32 flags, u8 cmd)
064af421 1467{
df2c07f4 1468 struct ovs_header *ovs_header;
e926dfe3 1469 struct ovs_dp_stats dp_stats;
4fa72a95 1470 struct ovs_dp_megaflow_stats dp_megaflow_stats;
064af421
BP
1471 int err;
1472
28aea917 1473 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
aaff4b55 1474 flags, cmd);
df2c07f4 1475 if (!ovs_header)
aaff4b55 1476 goto error;
064af421 1477
b063d9f0 1478 ovs_header->dp_ifindex = get_dpifindex(dp);
064af421 1479
850b6b3b 1480 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
064af421 1481 if (err)
d6569377 1482 goto nla_put_failure;
064af421 1483
4fa72a95
AZ
1484 get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1485 if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1486 &dp_stats))
1487 goto nla_put_failure;
1488
1489 if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1490 sizeof(struct ovs_dp_megaflow_stats),
1491 &dp_megaflow_stats))
c3cc8c03 1492 goto nla_put_failure;
d6569377 1493
c58cc9a4
TG
1494 if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1495 goto nla_put_failure;
1496
23b48dc1
TG
1497 genlmsg_end(skb, ovs_header);
1498 return 0;
d6569377
BP
1499
1500nla_put_failure:
df2c07f4 1501 genlmsg_cancel(skb, ovs_header);
aaff4b55
BP
1502error:
1503 return -EMSGSIZE;
d6569377
BP
1504}
1505
d81eef1b 1506static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
d6569377 1507{
d81eef1b 1508 return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
aaff4b55 1509}
9dca7bd5 1510
aa917006 1511/* Called with rcu_read_lock or ovs_mutex. */
2a4999f3 1512static struct datapath *lookup_datapath(struct net *net,
f1f60b85 1513 const struct ovs_header *ovs_header,
6455100f 1514 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
d6569377 1515{
254f2dc8
BP
1516 struct datapath *dp;
1517
df2c07f4 1518 if (!a[OVS_DP_ATTR_NAME])
2a4999f3 1519 dp = get_dp(net, ovs_header->dp_ifindex);
254f2dc8 1520 else {
d6569377 1521 struct vport *vport;
d6569377 1522
2a4999f3 1523 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
df2c07f4 1524 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
d6569377 1525 }
254f2dc8 1526 return dp ? dp : ERR_PTR(-ENODEV);
d6569377
BP
1527}
1528
94358dcf
TG
1529static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1530{
1531 struct datapath *dp;
1532
1533 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
09350a3d 1534 if (IS_ERR(dp))
94358dcf
TG
1535 return;
1536
1537 WARN(dp->user_features, "Dropping previously announced user features\n");
1538 dp->user_features = 0;
1539}
1540
f1f60b85 1541static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
c58cc9a4
TG
1542{
1543 if (a[OVS_DP_ATTR_USER_FEATURES])
1544 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1545}
1546
df2c07f4 1547static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
d6569377 1548{
aaff4b55 1549 struct nlattr **a = info->attrs;
d6569377 1550 struct vport_parms parms;
aaff4b55 1551 struct sk_buff *reply;
d6569377
BP
1552 struct datapath *dp;
1553 struct vport *vport;
2a4999f3 1554 struct ovs_net *ovs_net;
95b1d73a 1555 int err, i;
d6569377 1556
d6569377 1557 err = -EINVAL;
ea36840f 1558 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
aaff4b55
BP
1559 goto err;
1560
d81eef1b
JR
1561 reply = ovs_dp_cmd_alloc_info(info);
1562 if (!reply)
1563 return -ENOMEM;
d6569377 1564
d6569377
BP
1565 err = -ENOMEM;
1566 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1567 if (dp == NULL)
d81eef1b 1568 goto err_free_reply;
2a4999f3 1569
c0cddcec 1570 ovs_dp_set_net(dp, sock_net(skb->sk));
0ceaa66c 1571
d6569377 1572 /* Allocate table. */
994dc286
PS
1573 err = ovs_flow_tbl_init(&dp->table);
1574 if (err)
d6569377
BP
1575 goto err_free_dp;
1576
08fb1bbd 1577 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
99769a40
JG
1578 if (!dp->stats_percpu) {
1579 err = -ENOMEM;
1580 goto err_destroy_table;
1581 }
1582
95b1d73a
PS
1583 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1584 GFP_KERNEL);
1585 if (!dp->ports) {
1586 err = -ENOMEM;
1587 goto err_destroy_percpu;
1588 }
1589
1590 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1591 INIT_HLIST_HEAD(&dp->ports[i]);
1592
d6569377 1593 /* Set up our datapath device. */
df2c07f4
JP
1594 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1595 parms.type = OVS_VPORT_TYPE_INTERNAL;
d6569377
BP
1596 parms.options = NULL;
1597 parms.dp = dp;
df2c07f4 1598 parms.port_no = OVSP_LOCAL;
beb1c69a 1599 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
b063d9f0 1600
c58cc9a4
TG
1601 ovs_dp_change(dp, a);
1602
d81eef1b
JR
1603 /* So far only local changes have been made; now we need the lock. */
1604 ovs_lock();
1605
d6569377
BP
1606 vport = new_vport(&parms);
1607 if (IS_ERR(vport)) {
1608 err = PTR_ERR(vport);
1609 if (err == -EBUSY)
1610 err = -EEXIST;
1611
94358dcf
TG
1612 if (err == -EEXIST) {
1613 /* An outdated user space instance that does not understand
1614 * the concept of user_features has attempted to create a new
1615 * datapath and is likely to reuse it. Drop all user features.
1616 */
1617 if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1618 ovs_dp_reset_user_features(skb, info);
1619 }
1620
95b1d73a 1621 goto err_destroy_ports_array;
d6569377 1622 }
d6569377 1623
d81eef1b
JR
1624 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1625 info->snd_seq, 0, OVS_DP_CMD_NEW);
1626 BUG_ON(err < 0);
aaff4b55 1627
2a4999f3 1628 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
fb93e9aa 1629 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
a0fb56c1 1630
cd2a59e9 1631 ovs_unlock();
d6569377 1632
cb25142c 1633 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
d6569377
BP
1634 return 0;
1635
95b1d73a 1636err_destroy_ports_array:
d81eef1b 1637 ovs_unlock();
95b1d73a 1638 kfree(dp->ports);
99769a40
JG
1639err_destroy_percpu:
1640 free_percpu(dp->stats_percpu);
d6569377 1641err_destroy_table:
e379e4d1 1642 ovs_flow_tbl_destroy(&dp->table);
d6569377 1643err_free_dp:
d6569377 1644 kfree(dp);
d81eef1b
JR
1645err_free_reply:
1646 kfree_skb(reply);
d6569377 1647err:
064af421
BP
1648 return err;
1649}
1650
cd2a59e9 1651/* Called with ovs_mutex. */
2a4999f3 1652static void __dp_destroy(struct datapath *dp)
44e05eca 1653{
95b1d73a 1654 int i;
44e05eca 1655
95b1d73a
PS
1656 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1657 struct vport *vport;
f8dfbcb7 1658 struct hlist_node *n;
95b1d73a 1659
f8dfbcb7 1660 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
95b1d73a
PS
1661 if (vport->port_no != OVSP_LOCAL)
1662 ovs_dp_detach_port(vport);
1663 }
ed099e92 1664
fb93e9aa 1665 list_del_rcu(&dp->list_node);
ed099e92 1666
cd2a59e9 1667 /* OVSP_LOCAL is the datapath internal port. We need to make sure that
d103f479
AZ
1668 * all ports in the datapath are destroyed before freeing the datapath.
1669 */
cd2a59e9 1670 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
99620d2c 1671
d103f479 1672 /* RCU destroy the flow table */
ed099e92 1673 call_rcu(&dp->rcu, destroy_dp_rcu);
2a4999f3
PS
1674}
1675
1676static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1677{
1678 struct sk_buff *reply;
1679 struct datapath *dp;
1680 int err;
1681
d81eef1b
JR
1682 reply = ovs_dp_cmd_alloc_info(info);
1683 if (!reply)
1684 return -ENOMEM;
1685
cd2a59e9 1686 ovs_lock();
2a4999f3
PS
1687 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1688 err = PTR_ERR(dp);
1689 if (IS_ERR(dp))
d81eef1b 1690 goto err_unlock_free;
2a4999f3 1691
d81eef1b
JR
1692 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1693 info->snd_seq, 0, OVS_DP_CMD_DEL);
1694 BUG_ON(err < 0);
2a4999f3
PS
1695
1696 __dp_destroy(dp);
d81eef1b 1697 ovs_unlock();
7d16c847 1698
cb25142c 1699 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
99620d2c 1700 return 0;
d81eef1b
JR
1701
1702err_unlock_free:
cd2a59e9 1703 ovs_unlock();
d81eef1b 1704 kfree_skb(reply);
cd2a59e9 1705 return err;
44e05eca
BP
1706}
1707
df2c07f4 1708static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
064af421 1709{
aaff4b55 1710 struct sk_buff *reply;
d6569377 1711 struct datapath *dp;
d6569377 1712 int err;
064af421 1713
d81eef1b
JR
1714 reply = ovs_dp_cmd_alloc_info(info);
1715 if (!reply)
1716 return -ENOMEM;
1717
cd2a59e9 1718 ovs_lock();
2a4999f3 1719 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
cd2a59e9 1720 err = PTR_ERR(dp);
d6569377 1721 if (IS_ERR(dp))
d81eef1b 1722 goto err_unlock_free;
38c6ecbc 1723
c58cc9a4
TG
1724 ovs_dp_change(dp, info->attrs);
1725
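	/* In this version of the datapath, ovs_dp_change() only updates the
	 * user-features flags (OVS_DP_ATTR_USER_FEATURES); other properties
	 * of an existing datapath cannot be changed here.
	 */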
d81eef1b
JR
1726 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1727 info->snd_seq, 0, OVS_DP_CMD_NEW);
1728 BUG_ON(err < 0);
a0fb56c1 1729
cd2a59e9 1730 ovs_unlock();
7d16c847 1731
cb25142c 1732 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
aaff4b55 1733 return 0;
d81eef1b
JR
1734
1735err_unlock_free:
cd2a59e9 1736 ovs_unlock();
d81eef1b 1737 kfree_skb(reply);
cd2a59e9 1738 return err;
064af421
BP
1739}
1740
df2c07f4 1741static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1dcf111b 1742{
aaff4b55 1743 struct sk_buff *reply;
d6569377 1744 struct datapath *dp;
d6569377 1745 int err;
1dcf111b 1746
d81eef1b
JR
1747 reply = ovs_dp_cmd_alloc_info(info);
1748 if (!reply)
1749 return -ENOMEM;
1750
d637497c 1751 ovs_lock();
2a4999f3 1752 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
cd2a59e9
PS
1753 if (IS_ERR(dp)) {
1754 err = PTR_ERR(dp);
d81eef1b 1755 goto err_unlock_free;
cd2a59e9 1756 }
d81eef1b
JR
1757 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1758 info->snd_seq, 0, OVS_DP_CMD_NEW);
1759 BUG_ON(err < 0);
d637497c 1760 ovs_unlock();
aaff4b55
BP
1761
1762 return genlmsg_reply(reply, info);
cd2a59e9 1763
d81eef1b 1764err_unlock_free:
d637497c 1765 ovs_unlock();
d81eef1b 1766 kfree_skb(reply);
cd2a59e9 1767 return err;
1dcf111b
JP
1768}
1769
df2c07f4 1770static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
a7786963 1771{
2a4999f3 1772 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
254f2dc8
BP
1773 struct datapath *dp;
1774 int skip = cb->args[0];
1775 int i = 0;
a7786963 1776
d637497c
PS
1777 ovs_lock();
1778 list_for_each_entry(dp, &ovs_net->dps, list_node) {
a2bab2f0 1779 if (i >= skip &&
28aea917 1780 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
aaff4b55 1781 cb->nlh->nlmsg_seq, NLM_F_MULTI,
df2c07f4 1782 OVS_DP_CMD_NEW) < 0)
aaff4b55 1783 break;
254f2dc8 1784 i++;
a7786963 1785 }
d637497c 1786 ovs_unlock();
aaff4b55 1787
254f2dc8
BP
1788 cb->args[0] = i;
1789
aaff4b55 1790 return skb->len;
c19e6535
BP
1791}
1792
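/* Netlink dumps may be split across several calls; ovs_dp_cmd_dump() keeps
 * its position in cb->args[0] (the number of datapaths already emitted) so
 * that a resumed dump skips them.
 */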
cb25142c
PS
1793static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1794 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1795 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1796 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1797};
1798
18fd3a52 1799static struct genl_ops dp_datapath_genl_ops[] = {
df2c07f4 1800 { .cmd = OVS_DP_CMD_NEW,
aaff4b55
BP
1801 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1802 .policy = datapath_policy,
df2c07f4 1803 .doit = ovs_dp_cmd_new
aaff4b55 1804 },
df2c07f4 1805 { .cmd = OVS_DP_CMD_DEL,
aaff4b55
BP
1806 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1807 .policy = datapath_policy,
df2c07f4 1808 .doit = ovs_dp_cmd_del
aaff4b55 1809 },
df2c07f4 1810 { .cmd = OVS_DP_CMD_GET,
aaff4b55
BP
1811 .flags = 0, /* OK for unprivileged users. */
1812 .policy = datapath_policy,
df2c07f4
JP
1813 .doit = ovs_dp_cmd_get,
1814 .dumpit = ovs_dp_cmd_dump
aaff4b55 1815 },
df2c07f4 1816 { .cmd = OVS_DP_CMD_SET,
aaff4b55
BP
1817 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1818 .policy = datapath_policy,
df2c07f4 1819 .doit = ovs_dp_cmd_set,
aaff4b55
BP
1820 },
1821};
1822
cb25142c 1823static struct genl_family dp_datapath_genl_family = {
f0fef760 1824 .id = GENL_ID_GENERATE,
df2c07f4 1825 .hdrsize = sizeof(struct ovs_header),
cb25142c
PS
1826 .name = OVS_DATAPATH_FAMILY,
1827 .version = OVS_DATAPATH_VERSION,
1828 .maxattr = OVS_DP_ATTR_MAX,
b3dcb73c 1829 .netnsok = true,
cb25142c
PS
1830 .parallel_ops = true,
1831 .ops = dp_datapath_genl_ops,
1832 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1833 .mcgrps = &ovs_dp_datapath_multicast_group,
1834 .n_mcgrps = 1,
f0fef760
BP
1835};
1836
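/* The family above is registered with a dynamically assigned ID
 * (GENL_ID_GENERATE) and parallel_ops set, so the genetlink core does not
 * serialize its handlers; ovs_lock() and RCU inside the handlers provide
 * the required exclusion instead.
 */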
cd2a59e9 1837/* Called with ovs_mutex or RCU read lock. */
df2c07f4 1838static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
28aea917 1839 u32 portid, u32 seq, u32 flags, u8 cmd)
064af421 1840{
df2c07f4 1841 struct ovs_header *ovs_header;
e926dfe3 1842 struct ovs_vport_stats vport_stats;
c19e6535
BP
1843 int err;
1844
28aea917 1845 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
f0fef760 1846 flags, cmd);
df2c07f4 1847 if (!ovs_header)
f0fef760 1848 return -EMSGSIZE;
c19e6535 1849
99769a40 1850 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
c19e6535 1851
c3cc8c03
DM
1852 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1853 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
e23775f2
PS
1854 nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1855 ovs_vport_name(vport)))
c3cc8c03 1856 goto nla_put_failure;
c19e6535 1857
850b6b3b 1858 ovs_vport_get_stats(vport, &vport_stats);
c3cc8c03
DM
1859 if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1860 &vport_stats))
1861 goto nla_put_failure;
c19e6535 1862
beb1c69a
AW
1863 if (ovs_vport_get_upcall_portids(vport, skb))
1864 goto nla_put_failure;
1865
850b6b3b 1866 err = ovs_vport_get_options(vport, skb);
f0fef760
BP
1867 if (err == -EMSGSIZE)
1868 goto error;
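	/* Only -EMSGSIZE aborts building the message; any other error from
	 * ovs_vport_get_options() is ignored, so the vport is still reported,
	 * just without its options.
	 */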
c19e6535 1869
23b48dc1
TG
1870 genlmsg_end(skb, ovs_header);
1871 return 0;
c19e6535
BP
1872
1873nla_put_failure:
1874 err = -EMSGSIZE;
f0fef760 1875error:
df2c07f4 1876 genlmsg_cancel(skb, ovs_header);
f0fef760 1877 return err;
064af421
BP
1878}
1879
d81eef1b
JR
1880static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1881{
1882 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1883}
1884
1885/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
28aea917 1886struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
f14d8083 1887 u32 seq, u8 cmd)
064af421 1888{
c19e6535 1889 struct sk_buff *skb;
f0fef760 1890 int retval;
c19e6535 1891
f0fef760 1892 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
c19e6535
BP
1893 if (!skb)
1894 return ERR_PTR(-ENOMEM);
1895
28aea917 1896 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
c25ea534
JG
1897 BUG_ON(retval < 0);
1898
c19e6535 1899 return skb;
f0fef760 1900}
c19e6535 1901
cd2a59e9 1902/* Called with ovs_mutex or RCU read lock. */
2a4999f3 1903static struct vport *lookup_vport(struct net *net,
f1f60b85 1904 const struct ovs_header *ovs_header,
df2c07f4 1905 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
c19e6535
BP
1906{
1907 struct datapath *dp;
1908 struct vport *vport;
1909
df2c07f4 1910 if (a[OVS_VPORT_ATTR_NAME]) {
2a4999f3 1911 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
ed099e92 1912 if (!vport)
c19e6535 1913 return ERR_PTR(-ENODEV);
24ce832d
BP
1914 if (ovs_header->dp_ifindex &&
1915 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1916 return ERR_PTR(-ENODEV);
c19e6535 1917 return vport;
df2c07f4
JP
1918 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1919 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
c19e6535
BP
1920
1921 if (port_no >= DP_MAX_PORTS)
f0fef760 1922 return ERR_PTR(-EFBIG);
c19e6535 1923
2a4999f3 1924 dp = get_dp(net, ovs_header->dp_ifindex);
c19e6535
BP
1925 if (!dp)
1926 return ERR_PTR(-ENODEV);
f2459fe7 1927
cd2a59e9 1928 vport = ovs_vport_ovsl_rcu(dp, port_no);
ed099e92 1929 if (!vport)
17535c57 1930 return ERR_PTR(-ENODEV);
c19e6535
BP
1931 return vport;
1932 } else
1933 return ERR_PTR(-EINVAL);
064af421
BP
1934}
1935
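/* lookup_vport() above accepts either a vport name, which is resolved across
 * the whole namespace and then checked against dp_ifindex when one is given,
 * or a port number, which always requires a valid dp_ifindex.
 */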
df2c07f4 1936static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
c19e6535 1937{
f0fef760 1938 struct nlattr **a = info->attrs;
df2c07f4 1939 struct ovs_header *ovs_header = info->userhdr;
c19e6535 1940 struct vport_parms parms;
ed099e92 1941 struct sk_buff *reply;
c19e6535 1942 struct vport *vport;
c19e6535 1943 struct datapath *dp;
b0ec0f27 1944 u32 port_no;
c19e6535 1945 int err;
b0ec0f27 1946
ea36840f
BP
1947 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1948 !a[OVS_VPORT_ATTR_UPCALL_PID])
d81eef1b
JR
1949 return -EINVAL;
1950
1951 port_no = a[OVS_VPORT_ATTR_PORT_NO]
1952 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1953 if (port_no >= DP_MAX_PORTS)
1954 return -EFBIG;
1955
1956 reply = ovs_vport_cmd_alloc_info();
1957 if (!reply)
1958 return -ENOMEM;
f0fef760 1959
cd2a59e9 1960 ovs_lock();
5a38795f 1961restart:
2a4999f3 1962 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
c19e6535
BP
1963 err = -ENODEV;
1964 if (!dp)
d81eef1b 1965 goto exit_unlock_free;
c19e6535 1966
d81eef1b 1967 if (port_no) {
cd2a59e9 1968 vport = ovs_vport_ovsl(dp, port_no);
c19e6535
BP
1969 err = -EBUSY;
1970 if (vport)
d81eef1b 1971 goto exit_unlock_free;
c19e6535
BP
1972 } else {
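		/* No port number was requested: pick the lowest free one,
		 * starting at 1 (port 0 is the OVSP_LOCAL internal port).
		 */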
1973 for (port_no = 1; ; port_no++) {
1974 if (port_no >= DP_MAX_PORTS) {
1975 err = -EFBIG;
d81eef1b 1976 goto exit_unlock_free;
c19e6535 1977 }
cd2a59e9 1978 vport = ovs_vport_ovsl(dp, port_no);
c19e6535
BP
1979 if (!vport)
1980 break;
51d4d598 1981 }
064af421 1982 }
b0ec0f27 1983
df2c07f4
JP
1984 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1985 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1986 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
c19e6535
BP
1987 parms.dp = dp;
1988 parms.port_no = port_no;
beb1c69a 1989 parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
c19e6535
BP
1990
1991 vport = new_vport(&parms);
1992 err = PTR_ERR(vport);
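	/* An -EAGAIN from new_vport() usually means the module for a
	 * previously unknown vport type was just loaded on demand; restart
	 * so the retried lookup can find the freshly registered ops
	 * (assumption based on the vport-add path in vport.c).
	 */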
5a38795f
TG
1993 if (IS_ERR(vport)) {
1994 if (err == -EAGAIN)
1995 goto restart;
d81eef1b 1996 goto exit_unlock_free;
5a38795f 1997 }
c19e6535 1998
d81eef1b
JR
1999 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2000 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2001 BUG_ON(err < 0);
2002 ovs_unlock();
e297c6b7 2003
cb25142c 2004 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
d81eef1b 2005 return 0;
c19e6535 2006
d81eef1b 2007exit_unlock_free:
cd2a59e9 2008 ovs_unlock();
d81eef1b 2009 kfree_skb(reply);
c19e6535 2010 return err;
44e05eca
BP
2011}
2012
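/*
 * A minimal sketch (not part of this file) of an OVS_VPORT_CMD_NEW request,
 * assuming the datapath's dp_ifindex is already known, e.g. from
 * OVS_DP_CMD_GET; the interface name is a placeholder:
 *
 *   struct ovs_header          .dp_ifindex = <datapath ifindex>
 *   OVS_VPORT_ATTR_NAME        "eth0"                       (required)
 *   OVS_VPORT_ATTR_TYPE        OVS_VPORT_TYPE_NETDEV        (required)
 *   OVS_VPORT_ATTR_UPCALL_PID  one or more netlink portids  (required)
 *   OVS_VPORT_ATTR_PORT_NO     desired port number          (optional)
 *   OVS_VPORT_ATTR_OPTIONS     nested, type-specific        (optional)
 *
 * Leaving out OVS_VPORT_ATTR_PORT_NO lets the kernel pick the lowest free
 * port number, as ovs_vport_cmd_new() above shows.
 */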
df2c07f4 2013static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
44e05eca 2014{
f0fef760
BP
2015 struct nlattr **a = info->attrs;
2016 struct sk_buff *reply;
c19e6535 2017 struct vport *vport;
c19e6535 2018 int err;
44e05eca 2019
d81eef1b
JR
2020 reply = ovs_vport_cmd_alloc_info();
2021 if (!reply)
2022 return -ENOMEM;
2023
cd2a59e9 2024 ovs_lock();
2a4999f3 2025 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535
BP
2026 err = PTR_ERR(vport);
2027 if (IS_ERR(vport))
d81eef1b 2028 goto exit_unlock_free;
44e05eca 2029
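	/* A vport's type is fixed at creation; a SET request may carry
	 * OVS_VPORT_ATTR_TYPE only if it matches the existing type.
	 */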
6455100f 2030 if (a[OVS_VPORT_ATTR_TYPE] &&
17ec1d04 2031 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
4879d4c7 2032 err = -EINVAL;
d81eef1b 2033 goto exit_unlock_free;
c25ea534
JG
2034 }
2035
17ec1d04 2036 if (a[OVS_VPORT_ATTR_OPTIONS]) {
850b6b3b 2037 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
17ec1d04 2038 if (err)
d81eef1b 2039 goto exit_unlock_free;
17ec1d04 2040 }
1fc7083d 2041
beb1c69a 2042 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
7d16c847
PS
2043 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2044
2045 err = ovs_vport_set_upcall_portids(vport, ids);
beb1c69a
AW
2046 if (err)
2047 goto exit_unlock_free;
2048 }
c19e6535 2049
c25ea534
JG
2050 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2051 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2052 BUG_ON(err < 0);
cd2a59e9 2053 ovs_unlock();
d81eef1b 2054
cb25142c 2055 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
c25ea534
JG
2056 return 0;
2057
d81eef1b 2058exit_unlock_free:
cd2a59e9 2059 ovs_unlock();
d81eef1b 2060 kfree_skb(reply);
c19e6535 2061 return err;
064af421
BP
2062}
2063
df2c07f4 2064static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2065{
f0fef760
BP
2066 struct nlattr **a = info->attrs;
2067 struct sk_buff *reply;
c19e6535 2068 struct vport *vport;
c19e6535
BP
2069 int err;
2070
d81eef1b
JR
2071 reply = ovs_vport_cmd_alloc_info();
2072 if (!reply)
2073 return -ENOMEM;
2074
cd2a59e9 2075 ovs_lock();
2a4999f3 2076 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535 2077 err = PTR_ERR(vport);
f0fef760 2078 if (IS_ERR(vport))
d81eef1b 2079 goto exit_unlock_free;
c19e6535 2080
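	/* The OVSP_LOCAL internal port only disappears together with its
	 * datapath, so refuse to delete it here.
	 */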
df2c07f4 2081 if (vport->port_no == OVSP_LOCAL) {
f0fef760 2082 err = -EINVAL;
d81eef1b 2083 goto exit_unlock_free;
f0fef760
BP
2084 }
2085
d81eef1b
JR
2086 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2087 info->snd_seq, 0, OVS_VPORT_CMD_DEL);
2088 BUG_ON(err < 0);
850b6b3b 2089 ovs_dp_detach_port(vport);
d81eef1b 2090 ovs_unlock();
f0fef760 2091
cb25142c 2092 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
d81eef1b 2093 return 0;
f0fef760 2094
d81eef1b 2095exit_unlock_free:
cd2a59e9 2096 ovs_unlock();
d81eef1b 2097 kfree_skb(reply);
c19e6535 2098 return err;
7c40efc9
BP
2099}
2100
df2c07f4 2101static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2102{
f0fef760 2103 struct nlattr **a = info->attrs;
df2c07f4 2104 struct ovs_header *ovs_header = info->userhdr;
ed099e92 2105 struct sk_buff *reply;
c19e6535 2106 struct vport *vport;
c19e6535
BP
2107 int err;
2108
d81eef1b
JR
2109 reply = ovs_vport_cmd_alloc_info();
2110 if (!reply)
2111 return -ENOMEM;
2112
ed099e92 2113 rcu_read_lock();
2a4999f3 2114 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
c19e6535
BP
2115 err = PTR_ERR(vport);
2116 if (IS_ERR(vport))
d81eef1b
JR
2117 goto exit_unlock_free;
2118 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2119 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2120 BUG_ON(err < 0);
df2fa9b5
JG
2121 rcu_read_unlock();
2122
2123 return genlmsg_reply(reply, info);
ed099e92 2124
d81eef1b 2125exit_unlock_free:
ed099e92 2126 rcu_read_unlock();
d81eef1b 2127 kfree_skb(reply);
c19e6535
BP
2128 return err;
2129}
2130
df2c07f4 2131static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
c19e6535 2132{
df2c07f4 2133 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
c19e6535 2134 struct datapath *dp;
95b1d73a
PS
2135 int bucket = cb->args[0], skip = cb->args[1];
2136 int i, j = 0;
c19e6535 2137
03fc2881 2138 rcu_read_lock();
01ac0970 2139 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
03fc2881
JR
2140 if (!dp) {
2141 rcu_read_unlock();
f0fef760 2142 return -ENODEV;
03fc2881 2143 }
95b1d73a 2144 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
ed099e92 2145 struct vport *vport;
95b1d73a
PS
2146
2147 j = 0;
f8dfbcb7 2148 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
95b1d73a
PS
2149 if (j >= skip &&
2150 ovs_vport_cmd_fill_info(vport, skb,
28aea917 2151 NETLINK_CB(cb->skb).portid,
95b1d73a
PS
2152 cb->nlh->nlmsg_seq,
2153 NLM_F_MULTI,
2154 OVS_VPORT_CMD_NEW) < 0)
2155 goto out;
2156
2157 j++;
2158 }
2159 skip = 0;
c19e6535 2160 }
95b1d73a 2161out:
ed099e92 2162 rcu_read_unlock();
c19e6535 2163
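	/* Save the resume point for the next dump call: the hash bucket in
	 * args[0] and the position within that bucket in args[1].
	 */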
95b1d73a
PS
2164 cb->args[0] = i;
2165 cb->args[1] = j;
f0fef760 2166
95b1d73a 2167 return skb->len;
7c40efc9
BP
2168}
2169
cb25142c
PS
2170static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2171 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2172 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2173 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2174 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2175 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2176 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2177};
2178
18fd3a52 2179static struct genl_ops dp_vport_genl_ops[] = {
df2c07f4 2180 { .cmd = OVS_VPORT_CMD_NEW,
f0fef760
BP
2181 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2182 .policy = vport_policy,
df2c07f4 2183 .doit = ovs_vport_cmd_new
f0fef760 2184 },
df2c07f4 2185 { .cmd = OVS_VPORT_CMD_DEL,
f0fef760
BP
2186 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2187 .policy = vport_policy,
df2c07f4 2188 .doit = ovs_vport_cmd_del
f0fef760 2189 },
df2c07f4 2190 { .cmd = OVS_VPORT_CMD_GET,
f0fef760
BP
2191 .flags = 0, /* OK for unprivileged users. */
2192 .policy = vport_policy,
df2c07f4
JP
2193 .doit = ovs_vport_cmd_get,
2194 .dumpit = ovs_vport_cmd_dump
f0fef760 2195 },
df2c07f4 2196 { .cmd = OVS_VPORT_CMD_SET,
f0fef760
BP
2197 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2198 .policy = vport_policy,
df2c07f4 2199 .doit = ovs_vport_cmd_set,
f0fef760
BP
2200 },
2201};
2202
cb25142c
PS
2203struct genl_family dp_vport_genl_family = {
2204 .id = GENL_ID_GENERATE,
2205 .hdrsize = sizeof(struct ovs_header),
2206 .name = OVS_VPORT_FAMILY,
2207 .version = OVS_VPORT_VERSION,
2208 .maxattr = OVS_VPORT_ATTR_MAX,
2209 .netnsok = true,
2210 .parallel_ops = true,
2211 .ops = dp_vport_genl_ops,
2212 .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2213 .mcgrps = &ovs_dp_vport_multicast_group,
2214 .n_mcgrps = 1,
982b8810 2215};
ed099e92 2216
18fd3a52 2217static struct genl_family *dp_genl_families[] = {
cb25142c
PS
2218 &dp_datapath_genl_family,
2219 &dp_vport_genl_family,
2220 &dp_flow_genl_family,
2221 &dp_packet_genl_family,
982b8810 2222};
ed099e92 2223
982b8810
BP
2224static void dp_unregister_genl(int n_families)
2225{
2226 int i;
ed099e92 2227
b867ca75 2228 for (i = 0; i < n_families; i++)
cb25142c 2229 genl_unregister_family(dp_genl_families[i]);
ed099e92
BP
2230}
2231
982b8810 2232static int dp_register_genl(void)
064af421 2233{
982b8810
BP
2234 int err;
2235 int i;
064af421 2236
982b8810 2237 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
064af421 2238
cb25142c 2239 err = genl_register_family(dp_genl_families[i]);
982b8810
BP
2240 if (err)
2241 goto error;
982b8810 2242 }
9cc8b4e4 2243
982b8810 2244 return 0;
064af421
BP
2245
2246error:
cb25142c 2247 dp_unregister_genl(i);
982b8810 2248 return err;
064af421
BP
2249}
2250
2a4999f3
PS
2251static int __net_init ovs_init_net(struct net *net)
2252{
2253 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2254
2255 INIT_LIST_HEAD(&ovs_net->dps);
cd2a59e9 2256 INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
038e34ab 2257 ovs_ct_init(net);
2a4999f3
PS
2258 return 0;
2259}
2260
cabd5516
PS
2261static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2262 struct list_head *head)
2a4999f3
PS
2263{
2264 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
cabd5516
PS
2265 struct datapath *dp;
2266
2267 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2268 int i;
2269
2270 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2271 struct vport *vport;
2272
2273 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
cabd5516
PS
2274
2275 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2276 continue;
2277
e23775f2 2278 if (dev_net(vport->dev) == dnet)
cabd5516
PS
2279 list_add(&vport->detach_list, head);
2280 }
2281 }
2282 }
2283}
2284
2285static void __net_exit ovs_exit_net(struct net *dnet)
2286{
2287 struct datapath *dp, *dp_next;
2288 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2289 struct vport *vport, *vport_next;
2290 struct net *net;
2291 LIST_HEAD(head);
2a4999f3 2292
038e34ab 2293 ovs_ct_exit(dnet);
cd2a59e9
PS
2294 ovs_lock();
2295 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2296 __dp_destroy(dp);
cabd5516
PS
2297
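	/* Internal vports may have been moved into the dying namespace while
	 * their datapath lives in another one; walk every namespace to find
	 * and detach them.
	 */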
2298 rtnl_lock();
2299 for_each_net(net)
2300 list_vports_from_net(net, dnet, &head);
2301 rtnl_unlock();
2302
 2303	/* Detach all vports from the given namespace. */
2304 list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2305 list_del(&vport->detach_list);
2306 ovs_dp_detach_port(vport);
2307 }
2308
cd2a59e9
PS
2309 ovs_unlock();
2310
2311 cancel_work_sync(&ovs_net->dp_notify_work);
2a4999f3
PS
2312}
2313
2314static struct pernet_operations ovs_net_ops = {
2315 .init = ovs_init_net,
2316 .exit = ovs_exit_net,
2317 .id = &ovs_net_id,
2318 .size = sizeof(struct ovs_net),
2319};
2320
22d24ebf
BP
2321static int __init dp_init(void)
2322{
2323 int err;
2324
f3d85db3 2325 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
22d24ebf 2326
26bfaeaa 2327 pr_info("Open vSwitch switching datapath %s\n", VERSION);
064af421 2328
595e069a 2329 err = compat_init();
3544358a 2330 if (err)
533e96e7 2331 goto error;
3544358a 2332
595e069a
JS
2333 err = action_fifos_init();
2334 if (err)
2335 goto error_compat_exit;
2336
5282e284 2337 err = ovs_internal_dev_rtnl_link_register();
2c8c4fb7
AZ
2338 if (err)
2339 goto error_action_fifos_exit;
2340
5282e284
TG
2341 err = ovs_flow_init();
2342 if (err)
2343 goto error_unreg_rtnl_link;
2344
850b6b3b 2345 err = ovs_vport_init();
064af421
BP
2346 if (err)
2347 goto error_flow_exit;
2348
2a4999f3 2349 err = register_pernet_device(&ovs_net_ops);
f2459fe7
JG
2350 if (err)
2351 goto error_vport_exit;
2352
2a4999f3
PS
2353 err = register_netdevice_notifier(&ovs_dp_device_notifier);
2354 if (err)
2355 goto error_netns_exit;
2356
5a38795f
TG
2357 err = ovs_netdev_init();
2358 if (err)
2359 goto error_unreg_notifier;
2360
982b8810
BP
2361 err = dp_register_genl();
2362 if (err < 0)
5a38795f 2363 goto error_unreg_netdev;
982b8810 2364
064af421
BP
2365 return 0;
2366
5a38795f
TG
2367error_unreg_netdev:
2368 ovs_netdev_exit();
064af421 2369error_unreg_notifier:
850b6b3b 2370 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2a4999f3
PS
2371error_netns_exit:
2372 unregister_pernet_device(&ovs_net_ops);
f2459fe7 2373error_vport_exit:
850b6b3b 2374 ovs_vport_exit();
064af421 2375error_flow_exit:
850b6b3b 2376 ovs_flow_exit();
5282e284
TG
2377error_unreg_rtnl_link:
2378 ovs_internal_dev_rtnl_link_unregister();
2c8c4fb7
AZ
2379error_action_fifos_exit:
2380 action_fifos_exit();
595e069a
JS
2381error_compat_exit:
2382 compat_exit();
064af421
BP
2383error:
2384 return err;
2385}
2386
2387static void dp_cleanup(void)
2388{
982b8810 2389 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
5a38795f 2390 ovs_netdev_exit();
850b6b3b 2391 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2a4999f3
PS
2392 unregister_pernet_device(&ovs_net_ops);
2393 rcu_barrier();
850b6b3b
JG
2394 ovs_vport_exit();
2395 ovs_flow_exit();
5282e284 2396 ovs_internal_dev_rtnl_link_unregister();
2c8c4fb7 2397 action_fifos_exit();
595e069a 2398 compat_exit();
064af421
BP
2399}
2400
2401module_init(dp_init);
2402module_exit(dp_cleanup);
2403
2404MODULE_DESCRIPTION("Open vSwitch switching datapath");
2405MODULE_LICENSE("GPL");
3d0666d2 2406MODULE_VERSION(VERSION);