]> git.proxmox.com Git - ovs.git/blame - datapath/datapath.c
datapath: Prevent panic
[ovs.git] / datapath / datapath.c
CommitLineData
064af421 1/*
e23775f2 2 * Copyright (c) 2007-2015 Nicira, Inc.
a14bc59f 3 *
a9a29d22
JG
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
064af421
BP
17 */
18
dfffaef1
JP
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
064af421
BP
21#include <linux/init.h>
22#include <linux/module.h>
064af421 23#include <linux/if_arp.h>
064af421
BP
24#include <linux/if_vlan.h>
25#include <linux/in.h>
26#include <linux/ip.h>
982b8810 27#include <linux/jhash.h>
064af421
BP
28#include <linux/delay.h>
29#include <linux/time.h>
30#include <linux/etherdevice.h>
ed099e92 31#include <linux/genetlink.h>
064af421
BP
32#include <linux/kernel.h>
33#include <linux/kthread.h>
064af421
BP
34#include <linux/mutex.h>
35#include <linux/percpu.h>
36#include <linux/rcupdate.h>
37#include <linux/tcp.h>
38#include <linux/udp.h>
39#include <linux/version.h>
40#include <linux/ethtool.h>
064af421 41#include <linux/wait.h>
064af421 42#include <asm/div64.h>
656a0e37 43#include <linux/highmem.h>
064af421
BP
44#include <linux/netfilter_bridge.h>
45#include <linux/netfilter_ipv4.h>
46#include <linux/inetdevice.h>
47#include <linux/list.h>
077257b8 48#include <linux/openvswitch.h>
064af421 49#include <linux/rculist.h>
064af421 50#include <linux/dmi.h>
36956a7d 51#include <net/genetlink.h>
2a4999f3
PS
52#include <net/net_namespace.h>
53#include <net/netns/generic.h>
c0b6f594 54#include <net/nsh.h>
064af421 55
064af421 56#include "datapath.h"
038e34ab 57#include "conntrack.h"
064af421 58#include "flow.h"
d103f479 59#include "flow_table.h"
a097c0b2 60#include "flow_netlink.h"
e23775f2 61#include "gso.h"
f2459fe7 62#include "vport-internal_dev.h"
d5de5b0d 63#include "vport-netdev.h"
064af421 64
f56f0b73 65unsigned int ovs_net_id __read_mostly;
2a4999f3 66
cb25142c
PS
67static struct genl_family dp_packet_genl_family;
68static struct genl_family dp_flow_genl_family;
69static struct genl_family dp_datapath_genl_family;
70
bc619e29
JS
71static const struct nla_policy flow_policy[];
72
18fd3a52
PS
73static struct genl_multicast_group ovs_dp_flow_multicast_group = {
74 .name = OVS_FLOW_MCGROUP
cb25142c
PS
75};
76
18fd3a52
PS
77static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
78 .name = OVS_DATAPATH_MCGROUP
cb25142c
PS
79};
80
18fd3a52
PS
81struct genl_multicast_group ovs_dp_vport_multicast_group = {
82 .name = OVS_VPORT_MCGROUP
cb25142c
PS
83};
84
afad3556 85/* Check if need to build a reply message.
af465b67
PS
86 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
87 */
114fce23
SG
88static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
89 unsigned int group)
afad3556
JR
90{
91 return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
6233a1bd 92 genl_has_listeners(family, genl_info_net(info), group);
afad3556
JR
93}
94
18fd3a52 95static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
cb25142c 96 struct sk_buff *skb, struct genl_info *info)
e297c6b7 97{
0643a78b 98 genl_notify(family, skb, info, GROUP_ID(grp), GFP_KERNEL);
e297c6b7
TG
99}
100
ed099e92
BP
101/**
102 * DOC: Locking:
064af421 103 *
cd2a59e9
PS
104 * All writes e.g. Writes to device state (add/remove datapath, port, set
105 * operations on vports, etc.), Writes to other state (flow table
106 * modifications, set miscellaneous datapath parameters, etc.) are protected
107 * by ovs_lock.
ed099e92
BP
108 *
109 * Reads are protected by RCU.
110 *
111 * There are a few special cases (mostly stats) that have their own
112 * synchronization but they nest under all of above and don't interact with
113 * each other.
cd2a59e9
PS
114 *
115 * The RTNL lock nests inside ovs_mutex.
064af421 116 */
ed099e92 117
cd2a59e9
PS
118static DEFINE_MUTEX(ovs_mutex);
119
120void ovs_lock(void)
121{
122 mutex_lock(&ovs_mutex);
123}
124
125void ovs_unlock(void)
126{
127 mutex_unlock(&ovs_mutex);
128}
129
130#ifdef CONFIG_LOCKDEP
131int lockdep_ovsl_is_held(void)
132{
133 if (debug_locks)
134 return lockdep_is_held(&ovs_mutex);
135 else
136 return 1;
137}
138#endif
139
5ae440c3 140static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
f1f60b85 141 const struct sw_flow_key *,
4c7804f1
WT
142 const struct dp_upcall_info *,
143 uint32_t cutlen);
5ae440c3 144static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
7d16c847 145 const struct sw_flow_key *,
4c7804f1
WT
146 const struct dp_upcall_info *,
147 uint32_t cutlen);
064af421 148
01ac0970
AZ
149/* Must be called with rcu_read_lock. */
150static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
064af421 151{
01ac0970 152 struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
ed099e92 153
254f2dc8 154 if (dev) {
850b6b3b 155 struct vport *vport = ovs_internal_dev_get_vport(dev);
254f2dc8 156 if (vport)
01ac0970 157 return vport->dp;
254f2dc8 158 }
01ac0970
AZ
159
160 return NULL;
161}
162
163/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
af465b67
PS
164 * returned dp pointer valid.
165 */
01ac0970
AZ
166static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
167{
168 struct datapath *dp;
169
170 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
171 rcu_read_lock();
172 dp = get_dp_rcu(net, dp_ifindex);
254f2dc8
BP
173 rcu_read_unlock();
174
175 return dp;
064af421 176}
064af421 177
cd2a59e9 178/* Must be called with rcu_read_lock or ovs_mutex. */
850b6b3b 179const char *ovs_dp_name(const struct datapath *dp)
f2459fe7 180{
cd2a59e9 181 struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
e23775f2 182 return ovs_vport_name(vport);
f2459fe7
JG
183}
184
f1f60b85 185static int get_dpifindex(const struct datapath *dp)
99769a40
JG
186{
187 struct vport *local;
188 int ifindex;
189
190 rcu_read_lock();
191
95b1d73a 192 local = ovs_vport_rcu(dp, OVSP_LOCAL);
99769a40 193 if (local)
e23775f2 194 ifindex = local->dev->ifindex;
99769a40
JG
195 else
196 ifindex = 0;
197
198 rcu_read_unlock();
199
200 return ifindex;
201}
202
46c6a11d
JG
203static void destroy_dp_rcu(struct rcu_head *rcu)
204{
205 struct datapath *dp = container_of(rcu, struct datapath, rcu);
46c6a11d 206
e379e4d1 207 ovs_flow_tbl_destroy(&dp->table);
46c6a11d 208 free_percpu(dp->stats_percpu);
95b1d73a 209 kfree(dp->ports);
5ca1ba48 210 kfree(dp);
46c6a11d
JG
211}
212
95b1d73a
PS
213static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
214 u16 port_no)
215{
216 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
217}
218
aa917006 219/* Called with ovs_mutex or RCU read lock. */
95b1d73a
PS
220struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
221{
222 struct vport *vport;
95b1d73a
PS
223 struct hlist_head *head;
224
225 head = vport_hash_bucket(dp, port_no);
f8dfbcb7 226 hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
95b1d73a
PS
227 if (vport->port_no == port_no)
228 return vport;
229 }
230 return NULL;
231}
232
cd2a59e9 233/* Called with ovs_mutex. */
c19e6535 234static struct vport *new_vport(const struct vport_parms *parms)
064af421 235{
f2459fe7 236 struct vport *vport;
f2459fe7 237
850b6b3b 238 vport = ovs_vport_add(parms);
c19e6535
BP
239 if (!IS_ERR(vport)) {
240 struct datapath *dp = parms->dp;
95b1d73a 241 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
064af421 242
95b1d73a 243 hlist_add_head_rcu(&vport->dp_hash_node, head);
c19e6535 244 }
c19e6535 245 return vport;
064af421
BP
246}
247
850b6b3b 248void ovs_dp_detach_port(struct vport *p)
064af421 249{
cd2a59e9 250 ASSERT_OVSL();
064af421 251
064af421 252 /* First drop references to device. */
95b1d73a 253 hlist_del_rcu(&p->dp_hash_node);
f2459fe7 254
7237e4f4 255 /* Then destroy it. */
850b6b3b 256 ovs_vport_del(p);
064af421
BP
257}
258
fb66fbd1 259/* Must be called with rcu_read_lock. */
e74d4817 260void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
064af421 261{
a6059080 262 const struct vport *p = OVS_CB(skb)->input_vport;
064af421 263 struct datapath *dp = p->dp;
3544358a 264 struct sw_flow *flow;
ad50cb60 265 struct sw_flow_actions *sf_acts;
064af421 266 struct dp_stats_percpu *stats;
e9141eec 267 u64 *stats_counter;
4fa72a95 268 u32 n_mask_hit;
064af421 269
70dbc259 270 stats = this_cpu_ptr(dp->stats_percpu);
a063b0df 271
52a23d92 272 /* Look up flow. */
e74d4817 273 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
5604935e 274 &n_mask_hit);
52a23d92
JG
275 if (unlikely(!flow)) {
276 struct dp_upcall_info upcall;
a7d607c5 277 int error;
52a23d92 278
0e469d3b 279 memset(&upcall, 0, sizeof(upcall));
52a23d92 280 upcall.cmd = OVS_PACKET_CMD_MISS;
beb1c69a 281 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
a94ebc39 282 upcall.mru = OVS_CB(skb)->mru;
4c7804f1 283 error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
a7d607c5
LR
284 if (unlikely(error))
285 kfree_skb(skb);
286 else
287 consume_skb(skb);
52a23d92
JG
288 stats_counter = &stats->n_missed;
289 goto out;
290 }
291
e74d4817 292 ovs_flow_stats_update(flow, key->tp.flags, skb);
ad50cb60 293 sf_acts = rcu_dereference(flow->sf_acts);
7d16c847
PS
294 ovs_execute_actions(dp, skb, sf_acts, key);
295
b0b906cc 296 stats_counter = &stats->n_hit;
55574bb0 297
8819fac7 298out:
55574bb0 299 /* Update datapath statistics. */
b81deb15 300 u64_stats_update_begin(&stats->syncp);
e9141eec 301 (*stats_counter)++;
4fa72a95 302 stats->n_mask_hit += n_mask_hit;
b81deb15 303 u64_stats_update_end(&stats->syncp);
064af421
BP
304}
305
850b6b3b 306int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
f1f60b85 307 const struct sw_flow_key *key,
4c7804f1
WT
308 const struct dp_upcall_info *upcall_info,
309 uint32_t cutlen)
aa5a8fdc
JG
310{
311 struct dp_stats_percpu *stats;
312 int err;
313
28aea917 314 if (upcall_info->portid == 0) {
b063d9f0 315 err = -ENOTCONN;
b063d9f0
JG
316 goto err;
317 }
318
7257b535 319 if (!skb_is_gso(skb))
4c7804f1 320 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
7257b535 321 else
4c7804f1 322 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
d76195db
JG
323 if (err)
324 goto err;
325
326 return 0;
aa5a8fdc 327
aa5a8fdc 328err:
70dbc259 329 stats = this_cpu_ptr(dp->stats_percpu);
aa5a8fdc 330
b81deb15 331 u64_stats_update_begin(&stats->syncp);
aa5a8fdc 332 stats->n_lost++;
b81deb15 333 u64_stats_update_end(&stats->syncp);
aa5a8fdc 334
aa5a8fdc 335 return err;
982b8810
BP
336}
337
5ae440c3 338static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
f1f60b85 339 const struct sw_flow_key *key,
4c7804f1
WT
340 const struct dp_upcall_info *upcall_info,
341 uint32_t cutlen)
cb5087ca 342{
1406b9c1 343#ifdef HAVE_SKB_GSO_UDP
e66064f7 344 unsigned int gso_type = skb_shinfo(skb)->gso_type;
7257b535 345 struct sw_flow_key later_key;
1406b9c1 346#endif
7257b535 347 struct sk_buff *segs, *nskb;
b2a23c4e 348 struct ovs_skb_cb ovs_cb;
7257b535 349 int err;
cb5087ca 350
b2a23c4e 351 ovs_cb = *OVS_CB(skb);
1d04cd4e 352 segs = __skb_gso_segment(skb, NETIF_F_SG, false);
b2a23c4e 353 *OVS_CB(skb) = ovs_cb;
79089764
PS
354 if (IS_ERR(segs))
355 return PTR_ERR(segs);
d1da7669
PS
356 if (segs == NULL)
357 return -EINVAL;
1406b9c1 358#ifdef HAVE_SKB_GSO_UDP
9b277b39 359 if (gso_type & SKB_GSO_UDP) {
c135bba1 360 /* The initial flow key extracted by ovs_flow_key_extract()
9b277b39
PS
361 * in this case is for a first fragment, so we need to
362 * properly mark later fragments.
363 */
e74d4817 364 later_key = *key;
9b277b39
PS
365 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
366 }
1406b9c1 367#endif
7257b535
BP
368 /* Queue all of the segments. */
369 skb = segs;
cb5087ca 370 do {
b2a23c4e 371 *OVS_CB(skb) = ovs_cb;
1406b9c1 372#ifdef HAVE_SKB_GSO_UDP
9b277b39 373 if (gso_type & SKB_GSO_UDP && skb != segs)
e74d4817 374 key = &later_key;
1406b9c1 375#endif
4c7804f1 376 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
982b8810 377 if (err)
7257b535 378 break;
856081f6 379
36ce148c 380 } while ((skb = skb->next));
cb5087ca 381
7257b535
BP
382 /* Free all of the segments. */
383 skb = segs;
384 do {
385 nskb = skb->next;
386 if (err)
387 kfree_skb(skb);
388 else
389 consume_skb(skb);
390 } while ((skb = nskb));
391 return err;
392}
393
8b7ea2d4 394static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
6b330a60 395 unsigned int hdrlen, int actions_attrlen)
0afa2373
TG
396{
397 size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
533bea51 398 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
039fb36c
WT
399 + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
400 + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
0afa2373
TG
401
402 /* OVS_PACKET_ATTR_USERDATA */
8b7ea2d4
WZ
403 if (upcall_info->userdata)
404 size += NLA_ALIGN(upcall_info->userdata->nla_len);
405
406 /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
407 if (upcall_info->egress_tun_info)
408 size += nla_total_size(ovs_tun_key_attr_size());
0afa2373 409
0e469d3b
NM
410 /* OVS_PACKET_ATTR_ACTIONS */
411 if (upcall_info->actions_len)
6b330a60 412 size += nla_total_size(actions_attrlen);
0e469d3b 413
a94ebc39
JS
414 /* OVS_PACKET_ATTR_MRU */
415 if (upcall_info->mru)
416 size += nla_total_size(sizeof(upcall_info->mru));
417
0afa2373
TG
418 return size;
419}
420
a94ebc39
JS
421static void pad_packet(struct datapath *dp, struct sk_buff *skb)
422{
423 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
424 size_t plen = NLA_ALIGN(skb->len) - skb->len;
425
426 if (plen > 0)
0ace0a26 427 skb_put_zero(skb, plen);
a94ebc39
JS
428 }
429}
430
5ae440c3 431static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
f1f60b85 432 const struct sw_flow_key *key,
4c7804f1
WT
433 const struct dp_upcall_info *upcall_info,
434 uint32_t cutlen)
7257b535
BP
435{
436 struct ovs_header *upcall;
6161d3fd 437 struct sk_buff *nskb = NULL;
82706a6f 438 struct sk_buff *user_skb = NULL; /* to be queued to userspace */
7257b535 439 struct nlattr *nla;
978188b2 440 size_t len;
533bea51 441 unsigned int hlen;
5ae440c3
TG
442 int err, dp_ifindex;
443
444 dp_ifindex = get_dpifindex(dp);
445 if (!dp_ifindex)
446 return -ENODEV;
7257b535 447
efd8a18e 448 if (skb_vlan_tag_present(skb)) {
6161d3fd
JG
449 nskb = skb_clone(skb, GFP_ATOMIC);
450 if (!nskb)
451 return -ENOMEM;
07ac71ea 452
8063e095 453 nskb = __vlan_hwaccel_push_inside(nskb);
07ac71ea
PS
454 if (!nskb)
455 return -ENOMEM;
456
6161d3fd
JG
457 skb = nskb;
458 }
459
460 if (nla_attr_size(skb->len) > USHRT_MAX) {
461 err = -EFBIG;
462 goto out;
463 }
7257b535 464
533bea51
TG
465 /* Complete checksum if needed */
466 if (skb->ip_summed == CHECKSUM_PARTIAL &&
a0c9fedc 467 (err = skb_csum_hwoffload_help(skb, 0)))
533bea51
TG
468 goto out;
469
470 /* Older versions of OVS user space enforce alignment of the last
471 * Netlink attribute to NLA_ALIGNTO which would require extensive
472 * padding logic. Only perform zerocopy if padding is not required.
473 */
474 if (dp->user_features & OVS_DP_F_UNALIGNED)
475 hlen = skb_zerocopy_headlen(skb);
476 else
477 hlen = skb->len;
478
6b330a60
GR
479 len = upcall_msg_size(upcall_info, hlen - cutlen,
480 OVS_CB(skb)->acts_origlen);
40c08cda 481 user_skb = genlmsg_new(len, GFP_ATOMIC);
6161d3fd
JG
482 if (!user_skb) {
483 err = -ENOMEM;
484 goto out;
485 }
7257b535
BP
486
487 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
488 0, upcall_info->cmd);
489 upcall->dp_ifindex = dp_ifindex;
490
db7f2238 491 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
9a621f82 492 BUG_ON(err);
7257b535
BP
493
494 if (upcall_info->userdata)
e995e3df 495 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
462a988b 496 nla_len(upcall_info->userdata),
e995e3df 497 nla_data(upcall_info->userdata));
7257b535 498
e23775f2 499
8b7ea2d4
WZ
500 if (upcall_info->egress_tun_info) {
501 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
aad7cb91
PS
502 err = ovs_nla_put_tunnel_info(user_skb,
503 upcall_info->egress_tun_info);
8b7ea2d4
WZ
504 BUG_ON(err);
505 nla_nest_end(user_skb, nla);
506 }
507
0e469d3b
NM
508 if (upcall_info->actions_len) {
509 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
510 err = ovs_nla_put_actions(upcall_info->actions,
511 upcall_info->actions_len,
512 user_skb);
513 if (!err)
514 nla_nest_end(user_skb, nla);
515 else
516 nla_nest_cancel(user_skb, nla);
517 }
518
a94ebc39
JS
519 /* Add OVS_PACKET_ATTR_MRU */
520 if (upcall_info->mru) {
521 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
522 upcall_info->mru)) {
523 err = -ENOBUFS;
524 goto out;
525 }
526 pad_packet(dp, user_skb);
527 }
528
039fb36c
WT
529 /* Add OVS_PACKET_ATTR_LEN when packet is truncated */
530 if (cutlen > 0) {
531 if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
532 skb->len)) {
533 err = -ENOBUFS;
534 goto out;
535 }
536 pad_packet(dp, user_skb);
537 }
538
533bea51 539 /* Only reserve room for attribute header, packet data is added
af465b67
PS
540 * in skb_zerocopy()
541 */
533bea51
TG
542 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
543 err = -ENOBUFS;
544 goto out;
545 }
4c7804f1 546 nla->nla_len = nla_attr_size(skb->len - cutlen);
bed53bd1 547
4c7804f1 548 err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
2c272bd9
ZK
549 if (err)
550 goto out;
7257b535 551
ef507cec 552 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
a94ebc39 553 pad_packet(dp, user_skb);
ef507cec 554
533bea51 555 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
6161d3fd 556
533bea51 557 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
82706a6f 558 user_skb = NULL;
6161d3fd 559out:
2c272bd9
ZK
560 if (err)
561 skb_tx_error(skb);
82706a6f 562 kfree_skb(user_skb);
6161d3fd
JG
563 kfree_skb(nskb);
564 return err;
cb5087ca
BP
565}
566
df2c07f4 567static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
064af421 568{
df2c07f4 569 struct ovs_header *ovs_header = info->userhdr;
a94ebc39 570 struct net *net = sock_net(skb->sk);
982b8810 571 struct nlattr **a = info->attrs;
e0e57990 572 struct sw_flow_actions *acts;
982b8810 573 struct sk_buff *packet;
e0e57990 574 struct sw_flow *flow;
ad50cb60 575 struct sw_flow_actions *sf_acts;
f7cd0081 576 struct datapath *dp;
a6059080 577 struct vport *input_vport;
a94ebc39 578 u16 mru = 0;
3f19d399 579 int len;
d6569377 580 int err;
2e460098 581 bool log = !a[OVS_PACKET_ATTR_PROBE];
064af421 582
f7cd0081 583 err = -EINVAL;
df2c07f4 584 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
7c3072cc 585 !a[OVS_PACKET_ATTR_ACTIONS])
e5cad958 586 goto err;
064af421 587
df2c07f4 588 len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
3f19d399 589 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
f7cd0081
BP
590 err = -ENOMEM;
591 if (!packet)
e5cad958 592 goto err;
3f19d399
BP
593 skb_reserve(packet, NET_IP_ALIGN);
594
bf3d6fce 595 nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
8d5ebd83 596
a94ebc39
JS
597 /* Set packet's mru */
598 if (a[OVS_PACKET_ATTR_MRU]) {
599 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
600 packet->ignore_df = 1;
601 }
602 OVS_CB(packet)->mru = mru;
603
e0e57990 604 /* Build an sw_flow for sending this packet. */
df65fec1 605 flow = ovs_flow_alloc();
e0e57990
BP
606 err = PTR_ERR(flow);
607 if (IS_ERR(flow))
e5cad958 608 goto err_kfree_skb;
064af421 609
038e34ab
JS
610 err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
611 packet, &flow->key, log);
e0e57990 612 if (err)
9321954a 613 goto err_flow_free;
e0e57990 614
a94ebc39 615 err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
9233cef7 616 &flow->key, &acts, log);
9b405f1a
PS
617 if (err)
618 goto err_flow_free;
e0e57990 619
ff27161e 620 rcu_assign_pointer(flow->sf_acts, acts);
abff858b 621 packet->priority = flow->key.phy.priority;
3025a772 622 packet->mark = flow->key.phy.skb_mark;
e0e57990 623
d6569377 624 rcu_read_lock();
a94ebc39 625 dp = get_dp_rcu(net, ovs_header->dp_ifindex);
f7cd0081 626 err = -ENODEV;
e5cad958
BP
627 if (!dp)
628 goto err_unlock;
cc4015df 629
a6059080
AZ
630 input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
631 if (!input_vport)
632 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
633
634 if (!input_vport)
635 goto err_unlock;
636
e23775f2 637 packet->dev = input_vport->dev;
a6059080 638 OVS_CB(packet)->input_vport = input_vport;
ad50cb60 639 sf_acts = rcu_dereference(flow->sf_acts);
a6059080 640
e9141eec 641 local_bh_disable();
7d16c847 642 err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
e9141eec 643 local_bh_enable();
d6569377 644 rcu_read_unlock();
e0e57990 645
a1c564be 646 ovs_flow_free(flow, false);
e5cad958 647 return err;
064af421 648
e5cad958
BP
649err_unlock:
650 rcu_read_unlock();
9321954a 651err_flow_free:
a1c564be 652 ovs_flow_free(flow, false);
e5cad958
BP
653err_kfree_skb:
654 kfree_skb(packet);
655err:
d6569377 656 return err;
064af421
BP
657}
658
df2c07f4 659static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
7c3072cc 660 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
df2c07f4
JP
661 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
662 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
2e460098 663 [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
a94ebc39 664 [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
982b8810
BP
665};
666
18fd3a52 667static struct genl_ops dp_packet_genl_ops[] = {
df2c07f4 668 { .cmd = OVS_PACKET_CMD_EXECUTE,
a6a8674d 669 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
982b8810 670 .policy = packet_policy,
df2c07f4 671 .doit = ovs_packet_cmd_execute
982b8810
BP
672 }
673};
674
ba63fe26 675static struct genl_family dp_packet_genl_family __ro_after_init = {
cb25142c
PS
676 .hdrsize = sizeof(struct ovs_header),
677 .name = OVS_PACKET_FAMILY,
678 .version = OVS_PACKET_VERSION,
679 .maxattr = OVS_PACKET_ATTR_MAX,
680 .netnsok = true,
681 .parallel_ops = true,
682 .ops = dp_packet_genl_ops,
683 .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
ba63fe26 684 .module = THIS_MODULE,
cb25142c
PS
685};
686
f1f60b85 687static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
4fa72a95 688 struct ovs_dp_megaflow_stats *mega_stats)
064af421 689{
d6569377 690 int i;
f180c2e2 691
4fa72a95
AZ
692 memset(mega_stats, 0, sizeof(*mega_stats));
693
994dc286 694 stats->n_flows = ovs_flow_tbl_count(&dp->table);
4fa72a95 695 mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
064af421 696
7257b535 697 stats->n_hit = stats->n_missed = stats->n_lost = 0;
4fa72a95 698
d6569377
BP
699 for_each_possible_cpu(i) {
700 const struct dp_stats_percpu *percpu_stats;
701 struct dp_stats_percpu local_stats;
821cb9fa 702 unsigned int start;
44e05eca 703
d6569377 704 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
064af421 705
d6569377 706 do {
b81deb15 707 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
d6569377 708 local_stats = *percpu_stats;
b81deb15 709 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
064af421 710
d6569377
BP
711 stats->n_hit += local_stats.n_hit;
712 stats->n_missed += local_stats.n_missed;
713 stats->n_lost += local_stats.n_lost;
4fa72a95 714 mega_stats->n_mask_hit += local_stats.n_mask_hit;
d6569377
BP
715 }
716}
064af421 717
bc619e29
JS
718static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
719{
720 return ovs_identifier_is_ufid(sfid) &&
721 !(ufid_flags & OVS_UFID_F_OMIT_KEY);
722}
723
724static bool should_fill_mask(uint32_t ufid_flags)
725{
726 return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
727}
728
729static bool should_fill_actions(uint32_t ufid_flags)
0afa2373 730{
bc619e29
JS
731 return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
732}
733
734static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
735 const struct sw_flow_id *sfid,
736 uint32_t ufid_flags)
737{
738 size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
739
740 /* OVS_FLOW_ATTR_UFID */
741 if (sfid && ovs_identifier_is_ufid(sfid))
742 len += nla_total_size(sfid->ufid_len);
743
744 /* OVS_FLOW_ATTR_KEY */
745 if (!sfid || should_fill_key(sfid, ufid_flags))
746 len += nla_total_size(ovs_key_attr_size());
747
748 /* OVS_FLOW_ATTR_MASK */
749 if (should_fill_mask(ufid_flags))
750 len += nla_total_size(ovs_key_attr_size());
751
752 /* OVS_FLOW_ATTR_ACTIONS */
753 if (should_fill_actions(ufid_flags))
c3bb15b3 754 len += nla_total_size(acts->orig_len);
bc619e29
JS
755
756 return len
91b37647 757 + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
0afa2373 758 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
91b37647 759 + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
0afa2373
TG
760}
761
f1948bb9
JS
762/* Called with ovs_mutex or RCU read lock. */
763static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
764 struct sk_buff *skb)
765{
766 struct ovs_flow_stats stats;
767 __be16 tcp_flags;
768 unsigned long used;
769
b0f3a2fe 770 ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
f71db6b1 771
b0f3a2fe 772 if (used &&
89be7da8
PS
773 nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
774 OVS_FLOW_ATTR_PAD))
f1948bb9 775 return -EMSGSIZE;
d6569377 776
b0f3a2fe 777 if (stats.n_packets &&
91b37647
PS
778 nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
779 sizeof(struct ovs_flow_stats), &stats,
780 OVS_FLOW_ATTR_PAD))
f1948bb9 781 return -EMSGSIZE;
b0b906cc 782
b0f3a2fe
PS
783 if ((u8)ntohs(tcp_flags) &&
784 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
f1948bb9
JS
785 return -EMSGSIZE;
786
787 return 0;
788}
789
790/* Called with ovs_mutex or RCU read lock. */
791static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
792 struct sk_buff *skb, int skb_orig_len)
793{
794 struct nlattr *start;
795 int err;
d6569377 796
df2c07f4 797 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
30053024
BP
798 * this is the first flow to be dumped into 'skb'. This is unusual for
799 * Netlink but individual action lists can be longer than
800 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
801 * The userspace caller can always fetch the actions separately if it
802 * really wants them. (Most userspace callers in fact don't care.)
803 *
804 * This can only fail for dump operations because the skb is always
805 * properly sized for single flows.
806 */
9b405f1a 807 start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
f6f481ee 808 if (start) {
f44ccce1
PS
809 const struct sw_flow_actions *sf_acts;
810
780ec6ae 811 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
a097c0b2
PS
812 err = ovs_nla_put_actions(sf_acts->actions,
813 sf_acts->actions_len, skb);
f71db6b1 814
0a25b039
BP
815 if (!err)
816 nla_nest_end(skb, start);
817 else {
818 if (skb_orig_len)
f1948bb9 819 return err;
0a25b039
BP
820
821 nla_nest_cancel(skb, start);
822 }
f1948bb9
JS
823 } else if (skb_orig_len) {
824 return -EMSGSIZE;
825 }
826
827 return 0;
828}
829
830/* Called with ovs_mutex or RCU read lock. */
2c622e5a 831static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
f1948bb9 832 struct sk_buff *skb, u32 portid,
bc619e29 833 u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
f1948bb9
JS
834{
835 const int skb_orig_len = skb->len;
836 struct ovs_header *ovs_header;
837 int err;
838
7d16c847
PS
839 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
840 flags, cmd);
f1948bb9
JS
841 if (!ovs_header)
842 return -EMSGSIZE;
7d16c847 843
f1948bb9
JS
844 ovs_header->dp_ifindex = dp_ifindex;
845
bc619e29 846 err = ovs_nla_put_identifier(flow, skb);
db7f2238
JS
847 if (err)
848 goto error;
849
bc619e29
JS
850 if (should_fill_key(&flow->id, ufid_flags)) {
851 err = ovs_nla_put_masked_key(flow, skb);
852 if (err)
853 goto error;
854 }
855
856 if (should_fill_mask(ufid_flags)) {
857 err = ovs_nla_put_mask(flow, skb);
858 if (err)
859 goto error;
860 }
f1948bb9
JS
861
862 err = ovs_flow_cmd_fill_stats(flow, skb);
863 if (err)
864 goto error;
865
bc619e29
JS
866 if (should_fill_actions(ufid_flags)) {
867 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
868 if (err)
869 goto error;
870 }
37a1300c 871
23b48dc1
TG
872 genlmsg_end(skb, ovs_header);
873 return 0;
d6569377 874
37a1300c 875error:
df2c07f4 876 genlmsg_cancel(skb, ovs_header);
d6569377 877 return err;
44e05eca
BP
878}
879
f71db6b1
JR
880/* May not be called with RCU read lock. */
881static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
bc619e29 882 const struct sw_flow_id *sfid,
afad3556 883 struct genl_info *info,
bc619e29
JS
884 bool always,
885 uint32_t ufid_flags)
44e05eca 886{
afad3556 887 struct sk_buff *skb;
bc619e29 888 size_t len;
d6569377 889
114fce23
SG
890 if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
891 GROUP_ID(&ovs_dp_flow_multicast_group)))
afad3556
JR
892 return NULL;
893
bc619e29 894 len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
40c08cda 895 skb = genlmsg_new(len, GFP_KERNEL);
afad3556
JR
896 if (!skb)
897 return ERR_PTR(-ENOMEM);
898
899 return skb;
37a1300c 900}
8d5ebd83 901
f71db6b1 902/* Called with ovs_mutex. */
7d16c847 903static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
f71db6b1
JR
904 int dp_ifindex,
905 struct genl_info *info, u8 cmd,
bc619e29 906 bool always, u32 ufid_flags)
37a1300c
BP
907{
908 struct sk_buff *skb;
909 int retval;
d6569377 910
bc619e29
JS
911 skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
912 &flow->id, info, always, ufid_flags);
a6ddcc9a 913 if (IS_ERR_OR_NULL(skb))
afad3556 914 return skb;
d6569377 915
2c622e5a 916 retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
f71db6b1 917 info->snd_portid, info->snd_seq, 0,
bc619e29 918 cmd, ufid_flags);
37a1300c 919 BUG_ON(retval < 0);
d6569377 920 return skb;
064af421
BP
921}
922
0c9fd022 923static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
064af421 924{
a94ebc39 925 struct net *net = sock_net(skb->sk);
37a1300c 926 struct nlattr **a = info->attrs;
df2c07f4 927 struct ovs_header *ovs_header = info->userhdr;
bc619e29 928 struct sw_flow *flow = NULL, *new_flow;
a1c564be 929 struct sw_flow_mask mask;
37a1300c 930 struct sk_buff *reply;
9c52546b 931 struct datapath *dp;
0c9fd022 932 struct sw_flow_actions *acts;
a1c564be 933 struct sw_flow_match match;
bc619e29 934 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
bc4a05c6 935 int error;
9233cef7 936 bool log = !a[OVS_FLOW_ATTR_PROBE];
064af421 937
6740b721 938 /* Must have key and actions. */
37a1300c 939 error = -EINVAL;
a473df5b 940 if (!a[OVS_FLOW_ATTR_KEY]) {
7d16c847 941 OVS_NLERR(log, "Flow key attr not present in new flow.");
37a1300c 942 goto error;
a473df5b
JG
943 }
944 if (!a[OVS_FLOW_ATTR_ACTIONS]) {
7d16c847 945 OVS_NLERR(log, "Flow actions attr not present in new flow.");
6740b721 946 goto error;
a473df5b 947 }
a1c564be 948
6740b721 949 /* Most of the time we need to allocate a new flow, do it before
af465b67
PS
950 * locking.
951 */
6740b721
JR
952 new_flow = ovs_flow_alloc();
953 if (IS_ERR(new_flow)) {
954 error = PTR_ERR(new_flow);
955 goto error;
956 }
957
958 /* Extract key. */
9b94fa6c 959 ovs_match_init(&match, &new_flow->key, false, &mask);
038e34ab 960 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
9233cef7 961 a[OVS_FLOW_ATTR_MASK], log);
37a1300c 962 if (error)
6740b721 963 goto err_kfree_flow;
064af421 964
bc619e29
JS
965 /* Extract flow identifier. */
966 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
1d334d4f 967 &new_flow->key, log);
bc619e29
JS
968 if (error)
969 goto err_kfree_flow;
9b405f1a 970
1d334d4f 971 /* unmasked key is needed to match when ufid is not used. */
972 if (ovs_identifier_is_key(&new_flow->id))
973 match.key = new_flow->id.unmasked_key;
974
975 ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
976
6740b721 977 /* Validate actions. */
a94ebc39
JS
978 error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
979 &new_flow->key, &acts, log);
0c9fd022 980 if (error) {
7d16c847 981 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
4f67b12a 982 goto err_kfree_flow;
6740b721
JR
983 }
984
bc619e29
JS
985 reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
986 ufid_flags);
6740b721
JR
987 if (IS_ERR(reply)) {
988 error = PTR_ERR(reply);
989 goto err_kfree_acts;
37a1300c
BP
990 }
991
cd2a59e9 992 ovs_lock();
a94ebc39 993 dp = get_dp(net, ovs_header->dp_ifindex);
6740b721
JR
994 if (unlikely(!dp)) {
995 error = -ENODEV;
cd2a59e9 996 goto err_unlock_ovs;
6740b721 997 }
bc619e29 998
a1c564be 999 /* Check if this is a duplicate flow */
bc619e29
JS
1000 if (ovs_identifier_is_ufid(&new_flow->id))
1001 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
1002 if (!flow)
1d334d4f 1003 flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
6740b721
JR
1004 if (likely(!flow)) {
1005 rcu_assign_pointer(new_flow->sf_acts, acts);
d6569377 1006
d6569377 1007 /* Put flow in bucket. */
6740b721
JR
1008 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
1009 if (unlikely(error)) {
0585f7a8 1010 acts = NULL;
6740b721
JR
1011 goto err_unlock_ovs;
1012 }
1013
1014 if (unlikely(reply)) {
2c622e5a 1015 error = ovs_flow_cmd_fill_info(new_flow,
6740b721
JR
1016 ovs_header->dp_ifindex,
1017 reply, info->snd_portid,
1018 info->snd_seq, 0,
bc619e29
JS
1019 OVS_FLOW_CMD_NEW,
1020 ufid_flags);
6740b721 1021 BUG_ON(error < 0);
0585f7a8 1022 }
6740b721 1023 ovs_unlock();
d6569377 1024 } else {
0c9fd022
JR
1025 struct sw_flow_actions *old_acts;
1026
d6569377
BP
1027 /* Bail out if we're not allowed to modify an existing flow.
1028 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1029 * because Generic Netlink treats the latter as a dump
1030 * request. We also accept NLM_F_EXCL in case that bug ever
1031 * gets fixed.
1032 */
6740b721
JR
1033 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1034 | NLM_F_EXCL))) {
1035 error = -EEXIST;
cd2a59e9 1036 goto err_unlock_ovs;
6740b721 1037 }
bc619e29
JS
1038 /* The flow identifier has to be the same for flow updates.
1039 * Look for any overlapping flow.
1040 */
1041 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1042 if (ovs_identifier_is_key(&flow->id))
1043 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1044 &match);
1045 else /* UFID matches but key is different */
1046 flow = NULL;
3440e4bc
AW
1047 if (!flow) {
1048 error = -ENOENT;
1049 goto err_unlock_ovs;
1050 }
6740b721 1051 }
0c9fd022
JR
1052 /* Update actions. */
1053 old_acts = ovsl_dereference(flow->sf_acts);
1054 rcu_assign_pointer(flow->sf_acts, acts);
0c9fd022 1055
6740b721 1056 if (unlikely(reply)) {
2c622e5a 1057 error = ovs_flow_cmd_fill_info(flow,
6740b721
JR
1058 ovs_header->dp_ifindex,
1059 reply, info->snd_portid,
1060 info->snd_seq, 0,
bc619e29
JS
1061 OVS_FLOW_CMD_NEW,
1062 ufid_flags);
6740b721
JR
1063 BUG_ON(error < 0);
1064 }
1065 ovs_unlock();
0c9fd022 1066
e23775f2 1067 ovs_nla_free_flow_actions_rcu(old_acts);
6740b721 1068 ovs_flow_free(new_flow, false);
0c9fd022 1069 }
6740b721
JR
1070
1071 if (reply)
cb25142c 1072 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
0c9fd022
JR
1073 return 0;
1074
0c9fd022
JR
1075err_unlock_ovs:
1076 ovs_unlock();
6740b721
JR
1077 kfree_skb(reply);
1078err_kfree_acts:
e23775f2 1079 ovs_nla_free_flow_actions(acts);
6740b721
JR
1080err_kfree_flow:
1081 ovs_flow_free(new_flow, false);
0c9fd022
JR
1082error:
1083 return error;
1084}
1085
cc561abf 1086/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
a94ebc39
JS
1087static struct sw_flow_actions *get_flow_actions(struct net *net,
1088 const struct nlattr *a,
cc561abf 1089 const struct sw_flow_key *key,
9233cef7
JR
1090 const struct sw_flow_mask *mask,
1091 bool log)
cc561abf
PS
1092{
1093 struct sw_flow_actions *acts;
1094 struct sw_flow_key masked_key;
1095 int error;
1096
ad4adec2 1097 ovs_flow_mask_key(&masked_key, key, true, mask);
a94ebc39 1098 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
cc561abf 1099 if (error) {
9233cef7 1100 OVS_NLERR(log,
7d16c847 1101 "Actions may not be safe on all matching packets");
cc561abf
PS
1102 return ERR_PTR(error);
1103 }
1104
1105 return acts;
1106}
1107
850c2a4d
TZ
1108/* Factor out match-init and action-copy to avoid
1109 * "Wframe-larger-than=1024" warning. Because mask is only
1110 * used to get actions, we new a function to save some
1111 * stack space.
1112 *
1113 * If there are not key and action attrs, we return 0
1114 * directly. In the case, the caller will also not use the
1115 * match as before. If there is action attr, we try to get
1116 * actions and save them to *acts. Before returning from
1117 * the function, we reset the match->mask pointer. Because
1118 * we should not to return match object with dangling reference
1119 * to mask.
1120 * */
1121static int ovs_nla_init_match_and_action(struct net *net,
1122 struct sw_flow_match *match,
1123 struct sw_flow_key *key,
1124 struct nlattr **a,
1125 struct sw_flow_actions **acts,
1126 bool log)
1127{
1128 struct sw_flow_mask mask;
1129 int error = 0;
1130
1131 if (a[OVS_FLOW_ATTR_KEY]) {
1132 ovs_match_init(match, key, true, &mask);
1133 error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
1134 a[OVS_FLOW_ATTR_MASK], log);
1135 if (error)
1136 goto error;
1137 }
1138
1139 if (a[OVS_FLOW_ATTR_ACTIONS]) {
1140 if (!a[OVS_FLOW_ATTR_KEY]) {
1141 OVS_NLERR(log,
1142 "Flow key attribute not present in set flow.");
39f86158
CJ
1143 error = -EINVAL;
1144 goto error;
850c2a4d
TZ
1145 }
1146
1147 *acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
1148 &mask, log);
1149 if (IS_ERR(*acts)) {
1150 error = PTR_ERR(*acts);
1151 goto error;
1152 }
1153 }
1154
1155 /* On success, error is 0. */
1156error:
1157 match->mask = NULL;
1158 return error;
1159}
1160
0c9fd022
JR
1161static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1162{
a94ebc39 1163 struct net *net = sock_net(skb->sk);
0c9fd022
JR
1164 struct nlattr **a = info->attrs;
1165 struct ovs_header *ovs_header = info->userhdr;
1d2a1b5f 1166 struct sw_flow_key key;
0c9fd022 1167 struct sw_flow *flow;
0c9fd022
JR
1168 struct sk_buff *reply = NULL;
1169 struct datapath *dp;
6740b721 1170 struct sw_flow_actions *old_acts = NULL, *acts = NULL;
0c9fd022 1171 struct sw_flow_match match;
bc619e29
JS
1172 struct sw_flow_id sfid;
1173 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
b24baa1a 1174 int error = 0;
9233cef7 1175 bool log = !a[OVS_FLOW_ATTR_PROBE];
bc619e29 1176 bool ufid_present;
0c9fd022 1177
bc619e29 1178 ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
850c2a4d 1179 if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
b24baa1a
PS
1180 OVS_NLERR(log,
1181 "Flow set message rejected, Key attribute missing.");
850c2a4d 1182 return -EINVAL;
b24baa1a 1183 }
850c2a4d
TZ
1184
1185 error = ovs_nla_init_match_and_action(net, &match, &key, a,
1186 &acts, log);
0c9fd022
JR
1187 if (error)
1188 goto error;
d6569377 1189
850c2a4d 1190 if (acts) {
ff27161e 1191 /* Can allocate before locking if have acts. */
bc619e29
JS
1192 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1193 ufid_flags);
6740b721
JR
1194 if (IS_ERR(reply)) {
1195 error = PTR_ERR(reply);
1196 goto err_kfree_acts;
90b8c2f7 1197 }
0c9fd022
JR
1198 }
1199
1200 ovs_lock();
a94ebc39 1201 dp = get_dp(net, ovs_header->dp_ifindex);
6740b721
JR
1202 if (unlikely(!dp)) {
1203 error = -ENODEV;
0c9fd022 1204 goto err_unlock_ovs;
6740b721 1205 }
0c9fd022 1206 /* Check that the flow exists. */
bc619e29
JS
1207 if (ufid_present)
1208 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1209 else
1210 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
6740b721
JR
1211 if (unlikely(!flow)) {
1212 error = -ENOENT;
0c9fd022 1213 goto err_unlock_ovs;
6740b721 1214 }
3440e4bc 1215
0c9fd022 1216 /* Update actions, if present. */
6740b721 1217 if (likely(acts)) {
0c9fd022
JR
1218 old_acts = ovsl_dereference(flow->sf_acts);
1219 rcu_assign_pointer(flow->sf_acts, acts);
6740b721
JR
1220
1221 if (unlikely(reply)) {
2c622e5a 1222 error = ovs_flow_cmd_fill_info(flow,
6740b721
JR
1223 ovs_header->dp_ifindex,
1224 reply, info->snd_portid,
1225 info->snd_seq, 0,
bc619e29
JS
1226 OVS_FLOW_CMD_NEW,
1227 ufid_flags);
6740b721
JR
1228 BUG_ON(error < 0);
1229 }
1230 } else {
1231 /* Could not alloc without acts before locking. */
7d16c847 1232 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
bc619e29
JS
1233 info, OVS_FLOW_CMD_NEW, false,
1234 ufid_flags);
1235
6740b721
JR
1236 if (unlikely(IS_ERR(reply))) {
1237 error = PTR_ERR(reply);
1238 goto err_unlock_ovs;
1239 }
9c52546b 1240 }
0c9fd022 1241
0c9fd022
JR
1242 /* Clear stats. */
1243 if (a[OVS_FLOW_ATTR_CLEAR])
1244 ovs_flow_stats_clear(flow);
cd2a59e9 1245 ovs_unlock();
37a1300c 1246
6740b721 1247 if (reply)
cb25142c 1248 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
6740b721 1249 if (old_acts)
e23775f2 1250 ovs_nla_free_flow_actions_rcu(old_acts);
7d16c847 1251
d6569377 1252 return 0;
704a1e09 1253
cd2a59e9
PS
1254err_unlock_ovs:
1255 ovs_unlock();
6740b721
JR
1256 kfree_skb(reply);
1257err_kfree_acts:
e23775f2 1258 ovs_nla_free_flow_actions(acts);
37a1300c 1259error:
9c52546b 1260 return error;
704a1e09
BP
1261}
1262
df2c07f4 1263static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
704a1e09 1264{
37a1300c 1265 struct nlattr **a = info->attrs;
df2c07f4 1266 struct ovs_header *ovs_header = info->userhdr;
038e34ab 1267 struct net *net = sock_net(skb->sk);
37a1300c 1268 struct sw_flow_key key;
37a1300c 1269 struct sk_buff *reply;
704a1e09 1270 struct sw_flow *flow;
9c52546b 1271 struct datapath *dp;
a1c564be 1272 struct sw_flow_match match;
bc619e29
JS
1273 struct sw_flow_id ufid;
1274 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1275 int err = 0;
9233cef7 1276 bool log = !a[OVS_FLOW_ATTR_PROBE];
bc619e29 1277 bool ufid_present;
704a1e09 1278
bc619e29
JS
1279 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1280 if (a[OVS_FLOW_ATTR_KEY]) {
9b94fa6c 1281 ovs_match_init(&match, &key, true, NULL);
038e34ab 1282 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
bc619e29
JS
1283 log);
1284 } else if (!ufid_present) {
9233cef7
JR
1285 OVS_NLERR(log,
1286 "Flow get message rejected, Key attribute missing.");
bc619e29 1287 err = -EINVAL;
1b936472 1288 }
37a1300c
BP
1289 if (err)
1290 return err;
704a1e09 1291
cd2a59e9 1292 ovs_lock();
2a4999f3 1293 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
cd2a59e9
PS
1294 if (!dp) {
1295 err = -ENODEV;
1296 goto unlock;
1297 }
704a1e09 1298
bc619e29
JS
1299 if (ufid_present)
1300 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1301 else
1302 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
3440e4bc 1303 if (!flow) {
cd2a59e9
PS
1304 err = -ENOENT;
1305 goto unlock;
1306 }
d6569377 1307
7d16c847 1308 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
bc619e29 1309 OVS_FLOW_CMD_NEW, true, ufid_flags);
cd2a59e9
PS
1310 if (IS_ERR(reply)) {
1311 err = PTR_ERR(reply);
1312 goto unlock;
1313 }
36956a7d 1314
cd2a59e9 1315 ovs_unlock();
37a1300c 1316 return genlmsg_reply(reply, info);
cd2a59e9
PS
1317unlock:
1318 ovs_unlock();
1319 return err;
d6569377 1320}
9c52546b 1321
df2c07f4 1322static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
d6569377 1323{
37a1300c 1324 struct nlattr **a = info->attrs;
df2c07f4 1325 struct ovs_header *ovs_header = info->userhdr;
038e34ab 1326 struct net *net = sock_net(skb->sk);
37a1300c 1327 struct sw_flow_key key;
37a1300c 1328 struct sk_buff *reply;
bc619e29 1329 struct sw_flow *flow = NULL;
d6569377 1330 struct datapath *dp;
a1c564be 1331 struct sw_flow_match match;
bc619e29
JS
1332 struct sw_flow_id ufid;
1333 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
d6569377 1334 int err;
9233cef7 1335 bool log = !a[OVS_FLOW_ATTR_PROBE];
bc619e29 1336 bool ufid_present;
36956a7d 1337
bc619e29
JS
1338 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1339 if (a[OVS_FLOW_ATTR_KEY]) {
9b94fa6c 1340 ovs_match_init(&match, &key, true, NULL);
038e34ab
JS
1341 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1342 NULL, log);
cde7f3ba
JR
1343 if (unlikely(err))
1344 return err;
1345 }
1346
cd2a59e9 1347 ovs_lock();
2a4999f3 1348 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
cde7f3ba 1349 if (unlikely(!dp)) {
cd2a59e9
PS
1350 err = -ENODEV;
1351 goto unlock;
1352 }
7d16c847 1353
bc619e29 1354 if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
994dc286 1355 err = ovs_flow_tbl_flush(&dp->table);
cd2a59e9
PS
1356 goto unlock;
1357 }
7d16c847 1358
bc619e29
JS
1359 if (ufid_present)
1360 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1361 else
1362 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
3440e4bc 1363 if (unlikely(!flow)) {
cd2a59e9
PS
1364 err = -ENOENT;
1365 goto unlock;
1366 }
d6569377 1367
994dc286 1368 ovs_flow_tbl_remove(&dp->table, flow);
cde7f3ba 1369 ovs_unlock();
37a1300c 1370
46051cf8 1371 reply = ovs_flow_cmd_alloc_info(rcu_dereference_raw(flow->sf_acts),
bc619e29 1372 &flow->id, info, false, ufid_flags);
cde7f3ba
JR
1373
1374 if (likely(reply)) {
1375 if (likely(!IS_ERR(reply))) {
7d16c847
PS
1376 rcu_read_lock(); /*To keep RCU checker happy. */
1377 err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
cde7f3ba
JR
1378 reply, info->snd_portid,
1379 info->snd_seq, 0,
bc619e29
JS
1380 OVS_FLOW_CMD_DEL,
1381 ufid_flags);
cde7f3ba
JR
1382 rcu_read_unlock();
1383 BUG_ON(err < 0);
cb25142c 1384 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
cde7f3ba 1385 } else {
cb25142c
PS
1386 genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
1387 GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));
1388
cde7f3ba 1389 }
afad3556 1390 }
37a1300c 1391
a1c564be 1392 ovs_flow_free(flow, true);
37a1300c 1393 return 0;
cd2a59e9
PS
1394unlock:
1395 ovs_unlock();
1396 return err;
37a1300c
BP
1397}
1398
df2c07f4 1399static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
37a1300c 1400{
bc619e29 1401 struct nlattr *a[__OVS_FLOW_ATTR_MAX];
df2c07f4 1402 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
994dc286 1403 struct table_instance *ti;
37a1300c 1404 struct datapath *dp;
bc619e29
JS
1405 u32 ufid_flags;
1406 int err;
1407
1408 err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
15702dc9 1409 OVS_FLOW_ATTR_MAX, flow_policy, NULL);
bc619e29
JS
1410 if (err)
1411 return err;
1412 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
37a1300c 1413
f44ccce1 1414 rcu_read_lock();
01ac0970 1415 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
cd2a59e9 1416 if (!dp) {
f44ccce1 1417 rcu_read_unlock();
37a1300c 1418 return -ENODEV;
cd2a59e9 1419 }
37a1300c 1420
994dc286 1421 ti = rcu_dereference(dp->table.ti);
37a1300c 1422 for (;;) {
37a1300c
BP
1423 struct sw_flow *flow;
1424 u32 bucket, obj;
1425
1426 bucket = cb->args[0];
1427 obj = cb->args[1];
994dc286 1428 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
3544358a 1429 if (!flow)
37a1300c
BP
1430 break;
1431
2c622e5a 1432 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
28aea917 1433 NETLINK_CB(cb->skb).portid,
37a1300c 1434 cb->nlh->nlmsg_seq, NLM_F_MULTI,
bc619e29 1435 OVS_FLOW_CMD_NEW, ufid_flags) < 0)
37a1300c
BP
1436 break;
1437
1438 cb->args[0] = bucket;
1439 cb->args[1] = obj;
1440 }
f44ccce1 1441 rcu_read_unlock();
37a1300c 1442 return skb->len;
704a1e09
BP
1443}
1444
cb25142c
PS
1445static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1446 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
9233cef7 1447 [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
cb25142c
PS
1448 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1449 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
9233cef7 1450 [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
bc619e29
JS
1451 [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1452 [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
cb25142c
PS
1453};
1454
18fd3a52 1455static struct genl_ops dp_flow_genl_ops[] = {
df2c07f4 1456 { .cmd = OVS_FLOW_CMD_NEW,
a6a8674d 1457 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
37a1300c 1458 .policy = flow_policy,
0c9fd022 1459 .doit = ovs_flow_cmd_new
37a1300c 1460 },
df2c07f4 1461 { .cmd = OVS_FLOW_CMD_DEL,
a6a8674d 1462 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
37a1300c 1463 .policy = flow_policy,
df2c07f4 1464 .doit = ovs_flow_cmd_del
37a1300c 1465 },
df2c07f4 1466 { .cmd = OVS_FLOW_CMD_GET,
37a1300c
BP
1467 .flags = 0, /* OK for unprivileged users. */
1468 .policy = flow_policy,
df2c07f4
JP
1469 .doit = ovs_flow_cmd_get,
1470 .dumpit = ovs_flow_cmd_dump
37a1300c 1471 },
df2c07f4 1472 { .cmd = OVS_FLOW_CMD_SET,
a6a8674d 1473 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
37a1300c 1474 .policy = flow_policy,
0c9fd022 1475 .doit = ovs_flow_cmd_set,
37a1300c
BP
1476 },
1477};
1478
ba63fe26 1479static struct genl_family dp_flow_genl_family __ro_after_init = {
df2c07f4 1480 .hdrsize = sizeof(struct ovs_header),
cb25142c
PS
1481 .name = OVS_FLOW_FAMILY,
1482 .version = OVS_FLOW_VERSION,
1483 .maxattr = OVS_FLOW_ATTR_MAX,
b3dcb73c 1484 .netnsok = true,
cb25142c
PS
1485 .parallel_ops = true,
1486 .ops = dp_flow_genl_ops,
1487 .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1488 .mcgrps = &ovs_dp_flow_multicast_group,
1489 .n_mcgrps = 1,
ba63fe26 1490 .module = THIS_MODULE,
aaff4b55
BP
1491};
1492
0afa2373
TG
1493static size_t ovs_dp_cmd_msg_size(void)
1494{
1495 size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1496
1497 msgsize += nla_total_size(IFNAMSIZ);
91b37647
PS
1498 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1499 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
300af20a 1500 msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
0afa2373
TG
1501
1502 return msgsize;
1503}
1504
d637497c 1505/* Called with ovs_mutex. */
df2c07f4 1506static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
28aea917 1507 u32 portid, u32 seq, u32 flags, u8 cmd)
064af421 1508{
df2c07f4 1509 struct ovs_header *ovs_header;
e926dfe3 1510 struct ovs_dp_stats dp_stats;
4fa72a95 1511 struct ovs_dp_megaflow_stats dp_megaflow_stats;
064af421
BP
1512 int err;
1513
28aea917 1514 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
aaff4b55 1515 flags, cmd);
df2c07f4 1516 if (!ovs_header)
aaff4b55 1517 goto error;
064af421 1518
b063d9f0 1519 ovs_header->dp_ifindex = get_dpifindex(dp);
064af421 1520
850b6b3b 1521 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
064af421 1522 if (err)
d6569377 1523 goto nla_put_failure;
064af421 1524
4fa72a95 1525 get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
91b37647
PS
1526 if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1527 &dp_stats, OVS_DP_ATTR_PAD))
4fa72a95
AZ
1528 goto nla_put_failure;
1529
91b37647
PS
1530 if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1531 sizeof(struct ovs_dp_megaflow_stats),
1532 &dp_megaflow_stats, OVS_DP_ATTR_PAD))
c3cc8c03 1533 goto nla_put_failure;
d6569377 1534
c58cc9a4
TG
1535 if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1536 goto nla_put_failure;
1537
23b48dc1
TG
1538 genlmsg_end(skb, ovs_header);
1539 return 0;
d6569377
BP
1540
1541nla_put_failure:
df2c07f4 1542 genlmsg_cancel(skb, ovs_header);
aaff4b55
BP
1543error:
1544 return -EMSGSIZE;
d6569377
BP
1545}
1546
40c08cda 1547static struct sk_buff *ovs_dp_cmd_alloc_info(void)
d6569377 1548{
40c08cda 1549 return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
aaff4b55 1550}
9dca7bd5 1551
aa917006 1552/* Called with rcu_read_lock or ovs_mutex. */
2a4999f3 1553static struct datapath *lookup_datapath(struct net *net,
f1f60b85 1554 const struct ovs_header *ovs_header,
6455100f 1555 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
d6569377 1556{
254f2dc8
BP
1557 struct datapath *dp;
1558
df2c07f4 1559 if (!a[OVS_DP_ATTR_NAME])
2a4999f3 1560 dp = get_dp(net, ovs_header->dp_ifindex);
254f2dc8 1561 else {
d6569377 1562 struct vport *vport;
d6569377 1563
2a4999f3 1564 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
df2c07f4 1565 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
d6569377 1566 }
254f2dc8 1567 return dp ? dp : ERR_PTR(-ENODEV);
d6569377
BP
1568}
1569
94358dcf
TG
1570static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1571{
1572 struct datapath *dp;
1573
1574 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
09350a3d 1575 if (IS_ERR(dp))
94358dcf
TG
1576 return;
1577
1578 WARN(dp->user_features, "Dropping previously announced user features\n");
1579 dp->user_features = 0;
1580}
1581
f1f60b85 1582static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
c58cc9a4
TG
1583{
1584 if (a[OVS_DP_ATTR_USER_FEATURES])
1585 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1586}
1587
df2c07f4 1588static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
d6569377 1589{
aaff4b55 1590 struct nlattr **a = info->attrs;
d6569377 1591 struct vport_parms parms;
aaff4b55 1592 struct sk_buff *reply;
d6569377
BP
1593 struct datapath *dp;
1594 struct vport *vport;
2a4999f3 1595 struct ovs_net *ovs_net;
95b1d73a 1596 int err, i;
d6569377 1597
d6569377 1598 err = -EINVAL;
ea36840f 1599 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
aaff4b55
BP
1600 goto err;
1601
40c08cda 1602 reply = ovs_dp_cmd_alloc_info();
d81eef1b
JR
1603 if (!reply)
1604 return -ENOMEM;
d6569377 1605
d6569377
BP
1606 err = -ENOMEM;
1607 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1608 if (dp == NULL)
d81eef1b 1609 goto err_free_reply;
2a4999f3 1610
c0cddcec 1611 ovs_dp_set_net(dp, sock_net(skb->sk));
0ceaa66c 1612
d6569377 1613 /* Allocate table. */
994dc286
PS
1614 err = ovs_flow_tbl_init(&dp->table);
1615 if (err)
d6569377
BP
1616 goto err_free_dp;
1617
08fb1bbd 1618 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
99769a40
JG
1619 if (!dp->stats_percpu) {
1620 err = -ENOMEM;
1621 goto err_destroy_table;
1622 }
1623
95b1d73a
PS
1624 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1625 GFP_KERNEL);
1626 if (!dp->ports) {
1627 err = -ENOMEM;
1628 goto err_destroy_percpu;
1629 }
1630
1631 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1632 INIT_HLIST_HEAD(&dp->ports[i]);
1633
d6569377 1634 /* Set up our datapath device. */
df2c07f4
JP
1635 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1636 parms.type = OVS_VPORT_TYPE_INTERNAL;
d6569377
BP
1637 parms.options = NULL;
1638 parms.dp = dp;
df2c07f4 1639 parms.port_no = OVSP_LOCAL;
beb1c69a 1640 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
b063d9f0 1641
c58cc9a4
TG
1642 ovs_dp_change(dp, a);
1643
d81eef1b
JR
1644 /* So far only local changes have been made, now need the lock. */
1645 ovs_lock();
1646
d6569377
BP
1647 vport = new_vport(&parms);
1648 if (IS_ERR(vport)) {
1649 err = PTR_ERR(vport);
1650 if (err == -EBUSY)
1651 err = -EEXIST;
1652
94358dcf
TG
1653 if (err == -EEXIST) {
1654 /* An outdated user space instance that does not understand
1655 * the concept of user_features has attempted to create a new
1656 * datapath and is likely to reuse it. Drop all user features.
1657 */
1658 if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1659 ovs_dp_reset_user_features(skb, info);
1660 }
1661
95b1d73a 1662 goto err_destroy_ports_array;
d6569377 1663 }
d6569377 1664
d81eef1b
JR
1665 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1666 info->snd_seq, 0, OVS_DP_CMD_NEW);
1667 BUG_ON(err < 0);
aaff4b55 1668
2a4999f3 1669 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
fb93e9aa 1670 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
a0fb56c1 1671
cd2a59e9 1672 ovs_unlock();
d6569377 1673
cb25142c 1674 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
d6569377
BP
1675 return 0;
1676
95b1d73a 1677err_destroy_ports_array:
d81eef1b 1678 ovs_unlock();
95b1d73a 1679 kfree(dp->ports);
99769a40
JG
1680err_destroy_percpu:
1681 free_percpu(dp->stats_percpu);
d6569377 1682err_destroy_table:
e379e4d1 1683 ovs_flow_tbl_destroy(&dp->table);
d6569377 1684err_free_dp:
d6569377 1685 kfree(dp);
d81eef1b
JR
1686err_free_reply:
1687 kfree_skb(reply);
d6569377 1688err:
064af421
BP
1689 return err;
1690}
1691
cd2a59e9 1692/* Called with ovs_mutex. */
2a4999f3 1693static void __dp_destroy(struct datapath *dp)
44e05eca 1694{
95b1d73a 1695 int i;
44e05eca 1696
95b1d73a
PS
1697 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1698 struct vport *vport;
f8dfbcb7 1699 struct hlist_node *n;
95b1d73a 1700
f8dfbcb7 1701 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
95b1d73a
PS
1702 if (vport->port_no != OVSP_LOCAL)
1703 ovs_dp_detach_port(vport);
1704 }
ed099e92 1705
fb93e9aa 1706 list_del_rcu(&dp->list_node);
ed099e92 1707
cd2a59e9 1708 /* OVSP_LOCAL is datapath internal port. We need to make sure that
d103f479
AZ
1709 * all ports in datapath are destroyed first before freeing datapath.
1710 */
cd2a59e9 1711 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
99620d2c 1712
d103f479 1713 /* RCU destroy the flow table */
ed099e92 1714 call_rcu(&dp->rcu, destroy_dp_rcu);
2a4999f3
PS
1715}
1716
1717static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1718{
1719 struct sk_buff *reply;
1720 struct datapath *dp;
1721 int err;
1722
40c08cda 1723 reply = ovs_dp_cmd_alloc_info();
d81eef1b
JR
1724 if (!reply)
1725 return -ENOMEM;
1726
cd2a59e9 1727 ovs_lock();
2a4999f3
PS
1728 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1729 err = PTR_ERR(dp);
1730 if (IS_ERR(dp))
d81eef1b 1731 goto err_unlock_free;
2a4999f3 1732
d81eef1b
JR
1733 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1734 info->snd_seq, 0, OVS_DP_CMD_DEL);
1735 BUG_ON(err < 0);
2a4999f3
PS
1736
1737 __dp_destroy(dp);
d81eef1b 1738 ovs_unlock();
7d16c847 1739
cb25142c 1740 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
99620d2c 1741 return 0;
d81eef1b
JR
1742
1743err_unlock_free:
cd2a59e9 1744 ovs_unlock();
d81eef1b 1745 kfree_skb(reply);
cd2a59e9 1746 return err;
44e05eca
BP
1747}
1748
df2c07f4 1749static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
064af421 1750{
aaff4b55 1751 struct sk_buff *reply;
d6569377 1752 struct datapath *dp;
d6569377 1753 int err;
064af421 1754
40c08cda 1755 reply = ovs_dp_cmd_alloc_info();
d81eef1b
JR
1756 if (!reply)
1757 return -ENOMEM;
1758
cd2a59e9 1759 ovs_lock();
2a4999f3 1760 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
cd2a59e9 1761 err = PTR_ERR(dp);
d6569377 1762 if (IS_ERR(dp))
d81eef1b 1763 goto err_unlock_free;
38c6ecbc 1764
c58cc9a4
TG
1765 ovs_dp_change(dp, info->attrs);
1766
d81eef1b
JR
1767 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1768 info->snd_seq, 0, OVS_DP_CMD_NEW);
1769 BUG_ON(err < 0);
a0fb56c1 1770
cd2a59e9 1771 ovs_unlock();
7d16c847 1772
cb25142c 1773 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
aaff4b55 1774 return 0;
d81eef1b
JR
1775
1776err_unlock_free:
cd2a59e9 1777 ovs_unlock();
d81eef1b 1778 kfree_skb(reply);
cd2a59e9 1779 return err;
064af421
BP
1780}
1781
df2c07f4 1782static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1dcf111b 1783{
aaff4b55 1784 struct sk_buff *reply;
d6569377 1785 struct datapath *dp;
d6569377 1786 int err;
1dcf111b 1787
40c08cda 1788 reply = ovs_dp_cmd_alloc_info();
d81eef1b
JR
1789 if (!reply)
1790 return -ENOMEM;
1791
d637497c 1792 ovs_lock();
2a4999f3 1793 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
cd2a59e9
PS
1794 if (IS_ERR(dp)) {
1795 err = PTR_ERR(dp);
d81eef1b 1796 goto err_unlock_free;
cd2a59e9 1797 }
d81eef1b
JR
1798 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1799 info->snd_seq, 0, OVS_DP_CMD_NEW);
1800 BUG_ON(err < 0);
d637497c 1801 ovs_unlock();
aaff4b55
BP
1802
1803 return genlmsg_reply(reply, info);
cd2a59e9 1804
d81eef1b 1805err_unlock_free:
d637497c 1806 ovs_unlock();
d81eef1b 1807 kfree_skb(reply);
cd2a59e9 1808 return err;
1dcf111b
JP
1809}
1810
df2c07f4 1811static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
a7786963 1812{
2a4999f3 1813 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
254f2dc8
BP
1814 struct datapath *dp;
1815 int skip = cb->args[0];
1816 int i = 0;
a7786963 1817
d637497c
PS
1818 ovs_lock();
1819 list_for_each_entry(dp, &ovs_net->dps, list_node) {
a2bab2f0 1820 if (i >= skip &&
28aea917 1821 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
aaff4b55 1822 cb->nlh->nlmsg_seq, NLM_F_MULTI,
df2c07f4 1823 OVS_DP_CMD_NEW) < 0)
aaff4b55 1824 break;
254f2dc8 1825 i++;
a7786963 1826 }
d637497c 1827 ovs_unlock();
aaff4b55 1828
254f2dc8
BP
1829 cb->args[0] = i;
1830
aaff4b55 1831 return skb->len;
c19e6535
BP
1832}
1833
cb25142c
PS
1834static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1835 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1836 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1837 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1838};
1839
18fd3a52 1840static struct genl_ops dp_datapath_genl_ops[] = {
df2c07f4 1841 { .cmd = OVS_DP_CMD_NEW,
a6a8674d 1842 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
aaff4b55 1843 .policy = datapath_policy,
df2c07f4 1844 .doit = ovs_dp_cmd_new
aaff4b55 1845 },
df2c07f4 1846 { .cmd = OVS_DP_CMD_DEL,
a6a8674d 1847 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
aaff4b55 1848 .policy = datapath_policy,
df2c07f4 1849 .doit = ovs_dp_cmd_del
aaff4b55 1850 },
df2c07f4 1851 { .cmd = OVS_DP_CMD_GET,
aaff4b55
BP
1852 .flags = 0, /* OK for unprivileged users. */
1853 .policy = datapath_policy,
df2c07f4
JP
1854 .doit = ovs_dp_cmd_get,
1855 .dumpit = ovs_dp_cmd_dump
aaff4b55 1856 },
df2c07f4 1857 { .cmd = OVS_DP_CMD_SET,
a6a8674d 1858 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
aaff4b55 1859 .policy = datapath_policy,
df2c07f4 1860 .doit = ovs_dp_cmd_set,
aaff4b55
BP
1861 },
1862};
1863
ba63fe26 1864static struct genl_family dp_datapath_genl_family __ro_after_init = {
df2c07f4 1865 .hdrsize = sizeof(struct ovs_header),
cb25142c
PS
1866 .name = OVS_DATAPATH_FAMILY,
1867 .version = OVS_DATAPATH_VERSION,
1868 .maxattr = OVS_DP_ATTR_MAX,
b3dcb73c 1869 .netnsok = true,
cb25142c
PS
1870 .parallel_ops = true,
1871 .ops = dp_datapath_genl_ops,
1872 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1873 .mcgrps = &ovs_dp_datapath_multicast_group,
1874 .n_mcgrps = 1,
ba63fe26 1875 .module = THIS_MODULE,
f0fef760
BP
1876};
1877
cd2a59e9 1878/* Called with ovs_mutex or RCU read lock. */
df2c07f4 1879static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
28aea917 1880 u32 portid, u32 seq, u32 flags, u8 cmd)
064af421 1881{
df2c07f4 1882 struct ovs_header *ovs_header;
e926dfe3 1883 struct ovs_vport_stats vport_stats;
c19e6535
BP
1884 int err;
1885
28aea917 1886 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
f0fef760 1887 flags, cmd);
df2c07f4 1888 if (!ovs_header)
f0fef760 1889 return -EMSGSIZE;
c19e6535 1890
99769a40 1891 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
c19e6535 1892
c3cc8c03
DM
1893 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1894 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
e23775f2
PS
1895 nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1896 ovs_vport_name(vport)))
c3cc8c03 1897 goto nla_put_failure;
c19e6535 1898
850b6b3b 1899 ovs_vport_get_stats(vport, &vport_stats);
91b37647
PS
1900 if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1901 sizeof(struct ovs_vport_stats), &vport_stats,
1902 OVS_VPORT_ATTR_PAD))
c3cc8c03 1903 goto nla_put_failure;
c19e6535 1904
beb1c69a
AW
1905 if (ovs_vport_get_upcall_portids(vport, skb))
1906 goto nla_put_failure;
1907
850b6b3b 1908 err = ovs_vport_get_options(vport, skb);
f0fef760
BP
1909 if (err == -EMSGSIZE)
1910 goto error;
c19e6535 1911
23b48dc1
TG
1912 genlmsg_end(skb, ovs_header);
1913 return 0;
c19e6535
BP
1914
1915nla_put_failure:
1916 err = -EMSGSIZE;
f0fef760 1917error:
df2c07f4 1918 genlmsg_cancel(skb, ovs_header);
f0fef760 1919 return err;
064af421
BP
1920}
1921
d81eef1b
JR
1922static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1923{
1924 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1925}
1926
1927/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
28aea917 1928struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
f14d8083 1929 u32 seq, u8 cmd)
064af421 1930{
c19e6535 1931 struct sk_buff *skb;
f0fef760 1932 int retval;
c19e6535 1933
f0fef760 1934 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
c19e6535
BP
1935 if (!skb)
1936 return ERR_PTR(-ENOMEM);
1937
28aea917 1938 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
c25ea534
JG
1939 BUG_ON(retval < 0);
1940
c19e6535 1941 return skb;
f0fef760 1942}
c19e6535 1943
cd2a59e9 1944/* Called with ovs_mutex or RCU read lock. */
2a4999f3 1945static struct vport *lookup_vport(struct net *net,
f1f60b85 1946 const struct ovs_header *ovs_header,
df2c07f4 1947 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
c19e6535
BP
1948{
1949 struct datapath *dp;
1950 struct vport *vport;
1951
df2c07f4 1952 if (a[OVS_VPORT_ATTR_NAME]) {
2a4999f3 1953 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
ed099e92 1954 if (!vport)
c19e6535 1955 return ERR_PTR(-ENODEV);
24ce832d
BP
1956 if (ovs_header->dp_ifindex &&
1957 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1958 return ERR_PTR(-ENODEV);
c19e6535 1959 return vport;
df2c07f4
JP
1960 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1961 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
c19e6535
BP
1962
1963 if (port_no >= DP_MAX_PORTS)
f0fef760 1964 return ERR_PTR(-EFBIG);
c19e6535 1965
2a4999f3 1966 dp = get_dp(net, ovs_header->dp_ifindex);
c19e6535
BP
1967 if (!dp)
1968 return ERR_PTR(-ENODEV);
f2459fe7 1969
cd2a59e9 1970 vport = ovs_vport_ovsl_rcu(dp, port_no);
ed099e92 1971 if (!vport)
17535c57 1972 return ERR_PTR(-ENODEV);
c19e6535
BP
1973 return vport;
1974 } else
1975 return ERR_PTR(-EINVAL);
064af421
BP
1976}
1977
8ce37339
PS
1978/* Called with ovs_mutex */
1979static void update_headroom(struct datapath *dp)
1980{
1981 unsigned dev_headroom, max_headroom = 0;
1982 struct net_device *dev;
1983 struct vport *vport;
1984 int i;
1985
1986 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1987 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1988 dev = vport->dev;
1989 dev_headroom = netdev_get_fwd_headroom(dev);
1990 if (dev_headroom > max_headroom)
1991 max_headroom = dev_headroom;
1992 }
1993 }
1994
1995 dp->max_headroom = max_headroom;
1996 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1997 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
1998 netdev_set_rx_headroom(vport->dev, max_headroom);
1999}
2000
df2c07f4 2001static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
c19e6535 2002{
f0fef760 2003 struct nlattr **a = info->attrs;
df2c07f4 2004 struct ovs_header *ovs_header = info->userhdr;
c19e6535 2005 struct vport_parms parms;
ed099e92 2006 struct sk_buff *reply;
c19e6535 2007 struct vport *vport;
c19e6535 2008 struct datapath *dp;
b0ec0f27 2009 u32 port_no;
c19e6535 2010 int err;
b0ec0f27 2011
ea36840f
BP
2012 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2013 !a[OVS_VPORT_ATTR_UPCALL_PID])
d81eef1b
JR
2014 return -EINVAL;
2015
2016 port_no = a[OVS_VPORT_ATTR_PORT_NO]
2017 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
2018 if (port_no >= DP_MAX_PORTS)
2019 return -EFBIG;
2020
2021 reply = ovs_vport_cmd_alloc_info();
2022 if (!reply)
2023 return -ENOMEM;
f0fef760 2024
cd2a59e9 2025 ovs_lock();
5a38795f 2026restart:
2a4999f3 2027 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
c19e6535
BP
2028 err = -ENODEV;
2029 if (!dp)
d81eef1b 2030 goto exit_unlock_free;
c19e6535 2031
d81eef1b 2032 if (port_no) {
cd2a59e9 2033 vport = ovs_vport_ovsl(dp, port_no);
c19e6535
BP
2034 err = -EBUSY;
2035 if (vport)
d81eef1b 2036 goto exit_unlock_free;
c19e6535
BP
2037 } else {
2038 for (port_no = 1; ; port_no++) {
2039 if (port_no >= DP_MAX_PORTS) {
2040 err = -EFBIG;
d81eef1b 2041 goto exit_unlock_free;
c19e6535 2042 }
cd2a59e9 2043 vport = ovs_vport_ovsl(dp, port_no);
c19e6535
BP
2044 if (!vport)
2045 break;
51d4d598 2046 }
064af421 2047 }
b0ec0f27 2048
df2c07f4
JP
2049 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2050 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2051 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
c19e6535
BP
2052 parms.dp = dp;
2053 parms.port_no = port_no;
beb1c69a 2054 parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
c19e6535
BP
2055
2056 vport = new_vport(&parms);
2057 err = PTR_ERR(vport);
5a38795f
TG
2058 if (IS_ERR(vport)) {
2059 if (err == -EAGAIN)
2060 goto restart;
d81eef1b 2061 goto exit_unlock_free;
5a38795f 2062 }
c19e6535 2063
d81eef1b
JR
2064 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2065 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2066 BUG_ON(err < 0);
8ce37339
PS
2067
2068 if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
2069 update_headroom(dp);
2070 else
2071 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2072
d81eef1b 2073 ovs_unlock();
e297c6b7 2074
cb25142c 2075 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
d81eef1b 2076 return 0;
c19e6535 2077
d81eef1b 2078exit_unlock_free:
cd2a59e9 2079 ovs_unlock();
d81eef1b 2080 kfree_skb(reply);
c19e6535 2081 return err;
44e05eca
BP
2082}
2083
df2c07f4 2084static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
44e05eca 2085{
f0fef760
BP
2086 struct nlattr **a = info->attrs;
2087 struct sk_buff *reply;
c19e6535 2088 struct vport *vport;
c19e6535 2089 int err;
44e05eca 2090
d81eef1b
JR
2091 reply = ovs_vport_cmd_alloc_info();
2092 if (!reply)
2093 return -ENOMEM;
2094
cd2a59e9 2095 ovs_lock();
2a4999f3 2096 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535
BP
2097 err = PTR_ERR(vport);
2098 if (IS_ERR(vport))
d81eef1b 2099 goto exit_unlock_free;
44e05eca 2100
6455100f 2101 if (a[OVS_VPORT_ATTR_TYPE] &&
17ec1d04 2102 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
4879d4c7 2103 err = -EINVAL;
d81eef1b 2104 goto exit_unlock_free;
c25ea534
JG
2105 }
2106
17ec1d04 2107 if (a[OVS_VPORT_ATTR_OPTIONS]) {
850b6b3b 2108 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
17ec1d04 2109 if (err)
d81eef1b 2110 goto exit_unlock_free;
17ec1d04 2111 }
1fc7083d 2112
beb1c69a 2113 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
7d16c847
PS
2114 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2115
2116 err = ovs_vport_set_upcall_portids(vport, ids);
beb1c69a
AW
2117 if (err)
2118 goto exit_unlock_free;
2119 }
c19e6535 2120
c25ea534
JG
2121 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2122 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2123 BUG_ON(err < 0);
cd2a59e9 2124 ovs_unlock();
d81eef1b 2125
cb25142c 2126 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
c25ea534
JG
2127 return 0;
2128
d81eef1b 2129exit_unlock_free:
cd2a59e9 2130 ovs_unlock();
d81eef1b 2131 kfree_skb(reply);
c19e6535 2132 return err;
064af421
BP
2133}
2134
df2c07f4 2135static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2136{
8ce37339 2137 bool must_update_headroom = false;
f0fef760
BP
2138 struct nlattr **a = info->attrs;
2139 struct sk_buff *reply;
8ce37339 2140 struct datapath *dp;
c19e6535 2141 struct vport *vport;
c19e6535
BP
2142 int err;
2143
d81eef1b
JR
2144 reply = ovs_vport_cmd_alloc_info();
2145 if (!reply)
2146 return -ENOMEM;
2147
cd2a59e9 2148 ovs_lock();
2a4999f3 2149 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535 2150 err = PTR_ERR(vport);
f0fef760 2151 if (IS_ERR(vport))
d81eef1b 2152 goto exit_unlock_free;
c19e6535 2153
df2c07f4 2154 if (vport->port_no == OVSP_LOCAL) {
f0fef760 2155 err = -EINVAL;
d81eef1b 2156 goto exit_unlock_free;
f0fef760
BP
2157 }
2158
d81eef1b
JR
2159 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2160 info->snd_seq, 0, OVS_VPORT_CMD_DEL);
2161 BUG_ON(err < 0);
8ce37339
PS
2162
2163 /* the vport deletion may trigger dp headroom update */
2164 dp = vport->dp;
2165 if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2166 must_update_headroom = true;
2167 netdev_reset_rx_headroom(vport->dev);
850b6b3b 2168 ovs_dp_detach_port(vport);
8ce37339
PS
2169
2170 if (must_update_headroom)
2171 update_headroom(dp);
2172
d81eef1b 2173 ovs_unlock();
f0fef760 2174
cb25142c 2175 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
d81eef1b 2176 return 0;
f0fef760 2177
d81eef1b 2178exit_unlock_free:
cd2a59e9 2179 ovs_unlock();
d81eef1b 2180 kfree_skb(reply);
c19e6535 2181 return err;
7c40efc9
BP
2182}
2183
df2c07f4 2184static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2185{
f0fef760 2186 struct nlattr **a = info->attrs;
df2c07f4 2187 struct ovs_header *ovs_header = info->userhdr;
ed099e92 2188 struct sk_buff *reply;
c19e6535 2189 struct vport *vport;
c19e6535
BP
2190 int err;
2191
d81eef1b
JR
2192 reply = ovs_vport_cmd_alloc_info();
2193 if (!reply)
2194 return -ENOMEM;
2195
ed099e92 2196 rcu_read_lock();
2a4999f3 2197 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
c19e6535
BP
2198 err = PTR_ERR(vport);
2199 if (IS_ERR(vport))
d81eef1b
JR
2200 goto exit_unlock_free;
2201 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2202 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2203 BUG_ON(err < 0);
df2fa9b5
JG
2204 rcu_read_unlock();
2205
2206 return genlmsg_reply(reply, info);
ed099e92 2207
d81eef1b 2208exit_unlock_free:
ed099e92 2209 rcu_read_unlock();
d81eef1b 2210 kfree_skb(reply);
c19e6535
BP
2211 return err;
2212}
2213
df2c07f4 2214static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
c19e6535 2215{
df2c07f4 2216 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
c19e6535 2217 struct datapath *dp;
95b1d73a
PS
2218 int bucket = cb->args[0], skip = cb->args[1];
2219 int i, j = 0;
c19e6535 2220
03fc2881 2221 rcu_read_lock();
01ac0970 2222 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
03fc2881
JR
2223 if (!dp) {
2224 rcu_read_unlock();
f0fef760 2225 return -ENODEV;
03fc2881 2226 }
95b1d73a 2227 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
ed099e92 2228 struct vport *vport;
95b1d73a
PS
2229
2230 j = 0;
f8dfbcb7 2231 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
95b1d73a
PS
2232 if (j >= skip &&
2233 ovs_vport_cmd_fill_info(vport, skb,
28aea917 2234 NETLINK_CB(cb->skb).portid,
95b1d73a
PS
2235 cb->nlh->nlmsg_seq,
2236 NLM_F_MULTI,
2237 OVS_VPORT_CMD_NEW) < 0)
2238 goto out;
2239
2240 j++;
2241 }
2242 skip = 0;
c19e6535 2243 }
95b1d73a 2244out:
ed099e92 2245 rcu_read_unlock();
c19e6535 2246
95b1d73a
PS
2247 cb->args[0] = i;
2248 cb->args[1] = j;
f0fef760 2249
95b1d73a 2250 return skb->len;
7c40efc9
BP
2251}
2252
cb25142c
PS
2253static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2254 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2255 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2256 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2257 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2258 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2259 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2260};
2261
18fd3a52 2262static struct genl_ops dp_vport_genl_ops[] = {
df2c07f4 2263 { .cmd = OVS_VPORT_CMD_NEW,
a6a8674d 2264 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
f0fef760 2265 .policy = vport_policy,
df2c07f4 2266 .doit = ovs_vport_cmd_new
f0fef760 2267 },
df2c07f4 2268 { .cmd = OVS_VPORT_CMD_DEL,
a6a8674d 2269 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
f0fef760 2270 .policy = vport_policy,
df2c07f4 2271 .doit = ovs_vport_cmd_del
f0fef760 2272 },
df2c07f4 2273 { .cmd = OVS_VPORT_CMD_GET,
f0fef760
BP
2274 .flags = 0, /* OK for unprivileged users. */
2275 .policy = vport_policy,
df2c07f4
JP
2276 .doit = ovs_vport_cmd_get,
2277 .dumpit = ovs_vport_cmd_dump
f0fef760 2278 },
df2c07f4 2279 { .cmd = OVS_VPORT_CMD_SET,
a6a8674d 2280 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
f0fef760 2281 .policy = vport_policy,
df2c07f4 2282 .doit = ovs_vport_cmd_set,
f0fef760
BP
2283 },
2284};
2285
ba63fe26 2286struct genl_family dp_vport_genl_family __ro_after_init = {
cb25142c
PS
2287 .hdrsize = sizeof(struct ovs_header),
2288 .name = OVS_VPORT_FAMILY,
2289 .version = OVS_VPORT_VERSION,
2290 .maxattr = OVS_VPORT_ATTR_MAX,
2291 .netnsok = true,
2292 .parallel_ops = true,
2293 .ops = dp_vport_genl_ops,
2294 .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2295 .mcgrps = &ovs_dp_vport_multicast_group,
2296 .n_mcgrps = 1,
ba63fe26 2297 .module = THIS_MODULE,
982b8810 2298};
ed099e92 2299
18fd3a52 2300static struct genl_family *dp_genl_families[] = {
cb25142c
PS
2301 &dp_datapath_genl_family,
2302 &dp_vport_genl_family,
2303 &dp_flow_genl_family,
2304 &dp_packet_genl_family,
982b8810 2305};
ed099e92 2306
982b8810
BP
2307static void dp_unregister_genl(int n_families)
2308{
2309 int i;
ed099e92 2310
b867ca75 2311 for (i = 0; i < n_families; i++)
cb25142c 2312 genl_unregister_family(dp_genl_families[i]);
ed099e92
BP
2313}
2314
ba63fe26 2315static int __init dp_register_genl(void)
064af421 2316{
982b8810
BP
2317 int err;
2318 int i;
064af421 2319
982b8810 2320 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
064af421 2321
cb25142c 2322 err = genl_register_family(dp_genl_families[i]);
982b8810
BP
2323 if (err)
2324 goto error;
982b8810 2325 }
9cc8b4e4 2326
982b8810 2327 return 0;
064af421
BP
2328
2329error:
cb25142c 2330 dp_unregister_genl(i);
982b8810 2331 return err;
064af421
BP
2332}
2333
2a4999f3
PS
2334static int __net_init ovs_init_net(struct net *net)
2335{
2336 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2337
2338 INIT_LIST_HEAD(&ovs_net->dps);
cd2a59e9 2339 INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
038e34ab 2340 ovs_ct_init(net);
7f4a5d68 2341 ovs_netns_frags_init(net);
2342 ovs_netns_frags6_init(net);
2a4999f3
PS
2343 return 0;
2344}
2345
cabd5516
PS
2346static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2347 struct list_head *head)
2a4999f3
PS
2348{
2349 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
cabd5516
PS
2350 struct datapath *dp;
2351
2352 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2353 int i;
2354
2355 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2356 struct vport *vport;
2357
2358 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
cabd5516
PS
2359
2360 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2361 continue;
2362
e23775f2 2363 if (dev_net(vport->dev) == dnet)
cabd5516
PS
2364 list_add(&vport->detach_list, head);
2365 }
2366 }
2367 }
2368}
2369
2370static void __net_exit ovs_exit_net(struct net *dnet)
2371{
2372 struct datapath *dp, *dp_next;
2373 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2374 struct vport *vport, *vport_next;
2375 struct net *net;
2376 LIST_HEAD(head);
2a4999f3 2377
7f4a5d68 2378 ovs_netns_frags6_exit(dnet);
2379 ovs_netns_frags_exit(dnet);
038e34ab 2380 ovs_ct_exit(dnet);
cd2a59e9
PS
2381 ovs_lock();
2382 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2383 __dp_destroy(dp);
cabd5516
PS
2384
2385 rtnl_lock();
2386 for_each_net(net)
2387 list_vports_from_net(net, dnet, &head);
2388 rtnl_unlock();
2389
2390 /* Detach all vports from given namespace. */
2391 list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2392 list_del(&vport->detach_list);
2393 ovs_dp_detach_port(vport);
2394 }
2395
cd2a59e9
PS
2396 ovs_unlock();
2397
2398 cancel_work_sync(&ovs_net->dp_notify_work);
2a4999f3
PS
2399}
2400
2401static struct pernet_operations ovs_net_ops = {
2402 .init = ovs_init_net,
2403 .exit = ovs_exit_net,
2404 .id = &ovs_net_id,
2405 .size = sizeof(struct ovs_net),
2406};
2407
22d24ebf
BP
2408static int __init dp_init(void)
2409{
2410 int err;
2411
f3d85db3 2412 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
22d24ebf 2413
26bfaeaa 2414 pr_info("Open vSwitch switching datapath %s\n", VERSION);
064af421 2415
c0b6f594 2416 ovs_nsh_init();
595e069a
JS
2417 err = action_fifos_init();
2418 if (err)
7f4a5d68 2419 goto error;
595e069a 2420
5282e284 2421 err = ovs_internal_dev_rtnl_link_register();
2c8c4fb7
AZ
2422 if (err)
2423 goto error_action_fifos_exit;
2424
5282e284
TG
2425 err = ovs_flow_init();
2426 if (err)
2427 goto error_unreg_rtnl_link;
2428
850b6b3b 2429 err = ovs_vport_init();
064af421
BP
2430 if (err)
2431 goto error_flow_exit;
2432
2a4999f3 2433 err = register_pernet_device(&ovs_net_ops);
f2459fe7
JG
2434 if (err)
2435 goto error_vport_exit;
2436
7f4a5d68 2437 err = compat_init();
2a4999f3
PS
2438 if (err)
2439 goto error_netns_exit;
2440
7f4a5d68 2441 err = register_netdevice_notifier(&ovs_dp_device_notifier);
2442 if (err)
2443 goto error_compat_exit;
2444
5a38795f
TG
2445 err = ovs_netdev_init();
2446 if (err)
2447 goto error_unreg_notifier;
2448
982b8810
BP
2449 err = dp_register_genl();
2450 if (err < 0)
5a38795f 2451 goto error_unreg_netdev;
982b8810 2452
064af421
BP
2453 return 0;
2454
5a38795f
TG
2455error_unreg_netdev:
2456 ovs_netdev_exit();
064af421 2457error_unreg_notifier:
850b6b3b 2458 unregister_netdevice_notifier(&ovs_dp_device_notifier);
7f4a5d68 2459error_compat_exit:
2460 compat_exit();
2a4999f3
PS
2461error_netns_exit:
2462 unregister_pernet_device(&ovs_net_ops);
f2459fe7 2463error_vport_exit:
850b6b3b 2464 ovs_vport_exit();
064af421 2465error_flow_exit:
850b6b3b 2466 ovs_flow_exit();
5282e284
TG
2467error_unreg_rtnl_link:
2468 ovs_internal_dev_rtnl_link_unregister();
2c8c4fb7
AZ
2469error_action_fifos_exit:
2470 action_fifos_exit();
064af421 2471error:
c0b6f594 2472 ovs_nsh_cleanup();
064af421
BP
2473 return err;
2474}
2475
2476static void dp_cleanup(void)
2477{
508b6dd8
GR
2478#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)
2479 /* On RHEL 7.x kernels we hit a kernel paging error without
2480 * this barrier and subsequent hefty delay. A process will
2481 * attempt to access openvwitch memory after it has been
2482 * unloaded. Further debugging is needed on that but for
2483 * now let's not let customer machines panic.
2484 */
2485 rcu_barrier();
2486 msleep(3000);
2487#endif
982b8810 2488 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
5a38795f 2489 ovs_netdev_exit();
850b6b3b 2490 unregister_netdevice_notifier(&ovs_dp_device_notifier);
7f4a5d68 2491 compat_exit();
2a4999f3
PS
2492 unregister_pernet_device(&ovs_net_ops);
2493 rcu_barrier();
850b6b3b
JG
2494 ovs_vport_exit();
2495 ovs_flow_exit();
5282e284 2496 ovs_internal_dev_rtnl_link_unregister();
2c8c4fb7 2497 action_fifos_exit();
c0b6f594 2498 ovs_nsh_cleanup();
064af421
BP
2499}
2500
2501module_init(dp_init);
2502module_exit(dp_cleanup);
2503
2504MODULE_DESCRIPTION("Open vSwitch switching datapath");
2505MODULE_LICENSE("GPL");
3d0666d2 2506MODULE_VERSION(VERSION);
75e2077e
TLSC
2507MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2508MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2509MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2510MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);