/*
 * Copyright (c) 2007-2015 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/nsh.h>

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "gso.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

unsigned int ovs_net_id __read_mostly;

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};

/* Check whether we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
 */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, info, GROUP_ID(grp), GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes to device state (add/remove datapath, port, set operations on
 * vports, etc.) and writes to other state (flow table modifications, set
 * miscellaneous datapath parameters, etc.) are protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of above and don't interact with
 * each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
#endif

static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *,
			     uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *,
				  uint32_t cutlen);

/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			return vport->dp;
	}

	return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return ovs_vport_name(vport);
}

static int get_dpifindex(const struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = local->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	kfree(dp->ports);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
					 &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;
		int error;

		memset(&upcall, 0, sizeof(upcall));
		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		upcall.mru = OVS_CB(skb)->mru;
		error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
		if (unlikely(error))
			kfree_skb(skb);
		else
			consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	ovs_execute_actions(dp, skb, sf_acts, key);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->syncp);
}

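/* Send a packet to userspace on the Netlink port given in upcall_info; GSO
 * packets are segmented first.  On failure the per-CPU n_lost counter is
 * bumped.
 */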
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info,
		  uint32_t cutlen)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
	else
		err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

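/* Segment a GSO packet and queue each resulting segment to userspace.  For
 * UDP fragmentation offload, segments after the first get a flow key marked
 * OVS_FRAG_TYPE_LATER.
 */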
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info,
			     uint32_t cutlen)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	struct ovs_skb_cb ovs_cb;
	int err;

	ovs_cb = *OVS_CB(skb);
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	*OVS_CB(skb) = ovs_cb;
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	if (gso_type & SKB_GSO_UDP) {
		/* The initial flow key extracted by ovs_flow_key_extract()
		 * in this case is for a first fragment, so we need to
		 * properly mark later fragments.
		 */
		later_key = *key;
		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
	}

	/* Queue all of the segments. */
	skb = segs;
	do {
		*OVS_CB(skb) = ovs_cb;
		if (gso_type & SKB_GSO_UDP && skb != segs)
			key = &later_key;

		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
		if (err)
			break;

	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}

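/* Upper bound on the Netlink message size needed for an upcall, including
 * the optional attributes carried in upcall_info.
 */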
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen, int actions_attrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
		+ nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	/* OVS_PACKET_ATTR_ACTIONS */
	if (upcall_info->actions_len)
		size += nla_total_size(actions_attrlen);

	/* OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru)
		size += nla_total_size(sizeof(upcall_info->mru));

	return size;
}

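/* Pad the message out to NLA_ALIGNTO for older userspace that does not
 * accept unaligned Netlink messages (i.e. OVS_DP_F_UNALIGNED not set).
 */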
static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(skb->len) - skb->len;

		if (plen > 0)
			skb_put_zero(skb, plen);
	}
}

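/* Build a Netlink upcall message carrying a single packet, its flow key and
 * the optional attributes from upcall_info, then unicast it to the upcall
 * portid.
 */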
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info,
				  uint32_t cutlen)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (skb_vlan_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_hwaccel_push_inside(nskb);
		if (!nskb)
			return -ENOMEM;

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_csum_hwoffload_help(skb, 0)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info, hlen - cutlen,
			      OVS_CB(skb)->acts_origlen);
	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
	BUG_ON(err);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));


	if (upcall_info->egress_tun_info) {
		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
		err = ovs_nla_put_tunnel_info(user_skb,
					      upcall_info->egress_tun_info);
		BUG_ON(err);
		nla_nest_end(user_skb, nla);
	}

	if (upcall_info->actions_len) {
		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
		err = ovs_nla_put_actions(upcall_info->actions,
					  upcall_info->actions_len,
					  user_skb);
		if (!err)
			nla_nest_end(user_skb, nla);
		else
			nla_nest_cancel(user_skb, nla);
	}

	/* Add OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru) {
		if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
				upcall_info->mru)) {
			err = -ENOBUFS;
			goto out;
		}
		pad_packet(dp, user_skb);
	}

	/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
	if (cutlen > 0) {
		if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
				skb->len)) {
			err = -ENOBUFS;
			goto out;
		}
		pad_packet(dp, user_skb);
	}

	/* Only reserve room for attribute header, packet data is added
	 * in skb_zerocopy()
	 */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len - cutlen);

	err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	pad_packet(dp, user_skb);

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	kfree_skb(user_skb);
	kfree_skb(nskb);
	return err;
}

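/* OVS_PACKET_CMD_EXECUTE handler: rebuild a packet supplied by userspace,
 * validate its key and actions, and execute the actions on the packet.
 */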
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct vport *input_vport;
	u16 mru = 0;
	int len;
	int err;
	bool log = !a[OVS_PACKET_ATTR_PROBE];

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	/* Set packet's mru */
	if (a[OVS_PACKET_ATTR_MRU]) {
		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
		packet->ignore_df = 1;
	}
	OVS_CB(packet)->mru = mru;

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
					     packet, &flow->key, log);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts, log);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	packet->dev = input_vport->dev;
	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
};

static struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_packet_genl_ops,
	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
	.module = THIS_MODULE,
};

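/* Aggregate the per-CPU datapath counters into stats and mega_stats. */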
static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
	return ovs_identifier_is_ufid(sfid) &&
	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
				    const struct sw_flow_id *sfid,
				    uint32_t ufid_flags)
{
	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

	/* OVS_FLOW_ATTR_UFID */
	if (sfid && ovs_identifier_is_ufid(sfid))
		len += nla_total_size(sfid->ufid_len);

	/* OVS_FLOW_ATTR_KEY */
	if (!sfid || should_fill_key(sfid, ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_MASK */
	if (should_fill_mask(ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_ACTIONS */
	if (should_fill_actions(ufid_flags))
		len += nla_total_size(acts->orig_len);

	return len
		+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
			      OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
			  sizeof(struct ovs_flow_stats), &stats,
			  OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'. This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them. (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_identifier(flow, skb);
	if (err)
		goto error;

	if (should_fill_key(&flow->id, ufid_flags)) {
		err = ovs_nla_put_masked_key(flow, skb);
		if (err)
			goto error;
	}

	if (should_fill_mask(ufid_flags)) {
		err = ovs_nla_put_mask(flow, skb);
		if (err)
			goto error;
	}

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	if (should_fill_actions(ufid_flags)) {
		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
		if (err)
			goto error;
	}

	genlmsg_end(skb, ovs_header);
	return 0;

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       const struct sw_flow_id *sfid,
					       struct genl_info *info,
					       bool always,
					       uint32_t ufid_flags)
{
	struct sk_buff *skb;
	size_t len;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
					GROUP_ID(&ovs_dp_flow_multicast_group)))
		return NULL;

	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
	skb = genlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always, u32 ufid_flags)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
				      &flow->id, info, always, ufid_flags);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd, ufid_flags);
	BUG_ON(retval < 0);
	return skb;
}

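/* OVS_FLOW_CMD_NEW handler: install a new flow, or update the actions of an
 * existing flow when its key/UFID already matches.
 */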
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow = NULL, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attr not present in new flow.");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR(log, "Flow actions attr not present in new flow.");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	ovs_match_init(&match, &new_flow->key, false, &mask);
	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto err_kfree_flow;

	/* Extract flow identifier. */
	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
				       &new_flow->key, log);
	if (error)
		goto err_kfree_flow;

	/* unmasked key is needed to match when ufid is not used. */
	if (ovs_identifier_is_key(&new_flow->id))
		match.key = new_flow->id.unmasked_key;

	ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);

	/* Validate actions. */
	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
				     &new_flow->key, &acts, log);
	if (error) {
		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
		goto err_kfree_flow;
	}

	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
					ufid_flags);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}

	/* Check if this is a duplicate flow */
	if (ovs_identifier_is_ufid(&new_flow->id))
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
	if (!flow)
		flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request. We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}
		/* The flow identifier has to be the same for flow updates.
		 * Look for any overlapping flow.
		 */
		if (unlikely(!ovs_flow_cmp(flow, &match))) {
			if (ovs_identifier_is_key(&flow->id))
				flow = ovs_flow_tbl_lookup_exact(&dp->table,
								 &match);
			else /* UFID matches but key is different */
				flow = NULL;
			if (!flow) {
				error = -ENOENT;
				goto err_unlock_ovs;
			}
		}
		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();

		ovs_nla_free_flow_actions_rcu(old_acts);
		ovs_flow_free(new_flow, false);
	}

	if (reply)
		ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
err_kfree_flow:
	ovs_flow_free(new_flow, false);
error:
	return error;
}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static struct sw_flow_actions *get_flow_actions(struct net *net,
						const struct nlattr *a,
						const struct sw_flow_key *key,
						const struct sw_flow_mask *mask,
						bool log)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	ovs_flow_mask_key(&masked_key, key, true, mask);
	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
	if (error) {
		OVS_NLERR(log,
			  "Actions may not be safe on all matching packets");
		return ERR_PTR(error);
	}

	return acts;
}

/* Factor out match-init and action-copy to avoid
 * "Wframe-larger-than=1024" warning. Because the mask is only
 * used to get the actions, we add a new function to save some
 * stack space.
 *
 * If there are no key and action attrs, we return 0
 * directly. In that case, the caller will also not use the
 * match as before. If there is an action attr, we try to get
 * the actions and save them to *acts. Before returning from
 * the function, we reset the match->mask pointer, because
 * we should not return a match object with a dangling reference
 * to the mask.
 */
static int ovs_nla_init_match_and_action(struct net *net,
					 struct sw_flow_match *match,
					 struct sw_flow_key *key,
					 struct nlattr **a,
					 struct sw_flow_actions **acts,
					 bool log)
{
	struct sw_flow_mask mask;
	int error = 0;

	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(match, key, true, &mask);
		error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
					  a[OVS_FLOW_ATTR_MASK], log);
		if (error)
			goto error;
	}

	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		if (!a[OVS_FLOW_ATTR_KEY]) {
			OVS_NLERR(log,
				  "Flow key attribute not present in set flow.");
			return -EINVAL;
		}

		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
					 &mask, log);
		if (IS_ERR(*acts)) {
			error = PTR_ERR(*acts);
			goto error;
		}
	}

	/* On success, error is 0. */
error:
	match->mask = NULL;
	return error;
}

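/* OVS_FLOW_CMD_SET handler: replace the actions and/or clear the stats of an
 * existing flow, identified by UFID or by unmasked key.
 */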
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	struct sw_flow_id sfid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
	if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
		OVS_NLERR(log,
			  "Flow set message rejected, Key attribute missing.");
		return -EINVAL;
	}

	error = ovs_nla_init_match_and_action(net, &match, &key, a,
					      &acts, log);
	if (error)
		goto error;

	if (acts) {
		/* Can allocate before locking if have acts. */
		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
						ufid_flags);
		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_kfree_acts;
		}
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check that the flow exists. */
	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		error = -ENOENT;
		goto err_unlock_ovs;
	}

	/* Update actions, if present. */
	if (likely(acts)) {
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
	} else {
		/* Could not alloc without acts before locking. */
		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
						info, OVS_FLOW_CMD_NEW, false,
						ufid_flags);

		if (unlikely(IS_ERR(reply))) {
			error = PTR_ERR(reply);
			goto err_unlock_ovs;
		}
	}

	/* Clear stats. */
	if (a[OVS_FLOW_ATTR_CLEAR])
		ovs_flow_stats_clear(flow);
	ovs_unlock();

	if (reply)
		ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
	if (old_acts)
		ovs_nla_free_flow_actions_rcu(old_acts);

	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
error:
	return error;
}

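/* OVS_FLOW_CMD_GET handler: look up one flow by UFID or unmasked key and
 * reply with its key, mask, stats and actions.
 */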
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
					log);
	} else if (!ufid_present) {
		OVS_NLERR(log,
			  "Flow get message rejected, Key attribute missing.");
		err = -EINVAL;
	}
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (!flow) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
					OVS_FLOW_CMD_NEW, true, ufid_flags);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

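/* OVS_FLOW_CMD_DEL handler: delete one flow (by UFID or key), or flush the
 * whole flow table when no identifier is given.
 */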
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow = NULL;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
					NULL, log);
		if (unlikely(err))
			return err;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		err = -ENODEV;
		goto unlock;
	}

	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		err = -ENOENT;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);
	ovs_unlock();

	reply = ovs_flow_cmd_alloc_info(rcu_dereference_raw(flow->sf_acts),
					&flow->id, info, false, ufid_flags);

	if (likely(reply)) {
		if (likely(!IS_ERR(reply))) {
			rcu_read_lock();	/*To keep RCU checker happy. */
			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
						     reply, info->snd_portid,
						     info->snd_seq, 0,
						     OVS_FLOW_CMD_DEL,
						     ufid_flags);
			rcu_read_unlock();
			BUG_ON(err < 0);
			ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
		} else {
			genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
				     GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));

		}
	}

	ovs_flow_free(flow, true);
	return 0;
unlock:
	ovs_unlock();
	return err;
}

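/* Netlink dump callback: walk the flow table and emit one message per flow,
 * resuming from the bucket/object saved in cb->args.
 */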
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *a[__OVS_FLOW_ATTR_MAX];
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;
	u32 ufid_flags;
	int err;

	err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
			    OVS_FLOW_ATTR_MAX, flow_policy, NULL);
	if (err)
		return err;
	ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW, ufid_flags) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
	[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};

static struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_set,
	},
};

static struct genl_family dp_flow_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_flow_genl_ops,
	.n_ops = ARRAY_SIZE(dp_flow_genl_ops),
	.mcgrps = &ovs_dp_flow_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};

static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

	return msgsize;
}

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
			  &dp_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
			  sizeof(struct ovs_dp_megaflow_stats),
			  &dp_megaflow_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
		goto nla_put_failure;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(void)
{
	return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					const struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return;

	WARN(dp->user_features, "Dropping previously announced user features\n");
	dp->user_features = 0;
}

static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
	if (a[OVS_DP_ATTR_USER_FEATURES])
		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
}

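/* OVS_DP_CMD_NEW handler: allocate a datapath with its flow table, per-CPU
 * stats and vport hash table, then create the local internal port.
 */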
df2c07f4 1584static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
d6569377 1585{
aaff4b55 1586 struct nlattr **a = info->attrs;
d6569377 1587 struct vport_parms parms;
aaff4b55 1588 struct sk_buff *reply;
d6569377
BP
1589 struct datapath *dp;
1590 struct vport *vport;
2a4999f3 1591 struct ovs_net *ovs_net;
95b1d73a 1592 int err, i;
d6569377 1593
d6569377 1594 err = -EINVAL;
ea36840f 1595 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
aaff4b55
BP
1596 goto err;
1597
40c08cda 1598 reply = ovs_dp_cmd_alloc_info();
d81eef1b
JR
1599 if (!reply)
1600 return -ENOMEM;
d6569377 1601
d6569377
BP
1602 err = -ENOMEM;
1603 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1604 if (dp == NULL)
d81eef1b 1605 goto err_free_reply;
2a4999f3 1606
c0cddcec 1607 ovs_dp_set_net(dp, sock_net(skb->sk));
0ceaa66c 1608
d6569377 1609 /* Allocate table. */
994dc286
PS
1610 err = ovs_flow_tbl_init(&dp->table);
1611 if (err)
d6569377
BP
1612 goto err_free_dp;
1613
08fb1bbd 1614 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
99769a40
JG
1615 if (!dp->stats_percpu) {
1616 err = -ENOMEM;
1617 goto err_destroy_table;
1618 }
1619
95b1d73a
PS
1620 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1621 GFP_KERNEL);
1622 if (!dp->ports) {
1623 err = -ENOMEM;
1624 goto err_destroy_percpu;
1625 }
1626
1627 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1628 INIT_HLIST_HEAD(&dp->ports[i]);
1629
d6569377 1630 /* Set up our datapath device. */
df2c07f4
JP
1631 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1632 parms.type = OVS_VPORT_TYPE_INTERNAL;
d6569377
BP
1633 parms.options = NULL;
1634 parms.dp = dp;
df2c07f4 1635 parms.port_no = OVSP_LOCAL;
beb1c69a 1636 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
b063d9f0 1637
c58cc9a4
TG
1638 ovs_dp_change(dp, a);
1639
d81eef1b
JR
1640 /* So far only local changes have been made, now need the lock. */
1641 ovs_lock();
1642
d6569377
BP
1643 vport = new_vport(&parms);
1644 if (IS_ERR(vport)) {
1645 err = PTR_ERR(vport);
1646 if (err == -EBUSY)
1647 err = -EEXIST;
1648
94358dcf
TG
1649 if (err == -EEXIST) {
1650 /* An outdated user space instance that does not understand
1651 * the concept of user_features has attempted to create a new
1652 * datapath and is likely to reuse it. Drop all user features.
1653 */
1654 if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1655 ovs_dp_reset_user_features(skb, info);
1656 }
1657
95b1d73a 1658 goto err_destroy_ports_array;
d6569377 1659 }
d6569377 1660
d81eef1b
JR
1661 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1662 info->snd_seq, 0, OVS_DP_CMD_NEW);
1663 BUG_ON(err < 0);
aaff4b55 1664
2a4999f3 1665 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
fb93e9aa 1666 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
a0fb56c1 1667
cd2a59e9 1668 ovs_unlock();
d6569377 1669
cb25142c 1670 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
d6569377
BP
1671 return 0;
1672
95b1d73a 1673err_destroy_ports_array:
d81eef1b 1674 ovs_unlock();
95b1d73a 1675 kfree(dp->ports);
99769a40
JG
1676err_destroy_percpu:
1677 free_percpu(dp->stats_percpu);
d6569377 1678err_destroy_table:
e379e4d1 1679 ovs_flow_tbl_destroy(&dp->table);
d6569377 1680err_free_dp:
d6569377 1681 kfree(dp);
d81eef1b
JR
1682err_free_reply:
1683 kfree_skb(reply);
d6569377 1684err:
064af421
BP
1685 return err;
1686}
1687
cd2a59e9 1688/* Called with ovs_mutex. */
2a4999f3 1689static void __dp_destroy(struct datapath *dp)
44e05eca 1690{
95b1d73a 1691 int i;
44e05eca 1692
95b1d73a
PS
1693 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1694 struct vport *vport;
f8dfbcb7 1695 struct hlist_node *n;
95b1d73a 1696
f8dfbcb7 1697 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
95b1d73a
PS
1698 if (vport->port_no != OVSP_LOCAL)
1699 ovs_dp_detach_port(vport);
1700 }
ed099e92 1701
fb93e9aa 1702 list_del_rcu(&dp->list_node);
ed099e92 1703
cd2a59e9 1704	/* OVSP_LOCAL is the datapath's internal port. We need to make sure that
d103f479
AZ
1705	 * all ports in the datapath are destroyed before freeing the datapath.
1706	 */
cd2a59e9 1707 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
99620d2c 1708
d103f479 1709	/* Destroy the flow table via RCU. */
ed099e92 1710 call_rcu(&dp->rcu, destroy_dp_rcu);
2a4999f3
PS
1711}
1712
1713static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1714{
1715 struct sk_buff *reply;
1716 struct datapath *dp;
1717 int err;
1718
40c08cda 1719 reply = ovs_dp_cmd_alloc_info();
d81eef1b
JR
1720 if (!reply)
1721 return -ENOMEM;
1722
cd2a59e9 1723 ovs_lock();
2a4999f3
PS
1724 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1725 err = PTR_ERR(dp);
1726 if (IS_ERR(dp))
d81eef1b 1727 goto err_unlock_free;
2a4999f3 1728
d81eef1b
JR
1729 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1730 info->snd_seq, 0, OVS_DP_CMD_DEL);
1731 BUG_ON(err < 0);
2a4999f3
PS
1732
1733 __dp_destroy(dp);
d81eef1b 1734 ovs_unlock();
7d16c847 1735
cb25142c 1736 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
99620d2c 1737 return 0;
d81eef1b
JR
1738
1739err_unlock_free:
cd2a59e9 1740 ovs_unlock();
d81eef1b 1741 kfree_skb(reply);
cd2a59e9 1742 return err;
44e05eca
BP
1743}
1744
df2c07f4 1745static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
064af421 1746{
aaff4b55 1747 struct sk_buff *reply;
d6569377 1748 struct datapath *dp;
d6569377 1749 int err;
064af421 1750
40c08cda 1751 reply = ovs_dp_cmd_alloc_info();
d81eef1b
JR
1752 if (!reply)
1753 return -ENOMEM;
1754
cd2a59e9 1755 ovs_lock();
2a4999f3 1756 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
cd2a59e9 1757 err = PTR_ERR(dp);
d6569377 1758 if (IS_ERR(dp))
d81eef1b 1759 goto err_unlock_free;
38c6ecbc 1760
c58cc9a4
TG
1761 ovs_dp_change(dp, info->attrs);
1762
d81eef1b
JR
1763 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1764 info->snd_seq, 0, OVS_DP_CMD_NEW);
1765 BUG_ON(err < 0);
a0fb56c1 1766
cd2a59e9 1767 ovs_unlock();
7d16c847 1768
cb25142c 1769 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
aaff4b55 1770 return 0;
d81eef1b
JR
1771
1772err_unlock_free:
cd2a59e9 1773 ovs_unlock();
d81eef1b 1774 kfree_skb(reply);
cd2a59e9 1775 return err;
064af421
BP
1776}
1777
df2c07f4 1778static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1dcf111b 1779{
aaff4b55 1780 struct sk_buff *reply;
d6569377 1781 struct datapath *dp;
d6569377 1782 int err;
1dcf111b 1783
40c08cda 1784 reply = ovs_dp_cmd_alloc_info();
d81eef1b
JR
1785 if (!reply)
1786 return -ENOMEM;
1787
d637497c 1788 ovs_lock();
2a4999f3 1789 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
cd2a59e9
PS
1790 if (IS_ERR(dp)) {
1791 err = PTR_ERR(dp);
d81eef1b 1792 goto err_unlock_free;
cd2a59e9 1793 }
d81eef1b
JR
1794 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1795 info->snd_seq, 0, OVS_DP_CMD_NEW);
1796 BUG_ON(err < 0);
d637497c 1797 ovs_unlock();
aaff4b55
BP
1798
1799 return genlmsg_reply(reply, info);
cd2a59e9 1800
d81eef1b 1801err_unlock_free:
d637497c 1802 ovs_unlock();
d81eef1b 1803 kfree_skb(reply);
cd2a59e9 1804 return err;
1dcf111b
JP
1805}
1806
df2c07f4 1807static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
a7786963 1808{
2a4999f3 1809 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
254f2dc8
BP
1810 struct datapath *dp;
1811 int skip = cb->args[0];
1812 int i = 0;
a7786963 1813
d637497c
PS
1814 ovs_lock();
1815 list_for_each_entry(dp, &ovs_net->dps, list_node) {
a2bab2f0 1816 if (i >= skip &&
28aea917 1817 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
aaff4b55 1818 cb->nlh->nlmsg_seq, NLM_F_MULTI,
df2c07f4 1819 OVS_DP_CMD_NEW) < 0)
aaff4b55 1820 break;
254f2dc8 1821 i++;
a7786963 1822 }
d637497c 1823 ovs_unlock();
aaff4b55 1824
254f2dc8
BP
1825 cb->args[0] = i;
1826
aaff4b55 1827 return skb->len;
c19e6535
BP
1828}
1829
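ovs_dp_cmd_dump() above resumes a multi-part dump through cb->args[0]: each pass skips the datapaths already emitted and records how far it got. From userspace the whole dump is driven by a single request carrying NLM_F_DUMP; the kernel and the netlink library handle the continuation. A rough sketch, again assuming libnl-genl-3 and reusing the headers from the earlier example (illustrative only, error handling omitted):

/* Hypothetical userspace sketch (not part of datapath.c); assumes libnl-genl-3. */
#include <stdio.h>	/* in addition to the headers from the previous sketch */

static int print_dp_name(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[OVS_DP_ATTR_MAX + 1];

	(void)arg;
	/* Skip the fixed struct ovs_header that precedes the attributes. */
	if (genlmsg_parse(nlmsg_hdr(msg), sizeof(struct ovs_header),
			  attrs, OVS_DP_ATTR_MAX, NULL) < 0)
		return NL_SKIP;
	if (attrs[OVS_DP_ATTR_NAME])
		printf("datapath: %s\n", nla_get_string(attrs[OVS_DP_ATTR_NAME]));
	return NL_OK;
}

static int dump_datapaths(struct nl_sock *sk, int family)
{
	struct nl_msg *msg = nlmsg_alloc();
	struct ovs_header *oh;

	oh = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, sizeof(*oh),
			 NLM_F_REQUEST | NLM_F_DUMP, OVS_DP_CMD_GET,
			 OVS_DATAPATH_VERSION);
	oh->dp_ifindex = 0;			/* a dump ignores the ifindex */
	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, print_dp_name, NULL);
	return nl_recvmsgs_default(sk);		/* one NLM_F_MULTI part per datapath */
}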
cb25142c
PS
1830static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1831 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1832 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1833 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1834};
1835
18fd3a52 1836static struct genl_ops dp_datapath_genl_ops[] = {
df2c07f4 1837 { .cmd = OVS_DP_CMD_NEW,
a6a8674d 1838 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
aaff4b55 1839 .policy = datapath_policy,
df2c07f4 1840 .doit = ovs_dp_cmd_new
aaff4b55 1841 },
df2c07f4 1842 { .cmd = OVS_DP_CMD_DEL,
a6a8674d 1843 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
aaff4b55 1844 .policy = datapath_policy,
df2c07f4 1845 .doit = ovs_dp_cmd_del
aaff4b55 1846 },
df2c07f4 1847 { .cmd = OVS_DP_CMD_GET,
aaff4b55
BP
1848 .flags = 0, /* OK for unprivileged users. */
1849 .policy = datapath_policy,
df2c07f4
JP
1850 .doit = ovs_dp_cmd_get,
1851 .dumpit = ovs_dp_cmd_dump
aaff4b55 1852 },
df2c07f4 1853 { .cmd = OVS_DP_CMD_SET,
a6a8674d 1854 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
aaff4b55 1855 .policy = datapath_policy,
df2c07f4 1856 .doit = ovs_dp_cmd_set,
aaff4b55
BP
1857 },
1858};
1859
ba63fe26 1860static struct genl_family dp_datapath_genl_family __ro_after_init = {
df2c07f4 1861 .hdrsize = sizeof(struct ovs_header),
cb25142c
PS
1862 .name = OVS_DATAPATH_FAMILY,
1863 .version = OVS_DATAPATH_VERSION,
1864 .maxattr = OVS_DP_ATTR_MAX,
b3dcb73c 1865 .netnsok = true,
cb25142c
PS
1866 .parallel_ops = true,
1867 .ops = dp_datapath_genl_ops,
1868 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1869 .mcgrps = &ovs_dp_datapath_multicast_group,
1870 .n_mcgrps = 1,
ba63fe26 1871 .module = THIS_MODULE,
f0fef760
BP
1872};
1873
cd2a59e9 1874/* Called with ovs_mutex or RCU read lock. */
df2c07f4 1875static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
28aea917 1876 u32 portid, u32 seq, u32 flags, u8 cmd)
064af421 1877{
df2c07f4 1878 struct ovs_header *ovs_header;
e926dfe3 1879 struct ovs_vport_stats vport_stats;
c19e6535
BP
1880 int err;
1881
28aea917 1882 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
f0fef760 1883 flags, cmd);
df2c07f4 1884 if (!ovs_header)
f0fef760 1885 return -EMSGSIZE;
c19e6535 1886
99769a40 1887 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
c19e6535 1888
c3cc8c03
DM
1889 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1890 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
e23775f2
PS
1891 nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1892 ovs_vport_name(vport)))
c3cc8c03 1893 goto nla_put_failure;
c19e6535 1894
850b6b3b 1895 ovs_vport_get_stats(vport, &vport_stats);
91b37647
PS
1896 if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1897 sizeof(struct ovs_vport_stats), &vport_stats,
1898 OVS_VPORT_ATTR_PAD))
c3cc8c03 1899 goto nla_put_failure;
c19e6535 1900
beb1c69a
AW
1901 if (ovs_vport_get_upcall_portids(vport, skb))
1902 goto nla_put_failure;
1903
850b6b3b 1904 err = ovs_vport_get_options(vport, skb);
f0fef760
BP
1905 if (err == -EMSGSIZE)
1906 goto error;
c19e6535 1907
23b48dc1
TG
1908 genlmsg_end(skb, ovs_header);
1909 return 0;
c19e6535
BP
1910
1911nla_put_failure:
1912 err = -EMSGSIZE;
f0fef760 1913error:
df2c07f4 1914 genlmsg_cancel(skb, ovs_header);
f0fef760 1915 return err;
064af421
BP
1916}
1917
d81eef1b
JR
1918static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1919{
1920 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1921}
1922
1923/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
28aea917 1924struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
f14d8083 1925 u32 seq, u8 cmd)
064af421 1926{
c19e6535 1927 struct sk_buff *skb;
f0fef760 1928 int retval;
c19e6535 1929
f0fef760 1930 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
c19e6535
BP
1931 if (!skb)
1932 return ERR_PTR(-ENOMEM);
1933
28aea917 1934 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
c25ea534
JG
1935 BUG_ON(retval < 0);
1936
c19e6535 1937 return skb;
f0fef760 1938}
c19e6535 1939
cd2a59e9 1940/* Called with ovs_mutex or RCU read lock. */
2a4999f3 1941static struct vport *lookup_vport(struct net *net,
f1f60b85 1942 const struct ovs_header *ovs_header,
df2c07f4 1943 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
c19e6535
BP
1944{
1945 struct datapath *dp;
1946 struct vport *vport;
1947
df2c07f4 1948 if (a[OVS_VPORT_ATTR_NAME]) {
2a4999f3 1949 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
ed099e92 1950 if (!vport)
c19e6535 1951 return ERR_PTR(-ENODEV);
24ce832d
BP
1952 if (ovs_header->dp_ifindex &&
1953 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1954 return ERR_PTR(-ENODEV);
c19e6535 1955 return vport;
df2c07f4
JP
1956 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1957 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
c19e6535
BP
1958
1959 if (port_no >= DP_MAX_PORTS)
f0fef760 1960 return ERR_PTR(-EFBIG);
c19e6535 1961
2a4999f3 1962 dp = get_dp(net, ovs_header->dp_ifindex);
c19e6535
BP
1963 if (!dp)
1964 return ERR_PTR(-ENODEV);
f2459fe7 1965
cd2a59e9 1966 vport = ovs_vport_ovsl_rcu(dp, port_no);
ed099e92 1967 if (!vport)
17535c57 1968 return ERR_PTR(-ENODEV);
c19e6535
BP
1969 return vport;
1970 } else
1971 return ERR_PTR(-EINVAL);
064af421
BP
1972}
1973
8ce37339
PS
1974/* Called with ovs_mutex */
1975static void update_headroom(struct datapath *dp)
1976{
1977 unsigned dev_headroom, max_headroom = 0;
1978 struct net_device *dev;
1979 struct vport *vport;
1980 int i;
1981
1982 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1983 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1984 dev = vport->dev;
1985 dev_headroom = netdev_get_fwd_headroom(dev);
1986 if (dev_headroom > max_headroom)
1987 max_headroom = dev_headroom;
1988 }
1989 }
1990
1991 dp->max_headroom = max_headroom;
1992 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1993 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
1994 netdev_set_rx_headroom(vport->dev, max_headroom);
1995}
1996
df2c07f4 1997static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
c19e6535 1998{
f0fef760 1999 struct nlattr **a = info->attrs;
df2c07f4 2000 struct ovs_header *ovs_header = info->userhdr;
c19e6535 2001 struct vport_parms parms;
ed099e92 2002 struct sk_buff *reply;
c19e6535 2003 struct vport *vport;
c19e6535 2004 struct datapath *dp;
b0ec0f27 2005 u32 port_no;
c19e6535 2006 int err;
b0ec0f27 2007
ea36840f
BP
2008 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2009 !a[OVS_VPORT_ATTR_UPCALL_PID])
d81eef1b
JR
2010 return -EINVAL;
2011
2012 port_no = a[OVS_VPORT_ATTR_PORT_NO]
2013 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
2014 if (port_no >= DP_MAX_PORTS)
2015 return -EFBIG;
2016
2017 reply = ovs_vport_cmd_alloc_info();
2018 if (!reply)
2019 return -ENOMEM;
f0fef760 2020
cd2a59e9 2021 ovs_lock();
5a38795f 2022restart:
2a4999f3 2023 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
c19e6535
BP
2024 err = -ENODEV;
2025 if (!dp)
d81eef1b 2026 goto exit_unlock_free;
c19e6535 2027
d81eef1b 2028 if (port_no) {
cd2a59e9 2029 vport = ovs_vport_ovsl(dp, port_no);
c19e6535
BP
2030 err = -EBUSY;
2031 if (vport)
d81eef1b 2032 goto exit_unlock_free;
c19e6535
BP
2033 } else {
2034 for (port_no = 1; ; port_no++) {
2035 if (port_no >= DP_MAX_PORTS) {
2036 err = -EFBIG;
d81eef1b 2037 goto exit_unlock_free;
c19e6535 2038 }
cd2a59e9 2039 vport = ovs_vport_ovsl(dp, port_no);
c19e6535
BP
2040 if (!vport)
2041 break;
51d4d598 2042 }
064af421 2043 }
b0ec0f27 2044
df2c07f4
JP
2045 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2046 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2047 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
c19e6535
BP
2048 parms.dp = dp;
2049 parms.port_no = port_no;
beb1c69a 2050 parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
c19e6535
BP
2051
2052 vport = new_vport(&parms);
2053 err = PTR_ERR(vport);
5a38795f
TG
2054 if (IS_ERR(vport)) {
2055 if (err == -EAGAIN)
2056 goto restart;
d81eef1b 2057 goto exit_unlock_free;
5a38795f 2058 }
c19e6535 2059
d81eef1b
JR
2060 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2061 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2062 BUG_ON(err < 0);
8ce37339
PS
2063
2064 if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
2065 update_headroom(dp);
2066 else
2067 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2068
d81eef1b 2069 ovs_unlock();
e297c6b7 2070
cb25142c 2071 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
d81eef1b 2072 return 0;
c19e6535 2073
d81eef1b 2074exit_unlock_free:
cd2a59e9 2075 ovs_unlock();
d81eef1b 2076 kfree_skb(reply);
c19e6535 2077 return err;
44e05eca
BP
2078}
2079
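The vport NEW handler above lets userspace omit OVS_VPORT_ATTR_PORT_NO, in which case the loop picks the first free port number (and retries on -EAGAIN for internal devices). A matching userspace sketch for attaching an existing netdev, following the same pattern as the datapath example (assumed libnl-genl-3, socket setup as before, illustrative only, error handling omitted):

/* Hypothetical userspace sketch (not part of datapath.c); assumes libnl-genl-3. */
static int add_netdev_port(struct nl_sock *sk, int dp_ifindex, const char *dev)
{
	int family = genl_ctrl_resolve(sk, OVS_VPORT_FAMILY);
	struct nl_msg *msg = nlmsg_alloc();
	struct ovs_header *oh;
	int err;

	oh = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, sizeof(*oh),
			 NLM_F_REQUEST | NLM_F_ACK, OVS_VPORT_CMD_NEW,
			 OVS_VPORT_VERSION);
	oh->dp_ifindex = dp_ifindex;		/* which datapath to attach to */

	nla_put_string(msg, OVS_VPORT_ATTR_NAME, dev);
	nla_put_u32(msg, OVS_VPORT_ATTR_TYPE, OVS_VPORT_TYPE_NETDEV);
	nla_put_u32(msg, OVS_VPORT_ATTR_UPCALL_PID, nl_socket_get_local_port(sk));
	/* OVS_VPORT_ATTR_PORT_NO is omitted: the kernel assigns the port number. */

	err = nl_send_auto(sk, msg);
	if (err >= 0)
		err = nl_wait_for_ack(sk);
	nlmsg_free(msg);
	return err;
}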
df2c07f4 2080static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
44e05eca 2081{
f0fef760
BP
2082 struct nlattr **a = info->attrs;
2083 struct sk_buff *reply;
c19e6535 2084 struct vport *vport;
c19e6535 2085 int err;
44e05eca 2086
d81eef1b
JR
2087 reply = ovs_vport_cmd_alloc_info();
2088 if (!reply)
2089 return -ENOMEM;
2090
cd2a59e9 2091 ovs_lock();
2a4999f3 2092 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535
BP
2093 err = PTR_ERR(vport);
2094 if (IS_ERR(vport))
d81eef1b 2095 goto exit_unlock_free;
44e05eca 2096
6455100f 2097 if (a[OVS_VPORT_ATTR_TYPE] &&
17ec1d04 2098 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
4879d4c7 2099 err = -EINVAL;
d81eef1b 2100 goto exit_unlock_free;
c25ea534
JG
2101 }
2102
17ec1d04 2103 if (a[OVS_VPORT_ATTR_OPTIONS]) {
850b6b3b 2104 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
17ec1d04 2105 if (err)
d81eef1b 2106 goto exit_unlock_free;
17ec1d04 2107 }
1fc7083d 2108
beb1c69a 2109 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
7d16c847
PS
2110 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2111
2112 err = ovs_vport_set_upcall_portids(vport, ids);
beb1c69a
AW
2113 if (err)
2114 goto exit_unlock_free;
2115 }
c19e6535 2116
c25ea534
JG
2117 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2118 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2119 BUG_ON(err < 0);
cd2a59e9 2120 ovs_unlock();
d81eef1b 2121
cb25142c 2122 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
c25ea534
JG
2123 return 0;
2124
d81eef1b 2125exit_unlock_free:
cd2a59e9 2126 ovs_unlock();
d81eef1b 2127 kfree_skb(reply);
c19e6535 2128 return err;
064af421
BP
2129}
2130
df2c07f4 2131static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2132{
8ce37339 2133 bool must_update_headroom = false;
f0fef760
BP
2134 struct nlattr **a = info->attrs;
2135 struct sk_buff *reply;
8ce37339 2136 struct datapath *dp;
c19e6535 2137 struct vport *vport;
c19e6535
BP
2138 int err;
2139
d81eef1b
JR
2140 reply = ovs_vport_cmd_alloc_info();
2141 if (!reply)
2142 return -ENOMEM;
2143
cd2a59e9 2144 ovs_lock();
2a4999f3 2145 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535 2146 err = PTR_ERR(vport);
f0fef760 2147 if (IS_ERR(vport))
d81eef1b 2148 goto exit_unlock_free;
c19e6535 2149
df2c07f4 2150 if (vport->port_no == OVSP_LOCAL) {
f0fef760 2151 err = -EINVAL;
d81eef1b 2152 goto exit_unlock_free;
f0fef760
BP
2153 }
2154
d81eef1b
JR
2155 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2156 info->snd_seq, 0, OVS_VPORT_CMD_DEL);
2157 BUG_ON(err < 0);
8ce37339
PS
2158
 2159	/* The vport deletion may trigger a dp headroom update. */
2160 dp = vport->dp;
2161 if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2162 must_update_headroom = true;
2163 netdev_reset_rx_headroom(vport->dev);
850b6b3b 2164 ovs_dp_detach_port(vport);
8ce37339
PS
2165
2166 if (must_update_headroom)
2167 update_headroom(dp);
2168
d81eef1b 2169 ovs_unlock();
f0fef760 2170
cb25142c 2171 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
d81eef1b 2172 return 0;
f0fef760 2173
d81eef1b 2174exit_unlock_free:
cd2a59e9 2175 ovs_unlock();
d81eef1b 2176 kfree_skb(reply);
c19e6535 2177 return err;
7c40efc9
BP
2178}
2179
df2c07f4 2180static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2181{
f0fef760 2182 struct nlattr **a = info->attrs;
df2c07f4 2183 struct ovs_header *ovs_header = info->userhdr;
ed099e92 2184 struct sk_buff *reply;
c19e6535 2185 struct vport *vport;
c19e6535
BP
2186 int err;
2187
d81eef1b
JR
2188 reply = ovs_vport_cmd_alloc_info();
2189 if (!reply)
2190 return -ENOMEM;
2191
ed099e92 2192 rcu_read_lock();
2a4999f3 2193 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
c19e6535
BP
2194 err = PTR_ERR(vport);
2195 if (IS_ERR(vport))
d81eef1b
JR
2196 goto exit_unlock_free;
2197 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2198 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2199 BUG_ON(err < 0);
df2fa9b5
JG
2200 rcu_read_unlock();
2201
2202 return genlmsg_reply(reply, info);
ed099e92 2203
d81eef1b 2204exit_unlock_free:
ed099e92 2205 rcu_read_unlock();
d81eef1b 2206 kfree_skb(reply);
c19e6535
BP
2207 return err;
2208}
2209
df2c07f4 2210static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
c19e6535 2211{
df2c07f4 2212 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
c19e6535 2213 struct datapath *dp;
95b1d73a
PS
2214 int bucket = cb->args[0], skip = cb->args[1];
2215 int i, j = 0;
c19e6535 2216
03fc2881 2217 rcu_read_lock();
01ac0970 2218 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
03fc2881
JR
2219 if (!dp) {
2220 rcu_read_unlock();
f0fef760 2221 return -ENODEV;
03fc2881 2222 }
95b1d73a 2223 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
ed099e92 2224 struct vport *vport;
95b1d73a
PS
2225
2226 j = 0;
f8dfbcb7 2227 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
95b1d73a
PS
2228 if (j >= skip &&
2229 ovs_vport_cmd_fill_info(vport, skb,
28aea917 2230 NETLINK_CB(cb->skb).portid,
95b1d73a
PS
2231 cb->nlh->nlmsg_seq,
2232 NLM_F_MULTI,
2233 OVS_VPORT_CMD_NEW) < 0)
2234 goto out;
2235
2236 j++;
2237 }
2238 skip = 0;
c19e6535 2239 }
95b1d73a 2240out:
ed099e92 2241 rcu_read_unlock();
c19e6535 2242
95b1d73a
PS
2243 cb->args[0] = i;
2244 cb->args[1] = j;
f0fef760 2245
95b1d73a 2246 return skb->len;
7c40efc9
BP
2247}
2248
cb25142c
PS
2249static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2250 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2251 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2252 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2253 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2254 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2255 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2256};
2257
18fd3a52 2258static struct genl_ops dp_vport_genl_ops[] = {
df2c07f4 2259 { .cmd = OVS_VPORT_CMD_NEW,
a6a8674d 2260 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
f0fef760 2261 .policy = vport_policy,
df2c07f4 2262 .doit = ovs_vport_cmd_new
f0fef760 2263 },
df2c07f4 2264 { .cmd = OVS_VPORT_CMD_DEL,
a6a8674d 2265 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
f0fef760 2266 .policy = vport_policy,
df2c07f4 2267 .doit = ovs_vport_cmd_del
f0fef760 2268 },
df2c07f4 2269 { .cmd = OVS_VPORT_CMD_GET,
f0fef760
BP
2270 .flags = 0, /* OK for unprivileged users. */
2271 .policy = vport_policy,
df2c07f4
JP
2272 .doit = ovs_vport_cmd_get,
2273 .dumpit = ovs_vport_cmd_dump
f0fef760 2274 },
df2c07f4 2275 { .cmd = OVS_VPORT_CMD_SET,
a6a8674d 2276 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
f0fef760 2277 .policy = vport_policy,
df2c07f4 2278 .doit = ovs_vport_cmd_set,
f0fef760
BP
2279 },
2280};
2281
ba63fe26 2282struct genl_family dp_vport_genl_family __ro_after_init = {
cb25142c
PS
2283 .hdrsize = sizeof(struct ovs_header),
2284 .name = OVS_VPORT_FAMILY,
2285 .version = OVS_VPORT_VERSION,
2286 .maxattr = OVS_VPORT_ATTR_MAX,
2287 .netnsok = true,
2288 .parallel_ops = true,
2289 .ops = dp_vport_genl_ops,
2290 .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2291 .mcgrps = &ovs_dp_vport_multicast_group,
2292 .n_mcgrps = 1,
ba63fe26 2293 .module = THIS_MODULE,
982b8810 2294};
ed099e92 2295
18fd3a52 2296static struct genl_family *dp_genl_families[] = {
cb25142c
PS
2297 &dp_datapath_genl_family,
2298 &dp_vport_genl_family,
2299 &dp_flow_genl_family,
2300 &dp_packet_genl_family,
982b8810 2301};
ed099e92 2302
982b8810
BP
2303static void dp_unregister_genl(int n_families)
2304{
2305 int i;
ed099e92 2306
b867ca75 2307 for (i = 0; i < n_families; i++)
cb25142c 2308 genl_unregister_family(dp_genl_families[i]);
ed099e92
BP
2309}
2310
ba63fe26 2311static int __init dp_register_genl(void)
064af421 2312{
982b8810
BP
2313 int err;
2314 int i;
064af421 2315
982b8810 2316 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
064af421 2317
cb25142c 2318 err = genl_register_family(dp_genl_families[i]);
982b8810
BP
2319 if (err)
2320 goto error;
982b8810 2321 }
9cc8b4e4 2322
982b8810 2323 return 0;
064af421
BP
2324
2325error:
cb25142c 2326 dp_unregister_genl(i);
982b8810 2327 return err;
064af421
BP
2328}
2329
2a4999f3
PS
2330static int __net_init ovs_init_net(struct net *net)
2331{
2332 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2333
2334 INIT_LIST_HEAD(&ovs_net->dps);
cd2a59e9 2335 INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
038e34ab 2336 ovs_ct_init(net);
7f4a5d68 2337 ovs_netns_frags_init(net);
2338 ovs_netns_frags6_init(net);
2a4999f3
PS
2339 return 0;
2340}
2341
cabd5516
PS
2342static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2343 struct list_head *head)
2a4999f3
PS
2344{
2345 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
cabd5516
PS
2346 struct datapath *dp;
2347
2348 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2349 int i;
2350
2351 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2352 struct vport *vport;
2353
2354 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
cabd5516
PS
2355
2356 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2357 continue;
2358
e23775f2 2359 if (dev_net(vport->dev) == dnet)
cabd5516
PS
2360 list_add(&vport->detach_list, head);
2361 }
2362 }
2363 }
2364}
2365
2366static void __net_exit ovs_exit_net(struct net *dnet)
2367{
2368 struct datapath *dp, *dp_next;
2369 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2370 struct vport *vport, *vport_next;
2371 struct net *net;
2372 LIST_HEAD(head);
2a4999f3 2373
7f4a5d68 2374 ovs_netns_frags6_exit(dnet);
2375 ovs_netns_frags_exit(dnet);
038e34ab 2376 ovs_ct_exit(dnet);
cd2a59e9
PS
2377 ovs_lock();
2378 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2379 __dp_destroy(dp);
cabd5516
PS
2380
2381 rtnl_lock();
2382 for_each_net(net)
2383 list_vports_from_net(net, dnet, &head);
2384 rtnl_unlock();
2385
 2386	/* Detach all vports from the given namespace. */
2387 list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2388 list_del(&vport->detach_list);
2389 ovs_dp_detach_port(vport);
2390 }
2391
cd2a59e9
PS
2392 ovs_unlock();
2393
2394 cancel_work_sync(&ovs_net->dp_notify_work);
2a4999f3
PS
2395}
2396
2397static struct pernet_operations ovs_net_ops = {
2398 .init = ovs_init_net,
2399 .exit = ovs_exit_net,
2400 .id = &ovs_net_id,
2401 .size = sizeof(struct ovs_net),
2402};
2403
22d24ebf
BP
2404static int __init dp_init(void)
2405{
2406 int err;
2407
f3d85db3 2408 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
22d24ebf 2409
26bfaeaa 2410 pr_info("Open vSwitch switching datapath %s\n", VERSION);
064af421 2411
907c26a8 2412 ovs_nsh_init();
595e069a
JS
2413 err = action_fifos_init();
2414 if (err)
7f4a5d68 2415 goto error;
595e069a 2416
5282e284 2417 err = ovs_internal_dev_rtnl_link_register();
2c8c4fb7
AZ
2418 if (err)
2419 goto error_action_fifos_exit;
2420
5282e284
TG
2421 err = ovs_flow_init();
2422 if (err)
2423 goto error_unreg_rtnl_link;
2424
850b6b3b 2425 err = ovs_vport_init();
064af421
BP
2426 if (err)
2427 goto error_flow_exit;
2428
2a4999f3 2429 err = register_pernet_device(&ovs_net_ops);
f2459fe7
JG
2430 if (err)
2431 goto error_vport_exit;
2432
7f4a5d68 2433 err = compat_init();
2a4999f3
PS
2434 if (err)
2435 goto error_netns_exit;
2436
7f4a5d68 2437 err = register_netdevice_notifier(&ovs_dp_device_notifier);
2438 if (err)
2439 goto error_compat_exit;
2440
5a38795f
TG
2441 err = ovs_netdev_init();
2442 if (err)
2443 goto error_unreg_notifier;
2444
982b8810
BP
2445 err = dp_register_genl();
2446 if (err < 0)
5a38795f 2447 goto error_unreg_netdev;
982b8810 2448
064af421
BP
2449 return 0;
2450
5a38795f
TG
2451error_unreg_netdev:
2452 ovs_netdev_exit();
064af421 2453error_unreg_notifier:
850b6b3b 2454 unregister_netdevice_notifier(&ovs_dp_device_notifier);
7f4a5d68 2455error_compat_exit:
2456 compat_exit();
2a4999f3
PS
2457error_netns_exit:
2458 unregister_pernet_device(&ovs_net_ops);
f2459fe7 2459error_vport_exit:
850b6b3b 2460 ovs_vport_exit();
064af421 2461error_flow_exit:
850b6b3b 2462 ovs_flow_exit();
5282e284
TG
2463error_unreg_rtnl_link:
2464 ovs_internal_dev_rtnl_link_unregister();
2c8c4fb7
AZ
2465error_action_fifos_exit:
2466 action_fifos_exit();
064af421 2467error:
907c26a8 2468 ovs_nsh_cleanup();
064af421
BP
2469 return err;
2470}
2471
2472static void dp_cleanup(void)
2473{
982b8810 2474 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
5a38795f 2475 ovs_netdev_exit();
850b6b3b 2476 unregister_netdevice_notifier(&ovs_dp_device_notifier);
7f4a5d68 2477 compat_exit();
2a4999f3
PS
2478 unregister_pernet_device(&ovs_net_ops);
2479 rcu_barrier();
850b6b3b
JG
2480 ovs_vport_exit();
2481 ovs_flow_exit();
5282e284 2482 ovs_internal_dev_rtnl_link_unregister();
2c8c4fb7 2483 action_fifos_exit();
907c26a8 2484 ovs_nsh_cleanup();
064af421
BP
2485}
2486
2487module_init(dp_init);
2488module_exit(dp_cleanup);
2489
2490MODULE_DESCRIPTION("Open vSwitch switching datapath");
2491MODULE_LICENSE("GPL");
3d0666d2 2492MODULE_VERSION(VERSION);
75e2077e
TLSC
2493MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2494MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2495MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2496MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);