datapath: Use nla_memcpy() to memcpy() data from attributes
[mirror_ovs.git] / datapath / datapath.c
064af421 1/*
e0edde6f 2 * Copyright (c) 2007-2012 Nicira, Inc.
a14bc59f 3 *
a9a29d22
JG
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
064af421
BP
17 */
18
dfffaef1
JP
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
064af421
BP
21#include <linux/init.h>
22#include <linux/module.h>
064af421 23#include <linux/if_arp.h>
064af421
BP
24#include <linux/if_vlan.h>
25#include <linux/in.h>
26#include <linux/ip.h>
982b8810 27#include <linux/jhash.h>
064af421
BP
28#include <linux/delay.h>
29#include <linux/time.h>
30#include <linux/etherdevice.h>
ed099e92 31#include <linux/genetlink.h>
064af421
BP
32#include <linux/kernel.h>
33#include <linux/kthread.h>
064af421
BP
34#include <linux/mutex.h>
35#include <linux/percpu.h>
36#include <linux/rcupdate.h>
37#include <linux/tcp.h>
38#include <linux/udp.h>
39#include <linux/version.h>
40#include <linux/ethtool.h>
064af421 41#include <linux/wait.h>
064af421 42#include <asm/div64.h>
656a0e37 43#include <linux/highmem.h>
064af421
BP
44#include <linux/netfilter_bridge.h>
45#include <linux/netfilter_ipv4.h>
46#include <linux/inetdevice.h>
47#include <linux/list.h>
077257b8 48#include <linux/openvswitch.h>
064af421 49#include <linux/rculist.h>
064af421 50#include <linux/dmi.h>
36956a7d 51#include <net/genetlink.h>
2a4999f3
PS
52#include <net/net_namespace.h>
53#include <net/netns/generic.h>
064af421 54
dd8d6b8c 55#include "checksum.h"
064af421 56#include "datapath.h"
064af421 57#include "flow.h"
b9c15df9 58#include "genl_exec.h"
303708cc 59#include "vlan.h"
3544358a 60#include "tunnel.h"
f2459fe7 61#include "vport-internal_dev.h"
064af421 62
4cf41591 63#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
64807dfb
JP
64 LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
65#error Kernels before 2.6.18 or after 3.8 are not supported by this version of Open vSwitch.
4cf41591
JG
66#endif
67
acd051f1
PS
68#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
69static void rehash_flow_table(struct work_struct *work);
70static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
71
2a4999f3
PS
72int ovs_net_id __read_mostly;
73
ed099e92
BP
74/**
75 * DOC: Locking:
064af421 76 *
ed099e92
BP
77 * Writes to device state (add/remove datapath, port, set operations on vports,
78 * etc.) are protected by RTNL.
064af421 79 *
ed099e92 80 * Writes to other state (flow table modifications, set miscellaneous datapath
7257b535
BP
81 * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside
82 * genl_mutex.
ed099e92
BP
83 *
84 * Reads are protected by RCU.
85 *
86 * There are a few special cases (mostly stats) that have their own
87 * synchronization but they nest under all of the above and don't interact with
88 * each other.
064af421 89 */
ed099e92 90
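/* Illustrative sketch only, not part of the original file: a reader that
 * follows the locking rules above dereferences RCU-protected datapath
 * state only inside an RCU read-side critical section.  The helper name
 * 'example_count_flows' is hypothetical.
 */
static int example_count_flows(struct datapath *dp)
{
	struct flow_table *table;
	int n_flows;

	rcu_read_lock();
	table = rcu_dereference(dp->table);
	n_flows = ovs_flow_tbl_count(table);
	rcu_read_unlock();

	return n_flows;
}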
c19e6535 91static struct vport *new_vport(const struct vport_parms *);
2a4999f3 92static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
7257b535 93 const struct dp_upcall_info *);
2a4999f3
PS
94static int queue_userspace_packet(struct net *, int dp_ifindex,
95 struct sk_buff *,
7257b535 96 const struct dp_upcall_info *);
064af421 97
ed099e92 98/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
2a4999f3 99static struct datapath *get_dp(struct net *net, int dp_ifindex)
064af421 100{
254f2dc8
BP
101 struct datapath *dp = NULL;
102 struct net_device *dev;
ed099e92 103
254f2dc8 104 rcu_read_lock();
2a4999f3 105 dev = dev_get_by_index_rcu(net, dp_ifindex);
254f2dc8 106 if (dev) {
850b6b3b 107 struct vport *vport = ovs_internal_dev_get_vport(dev);
254f2dc8
BP
108 if (vport)
109 dp = vport->dp;
110 }
111 rcu_read_unlock();
112
113 return dp;
064af421 114}
064af421 115
f2459fe7 116/* Must be called with rcu_read_lock or RTNL lock. */
850b6b3b 117const char *ovs_dp_name(const struct datapath *dp)
f2459fe7 118{
95b1d73a 119 struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
16b82e84 120 return vport->ops->get_name(vport);
f2459fe7
JG
121}
122
99769a40
JG
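/* Returns the ifindex of this datapath's local (OVSP_LOCAL) port, or 0 if
 * the local port is not present. */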
123static int get_dpifindex(struct datapath *dp)
124{
125 struct vport *local;
126 int ifindex;
127
128 rcu_read_lock();
129
95b1d73a 130 local = ovs_vport_rcu(dp, OVSP_LOCAL);
99769a40 131 if (local)
16b82e84 132 ifindex = local->ops->get_ifindex(local);
99769a40
JG
133 else
134 ifindex = 0;
135
136 rcu_read_unlock();
137
138 return ifindex;
139}
140
46c6a11d
JG
141static void destroy_dp_rcu(struct rcu_head *rcu)
142{
143 struct datapath *dp = container_of(rcu, struct datapath, rcu);
46c6a11d 144
850b6b3b 145 ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
46c6a11d 146 free_percpu(dp->stats_percpu);
2a4999f3 147 release_net(ovs_dp_get_net(dp));
95b1d73a 148 kfree(dp->ports);
5ca1ba48 149 kfree(dp);
46c6a11d
JG
150}
151
95b1d73a
PS
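/* DP_VPORT_HASH_BUCKETS is assumed to be a power of two, so masking the
 * port number with (DP_VPORT_HASH_BUCKETS - 1) selects a hash bucket
 * without a modulo. */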
152static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
153 u16 port_no)
154{
155 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
156}
157
158struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
159{
160 struct vport *vport;
95b1d73a
PS
161 struct hlist_head *head;
162
163 head = vport_hash_bucket(dp, port_no);
f8dfbcb7 164 hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
95b1d73a
PS
165 if (vport->port_no == port_no)
166 return vport;
167 }
168 return NULL;
169}
170
ed099e92 171/* Called with RTNL lock and genl_lock. */
c19e6535 172static struct vport *new_vport(const struct vport_parms *parms)
064af421 173{
f2459fe7 174 struct vport *vport;
f2459fe7 175
850b6b3b 176 vport = ovs_vport_add(parms);
c19e6535
BP
177 if (!IS_ERR(vport)) {
178 struct datapath *dp = parms->dp;
95b1d73a 179 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
064af421 180
95b1d73a 181 hlist_add_head_rcu(&vport->dp_hash_node, head);
c19e6535 182 }
c19e6535 183 return vport;
064af421
BP
184}
185
ed099e92 186/* Called with RTNL lock. */
850b6b3b 187void ovs_dp_detach_port(struct vport *p)
064af421
BP
188{
189 ASSERT_RTNL();
190
064af421 191 /* First drop references to device. */
95b1d73a 192 hlist_del_rcu(&p->dp_hash_node);
f2459fe7 193
7237e4f4 194 /* Then destroy it. */
850b6b3b 195 ovs_vport_del(p);
064af421
BP
196}
197
8819fac7 198/* Must be called with rcu_read_lock. */
850b6b3b 199void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
064af421
BP
200{
201 struct datapath *dp = p->dp;
3544358a 202 struct sw_flow *flow;
064af421 203 struct dp_stats_percpu *stats;
52a23d92 204 struct sw_flow_key key;
e9141eec 205 u64 *stats_counter;
4c1ad233 206 int error;
52a23d92 207 int key_len;
064af421 208
70dbc259 209 stats = this_cpu_ptr(dp->stats_percpu);
a063b0df 210
52a23d92
JG
211 /* Extract flow from 'skb' into 'key'. */
212 error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
213 if (unlikely(error)) {
214 kfree_skb(skb);
215 return;
55574bb0
BP
216 }
217
52a23d92
JG
218 /* Look up flow. */
219 flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
220 if (unlikely(!flow)) {
221 struct dp_upcall_info upcall;
222
223 upcall.cmd = OVS_PACKET_CMD_MISS;
224 upcall.key = &key;
225 upcall.userdata = NULL;
226 upcall.portid = p->upcall_portid;
227 ovs_dp_upcall(dp, skb, &upcall);
228 consume_skb(skb);
229 stats_counter = &stats->n_missed;
230 goto out;
231 }
232
233 OVS_CB(skb)->flow = flow;
234
e9141eec 235 stats_counter = &stats->n_hit;
850b6b3b
JG
236 ovs_flow_used(OVS_CB(skb)->flow, skb);
237 ovs_execute_actions(dp, skb);
55574bb0 238
8819fac7 239out:
55574bb0 240 /* Update datapath statistics. */
821cb9fa 241 u64_stats_update_begin(&stats->sync);
e9141eec 242 (*stats_counter)++;
821cb9fa 243 u64_stats_update_end(&stats->sync);
064af421
BP
244}
245
aa5a8fdc
JG
246static struct genl_family dp_packet_genl_family = {
247 .id = GENL_ID_GENERATE,
df2c07f4
JP
248 .hdrsize = sizeof(struct ovs_header),
249 .name = OVS_PACKET_FAMILY,
69685a88 250 .version = OVS_PACKET_VERSION,
2a4999f3
PS
251 .maxattr = OVS_PACKET_ATTR_MAX,
252 SET_NETNSOK
aa5a8fdc
JG
253};
254
850b6b3b
JG
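/* Queue 'skb' toward userspace as described by 'upcall_info'.  GSO packets
 * are segmented first so that each segment becomes its own upcall; on
 * failure the per-CPU lost-packet counter is bumped. */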
255int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
256 const struct dp_upcall_info *upcall_info)
aa5a8fdc
JG
257{
258 struct dp_stats_percpu *stats;
7257b535 259 int dp_ifindex;
aa5a8fdc
JG
260 int err;
261
28aea917 262 if (upcall_info->portid == 0) {
b063d9f0 263 err = -ENOTCONN;
b063d9f0
JG
264 goto err;
265 }
266
7257b535
BP
267 dp_ifindex = get_dpifindex(dp);
268 if (!dp_ifindex) {
269 err = -ENODEV;
270 goto err;
aa5a8fdc
JG
271 }
272
7257b535 273 forward_ip_summed(skb, true);
36ce148c 274
7257b535 275 if (!skb_is_gso(skb))
2a4999f3 276 err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
7257b535 277 else
2a4999f3 278 err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
d76195db
JG
279 if (err)
280 goto err;
281
282 return 0;
aa5a8fdc 283
aa5a8fdc 284err:
70dbc259 285 stats = this_cpu_ptr(dp->stats_percpu);
aa5a8fdc 286
821cb9fa 287 u64_stats_update_begin(&stats->sync);
aa5a8fdc 288 stats->n_lost++;
821cb9fa 289 u64_stats_update_end(&stats->sync);
aa5a8fdc 290
aa5a8fdc 291 return err;
982b8810
BP
292}
293
2a4999f3
PS
294static int queue_gso_packets(struct net *net, int dp_ifindex,
295 struct sk_buff *skb,
7257b535 296 const struct dp_upcall_info *upcall_info)
cb5087ca 297{
d4cba1f8 298 unsigned short gso_type = skb_shinfo(skb)->gso_type;
7257b535
BP
299 struct dp_upcall_info later_info;
300 struct sw_flow_key later_key;
301 struct sk_buff *segs, *nskb;
302 int err;
cb5087ca 303
0aa52d88 304 segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
79089764
PS
305 if (IS_ERR(segs))
306 return PTR_ERR(segs);
99769a40 307
7257b535
BP
308 /* Queue all of the segments. */
309 skb = segs;
cb5087ca 310 do {
2a4999f3 311 err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
982b8810 312 if (err)
7257b535 313 break;
856081f6 314
d4cba1f8 315 if (skb == segs && gso_type & SKB_GSO_UDP) {
e1cf87ff
JG
316 /* The initial flow key extracted by ovs_flow_extract()
317 * in this case is for a first fragment, so we need to
7257b535
BP
318 * properly mark later fragments.
319 */
320 later_key = *upcall_info->key;
9e44d715 321 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
7257b535
BP
322
323 later_info = *upcall_info;
324 later_info.key = &later_key;
325 upcall_info = &later_info;
326 }
36ce148c 327 } while ((skb = skb->next));
cb5087ca 328
7257b535
BP
329 /* Free all of the segments. */
330 skb = segs;
331 do {
332 nskb = skb->next;
333 if (err)
334 kfree_skb(skb);
335 else
336 consume_skb(skb);
337 } while ((skb = nskb));
338 return err;
339}
340
2a4999f3
PS
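/* Build an OVS_PACKET_* Generic Netlink message carrying the flow key,
 * optional userdata and the packet data itself, and unicast it to the
 * Netlink portid chosen for this upcall. */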
341static int queue_userspace_packet(struct net *net, int dp_ifindex,
342 struct sk_buff *skb,
7257b535
BP
343 const struct dp_upcall_info *upcall_info)
344{
345 struct ovs_header *upcall;
6161d3fd 346 struct sk_buff *nskb = NULL;
7257b535
BP
347 struct sk_buff *user_skb; /* to be queued to userspace */
348 struct nlattr *nla;
349 unsigned int len;
350 int err;
351
6161d3fd
JG
352 if (vlan_tx_tag_present(skb)) {
353 nskb = skb_clone(skb, GFP_ATOMIC);
354 if (!nskb)
355 return -ENOMEM;
356
357 err = vlan_deaccel_tag(nskb);
358 if (err)
359 return err;
7257b535 360
6161d3fd
JG
361 skb = nskb;
362 }
363
364 if (nla_attr_size(skb->len) > USHRT_MAX) {
365 err = -EFBIG;
366 goto out;
367 }
7257b535
BP
368
369 len = sizeof(struct ovs_header);
370 len += nla_total_size(skb->len);
371 len += nla_total_size(FLOW_BUFSIZE);
e995e3df
BP
372 if (upcall_info->userdata)
373 len += NLA_ALIGN(upcall_info->userdata->nla_len);
7257b535
BP
374
375 user_skb = genlmsg_new(len, GFP_ATOMIC);
6161d3fd
JG
376 if (!user_skb) {
377 err = -ENOMEM;
378 goto out;
379 }
7257b535
BP
380
381 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
382 0, upcall_info->cmd);
383 upcall->dp_ifindex = dp_ifindex;
384
385 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
850b6b3b 386 ovs_flow_to_nlattrs(upcall_info->key, user_skb);
7257b535
BP
387 nla_nest_end(user_skb, nla);
388
389 if (upcall_info->userdata)
e995e3df 390 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
462a988b 391 nla_len(upcall_info->userdata),
e995e3df 392 nla_data(upcall_info->userdata));
7257b535
BP
393
394 nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
bed53bd1
PS
395
396 skb_copy_and_csum_dev(skb, nla_data(nla));
7257b535 397
c39b1a5c 398 genlmsg_end(user_skb, upcall);
28aea917 399 err = genlmsg_unicast(net, user_skb, upcall_info->portid);
6161d3fd
JG
400
401out:
402 kfree_skb(nskb);
403 return err;
cb5087ca
BP
404}
405
ed099e92 406/* Called with genl_mutex. */
2a4999f3 407static int flush_flows(struct datapath *dp)
064af421 408{
3544358a
PS
409 struct flow_table *old_table;
410 struct flow_table *new_table;
8d5ebd83 411
20d035b2 412 old_table = genl_dereference(dp->table);
850b6b3b 413 new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
8d5ebd83 414 if (!new_table)
ed099e92 415 return -ENOMEM;
8d5ebd83
JG
416
417 rcu_assign_pointer(dp->table, new_table);
418
850b6b3b 419 ovs_flow_tbl_deferred_destroy(old_table);
ed099e92 420 return 0;
064af421
BP
421}
422
9b405f1a
PS
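/* Reserve room for 'attr_len' more bytes of actions in '*sfa',
 * reallocating the buffer (roughly doubling it, capped at
 * MAX_ACTIONS_BUFSIZE) when it is too small.  Returns a pointer to the
 * reserved space or an ERR_PTR() on failure. */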
423static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
424{
425
426 struct sw_flow_actions *acts;
427 int new_acts_size;
428 int req_size = NLA_ALIGN(attr_len);
429 int next_offset = offsetof(struct sw_flow_actions, actions) +
430 (*sfa)->actions_len;
431
ba400435 432 if (req_size <= (ksize(*sfa) - next_offset))
9b405f1a
PS
433 goto out;
434
ba400435 435 new_acts_size = ksize(*sfa) * 2;
9b405f1a
PS
436
437 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
438 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
439 return ERR_PTR(-EMSGSIZE);
440 new_acts_size = MAX_ACTIONS_BUFSIZE;
441 }
442
443 acts = ovs_flow_actions_alloc(new_acts_size);
444 if (IS_ERR(acts))
445 return (void *)acts;
446
447 memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
448 acts->actions_len = (*sfa)->actions_len;
ba400435 449 kfree(*sfa);
9b405f1a
PS
450 *sfa = acts;
451
452out:
453 (*sfa)->actions_len += req_size;
454 return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
455}
456
457static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
458{
459 struct nlattr *a;
460
461 a = reserve_sfa_size(sfa, nla_attr_size(len));
462 if (IS_ERR(a))
463 return PTR_ERR(a);
464
465 a->nla_type = attrtype;
466 a->nla_len = nla_attr_size(len);
467
468 if (data)
469 memcpy(nla_data(a), data, len);
470 memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
6ff686f2 471
9b405f1a
PS
472 return 0;
473}
474
475static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
476{
477 int used = (*sfa)->actions_len;
478 int err;
479
480 err = add_action(sfa, attrtype, NULL, 0);
481 if (err)
482 return err;
483
484 return used;
485}
486
487static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
488{
489 struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);
490
491 a->nla_len = sfa->actions_len - st_offset;
492}
493
494static int validate_and_copy_actions(const struct nlattr *attr,
495 const struct sw_flow_key *key, int depth,
496 struct sw_flow_actions **sfa);
497
498static int validate_and_copy_sample(const struct nlattr *attr,
499 const struct sw_flow_key *key, int depth,
500 struct sw_flow_actions **sfa)
6ff686f2 501{
4be00e48
BP
502 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
503 const struct nlattr *probability, *actions;
504 const struct nlattr *a;
9b405f1a 505 int rem, start, err, st_acts;
4be00e48
BP
506
507 memset(attrs, 0, sizeof(attrs));
6455100f 508 nla_for_each_nested(a, attr, rem) {
4be00e48
BP
509 int type = nla_type(a);
510 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
511 return -EINVAL;
512 attrs[type] = a;
513 }
514 if (rem)
6ff686f2 515 return -EINVAL;
4be00e48
BP
516
517 probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
518 if (!probability || nla_len(probability) != sizeof(u32))
6ff686f2
PS
519 return -EINVAL;
520
4be00e48
BP
521 actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
522 if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
523 return -EINVAL;
9b405f1a
PS
524
525 /* validation done, copy sample action. */
526 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
527 if (start < 0)
528 return start;
529 err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
530 if (err)
531 return err;
532 st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
533 if (st_acts < 0)
534 return st_acts;
535
536 err = validate_and_copy_actions(actions, key, depth + 1, sfa);
537 if (err)
538 return err;
539
540 add_nested_action_end(*sfa, st_acts);
541 add_nested_action_end(*sfa, start);
542
543 return 0;
4edb9ae9
PS
544}
545
b1323f59
PS
546static int validate_tp_port(const struct sw_flow_key *flow_key)
547{
548 if (flow_key->eth.type == htons(ETH_P_IP)) {
6e9bea4d 549 if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
b1323f59
PS
550 return 0;
551 } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
6e9bea4d 552 if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
b1323f59
PS
553 return 0;
554 }
555
556 return -EINVAL;
557}
558
9b405f1a
PS
559static int validate_and_copy_set_tun(const struct nlattr *attr,
560 struct sw_flow_actions **sfa)
561{
562 struct ovs_key_ipv4_tunnel tun_key;
563 int err, start;
564
565 err = ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
566 if (err)
567 return err;
568
569 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
570 if (start < 0)
571 return start;
572
573 err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
574 add_nested_action_end(*sfa, start);
575
576 return err;
577}
578
fea393b1 579static int validate_set(const struct nlattr *a,
9b405f1a
PS
580 const struct sw_flow_key *flow_key,
581 struct sw_flow_actions **sfa,
582 bool *set_tun)
4edb9ae9 583{
4edb9ae9
PS
584 const struct nlattr *ovs_key = nla_data(a);
585 int key_type = nla_type(ovs_key);
586
587 /* There can be only one key in an action */
588 if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
589 return -EINVAL;
590
591 if (key_type > OVS_KEY_ATTR_MAX ||
9b405f1a
PS
592 (ovs_key_lens[key_type] != nla_len(ovs_key) &&
593 ovs_key_lens[key_type] != -1))
4edb9ae9
PS
594 return -EINVAL;
595
fea393b1 596 switch (key_type) {
4edb9ae9 597 const struct ovs_key_ipv4 *ipv4_key;
bc7a5acd 598 const struct ovs_key_ipv6 *ipv6_key;
9b405f1a 599 int err;
4edb9ae9 600
fea393b1 601 case OVS_KEY_ATTR_PRIORITY:
fea393b1 602 case OVS_KEY_ATTR_ETHERNET:
4edb9ae9
PS
603 break;
604
72e8bf28
AA
605 case OVS_KEY_ATTR_SKB_MARK:
606#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
607 if (nla_get_u32(ovs_key) != 0)
608 return -EINVAL;
609#endif
610 break;
611
9b405f1a
PS
612 case OVS_KEY_ATTR_TUNNEL:
613 *set_tun = true;
614 err = validate_and_copy_set_tun(a, sfa);
615 if (err)
616 return err;
356af50b
KM
617 break;
618
fea393b1 619 case OVS_KEY_ATTR_IPV4:
4edb9ae9
PS
620 if (flow_key->eth.type != htons(ETH_P_IP))
621 return -EINVAL;
622
6e9bea4d 623 if (!flow_key->ip.proto)
4edb9ae9
PS
624 return -EINVAL;
625
626 ipv4_key = nla_data(ovs_key);
627 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
628 return -EINVAL;
629
9e44d715 630 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
7257b535
BP
631 return -EINVAL;
632
4edb9ae9
PS
633 break;
634
bc7a5acd
AA
635 case OVS_KEY_ATTR_IPV6:
636 if (flow_key->eth.type != htons(ETH_P_IPV6))
637 return -EINVAL;
638
639 if (!flow_key->ip.proto)
640 return -EINVAL;
641
642 ipv6_key = nla_data(ovs_key);
643 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
644 return -EINVAL;
645
646 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
647 return -EINVAL;
648
649 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
650 return -EINVAL;
651
652 break;
653
fea393b1 654 case OVS_KEY_ATTR_TCP:
4edb9ae9
PS
655 if (flow_key->ip.proto != IPPROTO_TCP)
656 return -EINVAL;
657
b1323f59 658 return validate_tp_port(flow_key);
4edb9ae9 659
fea393b1 660 case OVS_KEY_ATTR_UDP:
4edb9ae9
PS
661 if (flow_key->ip.proto != IPPROTO_UDP)
662 return -EINVAL;
663
b1323f59 664 return validate_tp_port(flow_key);
4edb9ae9
PS
665
666 default:
667 return -EINVAL;
668 }
fea393b1 669
4edb9ae9 670 return 0;
6ff686f2
PS
671}
672
98403001
BP
673static int validate_userspace(const struct nlattr *attr)
674{
6455100f 675 static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
98403001 676 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
e995e3df 677 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
98403001
BP
678 };
679 struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
680 int error;
681
6455100f
PS
682 error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
683 attr, userspace_policy);
98403001
BP
684 if (error)
685 return error;
686
6455100f
PS
687 if (!a[OVS_USERSPACE_ATTR_PID] ||
688 !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
98403001
BP
689 return -EINVAL;
690
691 return 0;
692}
693
9b405f1a
PS
694static int copy_action(const struct nlattr *from,
695 struct sw_flow_actions **sfa)
696{
697 int totlen = NLA_ALIGN(from->nla_len);
698 struct nlattr *to;
699
700 to = reserve_sfa_size(sfa, from->nla_len);
701 if (IS_ERR(to))
702 return PTR_ERR(to);
703
704 memcpy(to, from, totlen);
705 return 0;
706}
707
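/* Validate the OVS_ACTION_ATTR_* attributes in 'attr' against 'key' and
 * copy the accepted actions into '*sfa', recursing into nested sample
 * actions up to SAMPLE_ACTION_DEPTH levels deep. */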
708static int validate_and_copy_actions(const struct nlattr *attr,
709 const struct sw_flow_key *key,
710 int depth,
711 struct sw_flow_actions **sfa)
064af421 712{
23cad98c 713 const struct nlattr *a;
6ff686f2
PS
714 int rem, err;
715
716 if (depth >= SAMPLE_ACTION_DEPTH)
717 return -EOVERFLOW;
23cad98c 718
37a1300c 719 nla_for_each_nested(a, attr, rem) {
98403001 720 /* Expected argument lengths, (u32)-1 for variable length. */
df2c07f4 721 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
fea393b1 722 [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
98403001 723 [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
fea393b1
BP
724 [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
725 [OVS_ACTION_ATTR_POP_VLAN] = 0,
4edb9ae9 726 [OVS_ACTION_ATTR_SET] = (u32)-1,
98403001 727 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
23cad98c 728 };
fea393b1 729 const struct ovs_action_push_vlan *vlan;
23cad98c 730 int type = nla_type(a);
9b405f1a 731 bool skip_copy;
23cad98c 732
6ff686f2 733 if (type > OVS_ACTION_ATTR_MAX ||
98403001
BP
734 (action_lens[type] != nla_len(a) &&
735 action_lens[type] != (u32)-1))
23cad98c
BP
736 return -EINVAL;
737
9b405f1a 738 skip_copy = false;
23cad98c 739 switch (type) {
df2c07f4 740 case OVS_ACTION_ATTR_UNSPEC:
cdee00fd 741 return -EINVAL;
064af421 742
98403001
BP
743 case OVS_ACTION_ATTR_USERSPACE:
744 err = validate_userspace(a);
745 if (err)
746 return err;
747 break;
748
df2c07f4 749 case OVS_ACTION_ATTR_OUTPUT:
23cad98c
BP
750 if (nla_get_u32(a) >= DP_MAX_PORTS)
751 return -EINVAL;
3b1fc5f3 752 break;
cdee00fd 753
4edb9ae9 754
fea393b1
BP
755 case OVS_ACTION_ATTR_POP_VLAN:
756 break;
757
758 case OVS_ACTION_ATTR_PUSH_VLAN:
759 vlan = nla_data(a);
760 if (vlan->vlan_tpid != htons(ETH_P_8021Q))
761 return -EINVAL;
8ddc056d 762 if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
064af421 763 return -EINVAL;
23cad98c 764 break;
064af421 765
4edb9ae9 766 case OVS_ACTION_ATTR_SET:
9b405f1a 767 err = validate_set(a, key, sfa, &skip_copy);
4edb9ae9
PS
768 if (err)
769 return err;
23cad98c 770 break;
064af421 771
6ff686f2 772 case OVS_ACTION_ATTR_SAMPLE:
9b405f1a 773 err = validate_and_copy_sample(a, key, depth, sfa);
6ff686f2
PS
774 if (err)
775 return err;
9b405f1a 776 skip_copy = true;
6ff686f2
PS
777 break;
778
23cad98c 779 default:
4edb9ae9 780 return -EINVAL;
23cad98c 781 }
9b405f1a
PS
782 if (!skip_copy) {
783 err = copy_action(a, sfa);
784 if (err)
785 return err;
786 }
23cad98c 787 }
3c5f6de3 788
23cad98c
BP
789 if (rem > 0)
790 return -EINVAL;
064af421 791
23cad98c 792 return 0;
064af421 793}
4edb9ae9 794
064af421
BP
795static void clear_stats(struct sw_flow *flow)
796{
6bfafa55 797 flow->used = 0;
064af421 798 flow->tcp_flags = 0;
064af421
BP
799 flow->packet_count = 0;
800 flow->byte_count = 0;
801}
802
df2c07f4 803static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
064af421 804{
df2c07f4 805 struct ovs_header *ovs_header = info->userhdr;
982b8810 806 struct nlattr **a = info->attrs;
e0e57990 807 struct sw_flow_actions *acts;
982b8810 808 struct sk_buff *packet;
e0e57990 809 struct sw_flow *flow;
f7cd0081 810 struct datapath *dp;
d6569377 811 struct ethhdr *eth;
3f19d399 812 int len;
d6569377 813 int err;
76abe283 814 int key_len;
064af421 815
f7cd0081 816 err = -EINVAL;
df2c07f4 817 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
7c3072cc 818 !a[OVS_PACKET_ATTR_ACTIONS])
e5cad958 819 goto err;
064af421 820
df2c07f4 821 len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
3f19d399 822 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
f7cd0081
BP
823 err = -ENOMEM;
824 if (!packet)
e5cad958 825 goto err;
3f19d399
BP
826 skb_reserve(packet, NET_IP_ALIGN);
827
bf3d6fce 828 nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
8d5ebd83 829
f7cd0081
BP
830 skb_reset_mac_header(packet);
831 eth = eth_hdr(packet);
064af421 832
d6569377
BP
833 /* Normally, setting the skb 'protocol' field would be handled by a
834 * call to eth_type_trans(), but it assumes there's a sending
835 * device, which we may not have. */
7cd46155 836 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
f7cd0081 837 packet->protocol = eth->h_proto;
d6569377 838 else
f7cd0081 839 packet->protocol = htons(ETH_P_802_2);
d3c54451 840
e0e57990 841 /* Build an sw_flow for sending this packet. */
850b6b3b 842 flow = ovs_flow_alloc();
e0e57990
BP
843 err = PTR_ERR(flow);
844 if (IS_ERR(flow))
e5cad958 845 goto err_kfree_skb;
064af421 846
850b6b3b 847 err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
e0e57990 848 if (err)
9321954a 849 goto err_flow_free;
e0e57990 850
13e24889 851 err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
80e5eed9 852 if (err)
9321954a 853 goto err_flow_free;
9b405f1a 854 acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
e0e57990
BP
855 err = PTR_ERR(acts);
856 if (IS_ERR(acts))
9321954a 857 goto err_flow_free;
9b405f1a
PS
858
859 err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
e0e57990 860 rcu_assign_pointer(flow->sf_acts, acts);
9b405f1a
PS
861 if (err)
862 goto err_flow_free;
e0e57990
BP
863
864 OVS_CB(packet)->flow = flow;
abff858b 865 packet->priority = flow->key.phy.priority;
72e8bf28 866 skb_set_mark(packet, flow->key.phy.skb_mark);
e0e57990 867
d6569377 868 rcu_read_lock();
2a4999f3 869 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
f7cd0081 870 err = -ENODEV;
e5cad958
BP
871 if (!dp)
872 goto err_unlock;
cc4015df 873
e9141eec 874 local_bh_disable();
850b6b3b 875 err = ovs_execute_actions(dp, packet);
e9141eec 876 local_bh_enable();
d6569377 877 rcu_read_unlock();
e0e57990 878
9321954a 879 ovs_flow_free(flow);
e5cad958 880 return err;
064af421 881
e5cad958
BP
882err_unlock:
883 rcu_read_unlock();
9321954a
JG
884err_flow_free:
885 ovs_flow_free(flow);
e5cad958
BP
886err_kfree_skb:
887 kfree_skb(packet);
888err:
d6569377 889 return err;
064af421
BP
890}
891
df2c07f4 892static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
7c3072cc
TG
893#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
894 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
895#else
896 [OVS_PACKET_ATTR_PACKET] = { .minlen = ETH_HLEN },
897#endif
df2c07f4
JP
898 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
899 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
982b8810
BP
900};
901
902static struct genl_ops dp_packet_genl_ops[] = {
df2c07f4 903 { .cmd = OVS_PACKET_CMD_EXECUTE,
982b8810
BP
904 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
905 .policy = packet_policy,
df2c07f4 906 .doit = ovs_packet_cmd_execute
982b8810
BP
907 }
908};
909
df2c07f4 910static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
064af421 911{
d6569377 912 int i;
20d035b2 913 struct flow_table *table = genl_dereference(dp->table);
f180c2e2 914
850b6b3b 915 stats->n_flows = ovs_flow_tbl_count(table);
064af421 916
7257b535 917 stats->n_hit = stats->n_missed = stats->n_lost = 0;
d6569377
BP
918 for_each_possible_cpu(i) {
919 const struct dp_stats_percpu *percpu_stats;
920 struct dp_stats_percpu local_stats;
821cb9fa 921 unsigned int start;
44e05eca 922
d6569377 923 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
064af421 924
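		/* Snapshot this CPU's counters consistently: retry the copy
		 * if a writer updated the seqcount meanwhile. */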
d6569377 925 do {
821cb9fa 926 start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
d6569377 927 local_stats = *percpu_stats;
821cb9fa 928 } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
064af421 929
d6569377
BP
930 stats->n_hit += local_stats.n_hit;
931 stats->n_missed += local_stats.n_missed;
932 stats->n_lost += local_stats.n_lost;
933 }
934}
064af421 935
df2c07f4
JP
936static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
937 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
938 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
939 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
d6569377 940};
36956a7d 941
37a1300c
BP
942static struct genl_family dp_flow_genl_family = {
943 .id = GENL_ID_GENERATE,
df2c07f4
JP
944 .hdrsize = sizeof(struct ovs_header),
945 .name = OVS_FLOW_FAMILY,
69685a88 946 .version = OVS_FLOW_VERSION,
2a4999f3
PS
947 .maxattr = OVS_FLOW_ATTR_MAX,
948 SET_NETNSOK
37a1300c 949};
ed099e92 950
850b6b3b 951static struct genl_multicast_group ovs_dp_flow_multicast_group = {
df2c07f4 952 .name = OVS_FLOW_MCGROUP
37a1300c
BP
953};
954
9b405f1a
PS
955static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
956static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
957{
958 const struct nlattr *a;
959 struct nlattr *start;
960 int err = 0, rem;
961
962 start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
963 if (!start)
964 return -EMSGSIZE;
965
966 nla_for_each_nested(a, attr, rem) {
967 int type = nla_type(a);
968 struct nlattr *st_sample;
969
970 switch (type) {
971 case OVS_SAMPLE_ATTR_PROBABILITY:
972 if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
973 return -EMSGSIZE;
974 break;
975 case OVS_SAMPLE_ATTR_ACTIONS:
976 st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
977 if (!st_sample)
978 return -EMSGSIZE;
979 err = actions_to_attr(nla_data(a), nla_len(a), skb);
980 if (err)
981 return err;
982 nla_nest_end(skb, st_sample);
983 break;
984 }
985 }
986
987 nla_nest_end(skb, start);
988 return err;
989}
990
991static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
992{
993 const struct nlattr *ovs_key = nla_data(a);
994 int key_type = nla_type(ovs_key);
995 struct nlattr *start;
996 int err;
997
998 switch (key_type) {
999 case OVS_KEY_ATTR_IPV4_TUNNEL:
1000 start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
1001 if (!start)
1002 return -EMSGSIZE;
1003
1004 err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
1005 if (err)
1006 return err;
1007 nla_nest_end(skb, start);
1008 break;
1009 default:
1010 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
1011 return -EMSGSIZE;
1012 break;
1013 }
1014
1015 return 0;
1016}
1017
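/* Translate the kernel's internal copy of an action list back into
 * OVS_ACTION_ATTR_* netlink attributes for userspace, undoing the
 * transformations made by validate_and_copy_actions(). */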
1018static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
1019{
1020 const struct nlattr *a;
1021 int rem, err;
1022
1023 nla_for_each_attr(a, attr, len, rem) {
1024 int type = nla_type(a);
1025
1026 switch (type) {
1027 case OVS_ACTION_ATTR_SET:
1028 err = set_action_to_attr(a, skb);
1029 if (err)
1030 return err;
1031 break;
1032
1033 case OVS_ACTION_ATTR_SAMPLE:
1034 err = sample_action_to_attr(a, skb);
1035 if (err)
1036 return err;
1037 break;
1038 default:
1039 if (nla_put(skb, type, nla_len(a), nla_data(a)))
1040 return -EMSGSIZE;
1041 break;
1042 }
1043 }
1044
1045 return 0;
1046}
1047
37a1300c 1048/* Called with genl_lock. */
df2c07f4 1049static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
28aea917 1050 struct sk_buff *skb, u32 portid,
6455100f 1051 u32 seq, u32 flags, u8 cmd)
d6569377 1052{
37a1300c 1053 const int skb_orig_len = skb->len;
d6569377 1054 const struct sw_flow_actions *sf_acts;
9b405f1a 1055 struct nlattr *start;
df2c07f4
JP
1056 struct ovs_flow_stats stats;
1057 struct ovs_header *ovs_header;
d6569377
BP
1058 struct nlattr *nla;
1059 unsigned long used;
1060 u8 tcp_flags;
1061 int err;
064af421 1062
d6569377 1063 sf_acts = rcu_dereference_protected(flow->sf_acts,
ed099e92 1064 lockdep_genl_is_held());
064af421 1065
28aea917 1066 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
df2c07f4 1067 if (!ovs_header)
37a1300c 1068 return -EMSGSIZE;
d6569377 1069
99769a40 1070 ovs_header->dp_ifindex = get_dpifindex(dp);
d6569377 1071
df2c07f4 1072 nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
d6569377
BP
1073 if (!nla)
1074 goto nla_put_failure;
850b6b3b 1075 err = ovs_flow_to_nlattrs(&flow->key, skb);
d6569377 1076 if (err)
37a1300c 1077 goto error;
d6569377
BP
1078 nla_nest_end(skb, nla);
1079
1080 spin_lock_bh(&flow->lock);
1081 used = flow->used;
1082 stats.n_packets = flow->packet_count;
1083 stats.n_bytes = flow->byte_count;
1084 tcp_flags = flow->tcp_flags;
1085 spin_unlock_bh(&flow->lock);
1086
c3cc8c03
DM
1087 if (used &&
1088 nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
1089 goto nla_put_failure;
d6569377 1090
c3cc8c03
DM
1091 if (stats.n_packets &&
1092 nla_put(skb, OVS_FLOW_ATTR_STATS,
1093 sizeof(struct ovs_flow_stats), &stats))
1094 goto nla_put_failure;
d6569377 1095
c3cc8c03
DM
1096 if (tcp_flags &&
1097 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
1098 goto nla_put_failure;
d6569377 1099
df2c07f4 1100 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
30053024
BP
1101 * this is the first flow to be dumped into 'skb'. This is unusual for
1102 * Netlink but individual action lists can be longer than
1103 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
1104 * The userspace caller can always fetch the actions separately if it
1105 * really wants them. (Most userspace callers in fact don't care.)
1106 *
1107 * This can only fail for dump operations because the skb is always
1108 * properly sized for single flows.
1109 */
9b405f1a 1110 start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
f6f481ee
PS
1111 if (start) {
1112 err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
0a25b039
BP
1113 if (!err)
1114 nla_nest_end(skb, start);
1115 else {
1116 if (skb_orig_len)
1117 goto error;
1118
1119 nla_nest_cancel(skb, start);
1120 }
7aac03bd
JG
1121 } else if (skb_orig_len)
1122 goto nla_put_failure;
37a1300c 1123
df2c07f4 1124 return genlmsg_end(skb, ovs_header);
d6569377
BP
1125
1126nla_put_failure:
1127 err = -EMSGSIZE;
37a1300c 1128error:
df2c07f4 1129 genlmsg_cancel(skb, ovs_header);
d6569377 1130 return err;
44e05eca
BP
1131}
1132
df2c07f4 1133static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
44e05eca 1134{
37a1300c
BP
1135 const struct sw_flow_actions *sf_acts;
1136 int len;
d6569377 1137
37a1300c
BP
1138 sf_acts = rcu_dereference_protected(flow->sf_acts,
1139 lockdep_genl_is_held());
d6569377 1140
6455100f
PS
1141 /* OVS_FLOW_ATTR_KEY */
1142 len = nla_total_size(FLOW_BUFSIZE);
1143 /* OVS_FLOW_ATTR_ACTIONS */
1144 len += nla_total_size(sf_acts->actions_len);
1145 /* OVS_FLOW_ATTR_STATS */
1146 len += nla_total_size(sizeof(struct ovs_flow_stats));
1147 /* OVS_FLOW_ATTR_TCP_FLAGS */
1148 len += nla_total_size(1);
1149 /* OVS_FLOW_ATTR_USED */
1150 len += nla_total_size(8);
1151
1152 len += NLMSG_ALIGN(sizeof(struct ovs_header));
1153
1154 return genlmsg_new(len, GFP_KERNEL);
37a1300c 1155}
8d5ebd83 1156
6455100f
PS
1157static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
1158 struct datapath *dp,
28aea917 1159 u32 portid, u32 seq, u8 cmd)
37a1300c
BP
1160{
1161 struct sk_buff *skb;
1162 int retval;
d6569377 1163
df2c07f4 1164 skb = ovs_flow_cmd_alloc_info(flow);
37a1300c
BP
1165 if (!skb)
1166 return ERR_PTR(-ENOMEM);
d6569377 1167
28aea917 1168 retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
37a1300c 1169 BUG_ON(retval < 0);
d6569377 1170 return skb;
064af421
BP
1171}
1172
df2c07f4 1173static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
064af421 1174{
37a1300c 1175 struct nlattr **a = info->attrs;
df2c07f4 1176 struct ovs_header *ovs_header = info->userhdr;
37a1300c 1177 struct sw_flow_key key;
d6569377 1178 struct sw_flow *flow;
37a1300c 1179 struct sk_buff *reply;
9c52546b 1180 struct datapath *dp;
3544358a 1181 struct flow_table *table;
9b405f1a 1182 struct sw_flow_actions *acts = NULL;
bc4a05c6 1183 int error;
76abe283 1184 int key_len;
064af421 1185
37a1300c
BP
1186 /* Extract key. */
1187 error = -EINVAL;
df2c07f4 1188 if (!a[OVS_FLOW_ATTR_KEY])
37a1300c 1189 goto error;
850b6b3b 1190 error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
37a1300c
BP
1191 if (error)
1192 goto error;
064af421 1193
37a1300c 1194 /* Validate actions. */
df2c07f4 1195 if (a[OVS_FLOW_ATTR_ACTIONS]) {
9b405f1a
PS
1196 acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
1197 error = PTR_ERR(acts);
1198 if (IS_ERR(acts))
37a1300c 1199 goto error;
9b405f1a
PS
1200
1201 error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
1202 if (error)
1203 goto err_kfree;
df2c07f4 1204 } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
37a1300c
BP
1205 error = -EINVAL;
1206 goto error;
1207 }
1208
2a4999f3 1209 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
d6569377 1210 error = -ENODEV;
9c52546b 1211 if (!dp)
9b405f1a 1212 goto err_kfree;
704a1e09 1213
20d035b2 1214 table = genl_dereference(dp->table);
850b6b3b 1215 flow = ovs_flow_tbl_lookup(table, &key, key_len);
3544358a 1216 if (!flow) {
d6569377
BP
1217 /* Bail out if we're not allowed to create a new flow. */
1218 error = -ENOENT;
df2c07f4 1219 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
9b405f1a 1220 goto err_kfree;
d6569377
BP
1221
1222 /* Expand table, if necessary, to make room. */
850b6b3b 1223 if (ovs_flow_tbl_need_to_expand(table)) {
3544358a
PS
1224 struct flow_table *new_table;
1225
850b6b3b 1226 new_table = ovs_flow_tbl_expand(table);
3544358a
PS
1227 if (!IS_ERR(new_table)) {
1228 rcu_assign_pointer(dp->table, new_table);
850b6b3b 1229 ovs_flow_tbl_deferred_destroy(table);
20d035b2 1230 table = genl_dereference(dp->table);
3544358a 1231 }
d6569377
BP
1232 }
1233
1234 /* Allocate flow. */
850b6b3b 1235 flow = ovs_flow_alloc();
d6569377
BP
1236 if (IS_ERR(flow)) {
1237 error = PTR_ERR(flow);
9b405f1a 1238 goto err_kfree;
d6569377 1239 }
d6569377
BP
1240 clear_stats(flow);
1241
d6569377
BP
1242 rcu_assign_pointer(flow->sf_acts, acts);
1243
d6569377 1244 /* Put flow in bucket. */
13e24889 1245 ovs_flow_tbl_insert(table, flow, &key, key_len);
37a1300c 1246
28aea917 1247 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
6455100f
PS
1248 info->snd_seq,
1249 OVS_FLOW_CMD_NEW);
d6569377
BP
1250 } else {
1251 /* We found a matching flow. */
1252 struct sw_flow_actions *old_acts;
1253
1254 /* Bail out if we're not allowed to modify an existing flow.
1255 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1256 * because Generic Netlink treats the latter as a dump
1257 * request. We also accept NLM_F_EXCL in case that bug ever
1258 * gets fixed.
1259 */
1260 error = -EEXIST;
df2c07f4 1261 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
37a1300c 1262 info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
9b405f1a 1263 goto err_kfree;
d6569377
BP
1264
1265 /* Update actions. */
d6569377 1266 old_acts = rcu_dereference_protected(flow->sf_acts,
ed099e92 1267 lockdep_genl_is_held());
9b405f1a
PS
1268 rcu_assign_pointer(flow->sf_acts, acts);
1269 ovs_flow_deferred_free_acts(old_acts);
d6569377 1270
28aea917 1271 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
6455100f 1272 info->snd_seq, OVS_FLOW_CMD_NEW);
d6569377
BP
1273
1274 /* Clear stats. */
df2c07f4 1275 if (a[OVS_FLOW_ATTR_CLEAR]) {
d6569377
BP
1276 spin_lock_bh(&flow->lock);
1277 clear_stats(flow);
1278 spin_unlock_bh(&flow->lock);
1279 }
9c52546b 1280 }
37a1300c
BP
1281
1282 if (!IS_ERR(reply))
28aea917 1283 genl_notify(reply, genl_info_net(info), info->snd_portid,
850b6b3b
JG
1284 ovs_dp_flow_multicast_group.id, info->nlhdr,
1285 GFP_KERNEL);
37a1300c 1286 else
2a4999f3
PS
1287 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1288 ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
d6569377 1289 return 0;
704a1e09 1290
9b405f1a 1291err_kfree:
ba400435 1292 kfree(acts);
37a1300c 1293error:
9c52546b 1294 return error;
704a1e09
BP
1295}
1296
df2c07f4 1297static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
704a1e09 1298{
37a1300c 1299 struct nlattr **a = info->attrs;
df2c07f4 1300 struct ovs_header *ovs_header = info->userhdr;
37a1300c 1301 struct sw_flow_key key;
37a1300c 1302 struct sk_buff *reply;
704a1e09 1303 struct sw_flow *flow;
9c52546b 1304 struct datapath *dp;
3544358a 1305 struct flow_table *table;
9c52546b 1306 int err;
76abe283 1307 int key_len;
704a1e09 1308
df2c07f4 1309 if (!a[OVS_FLOW_ATTR_KEY])
37a1300c 1310 return -EINVAL;
850b6b3b 1311 err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
37a1300c
BP
1312 if (err)
1313 return err;
704a1e09 1314
2a4999f3 1315 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
9c52546b 1316 if (!dp)
ed099e92 1317 return -ENODEV;
704a1e09 1318
20d035b2 1319 table = genl_dereference(dp->table);
850b6b3b 1320 flow = ovs_flow_tbl_lookup(table, &key, key_len);
3544358a 1321 if (!flow)
ed099e92 1322 return -ENOENT;
d6569377 1323
28aea917 1324 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
6455100f 1325 info->snd_seq, OVS_FLOW_CMD_NEW);
37a1300c
BP
1326 if (IS_ERR(reply))
1327 return PTR_ERR(reply);
36956a7d 1328
37a1300c 1329 return genlmsg_reply(reply, info);
d6569377 1330}
9c52546b 1331
df2c07f4 1332static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
d6569377 1333{
37a1300c 1334 struct nlattr **a = info->attrs;
df2c07f4 1335 struct ovs_header *ovs_header = info->userhdr;
37a1300c 1336 struct sw_flow_key key;
37a1300c 1337 struct sk_buff *reply;
d6569377 1338 struct sw_flow *flow;
d6569377 1339 struct datapath *dp;
3544358a 1340 struct flow_table *table;
d6569377 1341 int err;
76abe283 1342 int key_len;
36956a7d 1343
2a4999f3
PS
1344 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1345 if (!dp)
1346 return -ENODEV;
1347
df2c07f4 1348 if (!a[OVS_FLOW_ATTR_KEY])
2a4999f3
PS
1349 return flush_flows(dp);
1350
850b6b3b 1351 err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
37a1300c
BP
1352 if (err)
1353 return err;
d6569377 1354
20d035b2 1355 table = genl_dereference(dp->table);
850b6b3b 1356 flow = ovs_flow_tbl_lookup(table, &key, key_len);
3544358a 1357 if (!flow)
37a1300c 1358 return -ENOENT;
d6569377 1359
df2c07f4 1360 reply = ovs_flow_cmd_alloc_info(flow);
37a1300c
BP
1361 if (!reply)
1362 return -ENOMEM;
1363
850b6b3b 1364 ovs_flow_tbl_remove(table, flow);
37a1300c 1365
28aea917 1366 err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
df2c07f4 1367 info->snd_seq, 0, OVS_FLOW_CMD_DEL);
37a1300c
BP
1368 BUG_ON(err < 0);
1369
850b6b3b 1370 ovs_flow_deferred_free(flow);
37a1300c 1371
28aea917 1372 genl_notify(reply, genl_info_net(info), info->snd_portid,
850b6b3b 1373 ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
37a1300c
BP
1374 return 0;
1375}
1376
df2c07f4 1377static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
37a1300c 1378{
df2c07f4 1379 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
37a1300c 1380 struct datapath *dp;
20d035b2 1381 struct flow_table *table;
37a1300c 1382
2a4999f3 1383 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
37a1300c
BP
1384 if (!dp)
1385 return -ENODEV;
1386
20d035b2
JG
1387 table = genl_dereference(dp->table);
1388
37a1300c 1389 for (;;) {
37a1300c
BP
1390 struct sw_flow *flow;
1391 u32 bucket, obj;
1392
1393 bucket = cb->args[0];
1394 obj = cb->args[1];
850b6b3b 1395 flow = ovs_flow_tbl_next(table, &bucket, &obj);
3544358a 1396 if (!flow)
37a1300c
BP
1397 break;
1398
6455100f 1399 if (ovs_flow_cmd_fill_info(flow, dp, skb,
28aea917 1400 NETLINK_CB(cb->skb).portid,
37a1300c 1401 cb->nlh->nlmsg_seq, NLM_F_MULTI,
df2c07f4 1402 OVS_FLOW_CMD_NEW) < 0)
37a1300c
BP
1403 break;
1404
1405 cb->args[0] = bucket;
1406 cb->args[1] = obj;
1407 }
1408 return skb->len;
704a1e09
BP
1409}
1410
37a1300c 1411static struct genl_ops dp_flow_genl_ops[] = {
df2c07f4 1412 { .cmd = OVS_FLOW_CMD_NEW,
37a1300c
BP
1413 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1414 .policy = flow_policy,
df2c07f4 1415 .doit = ovs_flow_cmd_new_or_set
37a1300c 1416 },
df2c07f4 1417 { .cmd = OVS_FLOW_CMD_DEL,
37a1300c
BP
1418 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1419 .policy = flow_policy,
df2c07f4 1420 .doit = ovs_flow_cmd_del
37a1300c 1421 },
df2c07f4 1422 { .cmd = OVS_FLOW_CMD_GET,
37a1300c
BP
1423 .flags = 0, /* OK for unprivileged users. */
1424 .policy = flow_policy,
df2c07f4
JP
1425 .doit = ovs_flow_cmd_get,
1426 .dumpit = ovs_flow_cmd_dump
37a1300c 1427 },
df2c07f4 1428 { .cmd = OVS_FLOW_CMD_SET,
37a1300c
BP
1429 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1430 .policy = flow_policy,
df2c07f4 1431 .doit = ovs_flow_cmd_new_or_set,
37a1300c
BP
1432 },
1433};
1434
df2c07f4 1435static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
aaff4b55 1436#ifdef HAVE_NLA_NUL_STRING
df2c07f4 1437 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
aaff4b55 1438#endif
b063d9f0 1439 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
d6569377
BP
1440};
1441
aaff4b55
BP
1442static struct genl_family dp_datapath_genl_family = {
1443 .id = GENL_ID_GENERATE,
df2c07f4
JP
1444 .hdrsize = sizeof(struct ovs_header),
1445 .name = OVS_DATAPATH_FAMILY,
69685a88 1446 .version = OVS_DATAPATH_VERSION,
2a4999f3
PS
1447 .maxattr = OVS_DP_ATTR_MAX,
1448 SET_NETNSOK
aaff4b55
BP
1449};
1450
850b6b3b 1451static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
df2c07f4 1452 .name = OVS_DATAPATH_MCGROUP
aaff4b55
BP
1453};
1454
df2c07f4 1455static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
28aea917 1456 u32 portid, u32 seq, u32 flags, u8 cmd)
064af421 1457{
df2c07f4 1458 struct ovs_header *ovs_header;
e926dfe3 1459 struct ovs_dp_stats dp_stats;
064af421
BP
1460 int err;
1461
28aea917 1462 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
aaff4b55 1463 flags, cmd);
df2c07f4 1464 if (!ovs_header)
aaff4b55 1465 goto error;
064af421 1466
b063d9f0 1467 ovs_header->dp_ifindex = get_dpifindex(dp);
064af421 1468
d6569377 1469 rcu_read_lock();
850b6b3b 1470 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
d6569377 1471 rcu_read_unlock();
064af421 1472 if (err)
d6569377 1473 goto nla_put_failure;
064af421 1474
e926dfe3 1475 get_dp_stats(dp, &dp_stats);
c3cc8c03
DM
1476 if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1477 goto nla_put_failure;
d6569377 1478
df2c07f4 1479 return genlmsg_end(skb, ovs_header);
d6569377
BP
1480
1481nla_put_failure:
df2c07f4 1482 genlmsg_cancel(skb, ovs_header);
aaff4b55
BP
1483error:
1484 return -EMSGSIZE;
d6569377
BP
1485}
1486
28aea917 1487static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
aaff4b55 1488 u32 seq, u8 cmd)
d6569377 1489{
d6569377 1490 struct sk_buff *skb;
aaff4b55 1491 int retval;
d6569377 1492
aaff4b55 1493 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
064af421 1494 if (!skb)
d6569377 1495 return ERR_PTR(-ENOMEM);
659586ef 1496
28aea917 1497 retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
aaff4b55
BP
1498 if (retval < 0) {
1499 kfree_skb(skb);
1500 return ERR_PTR(retval);
1501 }
1502 return skb;
1503}
9dca7bd5 1504
df2c07f4 1505static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
aaff4b55 1506{
df2c07f4 1507 return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
d6569377
BP
1508}
1509
ed099e92 1510/* Called with genl_mutex and optionally with RTNL lock also. */
2a4999f3
PS
1511static struct datapath *lookup_datapath(struct net *net,
1512 struct ovs_header *ovs_header,
6455100f 1513 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
d6569377 1514{
254f2dc8
BP
1515 struct datapath *dp;
1516
df2c07f4 1517 if (!a[OVS_DP_ATTR_NAME])
2a4999f3 1518 dp = get_dp(net, ovs_header->dp_ifindex);
254f2dc8 1519 else {
d6569377 1520 struct vport *vport;
d6569377 1521
057dd6d2 1522 rcu_read_lock();
2a4999f3 1523 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
df2c07f4 1524 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
057dd6d2 1525 rcu_read_unlock();
d6569377 1526 }
254f2dc8 1527 return dp ? dp : ERR_PTR(-ENODEV);
d6569377
BP
1528}
1529
df2c07f4 1530static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
d6569377 1531{
aaff4b55 1532 struct nlattr **a = info->attrs;
d6569377 1533 struct vport_parms parms;
aaff4b55 1534 struct sk_buff *reply;
d6569377
BP
1535 struct datapath *dp;
1536 struct vport *vport;
2a4999f3 1537 struct ovs_net *ovs_net;
95b1d73a 1538 int err, i;
d6569377 1539
d6569377 1540 err = -EINVAL;
ea36840f 1541 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
aaff4b55
BP
1542 goto err;
1543
df2c07f4 1544 err = ovs_dp_cmd_validate(a);
aaff4b55
BP
1545 if (err)
1546 goto err;
d6569377
BP
1547
1548 rtnl_lock();
d6569377 1549
d6569377
BP
1550 err = -ENOMEM;
1551 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1552 if (dp == NULL)
2a4999f3
PS
1553 goto err_unlock_rtnl;
1554
0ceaa66c
JG
1555 ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1556
d6569377
BP
1557 /* Allocate table. */
1558 err = -ENOMEM;
850b6b3b 1559 rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
d6569377
BP
1560 if (!dp->table)
1561 goto err_free_dp;
1562
99769a40
JG
1563 dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1564 if (!dp->stats_percpu) {
1565 err = -ENOMEM;
1566 goto err_destroy_table;
1567 }
1568
95b1d73a
PS
1569 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1570 GFP_KERNEL);
1571 if (!dp->ports) {
1572 err = -ENOMEM;
1573 goto err_destroy_percpu;
1574 }
1575
1576 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1577 INIT_HLIST_HEAD(&dp->ports[i]);
1578
d6569377 1579 /* Set up our datapath device. */
df2c07f4
JP
1580 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1581 parms.type = OVS_VPORT_TYPE_INTERNAL;
d6569377
BP
1582 parms.options = NULL;
1583 parms.dp = dp;
df2c07f4 1584 parms.port_no = OVSP_LOCAL;
28aea917 1585 parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
b063d9f0 1586
d6569377
BP
1587 vport = new_vport(&parms);
1588 if (IS_ERR(vport)) {
1589 err = PTR_ERR(vport);
1590 if (err == -EBUSY)
1591 err = -EEXIST;
1592
95b1d73a 1593 goto err_destroy_ports_array;
d6569377 1594 }
d6569377 1595
28aea917 1596 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
6455100f 1597 info->snd_seq, OVS_DP_CMD_NEW);
aaff4b55
BP
1598 err = PTR_ERR(reply);
1599 if (IS_ERR(reply))
1600 goto err_destroy_local_port;
1601
2a4999f3
PS
1602 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1603 list_add_tail(&dp->list_node, &ovs_net->dps);
d6569377 1604
d6569377
BP
1605 rtnl_unlock();
1606
28aea917 1607 genl_notify(reply, genl_info_net(info), info->snd_portid,
850b6b3b
JG
1608 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1609 GFP_KERNEL);
d6569377
BP
1610 return 0;
1611
1612err_destroy_local_port:
95b1d73a
PS
1613 ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1614err_destroy_ports_array:
1615 kfree(dp->ports);
99769a40
JG
1616err_destroy_percpu:
1617 free_percpu(dp->stats_percpu);
d6569377 1618err_destroy_table:
850b6b3b 1619 ovs_flow_tbl_destroy(genl_dereference(dp->table));
d6569377 1620err_free_dp:
0ceaa66c 1621 release_net(ovs_dp_get_net(dp));
d6569377 1622 kfree(dp);
ed099e92 1623err_unlock_rtnl:
d6569377 1624 rtnl_unlock();
d6569377 1625err:
064af421
BP
1626 return err;
1627}
1628
2a4999f3
PS
1629/* Called with genl_mutex. */
1630static void __dp_destroy(struct datapath *dp)
44e05eca 1631{
95b1d73a 1632 int i;
44e05eca 1633
d6569377 1634 rtnl_lock();
95b1d73a
PS
1635
1636 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1637 struct vport *vport;
f8dfbcb7 1638 struct hlist_node *n;
95b1d73a 1639
f8dfbcb7 1640 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
95b1d73a
PS
1641 if (vport->port_no != OVSP_LOCAL)
1642 ovs_dp_detach_port(vport);
1643 }
ed099e92 1644
254f2dc8 1645 list_del(&dp->list_node);
95b1d73a 1646 ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
ed099e92 1647
99620d2c
JG
1648 /* rtnl_unlock() will wait until all the references to devices that
1649 * are pending unregistration have been dropped. We do it here to
1650 * ensure that any internal devices (which contain DP pointers) are
1651 * fully destroyed before freeing the datapath.
1652 */
1653 rtnl_unlock();
1654
ed099e92 1655 call_rcu(&dp->rcu, destroy_dp_rcu);
2a4999f3
PS
1656}
1657
1658static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1659{
1660 struct sk_buff *reply;
1661 struct datapath *dp;
1662 int err;
1663
1664 err = ovs_dp_cmd_validate(info->attrs);
1665 if (err)
1666 return err;
1667
1668 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1669 err = PTR_ERR(dp);
1670 if (IS_ERR(dp))
1671 return err;
1672
28aea917 1673 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
2a4999f3
PS
1674 info->snd_seq, OVS_DP_CMD_DEL);
1675 err = PTR_ERR(reply);
1676 if (IS_ERR(reply))
1677 return err;
1678
1679 __dp_destroy(dp);
ed099e92 1680
28aea917 1681 genl_notify(reply, genl_info_net(info), info->snd_portid,
850b6b3b
JG
1682 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1683 GFP_KERNEL);
99620d2c
JG
1684
1685 return 0;
44e05eca
BP
1686}
1687
df2c07f4 1688static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
064af421 1689{
aaff4b55 1690 struct sk_buff *reply;
d6569377 1691 struct datapath *dp;
d6569377 1692 int err;
064af421 1693
df2c07f4 1694 err = ovs_dp_cmd_validate(info->attrs);
aaff4b55
BP
1695 if (err)
1696 return err;
38c6ecbc 1697
2a4999f3 1698 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
d6569377 1699 if (IS_ERR(dp))
aaff4b55 1700 return PTR_ERR(dp);
38c6ecbc 1701
28aea917 1702 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
6455100f 1703 info->snd_seq, OVS_DP_CMD_NEW);
aaff4b55
BP
1704 if (IS_ERR(reply)) {
1705 err = PTR_ERR(reply);
2a4999f3 1706 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
850b6b3b 1707 ovs_dp_datapath_multicast_group.id, err);
aaff4b55
BP
1708 return 0;
1709 }
1710
28aea917 1711 genl_notify(reply, genl_info_net(info), info->snd_portid,
850b6b3b
JG
1712 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1713 GFP_KERNEL);
1714
aaff4b55 1715 return 0;
064af421
BP
1716}
1717
df2c07f4 1718static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1dcf111b 1719{
aaff4b55 1720 struct sk_buff *reply;
d6569377 1721 struct datapath *dp;
d6569377 1722 int err;
1dcf111b 1723
df2c07f4 1724 err = ovs_dp_cmd_validate(info->attrs);
aaff4b55
BP
1725 if (err)
1726 return err;
1dcf111b 1727
2a4999f3 1728 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
d6569377 1729 if (IS_ERR(dp))
aaff4b55 1730 return PTR_ERR(dp);
1dcf111b 1731
28aea917 1732 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
6455100f 1733 info->snd_seq, OVS_DP_CMD_NEW);
aaff4b55
BP
1734 if (IS_ERR(reply))
1735 return PTR_ERR(reply);
1736
1737 return genlmsg_reply(reply, info);
1dcf111b
JP
1738}
1739
df2c07f4 1740static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
a7786963 1741{
2a4999f3 1742 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
254f2dc8
BP
1743 struct datapath *dp;
1744 int skip = cb->args[0];
1745 int i = 0;
a7786963 1746
2a4999f3 1747 list_for_each_entry(dp, &ovs_net->dps, list_node) {
a2bab2f0 1748 if (i >= skip &&
28aea917 1749 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
aaff4b55 1750 cb->nlh->nlmsg_seq, NLM_F_MULTI,
df2c07f4 1751 OVS_DP_CMD_NEW) < 0)
aaff4b55 1752 break;
254f2dc8 1753 i++;
a7786963 1754 }
aaff4b55 1755
254f2dc8
BP
1756 cb->args[0] = i;
1757
aaff4b55 1758 return skb->len;
c19e6535
BP
1759}
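/*
 * Illustrative userspace sketch, not part of this file: driving the dump
 * handler above.  Assumes libnl-3 and <linux/openvswitch.h> are available;
 * list_datapaths() is a hypothetical helper.  Sending OVS_DP_CMD_GET with
 * NLM_F_DUMP makes the kernel call ovs_dp_cmd_dump() repeatedly, with
 * cb->args[0] carrying the resume position between calls.
 */
#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

static int list_datapaths(void)
{
	struct nl_sock *sk;
	struct nl_msg *msg;
	int family, err;

	sk = nl_socket_alloc();
	if (!sk)
		return -ENOMEM;

	err = genl_connect(sk);
	if (err < 0)
		goto out;

	family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
	if (family < 0) {
		err = family;
		goto out;
	}

	msg = nlmsg_alloc();
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	/* The dump walks every datapath in the caller's net namespace, so
	 * the ovs_header's dp_ifindex (left zeroed here) is not used as a
	 * filter. */
	if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
			 sizeof(struct ovs_header), NLM_F_DUMP,
			 OVS_DP_CMD_GET, OVS_DATAPATH_VERSION)) {
		nlmsg_free(msg);
		err = -ENOMEM;
		goto out;
	}

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	if (err >= 0)
		err = nl_recvmsgs_default(sk);	/* replies go to the default callback */
out:
	nl_socket_free(sk);
	return err;
}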
1760
aaff4b55 1761static struct genl_ops dp_datapath_genl_ops[] = {
df2c07f4 1762 { .cmd = OVS_DP_CMD_NEW,
aaff4b55
BP
1763 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1764 .policy = datapath_policy,
df2c07f4 1765 .doit = ovs_dp_cmd_new
aaff4b55 1766 },
df2c07f4 1767 { .cmd = OVS_DP_CMD_DEL,
aaff4b55
BP
1768 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1769 .policy = datapath_policy,
df2c07f4 1770 .doit = ovs_dp_cmd_del
aaff4b55 1771 },
df2c07f4 1772 { .cmd = OVS_DP_CMD_GET,
aaff4b55
BP
1773 .flags = 0, /* OK for unprivileged users. */
1774 .policy = datapath_policy,
df2c07f4
JP
1775 .doit = ovs_dp_cmd_get,
1776 .dumpit = ovs_dp_cmd_dump
aaff4b55 1777 },
df2c07f4 1778 { .cmd = OVS_DP_CMD_SET,
aaff4b55
BP
1779 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1780 .policy = datapath_policy,
df2c07f4 1781 .doit = ovs_dp_cmd_set,
aaff4b55
BP
1782 },
1783};
1784
df2c07f4 1785static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
f0fef760 1786#ifdef HAVE_NLA_NUL_STRING
df2c07f4 1787 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
f613a0d7 1788 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
f0fef760 1789#else
f613a0d7 1790 [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
f0fef760 1791#endif
d48c88ec
JG
1792 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1793 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
b063d9f0 1794 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
df2c07f4 1795 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
c19e6535
BP
1796};
1797
f0fef760
BP
1798static struct genl_family dp_vport_genl_family = {
1799 .id = GENL_ID_GENERATE,
df2c07f4
JP
1800 .hdrsize = sizeof(struct ovs_header),
1801 .name = OVS_VPORT_FAMILY,
69685a88 1802 .version = OVS_VPORT_VERSION,
2a4999f3
PS
1803 .maxattr = OVS_VPORT_ATTR_MAX,
1804 SET_NETNSOK
f0fef760
BP
1805};
1806
850b6b3b 1807struct genl_multicast_group ovs_dp_vport_multicast_group = {
df2c07f4 1808 .name = OVS_VPORT_MCGROUP
f0fef760
BP
1809};
1810
1811/* Called with RTNL lock or RCU read lock. */
df2c07f4 1812static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
28aea917 1813 u32 portid, u32 seq, u32 flags, u8 cmd)
064af421 1814{
df2c07f4 1815 struct ovs_header *ovs_header;
e926dfe3 1816 struct ovs_vport_stats vport_stats;
c19e6535
BP
1817 int err;
1818
28aea917 1819 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
f0fef760 1820 flags, cmd);
df2c07f4 1821 if (!ovs_header)
f0fef760 1822 return -EMSGSIZE;
c19e6535 1823
99769a40 1824 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
c19e6535 1825
c3cc8c03
DM
1826 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1827 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1828 nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
28aea917 1829 nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
c3cc8c03 1830 goto nla_put_failure;
c19e6535 1831
850b6b3b 1832 ovs_vport_get_stats(vport, &vport_stats);
c3cc8c03
DM
1833 if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1834 &vport_stats))
1835 goto nla_put_failure;
c19e6535 1836
850b6b3b 1837 err = ovs_vport_get_options(vport, skb);
f0fef760
BP
1838 if (err == -EMSGSIZE)
1839 goto error;
c19e6535 1840
df2c07f4 1841 return genlmsg_end(skb, ovs_header);
c19e6535
BP
1842
1843nla_put_failure:
1844 err = -EMSGSIZE;
f0fef760 1845error:
df2c07f4 1846 genlmsg_cancel(skb, ovs_header);
f0fef760 1847 return err;
064af421
BP
1848}
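/*
 * Illustrative userspace sketch, not part of this file: parsing one reply
 * built by ovs_vport_cmd_fill_info() above.  Assumes libnl-3; print_vport()
 * is a hypothetical callback that would be installed with
 * nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, print_vport, NULL).
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/handlers.h>
#include <netlink/genl/genl.h>
#include <linux/openvswitch.h>

static int print_vport(struct nl_msg *msg, void *arg)
{
	struct nlattr *a[OVS_VPORT_ATTR_MAX + 1];

	/* Skip the ovs_header user header, then pick up the attributes. */
	if (genlmsg_parse(nlmsg_hdr(msg), sizeof(struct ovs_header), a,
			  OVS_VPORT_ATTR_MAX, NULL) < 0)
		return NL_SKIP;

	if (a[OVS_VPORT_ATTR_PORT_NO] && a[OVS_VPORT_ATTR_NAME])
		printf("port %u: %s\n",
		       nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]),
		       nla_get_string(a[OVS_VPORT_ATTR_NAME]));

	return NL_OK;
}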
1849
f0fef760 1850/* Called with RTNL lock or RCU read lock. */
28aea917 1851struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
f14d8083 1852 u32 seq, u8 cmd)
064af421 1853{
c19e6535 1854 struct sk_buff *skb;
f0fef760 1855 int retval;
c19e6535 1856
f0fef760 1857 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
c19e6535
BP
1858 if (!skb)
1859 return ERR_PTR(-ENOMEM);
1860
28aea917 1861 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
c25ea534
JG
1862 BUG_ON(retval < 0);
1863
c19e6535 1864 return skb;
f0fef760 1865}
c19e6535 1866
df2c07f4 1867static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
f0fef760 1868{
df2c07f4 1869 return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
c19e6535 1870}
51d4d598 1871
ed099e92 1872/* Called with RTNL lock or RCU read lock. */
2a4999f3
PS
1873static struct vport *lookup_vport(struct net *net,
1874 struct ovs_header *ovs_header,
df2c07f4 1875 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
c19e6535
BP
1876{
1877 struct datapath *dp;
1878 struct vport *vport;
1879
df2c07f4 1880 if (a[OVS_VPORT_ATTR_NAME]) {
2a4999f3 1881 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
ed099e92 1882 if (!vport)
c19e6535 1883 return ERR_PTR(-ENODEV);
24ce832d
BP
1884 if (ovs_header->dp_ifindex &&
1885 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1886 return ERR_PTR(-ENODEV);
c19e6535 1887 return vport;
df2c07f4
JP
1888 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1889 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
c19e6535
BP
1890
1891 if (port_no >= DP_MAX_PORTS)
f0fef760 1892 return ERR_PTR(-EFBIG);
c19e6535 1893
2a4999f3 1894 dp = get_dp(net, ovs_header->dp_ifindex);
c19e6535
BP
1895 if (!dp)
1896 return ERR_PTR(-ENODEV);
f2459fe7 1897
95b1d73a 1898 vport = ovs_vport_rtnl_rcu(dp, port_no);
ed099e92 1899 if (!vport)
17535c57 1900 return ERR_PTR(-ENODEV);
c19e6535
BP
1901 return vport;
1902 } else
1903 return ERR_PTR(-EINVAL);
064af421
BP
1904}
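/*
 * Illustrative userspace sketch, not part of this file: the two ways a
 * request can address a vport, matching the branches of lookup_vport()
 * above.  Assumes libnl-3; build_vport_get() and its arguments are
 * hypothetical.  lookup_vport() checks OVS_VPORT_ATTR_NAME before
 * OVS_VPORT_ATTR_PORT_NO; a name works even with dp_ifindex == 0, while a
 * port number needs dp_ifindex to select the datapath.
 */
#include <errno.h>
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <linux/openvswitch.h>

static int build_vport_get(struct nl_msg *msg, int family, int dp_ifindex,
			   const char *name, uint32_t port_no)
{
	struct ovs_header *ovs_header;

	ovs_header = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
				 sizeof(*ovs_header), 0, OVS_VPORT_CMD_GET,
				 OVS_VPORT_VERSION);
	if (!ovs_header)
		return -ENOMEM;
	ovs_header->dp_ifindex = dp_ifindex;

	if (name)
		return nla_put_string(msg, OVS_VPORT_ATTR_NAME, name);
	return nla_put_u32(msg, OVS_VPORT_ATTR_PORT_NO, port_no);
}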
1905
df2c07f4 1906static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
c19e6535 1907{
f0fef760 1908 struct nlattr **a = info->attrs;
df2c07f4 1909 struct ovs_header *ovs_header = info->userhdr;
c19e6535 1910 struct vport_parms parms;
ed099e92 1911 struct sk_buff *reply;
c19e6535 1912 struct vport *vport;
c19e6535 1913 struct datapath *dp;
b0ec0f27 1914 u32 port_no;
c19e6535 1915 int err;
b0ec0f27 1916
c19e6535 1917 err = -EINVAL;
ea36840f
BP
1918 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1919 !a[OVS_VPORT_ATTR_UPCALL_PID])
f0fef760
BP
1920 goto exit;
1921
df2c07f4 1922 err = ovs_vport_cmd_validate(a);
f0fef760
BP
1923 if (err)
1924 goto exit;
51d4d598 1925
c19e6535 1926 rtnl_lock();
2a4999f3 1927 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
c19e6535
BP
1928 err = -ENODEV;
1929 if (!dp)
ed099e92 1930 goto exit_unlock;
c19e6535 1931
df2c07f4
JP
1932 if (a[OVS_VPORT_ATTR_PORT_NO]) {
1933 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
c19e6535
BP
1934
1935 err = -EFBIG;
1936 if (port_no >= DP_MAX_PORTS)
ed099e92 1937 goto exit_unlock;
c19e6535 1938
95b1d73a 1939 vport = ovs_vport_rtnl(dp, port_no);
c19e6535
BP
1940 err = -EBUSY;
1941 if (vport)
ed099e92 1942 goto exit_unlock;
c19e6535
BP
1943 } else {
1944 for (port_no = 1; ; port_no++) {
1945 if (port_no >= DP_MAX_PORTS) {
1946 err = -EFBIG;
ed099e92 1947 goto exit_unlock;
c19e6535 1948 }
95b1d73a 1949 vport = ovs_vport_rtnl(dp, port_no);
c19e6535
BP
1950 if (!vport)
1951 break;
51d4d598 1952 }
064af421 1953 }
b0ec0f27 1954
df2c07f4
JP
1955 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1956 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1957 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
c19e6535
BP
1958 parms.dp = dp;
1959 parms.port_no = port_no;
28aea917 1960 parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
c19e6535
BP
1961
1962 vport = new_vport(&parms);
1963 err = PTR_ERR(vport);
1964 if (IS_ERR(vport))
ed099e92 1965 goto exit_unlock;
c19e6535 1966
faef6d2d 1967 err = 0;
1fc7083d
JG
1968 if (a[OVS_VPORT_ATTR_STATS])
1969 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1970
1971 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
1972 OVS_VPORT_CMD_NEW);
1973 if (IS_ERR(reply)) {
1974 err = PTR_ERR(reply);
850b6b3b 1975 ovs_dp_detach_port(vport);
ed099e92 1976 goto exit_unlock;
c19e6535 1977 }
28aea917 1978 genl_notify(reply, genl_info_net(info), info->snd_portid,
850b6b3b 1979 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
c19e6535 1980
ed099e92 1981exit_unlock:
c19e6535 1982 rtnl_unlock();
c19e6535
BP
1983exit:
1984 return err;
44e05eca
BP
1985}
1986
df2c07f4 1987static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
44e05eca 1988{
f0fef760
BP
1989 struct nlattr **a = info->attrs;
1990 struct sk_buff *reply;
c19e6535 1991 struct vport *vport;
c19e6535 1992 int err;
44e05eca 1993
df2c07f4 1994 err = ovs_vport_cmd_validate(a);
f0fef760 1995 if (err)
c19e6535
BP
1996 goto exit;
1997
1998 rtnl_lock();
2a4999f3 1999 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535
BP
2000 err = PTR_ERR(vport);
2001 if (IS_ERR(vport))
f0fef760 2002 goto exit_unlock;
44e05eca 2003
c19e6535 2004 err = 0;
6455100f 2005 if (a[OVS_VPORT_ATTR_TYPE] &&
16b82e84 2006 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
4879d4c7 2007 err = -EINVAL;
6455100f 2008
c25ea534
JG
2009 reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2010 if (!reply) {
2011 err = -ENOMEM;
2012 goto exit_unlock;
2013 }
2014
4879d4c7 2015 if (!err && a[OVS_VPORT_ATTR_OPTIONS])
850b6b3b 2016 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1fc7083d 2017 if (err)
c25ea534 2018 goto exit_free;
1fc7083d
JG
2019
2020 if (a[OVS_VPORT_ATTR_STATS])
2021 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
2022
2023 if (a[OVS_VPORT_ATTR_UPCALL_PID])
28aea917 2024 vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
c19e6535 2025
c25ea534
JG
2026 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2027 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2028 BUG_ON(err < 0);
f0fef760 2029
28aea917 2030 genl_notify(reply, genl_info_net(info), info->snd_portid,
850b6b3b 2031 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
f0fef760 2032
c25ea534
JG
2033 rtnl_unlock();
2034 return 0;
2035
2036exit_free:
2037 kfree_skb(reply);
f0fef760 2038exit_unlock:
c19e6535
BP
2039 rtnl_unlock();
2040exit:
2041 return err;
064af421
BP
2042}
2043
df2c07f4 2044static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2045{
f0fef760
BP
2046 struct nlattr **a = info->attrs;
2047 struct sk_buff *reply;
c19e6535 2048 struct vport *vport;
c19e6535
BP
2049 int err;
2050
df2c07f4 2051 err = ovs_vport_cmd_validate(a);
f0fef760 2052 if (err)
c19e6535
BP
2053 goto exit;
2054
2055 rtnl_lock();
2a4999f3 2056 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
c19e6535 2057 err = PTR_ERR(vport);
f0fef760
BP
2058 if (IS_ERR(vport))
2059 goto exit_unlock;
c19e6535 2060
df2c07f4 2061 if (vport->port_no == OVSP_LOCAL) {
f0fef760
BP
2062 err = -EINVAL;
2063 goto exit_unlock;
2064 }
2065
28aea917
IY
2066 reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
2067 info->snd_seq, OVS_VPORT_CMD_DEL);
f0fef760
BP
2068 err = PTR_ERR(reply);
2069 if (IS_ERR(reply))
2070 goto exit_unlock;
2071
b57d5819 2072 err = 0;
850b6b3b 2073 ovs_dp_detach_port(vport);
f0fef760 2074
28aea917 2075 genl_notify(reply, genl_info_net(info), info->snd_portid,
850b6b3b 2076 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
f0fef760
BP
2077
2078exit_unlock:
c19e6535
BP
2079 rtnl_unlock();
2080exit:
2081 return err;
7c40efc9
BP
2082}
2083
df2c07f4 2084static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
7c40efc9 2085{
f0fef760 2086 struct nlattr **a = info->attrs;
df2c07f4 2087 struct ovs_header *ovs_header = info->userhdr;
ed099e92 2088 struct sk_buff *reply;
c19e6535 2089 struct vport *vport;
c19e6535
BP
2090 int err;
2091
df2c07f4 2092 err = ovs_vport_cmd_validate(a);
f0fef760
BP
2093 if (err)
2094 goto exit;
c19e6535 2095
ed099e92 2096 rcu_read_lock();
2a4999f3 2097 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
c19e6535
BP
2098 err = PTR_ERR(vport);
2099 if (IS_ERR(vport))
f0fef760 2100 goto exit_unlock;
c19e6535 2101
28aea917
IY
2102 reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
2103 info->snd_seq, OVS_VPORT_CMD_NEW);
ed099e92
BP
2104 err = PTR_ERR(reply);
2105 if (IS_ERR(reply))
f0fef760 2106 goto exit_unlock;
ed099e92 2107
df2fa9b5
JG
2108 rcu_read_unlock();
2109
2110 return genlmsg_reply(reply, info);
ed099e92 2111
f0fef760 2112exit_unlock:
ed099e92 2113 rcu_read_unlock();
f0fef760 2114exit:
c19e6535
BP
2115 return err;
2116}
2117
df2c07f4 2118static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
c19e6535 2119{
df2c07f4 2120 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
c19e6535 2121 struct datapath *dp;
95b1d73a
PS
2122 int bucket = cb->args[0], skip = cb->args[1];
2123 int i, j = 0;
c19e6535 2124
2a4999f3 2125 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
c19e6535 2126 if (!dp)
f0fef760 2127 return -ENODEV;
ed099e92
BP
2128
2129 rcu_read_lock();
95b1d73a 2130 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
ed099e92 2131 struct vport *vport;
95b1d73a
PS
2132
2133 j = 0;
f8dfbcb7 2134 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
95b1d73a
PS
2135 if (j >= skip &&
2136 ovs_vport_cmd_fill_info(vport, skb,
28aea917 2137 NETLINK_CB(cb->skb).portid,
95b1d73a
PS
2138 cb->nlh->nlmsg_seq,
2139 NLM_F_MULTI,
2140 OVS_VPORT_CMD_NEW) < 0)
2141 goto out;
2142
2143 j++;
2144 }
2145 skip = 0;
c19e6535 2146 }
95b1d73a 2147out:
ed099e92 2148 rcu_read_unlock();
c19e6535 2149
95b1d73a
PS
2150 cb->args[0] = i;
2151 cb->args[1] = j;
f0fef760 2152
95b1d73a 2153 return skb->len;
7c40efc9
BP
2154}
2155
f0fef760 2156static struct genl_ops dp_vport_genl_ops[] = {
df2c07f4 2157 { .cmd = OVS_VPORT_CMD_NEW,
f0fef760
BP
2158 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2159 .policy = vport_policy,
df2c07f4 2160 .doit = ovs_vport_cmd_new
f0fef760 2161 },
df2c07f4 2162 { .cmd = OVS_VPORT_CMD_DEL,
f0fef760
BP
2163 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2164 .policy = vport_policy,
df2c07f4 2165 .doit = ovs_vport_cmd_del
f0fef760 2166 },
df2c07f4 2167 { .cmd = OVS_VPORT_CMD_GET,
f0fef760
BP
2168 .flags = 0, /* OK for unprivileged users. */
2169 .policy = vport_policy,
df2c07f4
JP
2170 .doit = ovs_vport_cmd_get,
2171 .dumpit = ovs_vport_cmd_dump
f0fef760 2172 },
df2c07f4 2173 { .cmd = OVS_VPORT_CMD_SET,
f0fef760
BP
2174 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2175 .policy = vport_policy,
df2c07f4 2176 .doit = ovs_vport_cmd_set,
f0fef760
BP
2177 },
2178};
2179
982b8810
BP
2180struct genl_family_and_ops {
2181 struct genl_family *family;
2182 struct genl_ops *ops;
2183 int n_ops;
2184 struct genl_multicast_group *group;
2185};
ed099e92 2186
982b8810 2187static const struct genl_family_and_ops dp_genl_families[] = {
aaff4b55
BP
2188 { &dp_datapath_genl_family,
2189 dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
850b6b3b 2190 &ovs_dp_datapath_multicast_group },
f0fef760
BP
2191 { &dp_vport_genl_family,
2192 dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
850b6b3b 2193 &ovs_dp_vport_multicast_group },
37a1300c
BP
2194 { &dp_flow_genl_family,
2195 dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
850b6b3b 2196 &ovs_dp_flow_multicast_group },
982b8810
BP
2197 { &dp_packet_genl_family,
2198 dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
2199 NULL },
2200};
ed099e92 2201
982b8810
BP
2202static void dp_unregister_genl(int n_families)
2203{
2204 int i;
ed099e92 2205
b867ca75 2206 for (i = 0; i < n_families; i++)
982b8810 2207 genl_unregister_family(dp_genl_families[i].family);
ed099e92
BP
2208}
2209
982b8810 2210static int dp_register_genl(void)
064af421 2211{
982b8810
BP
2212 int n_registered;
2213 int err;
2214 int i;
064af421 2215
982b8810
BP
2216 n_registered = 0;
2217 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2218 const struct genl_family_and_ops *f = &dp_genl_families[i];
064af421 2219
982b8810
BP
2220 err = genl_register_family_with_ops(f->family, f->ops,
2221 f->n_ops);
2222 if (err)
2223 goto error;
2224 n_registered++;
e22d4953 2225
982b8810
BP
2226 if (f->group) {
2227 err = genl_register_mc_group(f->family, f->group);
2228 if (err)
2229 goto error;
2230 }
2231 }
9cc8b4e4 2232
982b8810 2233 return 0;
064af421
BP
2234
2235error:
982b8810
BP
2236 dp_unregister_genl(n_registered);
2237 return err;
064af421
BP
2238}
2239
acd051f1
PS
2240static int __rehash_flow_table(void *dummy)
2241{
2242 struct datapath *dp;
2a4999f3
PS
2243 struct net *net;
2244
2245 rtnl_lock();
2246 for_each_net(net) {
2247 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
acd051f1 2248
2a4999f3
PS
2249 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2250 struct flow_table *old_table = genl_dereference(dp->table);
2251 struct flow_table *new_table;
acd051f1 2252
2a4999f3
PS
2253 new_table = ovs_flow_tbl_rehash(old_table);
2254 if (!IS_ERR(new_table)) {
2255 rcu_assign_pointer(dp->table, new_table);
2256 ovs_flow_tbl_deferred_destroy(old_table);
2257 }
acd051f1
PS
2258 }
2259 }
2a4999f3 2260 rtnl_unlock();
acd051f1
PS
2261 return 0;
2262}
2263
2264static void rehash_flow_table(struct work_struct *work)
2265{
2266 genl_exec(__rehash_flow_table, NULL);
2267 schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2268}
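/*
 * Illustrative sketch, not part of this file: the self-rearming delayed-work
 * pattern used by rehash_flow_table() above.  "example_work_fn", "example_wq"
 * and EXAMPLE_INTERVAL are hypothetical; the work function reschedules
 * itself, and whoever started it must eventually stop it with
 * cancel_delayed_work_sync() (as dp_cleanup() does below for rehash_flow_wq).
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define EXAMPLE_INTERVAL (60 * HZ)

static void example_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_wq, example_work_fn);

static void example_work_fn(struct work_struct *work)
{
	/* ... periodic maintenance ... */
	schedule_delayed_work(&example_wq, EXAMPLE_INTERVAL);
}

/*
 * Start once: schedule_delayed_work(&example_wq, EXAMPLE_INTERVAL);
 * Stop:       cancel_delayed_work_sync(&example_wq);
 */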
2269
2a4999f3
PS
2270static int dp_destroy_all(void *data)
2271{
2272 struct datapath *dp, *dp_next;
2273 struct ovs_net *ovs_net = data;
2274
2275 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2276 __dp_destroy(dp);
2277
2278 return 0;
2279}
2280
2281static int __net_init ovs_init_net(struct net *net)
2282{
2283 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2284
2285 INIT_LIST_HEAD(&ovs_net->dps);
2286 return 0;
2287}
2288
2289static void __net_exit ovs_exit_net(struct net *net)
2290{
2291 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2292
2293 genl_exec(dp_destroy_all, ovs_net);
2294}
2295
2296static struct pernet_operations ovs_net_ops = {
2297 .init = ovs_init_net,
2298 .exit = ovs_exit_net,
2299 .id = &ovs_net_id,
2300 .size = sizeof(struct ovs_net),
2301};
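/*
 * Illustrative sketch, not part of this file: how the per-namespace state
 * registered through ovs_net_ops above is reached from elsewhere in the
 * module.  example_walk_dps() and its caller are hypothetical, and the
 * types come from headers this file already includes; net_generic() returns
 * the struct ovs_net that ovs_init_net() set up for that network namespace.
 */
static void example_walk_dps(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct datapath *dp;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		/* ... operate on each datapath in this namespace ... */
	}
}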
2302
22d24ebf
BP
2303static int __init dp_init(void)
2304{
2305 int err;
2306
f3d85db3 2307 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
22d24ebf 2308
dc5f3fef 2309 pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
8a07709c 2310 VERSION);
064af421 2311
b9c15df9 2312 err = genl_exec_init();
064af421
BP
2313 if (err)
2314 goto error;
2315
16d650e5 2316 err = ovs_workqueues_init();
b9c15df9
PS
2317 if (err)
2318 goto error_genl_exec;
2319
850b6b3b 2320 err = ovs_flow_init();
3544358a 2321 if (err)
85c9de19 2322 goto error_wq;
3544358a 2323
850b6b3b 2324 err = ovs_vport_init();
064af421
BP
2325 if (err)
2326 goto error_flow_exit;
2327
2a4999f3 2328 err = register_pernet_device(&ovs_net_ops);
f2459fe7
JG
2329 if (err)
2330 goto error_vport_exit;
2331
2a4999f3
PS
2332 err = register_netdevice_notifier(&ovs_dp_device_notifier);
2333 if (err)
2334 goto error_netns_exit;
2335
982b8810
BP
2336 err = dp_register_genl();
2337 if (err < 0)
37a1300c 2338 goto error_unreg_notifier;
982b8810 2339
acd051f1
PS
2340 schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2341
064af421
BP
2342 return 0;
2343
2344error_unreg_notifier:
850b6b3b 2345 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2a4999f3
PS
2346error_netns_exit:
2347 unregister_pernet_device(&ovs_net_ops);
f2459fe7 2348error_vport_exit:
850b6b3b 2349 ovs_vport_exit();
064af421 2350error_flow_exit:
850b6b3b 2351 ovs_flow_exit();
16d650e5
PS
2352error_wq:
2353 ovs_workqueues_exit();
b9c15df9
PS
2354error_genl_exec:
2355 genl_exec_exit();
064af421
BP
2356error:
2357 return err;
2358}
2359
2360static void dp_cleanup(void)
2361{
acd051f1 2362 cancel_delayed_work_sync(&rehash_flow_wq);
982b8810 2363 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
850b6b3b 2364 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2a4999f3
PS
2365 unregister_pernet_device(&ovs_net_ops);
2366 rcu_barrier();
850b6b3b
JG
2367 ovs_vport_exit();
2368 ovs_flow_exit();
16d650e5 2369 ovs_workqueues_exit();
b9c15df9 2370 genl_exec_exit();
064af421
BP
2371}
2372
2373module_init(dp_init);
2374module_exit(dp_cleanup);
2375
2376MODULE_DESCRIPTION("Open vSwitch switching datapath");
2377MODULE_LICENSE("GPL");
3d0666d2 2378MODULE_VERSION(VERSION);