1 /*
2 * Copyright (c) 2007-2012 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/version.h>
40 #include <linux/ethtool.h>
41 #include <linux/wait.h>
42 #include <asm/div64.h>
43 #include <linux/highmem.h>
44 #include <linux/netfilter_bridge.h>
45 #include <linux/netfilter_ipv4.h>
46 #include <linux/inetdevice.h>
47 #include <linux/list.h>
48 #include <linux/openvswitch.h>
49 #include <linux/rculist.h>
50 #include <linux/dmi.h>
51 #include <net/genetlink.h>
52 #include <net/net_namespace.h>
53 #include <net/netns/generic.h>
54
55 #include "checksum.h"
56 #include "datapath.h"
57 #include "flow.h"
58 #include "genl_exec.h"
59 #include "vlan.h"
60 #include "tunnel.h"
61 #include "vport-internal_dev.h"
62
63 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
64 LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
65 #error Kernels before 2.6.18 or after 3.8 are not supported by this version of Open vSwitch.
66 #endif
67
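/* Note: 10 * 60 * HZ is ten minutes expressed in jiffies; rehash_flow_table()
 * is intended to run on this period via the rehash_flow_wq delayed work. */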
68 #define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
69 static void rehash_flow_table(struct work_struct *work);
70 static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
71
72 int ovs_net_id __read_mostly;
73
74 /**
75 * DOC: Locking:
76 *
77 * Writes to device state (add/remove datapath, port, set operations on vports,
78 * etc.) are protected by RTNL.
79 *
80 * Writes to other state (flow table modifications, set miscellaneous datapath
81 * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside
82 * genl_mutex.
83 *
84 * Reads are protected by RCU.
85 *
86 * There are a few special cases (mostly stats) that have their own
87 * synchronization, but they nest under all of the above and don't
88 * interact with each other.
89 */
90
91 static struct vport *new_vport(const struct vport_parms *);
92 static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
93 const struct dp_upcall_info *);
94 static int queue_userspace_packet(struct net *, int dp_ifindex,
95 struct sk_buff *,
96 const struct dp_upcall_info *);
97
98 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
99 static struct datapath *get_dp(struct net *net, int dp_ifindex)
100 {
101 struct datapath *dp = NULL;
102 struct net_device *dev;
103
104 rcu_read_lock();
105 dev = dev_get_by_index_rcu(net, dp_ifindex);
106 if (dev) {
107 struct vport *vport = ovs_internal_dev_get_vport(dev);
108 if (vport)
109 dp = vport->dp;
110 }
111 rcu_read_unlock();
112
113 return dp;
114 }
115
116 /* Must be called with rcu_read_lock or RTNL lock. */
117 const char *ovs_dp_name(const struct datapath *dp)
118 {
119 struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
120 return vport->ops->get_name(vport);
121 }
122
123 static int get_dpifindex(struct datapath *dp)
124 {
125 struct vport *local;
126 int ifindex;
127
128 rcu_read_lock();
129
130 local = ovs_vport_rcu(dp, OVSP_LOCAL);
131 if (local)
132 ifindex = local->ops->get_ifindex(local);
133 else
134 ifindex = 0;
135
136 rcu_read_unlock();
137
138 return ifindex;
139 }
140
141 static void destroy_dp_rcu(struct rcu_head *rcu)
142 {
143 struct datapath *dp = container_of(rcu, struct datapath, rcu);
144
145 ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
146 free_percpu(dp->stats_percpu);
147 release_net(ovs_dp_get_net(dp));
148 kfree(dp->ports);
149 kfree(dp);
150 }
151
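/* The port number is masked down to a bucket index, which relies on
 * DP_VPORT_HASH_BUCKETS being a power of two. */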
152 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
153 u16 port_no)
154 {
155 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
156 }
157
158 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
159 {
160 struct vport *vport;
161 struct hlist_node *n;
162 struct hlist_head *head;
163
164 head = vport_hash_bucket(dp, port_no);
165 hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
166 if (vport->port_no == port_no)
167 return vport;
168 }
169 return NULL;
170 }
171
172 /* Called with RTNL lock and genl_lock. */
173 static struct vport *new_vport(const struct vport_parms *parms)
174 {
175 struct vport *vport;
176
177 vport = ovs_vport_add(parms);
178 if (!IS_ERR(vport)) {
179 struct datapath *dp = parms->dp;
180 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
181
182 hlist_add_head_rcu(&vport->dp_hash_node, head);
183 }
184 return vport;
185 }
186
187 /* Called with RTNL lock. */
188 void ovs_dp_detach_port(struct vport *p)
189 {
190 ASSERT_RTNL();
191
192 /* First drop references to device. */
193 hlist_del_rcu(&p->dp_hash_node);
194
195 /* Then destroy it. */
196 ovs_vport_del(p);
197 }
198
199 /* Must be called with rcu_read_lock. */
200 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
201 {
202 struct datapath *dp = p->dp;
203 struct sw_flow *flow;
204 struct dp_stats_percpu *stats;
205 u64 *stats_counter;
206 int error;
207
208 stats = this_cpu_ptr(dp->stats_percpu);
209
210 if (!OVS_CB(skb)->flow) {
211 struct sw_flow_key key;
212 int key_len;
213
214 /* Extract flow from 'skb' into 'key'. */
215 error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
216 if (unlikely(error)) {
217 kfree_skb(skb);
218 return;
219 }
220
221 /* Look up flow. */
222 flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
223 &key, key_len);
224 if (unlikely(!flow)) {
225 struct dp_upcall_info upcall;
226
227 upcall.cmd = OVS_PACKET_CMD_MISS;
228 upcall.key = &key;
229 upcall.userdata = NULL;
230 upcall.portid = p->upcall_portid;
231 ovs_dp_upcall(dp, skb, &upcall);
232 consume_skb(skb);
233 stats_counter = &stats->n_missed;
234 goto out;
235 }
236
237 OVS_CB(skb)->flow = flow;
238 }
239
240 stats_counter = &stats->n_hit;
241 ovs_flow_used(OVS_CB(skb)->flow, skb);
242 ovs_execute_actions(dp, skb);
243
244 out:
245 /* Update datapath statistics. */
246 u64_stats_update_begin(&stats->sync);
247 (*stats_counter)++;
248 u64_stats_update_end(&stats->sync);
249 }
250
251 static struct genl_family dp_packet_genl_family = {
252 .id = GENL_ID_GENERATE,
253 .hdrsize = sizeof(struct ovs_header),
254 .name = OVS_PACKET_FAMILY,
255 .version = OVS_PACKET_VERSION,
256 .maxattr = OVS_PACKET_ATTR_MAX,
257 SET_NETNSOK
258 };
259
260 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
261 const struct dp_upcall_info *upcall_info)
262 {
263 struct dp_stats_percpu *stats;
264 int dp_ifindex;
265 int err;
266
267 if (upcall_info->portid == 0) {
268 err = -ENOTCONN;
269 goto err;
270 }
271
272 dp_ifindex = get_dpifindex(dp);
273 if (!dp_ifindex) {
274 err = -ENODEV;
275 goto err;
276 }
277
278 forward_ip_summed(skb, true);
279
280 if (!skb_is_gso(skb))
281 err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
282 else
283 err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
284 if (err)
285 goto err;
286
287 return 0;
288
289 err:
290 stats = this_cpu_ptr(dp->stats_percpu);
291
292 u64_stats_update_begin(&stats->sync);
293 stats->n_lost++;
294 u64_stats_update_end(&stats->sync);
295
296 return err;
297 }
298
299 static int queue_gso_packets(struct net *net, int dp_ifindex,
300 struct sk_buff *skb,
301 const struct dp_upcall_info *upcall_info)
302 {
303 unsigned short gso_type = skb_shinfo(skb)->gso_type;
304 struct dp_upcall_info later_info;
305 struct sw_flow_key later_key;
306 struct sk_buff *segs, *nskb;
307 int err;
308
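	/* Software-segment the GSO packet; each resulting segment is queued
	 * to userspace as a separate upcall below. */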
309 segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
310 if (IS_ERR(segs))
311 return PTR_ERR(segs);
312
313 /* Queue all of the segments. */
314 skb = segs;
315 do {
316 err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
317 if (err)
318 break;
319
320 if (skb == segs && gso_type & SKB_GSO_UDP) {
321 /* The initial flow key extracted by ovs_flow_extract()
322                  * in this case is for the first fragment, so we need to
323 * properly mark later fragments.
324 */
325 later_key = *upcall_info->key;
326 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
327
328 later_info = *upcall_info;
329 later_info.key = &later_key;
330 upcall_info = &later_info;
331 }
332 } while ((skb = skb->next));
333
334 /* Free all of the segments. */
335 skb = segs;
336 do {
337 nskb = skb->next;
338 if (err)
339 kfree_skb(skb);
340 else
341 consume_skb(skb);
342 } while ((skb = nskb));
343 return err;
344 }
345
346 static int queue_userspace_packet(struct net *net, int dp_ifindex,
347 struct sk_buff *skb,
348 const struct dp_upcall_info *upcall_info)
349 {
350 struct ovs_header *upcall;
351 struct sk_buff *nskb = NULL;
352 struct sk_buff *user_skb; /* to be queued to userspace */
353 struct nlattr *nla;
354 unsigned int len;
355 int err;
356
357 if (vlan_tx_tag_present(skb)) {
358 nskb = skb_clone(skb, GFP_ATOMIC);
359 if (!nskb)
360 return -ENOMEM;
361
362 err = vlan_deaccel_tag(nskb);
363 if (err)
364 return err;
365
366 skb = nskb;
367 }
368
369 if (nla_attr_size(skb->len) > USHRT_MAX) {
370 err = -EFBIG;
371 goto out;
372 }
373
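	/* Size the upcall message: the ovs_header, the packet data, the flow
	 * key, and (optionally) the caller-supplied userdata attribute. */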
374 len = sizeof(struct ovs_header);
375 len += nla_total_size(skb->len);
376 len += nla_total_size(FLOW_BUFSIZE);
377 if (upcall_info->userdata)
378 len += NLA_ALIGN(upcall_info->userdata->nla_len);
379
380 user_skb = genlmsg_new(len, GFP_ATOMIC);
381 if (!user_skb) {
382 err = -ENOMEM;
383 goto out;
384 }
385
386 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
387 0, upcall_info->cmd);
388 upcall->dp_ifindex = dp_ifindex;
389
390 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
391 ovs_flow_to_nlattrs(upcall_info->key, user_skb);
392 nla_nest_end(user_skb, nla);
393
394 if (upcall_info->userdata)
395 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
396 nla_len(upcall_info->userdata),
397 nla_data(upcall_info->userdata));
398
399 nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
400
401 skb_copy_and_csum_dev(skb, nla_data(nla));
402
403 genlmsg_end(user_skb, upcall);
404 err = genlmsg_unicast(net, user_skb, upcall_info->portid);
405
406 out:
407 kfree_skb(nskb);
408 return err;
409 }
410
411 /* Called with genl_mutex. */
412 static int flush_flows(struct datapath *dp)
413 {
414 struct flow_table *old_table;
415 struct flow_table *new_table;
416
417 old_table = genl_dereference(dp->table);
418 new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
419 if (!new_table)
420 return -ENOMEM;
421
422 rcu_assign_pointer(dp->table, new_table);
423
424 ovs_flow_tbl_deferred_destroy(old_table);
425 return 0;
426 }
427
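/* Reserve 'attr_len' bytes at the tail of the action buffer, growing it
 * geometrically (doubling, capped at MAX_ACTIONS_BUFSIZE) when needed.
 * Growth reallocates, so callers must not hold pointers into the old
 * buffer across this call. */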
428 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
429 {
431 struct sw_flow_actions *acts;
432 int new_acts_size;
433 int req_size = NLA_ALIGN(attr_len);
434 int next_offset = offsetof(struct sw_flow_actions, actions) +
435 (*sfa)->actions_len;
436
437 if (req_size <= (ksize(*sfa) - next_offset))
438 goto out;
439
440 new_acts_size = ksize(*sfa) * 2;
441
442 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
443 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
444 return ERR_PTR(-EMSGSIZE);
445 new_acts_size = MAX_ACTIONS_BUFSIZE;
446 }
447
448 acts = ovs_flow_actions_alloc(new_acts_size);
449 if (IS_ERR(acts))
450 return (void *)acts;
451
452 memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
453 acts->actions_len = (*sfa)->actions_len;
454 kfree(*sfa);
455 *sfa = acts;
456
457 out:
458 (*sfa)->actions_len += req_size;
459 return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
460 }
461
462 static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
463 {
464 struct nlattr *a;
465
466 a = reserve_sfa_size(sfa, nla_attr_size(len));
467 if (IS_ERR(a))
468 return PTR_ERR(a);
469
470 a->nla_type = attrtype;
471 a->nla_len = nla_attr_size(len);
472
473 if (data)
474 memcpy(nla_data(a), data, len);
475 memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
476
477 return 0;
478 }
479
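/* Nested actions are tracked by byte offset rather than by pointer,
 * because reserve_sfa_size() may reallocate the buffer in between. */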
480 static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
481 {
482 int used = (*sfa)->actions_len;
483 int err;
484
485 err = add_action(sfa, attrtype, NULL, 0);
486 if (err)
487 return err;
488
489 return used;
490 }
491
492 static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
493 {
494 struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);
495
496 a->nla_len = sfa->actions_len - st_offset;
497 }
498
499 static int validate_and_copy_actions(const struct nlattr *attr,
500 const struct sw_flow_key *key, int depth,
501 struct sw_flow_actions **sfa);
502
503 static int validate_and_copy_sample(const struct nlattr *attr,
504 const struct sw_flow_key *key, int depth,
505 struct sw_flow_actions **sfa)
506 {
507 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
508 const struct nlattr *probability, *actions;
509 const struct nlattr *a;
510 int rem, start, err, st_acts;
511
512 memset(attrs, 0, sizeof(attrs));
513 nla_for_each_nested(a, attr, rem) {
514 int type = nla_type(a);
515 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
516 return -EINVAL;
517 attrs[type] = a;
518 }
519 if (rem)
520 return -EINVAL;
521
522 probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
523 if (!probability || nla_len(probability) != sizeof(u32))
524 return -EINVAL;
525
526 actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
527 if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
528 return -EINVAL;
529
530         /* Validation done; copy the sample action. */
531 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
532 if (start < 0)
533 return start;
534 err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
535 if (err)
536 return err;
537 st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
538 if (st_acts < 0)
539 return st_acts;
540
541 err = validate_and_copy_actions(actions, key, depth + 1, sfa);
542 if (err)
543 return err;
544
545 add_nested_action_end(*sfa, st_acts);
546 add_nested_action_end(*sfa, start);
547
548 return 0;
549 }
550
551 static int validate_tp_port(const struct sw_flow_key *flow_key)
552 {
553 if (flow_key->eth.type == htons(ETH_P_IP)) {
554 if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
555 return 0;
556 } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
557 if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
558 return 0;
559 }
560
561 return -EINVAL;
562 }
563
564 static int validate_and_copy_set_tun(const struct nlattr *attr,
565 struct sw_flow_actions **sfa)
566 {
567 struct ovs_key_ipv4_tunnel tun_key;
568 int err, start;
569
570 err = ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
571 if (err)
572 return err;
573
574 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
575 if (start < 0)
576 return start;
577
578 err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
579 add_nested_action_end(*sfa, start);
580
581 return err;
582 }
583
584 static int validate_set(const struct nlattr *a,
585 const struct sw_flow_key *flow_key,
586 struct sw_flow_actions **sfa,
587 bool *set_tun)
588 {
589 const struct nlattr *ovs_key = nla_data(a);
590 int key_type = nla_type(ovs_key);
591
592         /* There can be only one key in an action. */
593 if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
594 return -EINVAL;
595
596 if (key_type > OVS_KEY_ATTR_MAX ||
597 (ovs_key_lens[key_type] != nla_len(ovs_key) &&
598 ovs_key_lens[key_type] != -1))
599 return -EINVAL;
600
601 switch (key_type) {
602 const struct ovs_key_ipv4 *ipv4_key;
603 const struct ovs_key_ipv6 *ipv6_key;
604 int err;
605
606 case OVS_KEY_ATTR_PRIORITY:
607 case OVS_KEY_ATTR_TUN_ID:
608 case OVS_KEY_ATTR_ETHERNET:
609 break;
610
611 case OVS_KEY_ATTR_SKB_MARK:
612 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
613 if (nla_get_u32(ovs_key) != 0)
614 return -EINVAL;
615 #endif
616 break;
617
618 case OVS_KEY_ATTR_TUNNEL:
619 *set_tun = true;
620 err = validate_and_copy_set_tun(a, sfa);
621 if (err)
622 return err;
623 break;
624
625 case OVS_KEY_ATTR_IPV4:
626 if (flow_key->eth.type != htons(ETH_P_IP))
627 return -EINVAL;
628
629 if (!flow_key->ip.proto)
630 return -EINVAL;
631
632 ipv4_key = nla_data(ovs_key);
633 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
634 return -EINVAL;
635
636 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
637 return -EINVAL;
638
639 break;
640
641 case OVS_KEY_ATTR_IPV6:
642 if (flow_key->eth.type != htons(ETH_P_IPV6))
643 return -EINVAL;
644
645 if (!flow_key->ip.proto)
646 return -EINVAL;
647
648 ipv6_key = nla_data(ovs_key);
649 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
650 return -EINVAL;
651
652 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
653 return -EINVAL;
654
655 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
656 return -EINVAL;
657
658 break;
659
660 case OVS_KEY_ATTR_TCP:
661 if (flow_key->ip.proto != IPPROTO_TCP)
662 return -EINVAL;
663
664 return validate_tp_port(flow_key);
665
666 case OVS_KEY_ATTR_UDP:
667 if (flow_key->ip.proto != IPPROTO_UDP)
668 return -EINVAL;
669
670 return validate_tp_port(flow_key);
671
672 default:
673 return -EINVAL;
674 }
675
676 return 0;
677 }
678
679 static int validate_userspace(const struct nlattr *attr)
680 {
681 static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
682 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
683 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
684 };
685 struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
686 int error;
687
688 error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
689 attr, userspace_policy);
690 if (error)
691 return error;
692
693 if (!a[OVS_USERSPACE_ATTR_PID] ||
694 !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
695 return -EINVAL;
696
697 return 0;
698 }
699
700 static int copy_action(const struct nlattr *from,
701 struct sw_flow_actions **sfa)
702 {
703 int totlen = NLA_ALIGN(from->nla_len);
704 struct nlattr *to;
705
706 to = reserve_sfa_size(sfa, from->nla_len);
707 if (IS_ERR(to))
708 return PTR_ERR(to);
709
710 memcpy(to, from, totlen);
711 return 0;
712 }
713
714 static int validate_and_copy_actions(const struct nlattr *attr,
715 const struct sw_flow_key *key,
716 int depth,
717 struct sw_flow_actions **sfa)
718 {
719 const struct nlattr *a;
720 int rem, err;
721
722 if (depth >= SAMPLE_ACTION_DEPTH)
723 return -EOVERFLOW;
724
725 nla_for_each_nested(a, attr, rem) {
726 /* Expected argument lengths, (u32)-1 for variable length. */
727 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
728 [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
729 [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
730 [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
731 [OVS_ACTION_ATTR_POP_VLAN] = 0,
732 [OVS_ACTION_ATTR_SET] = (u32)-1,
733 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
734 };
735 const struct ovs_action_push_vlan *vlan;
736 int type = nla_type(a);
737 bool skip_copy;
738
739 if (type > OVS_ACTION_ATTR_MAX ||
740 (action_lens[type] != nla_len(a) &&
741 action_lens[type] != (u32)-1))
742 return -EINVAL;
743
744 skip_copy = false;
745 switch (type) {
746 case OVS_ACTION_ATTR_UNSPEC:
747 return -EINVAL;
748
749 case OVS_ACTION_ATTR_USERSPACE:
750 err = validate_userspace(a);
751 if (err)
752 return err;
753 break;
754
755 case OVS_ACTION_ATTR_OUTPUT:
756 if (nla_get_u32(a) >= DP_MAX_PORTS)
757 return -EINVAL;
758 break;
759
761 case OVS_ACTION_ATTR_POP_VLAN:
762 break;
763
764 case OVS_ACTION_ATTR_PUSH_VLAN:
765 vlan = nla_data(a);
766 if (vlan->vlan_tpid != htons(ETH_P_8021Q))
767 return -EINVAL;
768 if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
769 return -EINVAL;
770 break;
771
772 case OVS_ACTION_ATTR_SET:
773 err = validate_set(a, key, sfa, &skip_copy);
774 if (err)
775 return err;
776 break;
777
778 case OVS_ACTION_ATTR_SAMPLE:
779 err = validate_and_copy_sample(a, key, depth, sfa);
780 if (err)
781 return err;
782 skip_copy = true;
783 break;
784
785 default:
786 return -EINVAL;
787 }
788 if (!skip_copy) {
789 err = copy_action(a, sfa);
790 if (err)
791 return err;
792 }
793 }
794
795 if (rem > 0)
796 return -EINVAL;
797
798 return 0;
799 }
800
801 static void clear_stats(struct sw_flow *flow)
802 {
803 flow->used = 0;
804 flow->tcp_flags = 0;
805 flow->packet_count = 0;
806 flow->byte_count = 0;
807 }
808
809 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
810 {
811 struct ovs_header *ovs_header = info->userhdr;
812 struct nlattr **a = info->attrs;
813 struct sw_flow_actions *acts;
814 struct sk_buff *packet;
815 struct sw_flow *flow;
816 struct datapath *dp;
817 struct ethhdr *eth;
818 int len;
819 int err;
820 int key_len;
821
822 err = -EINVAL;
823 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
824 !a[OVS_PACKET_ATTR_ACTIONS] ||
825 nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
826 goto err;
827
828 len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
829 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
830 err = -ENOMEM;
831 if (!packet)
832 goto err;
833 skb_reserve(packet, NET_IP_ALIGN);
834
835 memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
836
837 skb_reset_mac_header(packet);
838 eth = eth_hdr(packet);
839
840 /* Normally, setting the skb 'protocol' field would be handled by a
841 * call to eth_type_trans(), but it assumes there's a sending
842 * device, which we may not have. */
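	/* 1536 (0x600) is the smallest valid EtherType; values below it are
	 * IEEE 802.3 length fields, hence the ETH_P_802_2 fallback. */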
843 if (ntohs(eth->h_proto) >= 1536)
844 packet->protocol = eth->h_proto;
845 else
846 packet->protocol = htons(ETH_P_802_2);
847
848 /* Build an sw_flow for sending this packet. */
849 flow = ovs_flow_alloc();
850 err = PTR_ERR(flow);
851 if (IS_ERR(flow))
852 goto err_kfree_skb;
853
854 err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
855 if (err)
856 goto err_flow_free;
857
858 err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
859 if (err)
860 goto err_flow_free;
861 acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
862 err = PTR_ERR(acts);
863 if (IS_ERR(acts))
864 goto err_flow_free;
865
866 err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
867 rcu_assign_pointer(flow->sf_acts, acts);
868 if (err)
869 goto err_flow_free;
870
871 OVS_CB(packet)->flow = flow;
872 packet->priority = flow->key.phy.priority;
873 skb_set_mark(packet, flow->key.phy.skb_mark);
874
875 rcu_read_lock();
876 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
877 err = -ENODEV;
878 if (!dp)
879 goto err_unlock;
880
881 local_bh_disable();
882 err = ovs_execute_actions(dp, packet);
883 local_bh_enable();
884 rcu_read_unlock();
885
886 ovs_flow_free(flow);
887 return err;
888
889 err_unlock:
890 rcu_read_unlock();
891 err_flow_free:
892 ovs_flow_free(flow);
893 err_kfree_skb:
894 kfree_skb(packet);
895 err:
896 return err;
897 }
898
899 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
900 [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
901 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
902 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
903 };
904
905 static struct genl_ops dp_packet_genl_ops[] = {
906 { .cmd = OVS_PACKET_CMD_EXECUTE,
907 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
908 .policy = packet_policy,
909 .doit = ovs_packet_cmd_execute
910 }
911 };
912
913 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
914 {
915 int i;
916 struct flow_table *table = genl_dereference(dp->table);
917
918 stats->n_flows = ovs_flow_tbl_count(table);
919
920 stats->n_hit = stats->n_missed = stats->n_lost = 0;
921 for_each_possible_cpu(i) {
922 const struct dp_stats_percpu *percpu_stats;
923 struct dp_stats_percpu local_stats;
924 unsigned int start;
925
926 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
927
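		/* Retry until the seqcount is stable, so the 64-bit counters
		 * are read consistently even on 32-bit SMP kernels. */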
928 do {
929 start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
930 local_stats = *percpu_stats;
931 } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
932
933 stats->n_hit += local_stats.n_hit;
934 stats->n_missed += local_stats.n_missed;
935 stats->n_lost += local_stats.n_lost;
936 }
937 }
938
939 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
940 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
941 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
942 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
943 };
944
945 static struct genl_family dp_flow_genl_family = {
946 .id = GENL_ID_GENERATE,
947 .hdrsize = sizeof(struct ovs_header),
948 .name = OVS_FLOW_FAMILY,
949 .version = OVS_FLOW_VERSION,
950 .maxattr = OVS_FLOW_ATTR_MAX,
951 SET_NETNSOK
952 };
953
954 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
955 .name = OVS_FLOW_MCGROUP
956 };
957
958 static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
959 static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
960 {
961 const struct nlattr *a;
962 struct nlattr *start;
963 int err = 0, rem;
964
965 start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
966 if (!start)
967 return -EMSGSIZE;
968
969 nla_for_each_nested(a, attr, rem) {
970 int type = nla_type(a);
971 struct nlattr *st_sample;
972
973 switch (type) {
974 case OVS_SAMPLE_ATTR_PROBABILITY:
975 if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
976 return -EMSGSIZE;
977 break;
978 case OVS_SAMPLE_ATTR_ACTIONS:
979 st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
980 if (!st_sample)
981 return -EMSGSIZE;
982 err = actions_to_attr(nla_data(a), nla_len(a), skb);
983 if (err)
984 return err;
985 nla_nest_end(skb, st_sample);
986 break;
987 }
988 }
989
990 nla_nest_end(skb, start);
991 return err;
992 }
993
994 static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
995 {
996 const struct nlattr *ovs_key = nla_data(a);
997 int key_type = nla_type(ovs_key);
998 struct nlattr *start;
999 int err;
1000
1001 switch (key_type) {
1002 case OVS_KEY_ATTR_IPV4_TUNNEL:
1003 start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
1004 if (!start)
1005 return -EMSGSIZE;
1006
1007 err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
1008 if (err)
1009 return err;
1010 nla_nest_end(skb, start);
1011 break;
1012 default:
1013 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
1014 return -EMSGSIZE;
1015 break;
1016 }
1017
1018 return 0;
1019 }
1020
1021 static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
1022 {
1023 const struct nlattr *a;
1024 int rem, err;
1025
1026 nla_for_each_attr(a, attr, len, rem) {
1027 int type = nla_type(a);
1028
1029 switch (type) {
1030 case OVS_ACTION_ATTR_SET:
1031 err = set_action_to_attr(a, skb);
1032 if (err)
1033 return err;
1034 break;
1035
1036 case OVS_ACTION_ATTR_SAMPLE:
1037 err = sample_action_to_attr(a, skb);
1038 if (err)
1039 return err;
1040 break;
1041 default:
1042 if (nla_put(skb, type, nla_len(a), nla_data(a)))
1043 return -EMSGSIZE;
1044 break;
1045 }
1046 }
1047
1048 return 0;
1049 }
1050
1051 /* Called with genl_lock. */
1052 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
1053 struct sk_buff *skb, u32 portid,
1054 u32 seq, u32 flags, u8 cmd)
1055 {
1056 const int skb_orig_len = skb->len;
1057 const struct sw_flow_actions *sf_acts;
1058 struct nlattr *start;
1059 struct ovs_flow_stats stats;
1060 struct ovs_header *ovs_header;
1061 struct nlattr *nla;
1062 unsigned long used;
1063 u8 tcp_flags;
1064 int err;
1065
1066 sf_acts = rcu_dereference_protected(flow->sf_acts,
1067 lockdep_genl_is_held());
1068
1069 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
1070 if (!ovs_header)
1071 return -EMSGSIZE;
1072
1073 ovs_header->dp_ifindex = get_dpifindex(dp);
1074
1075 nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
1076 if (!nla)
1077 goto nla_put_failure;
1078 err = ovs_flow_to_nlattrs(&flow->key, skb);
1079 if (err)
1080 goto error;
1081 nla_nest_end(skb, nla);
1082
1083 spin_lock_bh(&flow->lock);
1084 used = flow->used;
1085 stats.n_packets = flow->packet_count;
1086 stats.n_bytes = flow->byte_count;
1087 tcp_flags = flow->tcp_flags;
1088 spin_unlock_bh(&flow->lock);
1089
1090 if (used &&
1091 nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
1092 goto nla_put_failure;
1093
1094 if (stats.n_packets &&
1095 nla_put(skb, OVS_FLOW_ATTR_STATS,
1096 sizeof(struct ovs_flow_stats), &stats))
1097 goto nla_put_failure;
1098
1099 if (tcp_flags &&
1100 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
1101 goto nla_put_failure;
1102
1103 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
1104 * this is the first flow to be dumped into 'skb'. This is unusual for
1105          * Netlink, but individual action lists can be longer than
1106          * NLMSG_GOODSIZE and would otherwise be entirely undumpable.
1107 * The userspace caller can always fetch the actions separately if it
1108 * really wants them. (Most userspace callers in fact don't care.)
1109 *
1110 * This can only fail for dump operations because the skb is always
1111 * properly sized for single flows.
1112 */
1113 start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
1114 if (start) {
1115 err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
1116 if (!err)
1117 nla_nest_end(skb, start);
1118 else {
1119 if (skb_orig_len)
1120 goto error;
1121
1122 nla_nest_cancel(skb, start);
1123 }
1124 } else if (skb_orig_len)
1125 goto nla_put_failure;
1126
1127 return genlmsg_end(skb, ovs_header);
1128
1129 nla_put_failure:
1130 err = -EMSGSIZE;
1131 error:
1132 genlmsg_cancel(skb, ovs_header);
1133 return err;
1134 }
1135
1136 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
1137 {
1138 const struct sw_flow_actions *sf_acts;
1139 int len;
1140
1141 sf_acts = rcu_dereference_protected(flow->sf_acts,
1142 lockdep_genl_is_held());
1143
1144 /* OVS_FLOW_ATTR_KEY */
1145 len = nla_total_size(FLOW_BUFSIZE);
1146 /* OVS_FLOW_ATTR_ACTIONS */
1147 len += nla_total_size(sf_acts->actions_len);
1148 /* OVS_FLOW_ATTR_STATS */
1149 len += nla_total_size(sizeof(struct ovs_flow_stats));
1150 /* OVS_FLOW_ATTR_TCP_FLAGS */
1151 len += nla_total_size(1);
1152 /* OVS_FLOW_ATTR_USED */
1153 len += nla_total_size(8);
1154
1155 len += NLMSG_ALIGN(sizeof(struct ovs_header));
1156
1157 return genlmsg_new(len, GFP_KERNEL);
1158 }
1159
1160 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
1161 struct datapath *dp,
1162 u32 portid, u32 seq, u8 cmd)
1163 {
1164 struct sk_buff *skb;
1165 int retval;
1166
1167 skb = ovs_flow_cmd_alloc_info(flow);
1168 if (!skb)
1169 return ERR_PTR(-ENOMEM);
1170
1171 retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
1172 BUG_ON(retval < 0);
1173 return skb;
1174 }
1175
1176 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
1177 {
1178 struct nlattr **a = info->attrs;
1179 struct ovs_header *ovs_header = info->userhdr;
1180 struct sw_flow_key key;
1181 struct sw_flow *flow;
1182 struct sk_buff *reply;
1183 struct datapath *dp;
1184 struct flow_table *table;
1185 struct sw_flow_actions *acts = NULL;
1186 int error;
1187 int key_len;
1188
1189 /* Extract key. */
1190 error = -EINVAL;
1191 if (!a[OVS_FLOW_ATTR_KEY])
1192 goto error;
1193 error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1194 if (error)
1195 goto error;
1196
1197 /* Validate actions. */
1198 if (a[OVS_FLOW_ATTR_ACTIONS]) {
1199 acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
1200 error = PTR_ERR(acts);
1201 if (IS_ERR(acts))
1202 goto error;
1203
1204 error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
1205 if (error)
1206 goto err_kfree;
1207 } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
1208 error = -EINVAL;
1209 goto error;
1210 }
1211
1212 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1213 error = -ENODEV;
1214 if (!dp)
1215 goto err_kfree;
1216
1217 table = genl_dereference(dp->table);
1218 flow = ovs_flow_tbl_lookup(table, &key, key_len);
1219 if (!flow) {
1220 /* Bail out if we're not allowed to create a new flow. */
1221 error = -ENOENT;
1222 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
1223 goto err_kfree;
1224
1225 /* Expand table, if necessary, to make room. */
1226 if (ovs_flow_tbl_need_to_expand(table)) {
1227 struct flow_table *new_table;
1228
1229 new_table = ovs_flow_tbl_expand(table);
1230 if (!IS_ERR(new_table)) {
1231 rcu_assign_pointer(dp->table, new_table);
1232 ovs_flow_tbl_deferred_destroy(table);
1233 table = genl_dereference(dp->table);
1234 }
1235 }
1236
1237 /* Allocate flow. */
1238 flow = ovs_flow_alloc();
1239 if (IS_ERR(flow)) {
1240 error = PTR_ERR(flow);
1241 goto err_kfree;
1242 }
1243 clear_stats(flow);
1244
1245 rcu_assign_pointer(flow->sf_acts, acts);
1246
1247 /* Put flow in bucket. */
1248 ovs_flow_tbl_insert(table, flow, &key, key_len);
1249
1250 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1251 info->snd_seq,
1252 OVS_FLOW_CMD_NEW);
1253 } else {
1254 /* We found a matching flow. */
1255 struct sw_flow_actions *old_acts;
1256
1257 /* Bail out if we're not allowed to modify an existing flow.
1258 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1259 * because Generic Netlink treats the latter as a dump
1260 * request. We also accept NLM_F_EXCL in case that bug ever
1261 * gets fixed.
1262 */
1263 error = -EEXIST;
1264 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
1265 info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
1266 goto err_kfree;
1267
1268 /* Update actions. */
1269 old_acts = rcu_dereference_protected(flow->sf_acts,
1270 lockdep_genl_is_held());
1271 rcu_assign_pointer(flow->sf_acts, acts);
1272 ovs_flow_deferred_free_acts(old_acts);
1273
1274 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1275 info->snd_seq, OVS_FLOW_CMD_NEW);
1276
1277 /* Clear stats. */
1278 if (a[OVS_FLOW_ATTR_CLEAR]) {
1279 spin_lock_bh(&flow->lock);
1280 clear_stats(flow);
1281 spin_unlock_bh(&flow->lock);
1282 }
1283 }
1284
1285 if (!IS_ERR(reply))
1286 genl_notify(reply, genl_info_net(info), info->snd_portid,
1287 ovs_dp_flow_multicast_group.id, info->nlhdr,
1288 GFP_KERNEL);
1289 else
1290 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1291 ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
1292 return 0;
1293
1294 err_kfree:
1295 kfree(acts);
1296 error:
1297 return error;
1298 }
1299
1300 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1301 {
1302 struct nlattr **a = info->attrs;
1303 struct ovs_header *ovs_header = info->userhdr;
1304 struct sw_flow_key key;
1305 struct sk_buff *reply;
1306 struct sw_flow *flow;
1307 struct datapath *dp;
1308 struct flow_table *table;
1309 int err;
1310 int key_len;
1311
1312 if (!a[OVS_FLOW_ATTR_KEY])
1313 return -EINVAL;
1314 err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1315 if (err)
1316 return err;
1317
1318 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1319 if (!dp)
1320 return -ENODEV;
1321
1322 table = genl_dereference(dp->table);
1323 flow = ovs_flow_tbl_lookup(table, &key, key_len);
1324 if (!flow)
1325 return -ENOENT;
1326
1327 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1328 info->snd_seq, OVS_FLOW_CMD_NEW);
1329 if (IS_ERR(reply))
1330 return PTR_ERR(reply);
1331
1332 return genlmsg_reply(reply, info);
1333 }
1334
1335 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1336 {
1337 struct nlattr **a = info->attrs;
1338 struct ovs_header *ovs_header = info->userhdr;
1339 struct sw_flow_key key;
1340 struct sk_buff *reply;
1341 struct sw_flow *flow;
1342 struct datapath *dp;
1343 struct flow_table *table;
1344 int err;
1345 int key_len;
1346
1347 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1348 if (!dp)
1349 return -ENODEV;
1350
1351 if (!a[OVS_FLOW_ATTR_KEY])
1352 return flush_flows(dp);
1353
1354 err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1355 if (err)
1356 return err;
1357
1358 table = genl_dereference(dp->table);
1359 flow = ovs_flow_tbl_lookup(table, &key, key_len);
1360 if (!flow)
1361 return -ENOENT;
1362
1363 reply = ovs_flow_cmd_alloc_info(flow);
1364 if (!reply)
1365 return -ENOMEM;
1366
1367 ovs_flow_tbl_remove(table, flow);
1368
1369 err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
1370 info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1371 BUG_ON(err < 0);
1372
1373 ovs_flow_deferred_free(flow);
1374
1375 genl_notify(reply, genl_info_net(info), info->snd_portid,
1376 ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1377 return 0;
1378 }
1379
1380 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1381 {
1382 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1383 struct datapath *dp;
1384 struct flow_table *table;
1385
1386 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1387 if (!dp)
1388 return -ENODEV;
1389
1390 table = genl_dereference(dp->table);
1391
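	/* cb->args[0] and cb->args[1] record the bucket and entry offset so a
	 * multi-message dump can resume where the previous pass stopped. */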
1392 for (;;) {
1393 struct sw_flow *flow;
1394 u32 bucket, obj;
1395
1396 bucket = cb->args[0];
1397 obj = cb->args[1];
1398 flow = ovs_flow_tbl_next(table, &bucket, &obj);
1399 if (!flow)
1400 break;
1401
1402 if (ovs_flow_cmd_fill_info(flow, dp, skb,
1403 NETLINK_CB(cb->skb).portid,
1404 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1405 OVS_FLOW_CMD_NEW) < 0)
1406 break;
1407
1408 cb->args[0] = bucket;
1409 cb->args[1] = obj;
1410 }
1411 return skb->len;
1412 }
1413
1414 static struct genl_ops dp_flow_genl_ops[] = {
1415 { .cmd = OVS_FLOW_CMD_NEW,
1416 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1417 .policy = flow_policy,
1418 .doit = ovs_flow_cmd_new_or_set
1419 },
1420 { .cmd = OVS_FLOW_CMD_DEL,
1421 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1422 .policy = flow_policy,
1423 .doit = ovs_flow_cmd_del
1424 },
1425 { .cmd = OVS_FLOW_CMD_GET,
1426 .flags = 0, /* OK for unprivileged users. */
1427 .policy = flow_policy,
1428 .doit = ovs_flow_cmd_get,
1429 .dumpit = ovs_flow_cmd_dump
1430 },
1431 { .cmd = OVS_FLOW_CMD_SET,
1432 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1433 .policy = flow_policy,
1434 .doit = ovs_flow_cmd_new_or_set,
1435 },
1436 };
1437
1438 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1439 #ifdef HAVE_NLA_NUL_STRING
1440 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1441 #endif
1442 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1443 };
1444
1445 static struct genl_family dp_datapath_genl_family = {
1446 .id = GENL_ID_GENERATE,
1447 .hdrsize = sizeof(struct ovs_header),
1448 .name = OVS_DATAPATH_FAMILY,
1449 .version = OVS_DATAPATH_VERSION,
1450 .maxattr = OVS_DP_ATTR_MAX,
1451 SET_NETNSOK
1452 };
1453
1454 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1455 .name = OVS_DATAPATH_MCGROUP
1456 };
1457
1458 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1459 u32 portid, u32 seq, u32 flags, u8 cmd)
1460 {
1461 struct ovs_header *ovs_header;
1462 struct ovs_dp_stats dp_stats;
1463 int err;
1464
1465 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1466 flags, cmd);
1467 if (!ovs_header)
1468 goto error;
1469
1470 ovs_header->dp_ifindex = get_dpifindex(dp);
1471
1472 rcu_read_lock();
1473 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1474 rcu_read_unlock();
1475 if (err)
1476 goto nla_put_failure;
1477
1478 get_dp_stats(dp, &dp_stats);
1479 if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1480 goto nla_put_failure;
1481
1482 return genlmsg_end(skb, ovs_header);
1483
1484 nla_put_failure:
1485 genlmsg_cancel(skb, ovs_header);
1486 error:
1487 return -EMSGSIZE;
1488 }
1489
1490 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
1491 u32 seq, u8 cmd)
1492 {
1493 struct sk_buff *skb;
1494 int retval;
1495
1496 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1497 if (!skb)
1498 return ERR_PTR(-ENOMEM);
1499
1500 retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
1501 if (retval < 0) {
1502 kfree_skb(skb);
1503 return ERR_PTR(retval);
1504 }
1505 return skb;
1506 }
1507
1508 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1509 {
1510 return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1511 }
1512
1513 /* Called with genl_mutex and optionally with RTNL lock also. */
1514 static struct datapath *lookup_datapath(struct net *net,
1515 struct ovs_header *ovs_header,
1516 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1517 {
1518 struct datapath *dp;
1519
1520 if (!a[OVS_DP_ATTR_NAME])
1521 dp = get_dp(net, ovs_header->dp_ifindex);
1522 else {
1523 struct vport *vport;
1524
1525 rcu_read_lock();
1526 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1527 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1528 rcu_read_unlock();
1529 }
1530 return dp ? dp : ERR_PTR(-ENODEV);
1531 }
1532
1533 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1534 {
1535 struct nlattr **a = info->attrs;
1536 struct vport_parms parms;
1537 struct sk_buff *reply;
1538 struct datapath *dp;
1539 struct vport *vport;
1540 struct ovs_net *ovs_net;
1541 int err, i;
1542
1543 err = -EINVAL;
1544 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1545 goto err;
1546
1547 err = ovs_dp_cmd_validate(a);
1548 if (err)
1549 goto err;
1550
1551 rtnl_lock();
1552
1553 err = -ENOMEM;
1554 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1555 if (dp == NULL)
1556 goto err_unlock_rtnl;
1557
1558 ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1559
1560 /* Allocate table. */
1561 err = -ENOMEM;
1562 rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
1563 if (!dp->table)
1564 goto err_free_dp;
1565
1566 dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1567 if (!dp->stats_percpu) {
1568 err = -ENOMEM;
1569 goto err_destroy_table;
1570 }
1571
1572 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1573 GFP_KERNEL);
1574 if (!dp->ports) {
1575 err = -ENOMEM;
1576 goto err_destroy_percpu;
1577 }
1578
1579 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1580 INIT_HLIST_HEAD(&dp->ports[i]);
1581
1582 /* Set up our datapath device. */
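	/* OVSP_LOCAL is the datapath's internal port; its ifindex is what
	 * identifies the datapath to userspace (see get_dpifindex()). */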
1583 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1584 parms.type = OVS_VPORT_TYPE_INTERNAL;
1585 parms.options = NULL;
1586 parms.dp = dp;
1587 parms.port_no = OVSP_LOCAL;
1588 parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1589
1590 vport = new_vport(&parms);
1591 if (IS_ERR(vport)) {
1592 err = PTR_ERR(vport);
1593 if (err == -EBUSY)
1594 err = -EEXIST;
1595
1596 goto err_destroy_ports_array;
1597 }
1598
1599 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1600 info->snd_seq, OVS_DP_CMD_NEW);
1601 err = PTR_ERR(reply);
1602 if (IS_ERR(reply))
1603 goto err_destroy_local_port;
1604
1605 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1606 list_add_tail(&dp->list_node, &ovs_net->dps);
1607
1608 rtnl_unlock();
1609
1610 genl_notify(reply, genl_info_net(info), info->snd_portid,
1611 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1612 GFP_KERNEL);
1613 return 0;
1614
1615 err_destroy_local_port:
1616 ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1617 err_destroy_ports_array:
1618 kfree(dp->ports);
1619 err_destroy_percpu:
1620 free_percpu(dp->stats_percpu);
1621 err_destroy_table:
1622 ovs_flow_tbl_destroy(genl_dereference(dp->table));
1623 err_free_dp:
1624 release_net(ovs_dp_get_net(dp));
1625 kfree(dp);
1626 err_unlock_rtnl:
1627 rtnl_unlock();
1628 err:
1629 return err;
1630 }
1631
1632 /* Called with genl_mutex. */
1633 static void __dp_destroy(struct datapath *dp)
1634 {
1635 int i;
1636
1637 rtnl_lock();
1638
1639 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1640 struct vport *vport;
1641 struct hlist_node *node, *n;
1642
1643 hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
1644 if (vport->port_no != OVSP_LOCAL)
1645 ovs_dp_detach_port(vport);
1646 }
1647
1648 list_del(&dp->list_node);
1649 ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1650
1651 /* rtnl_unlock() will wait until all the references to devices that
1652 * are pending unregistration have been dropped. We do it here to
1653 * ensure that any internal devices (which contain DP pointers) are
1654 * fully destroyed before freeing the datapath.
1655 */
1656 rtnl_unlock();
1657
1658 call_rcu(&dp->rcu, destroy_dp_rcu);
1659 }
1660
1661 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1662 {
1663 struct sk_buff *reply;
1664 struct datapath *dp;
1665 int err;
1666
1667 err = ovs_dp_cmd_validate(info->attrs);
1668 if (err)
1669 return err;
1670
1671 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1672 err = PTR_ERR(dp);
1673 if (IS_ERR(dp))
1674 return err;
1675
1676 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1677 info->snd_seq, OVS_DP_CMD_DEL);
1678 err = PTR_ERR(reply);
1679 if (IS_ERR(reply))
1680 return err;
1681
1682 __dp_destroy(dp);
1683
1684 genl_notify(reply, genl_info_net(info), info->snd_portid,
1685 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1686 GFP_KERNEL);
1687
1688 return 0;
1689 }
1690
1691 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1692 {
1693 struct sk_buff *reply;
1694 struct datapath *dp;
1695 int err;
1696
1697 err = ovs_dp_cmd_validate(info->attrs);
1698 if (err)
1699 return err;
1700
1701 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1702 if (IS_ERR(dp))
1703 return PTR_ERR(dp);
1704
1705 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1706 info->snd_seq, OVS_DP_CMD_NEW);
1707 if (IS_ERR(reply)) {
1708 err = PTR_ERR(reply);
1709 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1710 ovs_dp_datapath_multicast_group.id, err);
1711 return 0;
1712 }
1713
1714 genl_notify(reply, genl_info_net(info), info->snd_portid,
1715 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1716 GFP_KERNEL);
1717
1718 return 0;
1719 }
1720
1721 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1722 {
1723 struct sk_buff *reply;
1724 struct datapath *dp;
1725 int err;
1726
1727 err = ovs_dp_cmd_validate(info->attrs);
1728 if (err)
1729 return err;
1730
1731 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1732 if (IS_ERR(dp))
1733 return PTR_ERR(dp);
1734
1735 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1736 info->snd_seq, OVS_DP_CMD_NEW);
1737 if (IS_ERR(reply))
1738 return PTR_ERR(reply);
1739
1740 return genlmsg_reply(reply, info);
1741 }
1742
1743 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1744 {
1745 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1746 struct datapath *dp;
1747 int skip = cb->args[0];
1748 int i = 0;
1749
1750 list_for_each_entry(dp, &ovs_net->dps, list_node) {
1751 if (i >= skip &&
1752 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1753 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1754 OVS_DP_CMD_NEW) < 0)
1755 break;
1756 i++;
1757 }
1758
1759 cb->args[0] = i;
1760
1761 return skb->len;
1762 }
1763
1764 static struct genl_ops dp_datapath_genl_ops[] = {
1765 { .cmd = OVS_DP_CMD_NEW,
1766 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1767 .policy = datapath_policy,
1768 .doit = ovs_dp_cmd_new
1769 },
1770 { .cmd = OVS_DP_CMD_DEL,
1771 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1772 .policy = datapath_policy,
1773 .doit = ovs_dp_cmd_del
1774 },
1775 { .cmd = OVS_DP_CMD_GET,
1776 .flags = 0, /* OK for unprivileged users. */
1777 .policy = datapath_policy,
1778 .doit = ovs_dp_cmd_get,
1779 .dumpit = ovs_dp_cmd_dump
1780 },
1781 { .cmd = OVS_DP_CMD_SET,
1782 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1783 .policy = datapath_policy,
1784 .doit = ovs_dp_cmd_set,
1785 },
1786 };
1787
1788 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1789 #ifdef HAVE_NLA_NUL_STRING
1790 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1791 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1792 #else
1793 [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
1794 #endif
1795 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1796 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1797 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1798 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1799 };
1800
1801 static struct genl_family dp_vport_genl_family = {
1802 .id = GENL_ID_GENERATE,
1803 .hdrsize = sizeof(struct ovs_header),
1804 .name = OVS_VPORT_FAMILY,
1805 .version = OVS_VPORT_VERSION,
1806 .maxattr = OVS_VPORT_ATTR_MAX,
1807 SET_NETNSOK
1808 };
1809
1810 struct genl_multicast_group ovs_dp_vport_multicast_group = {
1811 .name = OVS_VPORT_MCGROUP
1812 };
1813
1814 /* Called with RTNL lock or RCU read lock. */
1815 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1816 u32 portid, u32 seq, u32 flags, u8 cmd)
1817 {
1818 struct ovs_header *ovs_header;
1819 struct ovs_vport_stats vport_stats;
1820 int err;
1821
1822 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1823 flags, cmd);
1824 if (!ovs_header)
1825 return -EMSGSIZE;
1826
1827 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1828
1829 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1830 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1831 nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
1832 nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
1833 goto nla_put_failure;
1834
1835 ovs_vport_get_stats(vport, &vport_stats);
1836 if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1837 &vport_stats))
1838 goto nla_put_failure;
1839
1840 err = ovs_vport_get_options(vport, skb);
1841 if (err == -EMSGSIZE)
1842 goto error;
1843
1844 return genlmsg_end(skb, ovs_header);
1845
1846 nla_put_failure:
1847 err = -EMSGSIZE;
1848 error:
1849 genlmsg_cancel(skb, ovs_header);
1850 return err;
1851 }
1852
1853 /* Called with RTNL lock or RCU read lock. */
1854 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1855 u32 seq, u8 cmd)
1856 {
1857 struct sk_buff *skb;
1858 int retval;
1859
1860 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1861 if (!skb)
1862 return ERR_PTR(-ENOMEM);
1863
1864 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1865 if (retval < 0) {
1866 kfree_skb(skb);
1867 return ERR_PTR(retval);
1868 }
1869 return skb;
1870 }
1871
1872 static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1873 {
1874 return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1875 }
1876
1877 /* Called with RTNL lock or RCU read lock. */
1878 static struct vport *lookup_vport(struct net *net,
1879 struct ovs_header *ovs_header,
1880 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1881 {
1882 struct datapath *dp;
1883 struct vport *vport;
1884
1885 if (a[OVS_VPORT_ATTR_NAME]) {
1886 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1887 if (!vport)
1888 return ERR_PTR(-ENODEV);
1889 if (ovs_header->dp_ifindex &&
1890 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1891 return ERR_PTR(-ENODEV);
1892 return vport;
1893 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1894 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1895
1896 if (port_no >= DP_MAX_PORTS)
1897 return ERR_PTR(-EFBIG);
1898
1899 dp = get_dp(net, ovs_header->dp_ifindex);
1900 if (!dp)
1901 return ERR_PTR(-ENODEV);
1902
1903 vport = ovs_vport_rtnl_rcu(dp, port_no);
1904 if (!vport)
1905 return ERR_PTR(-ENODEV);
1906 return vport;
1907 } else
1908 return ERR_PTR(-EINVAL);
1909 }
1910
1911 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1912 {
1913 struct nlattr **a = info->attrs;
1914 struct ovs_header *ovs_header = info->userhdr;
1915 struct vport_parms parms;
1916 struct sk_buff *reply;
1917 struct vport *vport;
1918 struct datapath *dp;
1919 u32 port_no;
1920 int err;
1921
1922 err = -EINVAL;
1923 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1924 !a[OVS_VPORT_ATTR_UPCALL_PID])
1925 goto exit;
1926
1927 err = ovs_vport_cmd_validate(a);
1928 if (err)
1929 goto exit;
1930
1931 rtnl_lock();
1932 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1933 err = -ENODEV;
1934 if (!dp)
1935 goto exit_unlock;
1936
1937 if (a[OVS_VPORT_ATTR_PORT_NO]) {
1938 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1939
1940 err = -EFBIG;
1941 if (port_no >= DP_MAX_PORTS)
1942 goto exit_unlock;
1943
1944 vport = ovs_vport_rtnl(dp, port_no);
1945 err = -EBUSY;
1946 if (vport)
1947 goto exit_unlock;
1948 } else {
1949 for (port_no = 1; ; port_no++) {
1950 if (port_no >= DP_MAX_PORTS) {
1951 err = -EFBIG;
1952 goto exit_unlock;
1953 }
1954 vport = ovs_vport_rtnl(dp, port_no);
1955 if (!vport)
1956 break;
1957 }
1958 }
1959
1960 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1961 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1962 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1963 parms.dp = dp;
1964 parms.port_no = port_no;
1965 parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1966
1967 vport = new_vport(&parms);
1968 err = PTR_ERR(vport);
1969 if (IS_ERR(vport))
1970 goto exit_unlock;
1971
1972 err = 0;
1973 if (a[OVS_VPORT_ATTR_STATS])
1974 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1975
1976 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
1977 OVS_VPORT_CMD_NEW);
1978 if (IS_ERR(reply)) {
1979 err = PTR_ERR(reply);
1980 ovs_dp_detach_port(vport);
1981 goto exit_unlock;
1982 }
1983 genl_notify(reply, genl_info_net(info), info->snd_portid,
1984 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1985
1986 exit_unlock:
1987 rtnl_unlock();
1988 exit:
1989 return err;
1990 }
1991
1992 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1993 {
1994 struct nlattr **a = info->attrs;
1995 struct sk_buff *reply;
1996 struct vport *vport;
1997 int err;
1998
1999 err = ovs_vport_cmd_validate(a);
2000 if (err)
2001 goto exit;
2002
2003 rtnl_lock();
2004 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2005 err = PTR_ERR(vport);
2006 if (IS_ERR(vport))
2007 goto exit_unlock;
2008
2009 err = 0;
2010 if (a[OVS_VPORT_ATTR_TYPE] &&
2011 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
2012 err = -EINVAL;
2013
2014 if (!err && a[OVS_VPORT_ATTR_OPTIONS])
2015 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2016 if (err)
2017 goto exit_unlock;
2018
2019 if (a[OVS_VPORT_ATTR_STATS])
2020 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
2021
2022 if (a[OVS_VPORT_ATTR_UPCALL_PID])
2023 vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
2024
2025 reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
2026 info->snd_seq, OVS_VPORT_CMD_NEW);
2027 if (IS_ERR(reply)) {
2028 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
2029 ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
2030 goto exit_unlock;
2031 }
2032
2033 genl_notify(reply, genl_info_net(info), info->snd_portid,
2034 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
2035
2036 exit_unlock:
2037 rtnl_unlock();
2038 exit:
2039 return err;
2040 }
2041
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rtnl_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = 0;
	ovs_dp_detach_port(vport);

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}

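/* Handle OVS_VPORT_CMD_GET: look up one vport under RCU and reply with its
 * current configuration and statistics. */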
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
exit:
	return err;
}

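/* Dump all vports of one datapath.  cb->args[0] holds the hash bucket and
 * cb->args[1] the offset within that bucket at which to resume. */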
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	rcu_read_lock();
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		j = 0;
		hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

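/* Generic Netlink operations implementing the OVS_VPORT_CMD_* commands. */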
static struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};

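/* Groups a Generic Netlink family with its operations and optional multicast
 * group so that all of them can be (un)registered together. */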
struct genl_family_and_ops {
	struct genl_family *family;
	struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};

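/* Unregister the first n_families entries of dp_genl_families; also used to
 * unwind a partially completed dp_register_genl(). */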
static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}

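/* Register every family in dp_genl_families together with its multicast
 * group, unregistering everything done so far if any step fails. */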
static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		err = genl_register_family_with_ops(f->family, f->ops,
						    f->n_ops);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}

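/* Rehash the flow table of every datapath in every network namespace.
 * Called via genl_exec() so genl_mutex is held; RTNL is taken here to walk
 * the per-namespace datapath lists.  Each old table is destroyed only after
 * an RCU grace period. */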
static int __rehash_flow_table(void *dummy)
{
	struct datapath *dp;
	struct net *net;

	rtnl_lock();
	for_each_net(net) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		list_for_each_entry(dp, &ovs_net->dps, list_node) {
			struct flow_table *old_table = genl_dereference(dp->table);
			struct flow_table *new_table;

			new_table = ovs_flow_tbl_rehash(old_table);
			if (!IS_ERR(new_table)) {
				rcu_assign_pointer(dp->table, new_table);
				ovs_flow_tbl_deferred_destroy(old_table);
			}
		}
	}
	rtnl_unlock();
	return 0;
}

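/* Delayed-work handler: rehash all flow tables, then re-arm the work so it
 * runs again after REHASH_FLOW_INTERVAL. */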
static void rehash_flow_table(struct work_struct *work)
{
	genl_exec(__rehash_flow_table, NULL);
	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}

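/* Destroy every datapath in one ovs_net; called via genl_exec() on
 * namespace exit so genl_mutex is held. */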
static int dp_destroy_all(void *data)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = data;

	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);

	return 0;
}

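/* Per-namespace setup: initialize this namespace's list of datapaths. */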
static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	return 0;
}

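/* Per-namespace teardown: destroy all of this namespace's datapaths. */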
static void __net_exit ovs_exit_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	genl_exec(dp_destroy_all, ovs_net);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

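/* Module init: bring up each subsystem in dependency order, start the
 * periodic flow-table rehash, and unwind everything in reverse on failure. */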
static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
		VERSION);

	err = genl_exec_init();
	if (err)
		goto error;

	err = ovs_workqueues_init();
	if (err)
		goto error_genl_exec;

	err = ovs_tnl_init();
	if (err)
		goto error_wq;

	err = ovs_flow_init();
	if (err)
		goto error_tnl_exit;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error_tnl_exit:
	ovs_tnl_exit();
error_wq:
	ovs_workqueues_exit();
error_genl_exec:
	genl_exec_exit();
error:
	return err;
}

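/* Module exit: tear down in the reverse order of dp_init().  rcu_barrier()
 * waits for outstanding RCU callbacks before the caches they use go away. */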
static void dp_cleanup(void)
{
	cancel_delayed_work_sync(&rehash_flow_wq);
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
	ovs_tnl_exit();
	ovs_workqueues_exit();
	genl_exec_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);