/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
int ovs_net_id __read_mostly;
static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, genl_info_net(info), info->snd_portid,
		    0, info->nlhdr, GFP_KERNEL);
}
/**
 * DOC: Locking:
 *
 * All writes to device state (add/remove datapath or port, set operations
 * on vports, etc.) and writes to other state (flow table modifications,
 * setting miscellaneous datapath parameters, etc.) are protected by
 * ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */
static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
#endif
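/*
 * Illustrative sketch (added commentary, not from the original code):
 * writers bracket datapath mutations with the helpers above, e.g.
 *
 *	ovs_lock();
 *	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 *	if (!IS_ERR(dp))
 *		ovs_dp_change(dp, info->attrs);
 *	ovs_unlock();
 *
 * while pure readers such as the dump operations rely on rcu_read_lock()
 * alone.
 */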
static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
				  struct sk_buff *,
				  const struct dp_upcall_info *);
/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp = NULL;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, dp_ifindex);
	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			dp = vport->dp;
	}
	rcu_read_unlock();

	return dp;
}
/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return vport->ops->get_name(vport);
}
static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = netdev_vport_priv(local)->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
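/*
 * Note (added commentary): the bucket mask above assumes
 * DP_VPORT_HASH_BUCKETS is a power of two, so
 * "port_no & (DP_VPORT_HASH_BUCKETS - 1)" is an inexpensive modulo.
 */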
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}
/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}
void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}
/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	struct sw_flow_key key;
	u64 *stats_counter;
	u32 n_mask_hit;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_extract(skb, p->port_no, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = &key;
		upcall.userdata = NULL;
		upcall.portid = p->upcall_portid;
		ovs_dp_upcall(dp, skb, &upcall);
		consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	OVS_CB(skb)->flow = flow;
	OVS_CB(skb)->pkt_key = &key;

	ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);
	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->sync);
}
static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};
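/*
 * Note (added commentary): GENL_ID_GENERATE asks the kernel to pick a
 * family id at registration time, so userspace must resolve the family by
 * name (OVS_PACKET_FAMILY) through the generic netlink controller before
 * it can send OVS_PACKET_CMD_* requests.
 */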
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}
static int queue_gso_packets(struct net *net, int dp_ifindex,
			     struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_extract()
			 * in this case is for a first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}
static size_t key_attr_size(void)
{
	return nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}
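/*
 * Note (added commentary): this is a worst-case upper bound on the netlink
 * size of a flow key; upcall_msg_size() and ovs_flow_cmd_msg_size() below
 * use it to size messages up front instead of measuring each key.
 */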
static size_t upcall_msg_size(const struct sk_buff *skb,
			      const struct nlattr *userdata)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (userdata)
		size += NLA_ALIGN(userdata->nla_len);

	return size;
}
static int queue_userspace_packet(struct net *net, int dp_ifindex,
				  struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	struct genl_info info = {
		.dst_sk = net->genl_sock,
		.snd_portid = upcall_info->portid,
	};
	size_t len;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	len = upcall_msg_size(skb, upcall_info->userdata);
	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	genlmsg_end(user_skb, upcall);
	err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
	kfree_skb(nskb);
	return err;
}
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct datapath *dp;
	struct ethhdr *eth;
	int len;
	int err;

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc(false);
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_extract(packet, -1, &flow->key);
	if (err)
		goto err_flow_free;

	err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
	if (err)
		goto err_flow_free;
	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
	err = PTR_ERR(acts);
	if (IS_ERR(acts))
		goto err_flow_free;

	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, 0, &acts);
	rcu_assign_pointer(flow->sf_acts, acts);
	if (err)
		goto err_flow_free;

	OVS_CB(packet)->flow = flow;
	OVS_CB(packet)->pkt_key = &flow->key;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	local_bh_disable();
	err = ovs_execute_actions(dp, packet);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}
static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};
static const struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
	return NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}
/* Called with ovs_mutex. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	struct nlattr *start;
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;
	struct ovs_header *ovs_header;
	struct nlattr *nla;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	/* Fill flow key. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;

	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
	if (err)
		goto error;
	nla_nest_end(skb, nla);

	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
	if (!nla)
		goto nla_put_failure;

	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
	if (err)
		goto error;

	nla_nest_end(skb, nla);

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		goto nla_put_failure;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
		goto nla_put_failure;

	if ((u8)ntohs(tcp_flags) &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		goto nla_put_failure;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual
	 * for Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);

		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);
		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				goto error;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len)
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow,
					       struct genl_info *info)
{
	size_t len;

	len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts));

	return genlmsg_new_unicast(len, info, GFP_KERNEL);
}
static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
					       struct datapath *dp,
					       struct genl_info *info,
					       u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(flow, info);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid,
					info->snd_seq, 0, cmd);
	BUG_ON(retval < 0);
	return skb;
}
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key, masked_key;
	struct sw_flow *flow = NULL;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts = NULL;
	struct sw_flow_match match;
	bool exact_5tuple;
	int error;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY])
		goto error;

	ovs_match_init(&match, &key, &mask);
	error = ovs_nla_get_match(&match, &exact_5tuple,
				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
	if (error)
		goto error;

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error;

		ovs_flow_mask_key(&masked_key, &key, &mask);
		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
					     &masked_key, 0, &acts);
		if (error) {
			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
			goto err_kfree;
		}
	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
		error = -EINVAL;
		goto error;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	error = -ENODEV;
	if (!dp)
		goto err_unlock_ovs;

	/* Check if this is a duplicate flow */
	flow = ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow) {
		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
			goto err_unlock_ovs;

		/* Allocate flow. */
		flow = ovs_flow_alloc(!exact_5tuple);
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto err_unlock_ovs;
		}

		flow->key = masked_key;
		flow->unmasked_key = key;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
		if (error) {
			acts = NULL;
			goto err_flow_free;
		}

		reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		error = -EEXIST;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto err_unlock_ovs;

		/* The unmasked key has to be the same for flow updates. */
		if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
			OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
			goto err_unlock_ovs;
		}

		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);
		ovs_nla_free_flow_actions(old_acts);

		reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);

		/* Clear stats. */
		if (a[OVS_FLOW_ATTR_CLEAR])
			ovs_flow_stats_clear(flow);
	}
	ovs_unlock();

	if (!IS_ERR(reply))
		ovs_notify(&dp_flow_genl_family, reply, info);
	else
		genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
			     0, PTR_ERR(reply));
	return 0;

err_flow_free:
	ovs_flow_free(flow, false);
err_unlock_ovs:
	ovs_unlock();
err_kfree:
	kfree(acts);
error:
	return error;
}
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	int err;

	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
		return -EINVAL;
	}

	ovs_match_init(&match, &key, NULL);
	err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	flow = ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	int err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (!a[OVS_FLOW_ATTR_KEY]) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	ovs_match_init(&match, &key, NULL);
	err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
	if (err)
		goto unlock;

	flow = ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_alloc_info(flow, info);
	if (!reply) {
		err = -ENOMEM;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);

	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
	BUG_ON(err < 0);

	ovs_flow_free(flow, true);
	ovs_unlock();

	ovs_notify(&dp_flow_genl_family, reply, info);
	return 0;
unlock:
	ovs_unlock();
	return err;
}
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, dp, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}
static const struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set,
	},
};
static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};
static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

	return msgsize;
}
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	rcu_read_lock();
	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
		    &dp_stats))
		goto nla_put_failure;

	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
		    sizeof(struct ovs_dp_megaflow_stats),
		    &dp_megaflow_stats))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}
static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp,
					     struct genl_info *info, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_dp_cmd_fill_info(dp, skb, info->snd_portid, info->snd_seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}
/* Called with ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		rcu_read_lock();
		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
		rcu_read_unlock();
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}
static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
{
	if (a[OVS_DP_ATTR_USER_FEATURES])
		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
}
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err, i;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	ovs_lock();

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_unlock_ovs;

	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

	/* Allocate table. */
	err = ovs_flow_tbl_init(&dp->table);
	if (err)
		goto err_free_dp;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	for_each_possible_cpu(i) {
		struct dp_stats_percpu *dpath_stats;
		dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
		u64_stats_init(&dpath_stats->sync);
	}

	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dp->ports) {
		err = -ENOMEM;
		goto err_destroy_percpu;
	}

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

	ovs_dp_change(dp, a);

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_ports_array;
	}

	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_destroy_local_port;

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);
	return 0;

err_destroy_local_port:
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
err_destroy_ports_array:
	kfree(dp->ports);
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
	release_net(ovs_dp_get_net(dp));
	kfree(dp);
err_unlock_ovs:
	ovs_unlock();
err:
	return err;
}
/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is the datapath internal port.  We need to make sure
	 * that all ports in the datapath are destroyed before freeing the
	 * datapath itself.
	 */
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

	call_rcu(&dp->rcu, destroy_dp_rcu);
}
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto unlock;

	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto unlock;

	__dp_destroy(dp);
	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;
unlock:
	ovs_unlock();
	return err;
}
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto unlock;

	ovs_dp_change(dp, info->attrs);

	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
			     0, err);
		err = 0;
		goto unlock;
	}

	ovs_unlock();
	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;
unlock:
	ovs_unlock();
	return err;
}
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp)) {
		err = PTR_ERR(dp);
		goto unlock;
	}

	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);

unlock:
	ovs_unlock();
	return err;
}
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}
	rcu_read_unlock();

	cb->args[0] = i;

	return skb->len;
}
static const struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};
/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
/* Called with ovs_mutex or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);

	return skb;
}
/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}

	ovs_notify(&dp_vport_genl_family, reply, info);

exit_unlock:
	ovs_unlock();
exit:
	return err;
}
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!reply) {
		err = -ENOMEM;
		goto exit_unlock;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_free:
	kfree_skb(reply);
exit_unlock:
	ovs_unlock();
	return err;
}
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = 0;
	ovs_dp_detach_port(vport);

	ovs_notify(&dp_vport_genl_family, reply, info);

exit_unlock:
	ovs_unlock();
	return err;
}
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
	return err;
}
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}
static const struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};
struct genl_family_and_ops {
	struct genl_family *family;
	const struct genl_ops *ops;
	int n_ops;
	const struct genl_multicast_group *group;
};
static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};
static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}
static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		f->family->ops = f->ops;
		f->family->n_ops = f->n_ops;
		f->family->mcgrps = f->group;
		f->family->n_mcgrps = f->group ? 1 : 0;
		err = genl_register_family(f->family);
		if (err)
			goto error;
		n_registered++;
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}
static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	return 0;
}
static void __net_exit ovs_exit_net(struct net *net)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	ovs_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);
	ovs_unlock();

	cancel_work_sync(&ovs_net->dp_notify_work);
}
static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};
static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = ovs_flow_init();
	if (err)
		goto error;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error:
	return err;
}
static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");